/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

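/*
 * Cache occupancy percentages (gc_stats.in_use) above which
 * should_writeback() stops accepting new writes for writeback:
 * CUTOFF_WRITEBACK_SYNC is the hard cutoff, CUTOFF_WRITEBACK the
 * threshold used for ordinary (non-sync, non-metadata) writes.
 */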
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

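/*
 * Limits on how much dirty data the writeback thread collects in a
 * single pass: at most MAX_WRITEBACKS_IN_PASS keys, totalling at most
 * MAX_WRITESIZE_IN_PASS 512-byte sectors (enforced in writeback.c).
 */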
#define MAX_WRITEBACKS_IN_PASS	5
#define MAX_WRITESIZE_IN_PASS	5000	/* *512b */

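/*
 * Default value and upper bound, in seconds, for the interval at which
 * the writeback rate is recalculated (dc->writeback_rate_update_seconds).
 */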
#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

/*
 * A share shift of 14 (i.e. units of 1/16384) is chosen so that each
 * backing device gets a reasonable fraction of the share, and the
 * arithmetic does not blow up until individual backing devices reach
 * a petabyte in size.
 */
#define WRITEBACK_SHARE_SHIFT	14

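/* Total number of dirty sectors on the backing device, summed per stripe. */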
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

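/* Map a backing-device sector offset to the index of its stripe. */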
static inline unsigned int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

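/*
 * Return true if any stripe touched by the nr_sectors starting at
 * offset already contains dirty data.
 */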
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	unsigned int stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

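/*
 * Decide whether a write bio should go through writeback: never when
 * the device is detaching or cache use exceeds CUTOFF_WRITEBACK_SYNC;
 * always when it touches an already-dirty stripe and partial stripe
 * writes are expensive; never when the caller would otherwise bypass
 * the cache (would_skip); otherwise only for sync, metadata or
 * high-priority bios, or while cache use is at or below CUTOFF_WRITEBACK.
 */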
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}

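/* Wake up the writeback thread, if it was created successfully. */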
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

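/*
 * Note that the device now holds dirty data: on the 0 -> 1 transition
 * of has_dirty, mark the backing device superblock BDEV_STATE_DIRTY
 * and wake the writeback thread.
 */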
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

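/* Non-inline writeback interface, defined in writeback.c. */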
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif