// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 *
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
7 * of the device.
8 *
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
13 *
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
16 *
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
20 *
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22 */
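/*
 * Illustrative sketch, not part of bcache itself: because the per-bucket
 * generation counter described above is only 8 bits wide, it wraps, so
 * "is this pointer stale?" checks must compare generations modulo 256
 * rather than with a plain '>'.  Something along these lines (names
 * hypothetical):
 *
 *	static inline bool gen_newer(uint8_t a, uint8_t b)
 *	{
 *		return (int8_t) (a - b) > 0;
 *	}
 *
 * A pointer whose recorded generation lags the bucket's current generation
 * refers to data that has since been freed; garbage collection drops such
 * stale pointers.
 */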
23
24#include "bcache.h"
25#include "btree.h"
26#include "debug.h"
27#include "extents.h"
28
29#include <linux/slab.h>
30#include <linux/bitops.h>
31#include <linux/hash.h>
32#include <linux/kthread.h>
33#include <linux/prefetch.h>
34#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <linux/sched/clock.h>
37#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>
40
41/*
42 * Todo:
43 * register_bcache: Return errors out to userspace correctly
44 *
45 * Writeback: don't undirty key until after a cache flush
46 *
47 * Create an iterator for key pointers
48 *
49 * On btree write error, mark bucket such that it won't be freed from the cache
50 *
51 * Journalling:
52 * Check for bad keys in replay
53 * Propagate barriers
54 * Refcount journal entries in journal_replay
55 *
56 * Garbage collection:
57 * Finish incremental gc
58 * Gc should free old UUIDs, data for invalid UUIDs
59 *
60 * Provide a way to list backing device UUIDs we have data cached for, and
61 * probably how long it's been since we've seen them, and a way to invalidate
62 * dirty data for devices that will never be attached again
63 *
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65 * that based on that and how much dirty data we have we can keep writeback
66 * from being starved
67 *
68 * Add a tracepoint or somesuch to watch for writeback starvation
69 *
70 * When btree depth > 1 and splitting an interior node, we have to make sure
71 * alloc_bucket() cannot fail. This should be true but is not completely
72 * obvious.
73 *
74 * Plugging?
75 *
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
78 *
79 * Superblock needs to be fleshed out for multiple cache devices
80 *
81 * Add a sysfs tunable for the number of writeback IOs in flight
82 *
83 * Add a sysfs tunable for the number of open data buckets
84 *
85 * IO tracking: Can we track when one process is doing io on behalf of another?
86 * IO tracking: Don't use just an average, weigh more recent stuff higher
87 *
88 * Test module load/unload
89 */
90
91#define MAX_NEED_GC 64
92#define MAX_SAVE_PRIO 72
93#define MAX_GC_TIMES 100
94#define MIN_GC_NODES 100
95#define GC_SLEEP_MS 100
96
97#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
98
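/*
 * PTR_HASH() below folds a key's first pointer (scaled down to bucket
 * granularity) together with that pointer's generation; it serves as the
 * identity of an on-disk btree node when looking it up in the in-memory
 * node cache (see mca_hash()/mca_find()).
 */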
99#define PTR_HASH(c, k) \
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101
static struct workqueue_struct *btree_io_wq;

#define insert_lock(s, b)	((b)->level <= (s)->lock)
105
106/*
107 * These macros are for recursing down the btree - they handle the details of
108 * locking and looking up nodes in the cache for you. They're best treated as
109 * mere syntax when reading code that uses them.
110 *
111 * op->lock determines whether we take a read or a write lock at a given depth.
112 * If you've got a read lock and find that you need a write lock (i.e. you're
113 * going to have to split), set op->lock and return -EINTR; btree_root() will
114 * call you again and you'll have the correct lock.
115 */
116
117/**
118 * btree - recurse down the btree on a specified key
119 * @fn: function to call, which will be passed the child node
120 * @key: key to recurse on
121 * @b: parent btree node
122 * @op: pointer to struct btree_op
123 */
124#define btree(fn, key, b, op, ...) \
125({ \
126 int _r, l = (b)->level - 1; \
127 bool _w = l <= (op)->lock; \
128 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
129 _w, b); \
130 if (!IS_ERR(_child)) { \
131 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
132 rw_unlock(_w, _child); \
133 } else \
134 _r = PTR_ERR(_child); \
135 _r; \
136})
137
138/**
139 * btree_root - call a function on the root of the btree
140 * @fn: function to call, which will be passed the child node
141 * @c: cache set
142 * @op: pointer to struct btree_op
143 */
144#define btree_root(fn, c, op, ...) \
145({ \
146 int _r = -EINTR; \
147 do { \
148 struct btree *_b = (c)->root; \
149 bool _w = insert_lock(op, _b); \
150 rw_lock(_w, _b, _b->level); \
151 if (_b == (c)->root && \
152 _w == insert_lock(op, _b)) { \
153 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
154 } \
155 rw_unlock(_w, _b); \
156 bch_cannibalize_unlock(c); \
157 if (_r == -EINTR) \
158 schedule(); \
159 } while (_r == -EINTR); \
160 \
161 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
162 _r; \
163})
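/*
 * A hedged sketch of the retry protocol described above; everything except
 * btree(), btree_root() and insert_lock() is hypothetical.  A recursive
 * operation that discovers it needs a write lock while holding only a read
 * lock bumps op->lock and bails out with -EINTR, and btree_root() simply
 * calls it again, this time taking write locks down to that depth:
 *
 *	static int bch_btree_example_op(struct btree *b, struct btree_op *op)
 *	{
 *		if (need_to_split(b) && !insert_lock(op, b)) {
 *			// write locks needed here and on the parent
 *			op->lock = b->level + 1;
 *			return -EINTR;	// btree_root() will retry
 *		}
 *		return b->level ? btree(example_op, &some_key, b, op) : 0;
 *	}
 *
 * The -EINTR unwinds to btree_root(), which drops its locks, possibly
 * waits, and restarts from the (possibly new) root.
 */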
164
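/*
 * write_block() returns the address, within the in-memory copy of the node,
 * of the first block that has not yet been written to disk - i.e. where the
 * bset currently being appended to lives.
 */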
165static inline struct bset *write_block(struct btree *b)
166{
167 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
168}
169
170static void bch_btree_init_next(struct btree *b)
171{
172 /* If not a leaf node, always sort */
173 if (b->level && b->keys.nsets)
174 bch_btree_sort(&b->keys, &b->c->sort);
175 else
176 bch_btree_sort_lazy(&b->keys, &b->c->sort);
177
178 if (b->written < btree_blocks(b))
179 bch_bset_init_next(&b->keys, write_block(b),
180 bset_magic(&b->c->sb));
181
182}
183
184/* Btree key manipulation */
185
186void bkey_put(struct cache_set *c, struct bkey *k)
187{
188 unsigned int i;
189
190 for (i = 0; i < KEY_PTRS(k); i++)
191 if (ptr_available(c, k, i))
192 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
193}
194
195/* Btree IO */
196
197static uint64_t btree_csum_set(struct btree *b, struct bset *i)
198{
199 uint64_t crc = b->key.ptr[0];
200 void *data = (void *) i + 8, *end = bset_bkey_last(i);
201
202 crc = bch_crc64_update(crc, data, end - data);
203 return crc ^ 0xffffffffffffffffULL;
204}
205
206void bch_btree_node_read_done(struct btree *b)
207{
208 const char *err = "bad btree header";
209 struct bset *i = btree_bset_first(b);
210 struct btree_iter *iter;
211
	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
218 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
219 iter->used = 0;
220
221#ifdef CONFIG_BCACHE_DEBUG
222 iter->b = &b->keys;
223#endif
224
225 if (!i->seq)
226 goto err;
227
228 for (;
229 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
230 i = write_block(b)) {
231 err = "unsupported bset version";
232 if (i->version > BCACHE_BSET_VERSION)
233 goto err;
234
235 err = "bad btree header";
236 if (b->written + set_blocks(i, block_bytes(b->c)) >
237 btree_blocks(b))
238 goto err;
239
240 err = "bad magic";
241 if (i->magic != bset_magic(&b->c->sb))
242 goto err;
243
244 err = "bad checksum";
245 switch (i->version) {
246 case 0:
247 if (i->csum != csum_set(i))
248 goto err;
249 break;
250 case BCACHE_BSET_VERSION:
251 if (i->csum != btree_csum_set(b, i))
252 goto err;
253 break;
254 }
255
256 err = "empty set";
257 if (i != b->keys.set[0].data && !i->keys)
258 goto err;
259
260 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
261
262 b->written += set_blocks(i, block_bytes(b->c));
263 }
264
265 err = "corrupted btree";
266 for (i = write_block(b);
267 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
268 i = ((void *) i) + block_bytes(b->c))
269 if (i->seq == b->keys.set[0].data->seq)
270 goto err;
271
272 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
273
274 i = b->keys.set[0].data;
275 err = "short btree key";
276 if (b->keys.set[0].size &&
277 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
278 goto err;
279
280 if (b->written < btree_blocks(b))
281 bch_bset_init_next(&b->keys, write_block(b),
282 bset_magic(&b->c->sb));
283out:
284 mempool_free(iter, &b->c->fill_iter);
285 return;
286err:
287 set_btree_node_io_error(b);
288 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
289 err, PTR_BUCKET_NR(b->c, &b->key, 0),
290 bset_block_offset(b, i), i->keys);
291 goto out;
292}
293
294static void btree_node_read_endio(struct bio *bio)
295{
296 struct closure *cl = bio->bi_private;
297
298 closure_put(cl);
299}
300
301static void bch_btree_node_read(struct btree *b)
302{
303 uint64_t start_time = local_clock();
304 struct closure cl;
305 struct bio *bio;
306
307 trace_bcache_btree_read(b);
308
309 closure_init_stack(&cl);
310
311 bio = bch_bbio_alloc(b->c);
312 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
313 bio->bi_end_io = btree_node_read_endio;
314 bio->bi_private = &cl;
315 bio->bi_opf = REQ_OP_READ | REQ_META;
316
317 bch_bio_map(bio, b->keys.set[0].data);
318
319 bch_submit_bbio(bio, b->c, &b->key, 0);
320 closure_sync(&cl);
321
322 if (bio->bi_status)
323 set_btree_node_io_error(b);
324
325 bch_bbio_free(bio, b->c);
326
327 if (btree_node_io_error(b))
328 goto err;
329
330 bch_btree_node_read_done(b);
331 bch_time_stats_update(&b->c->btree_read_time, start_time);
332
333 return;
334err:
335 bch_cache_set_error(b->c, "io error reading bucket %zu",
336 PTR_BUCKET_NR(b->c, &b->key, 0));
337}
338
339static void btree_complete_write(struct btree *b, struct btree_write *w)
340{
341 if (w->prio_blocked &&
342 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
343 wake_up_allocators(b->c);
344
345 if (w->journal) {
346 atomic_dec_bug(w->journal);
347 __closure_wake_up(&b->c->journal.wait);
348 }
349
350 w->prio_blocked = 0;
351 w->journal = NULL;
352}
353
354static void btree_node_write_unlock(struct closure *cl)
355{
356 struct btree *b = container_of(cl, struct btree, io);
357
358 up(&b->io_mutex);
359}
360
361static void __btree_node_write_done(struct closure *cl)
362{
363 struct btree *b = container_of(cl, struct btree, io);
364 struct btree_write *w = btree_prev_write(b);
365
366 bch_bbio_free(b->bio, b->c);
367 b->bio = NULL;
368 btree_complete_write(b, w);
369
370 if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

373 closure_return_with_destructor(cl, btree_node_write_unlock);
374}
375
376static void btree_node_write_done(struct closure *cl)
377{
378 struct btree *b = container_of(cl, struct btree, io);
379
380 bio_free_pages(b->bio);
381 __btree_node_write_done(cl);
382}
383
384static void btree_node_write_endio(struct bio *bio)
385{
386 struct closure *cl = bio->bi_private;
387 struct btree *b = container_of(cl, struct btree, io);
388
389 if (bio->bi_status)
390 set_btree_node_io_error(b);
391
392 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
393 closure_put(cl);
394}
395
396static void do_btree_node_write(struct btree *b)
397{
398 struct closure *cl = &b->io;
399 struct bset *i = btree_bset_last(b);
400 BKEY_PADDED(key) k;
401
402 i->version = BCACHE_BSET_VERSION;
403 i->csum = btree_csum_set(b, i);
404
405 BUG_ON(b->bio);
406 b->bio = bch_bbio_alloc(b->c);
407
408 b->bio->bi_end_io = btree_node_write_endio;
409 b->bio->bi_private = cl;
410 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
411 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
412 bch_bio_map(b->bio, i);
413
414 /*
415 * If we're appending to a leaf node, we don't technically need FUA -
416 * this write just needs to be persisted before the next journal write,
417 * which will be marked FLUSH|FUA.
418 *
419 * Similarly if we're writing a new btree root - the pointer is going to
420 * be in the next journal entry.
421 *
422 * But if we're writing a new btree node (that isn't a root) or
423 * appending to a non leaf btree node, we need either FUA or a flush
424 * when we write the parent with the new pointer. FUA is cheaper than a
425 * flush, and writes appending to leaf nodes aren't blocking anything so
426 * just make all btree node writes FUA to keep things sane.
427 */
428
429 bkey_copy(&k.key, &b->key);
430 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
431 bset_sector_offset(&b->keys, i));
432
433 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		struct bio_vec *bv;
		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, b->bio, iter_all) {
			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
			addr += PAGE_SIZE;
		}

443 bch_submit_bbio(b->bio, b->c, &k.key, 0);
444
445 continue_at(cl, btree_node_write_done, NULL);
446 } else {
447 /*
448 * No problem for multipage bvec since the bio is
449 * just allocated
450 */
451 b->bio->bi_vcnt = 0;
452 bch_bio_map(b->bio, i);
453
454 bch_submit_bbio(b->bio, b->c, &k.key, 0);
455
456 closure_sync(cl);
457 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
458 }
459}
460
461void __bch_btree_node_write(struct btree *b, struct closure *parent)
462{
463 struct bset *i = btree_bset_last(b);
464
465 lockdep_assert_held(&b->write_lock);
466
467 trace_bcache_btree_write(b);
468
469 BUG_ON(current->bio_list);
470 BUG_ON(b->written >= btree_blocks(b));
471 BUG_ON(b->written && !i->keys);
472 BUG_ON(btree_bset_first(b)->seq != i->seq);
473 bch_check_keys(&b->keys, "writing");
474
475 cancel_delayed_work(&b->work);
476
477 /* If caller isn't waiting for write, parent refcount is cache set */
478 down(&b->io_mutex);
479 closure_init(&b->io, parent ?: &b->c->cl);
480
481 clear_bit(BTREE_NODE_dirty, &b->flags);
482 change_bit(BTREE_NODE_write_idx, &b->flags);
483
484 do_btree_node_write(b);
485
486 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
487 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
488
489 b->written += set_blocks(i, block_bytes(b->c));
490}
491
492void bch_btree_node_write(struct btree *b, struct closure *parent)
493{
494 unsigned int nsets = b->keys.nsets;
495
496 lockdep_assert_held(&b->lock);
497
498 __bch_btree_node_write(b, parent);
499
500 /*
501 * do verify if there was more than one set initially (i.e. we did a
502 * sort) and we sorted down to a single set:
503 */
504 if (nsets && !b->keys.nsets)
505 bch_btree_verify(b);
506
507 bch_btree_init_next(b);
508}
509
510static void bch_btree_node_write_sync(struct btree *b)
511{
512 struct closure cl;
513
514 closure_init_stack(&cl);
515
516 mutex_lock(&b->write_lock);
517 bch_btree_node_write(b, &cl);
518 mutex_unlock(&b->write_lock);
519
520 closure_sync(&cl);
521}
522
523static void btree_node_write_work(struct work_struct *w)
524{
525 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
526
527 mutex_lock(&b->write_lock);
528 if (btree_node_dirty(b))
529 __bch_btree_node_write(b, NULL);
530 mutex_unlock(&b->write_lock);
531}
532
533static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
534{
535 struct bset *i = btree_bset_last(b);
536 struct btree_write *w = btree_current_write(b);
537
538 lockdep_assert_held(&b->write_lock);
539
540 BUG_ON(!b->written);
541 BUG_ON(!i->keys);
542
543 if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

546 set_btree_node_dirty(b);
547
548 if (journal_ref) {
549 if (w->journal &&
550 journal_pin_cmp(b->c, w->journal, journal_ref)) {
551 atomic_dec_bug(w->journal);
552 w->journal = NULL;
553 }
554
555 if (!w->journal) {
556 w->journal = journal_ref;
557 atomic_inc(w->journal);
558 }
559 }
560
561 /* Force write if set is too big */
562 if (set_bytes(i) > PAGE_SIZE - 48 &&
563 !current->bio_list)
564 bch_btree_node_write(b, NULL);
565}
566
567/*
568 * Btree in memory cache - allocation/freeing
569 * mca -> memory cache
570 */
571
572#define mca_reserve(c) (((c->root && c->root->level) \
573 ? c->root->level : 1) * 8 + 16)
574#define mca_can_free(c) \
575 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
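/*
 * Worked example (numbers purely illustrative): with a btree of depth 2
 * (c->root->level == 2), mca_reserve() is 2 * 8 + 16 = 32 in-memory nodes
 * that the shrinker must never reclaim; if c->btree_cache_used is 100,
 * mca_can_free() reports 100 - 32 = 68 nodes as reclaimable.
 */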
576
577static void mca_data_free(struct btree *b)
578{
579 BUG_ON(b->io_mutex.count != 1);
580
581 bch_btree_keys_free(&b->keys);
582
583 b->c->btree_cache_used--;
584 list_move(&b->list, &b->c->btree_cache_freed);
585}
586
587static void mca_bucket_free(struct btree *b)
588{
589 BUG_ON(btree_node_dirty(b));
590
591 b->key.ptr[0] = 0;
592 hlist_del_init_rcu(&b->hash);
593 list_move(&b->list, &b->c->btree_cache_freeable);
594}
595
596static unsigned int btree_order(struct bkey *k)
597{
598 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
599}
600
601static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
602{
603 if (!bch_btree_keys_alloc(&b->keys,
604 max_t(unsigned int,
605 ilog2(b->c->btree_pages),
606 btree_order(k)),
607 gfp)) {
608 b->c->btree_cache_used++;
609 list_move(&b->list, &b->c->btree_cache);
610 } else {
611 list_move(&b->list, &b->c->btree_cache_freed);
612 }
613}
614
615static struct btree *mca_bucket_alloc(struct cache_set *c,
616 struct bkey *k, gfp_t gfp)
617{
	/*
	 * kzalloc() is necessary here for initialization,
	 * see code comments in bch_btree_keys_init().
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
623
624 if (!b)
625 return NULL;
626
627 init_rwsem(&b->lock);
628 lockdep_set_novalidate_class(&b->lock);
629 mutex_init(&b->write_lock);
630 lockdep_set_novalidate_class(&b->write_lock);
631 INIT_LIST_HEAD(&b->list);
632 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
633 b->c = c;
634 sema_init(&b->io_mutex, 1);
635
636 mca_data_alloc(b, k, gfp);
637 return b;
638}
639
640static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
641{
642 struct closure cl;
643
644 closure_init_stack(&cl);
645 lockdep_assert_held(&b->c->bucket_lock);
646
647 if (!down_write_trylock(&b->lock))
648 return -ENOMEM;
649
650 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
651
652 if (b->keys.page_order < min_order)
653 goto out_unlock;
654
655 if (!flush) {
656 if (btree_node_dirty(b))
657 goto out_unlock;
658
659 if (down_trylock(&b->io_mutex))
660 goto out_unlock;
661 up(&b->io_mutex);
662 }
663
retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking the BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and the BTREE_NODE_journal_flush bit is cleared by
	 * btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
684 __bch_btree_node_write(b, &cl);
685 mutex_unlock(&b->write_lock);
686
687 closure_sync(&cl);
688
689 /* wait for any in flight btree write */
690 down(&b->io_mutex);
691 up(&b->io_mutex);
692
693 return 0;
694out_unlock:
695 rw_unlock(true, b);
696 return -ENOMEM;
697}
698
699static unsigned long bch_mca_scan(struct shrinker *shrink,
700 struct shrink_control *sc)
701{
702 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
703 struct btree *b, *t;
704 unsigned long i, nr = sc->nr_to_scan;
705 unsigned long freed = 0;
706 unsigned int btree_cache_used;
707
708 if (c->shrinker_disabled)
709 return SHRINK_STOP;
710
711 if (c->btree_cache_alloc_lock)
712 return SHRINK_STOP;
713
714 /* Return -1 if we can't do anything right now */
715 if (sc->gfp_mask & __GFP_IO)
716 mutex_lock(&c->bucket_lock);
717 else if (!mutex_trylock(&c->bucket_lock))
718 return -1;
719
720 /*
721 * It's _really_ critical that we don't free too many btree nodes - we
722 * have to always leave ourselves a reserve. The reserve is how we
723 * guarantee that allocating memory for a new btree node can always
724 * succeed, so that inserting keys into the btree can always succeed and
725 * IO can always make forward progress:
726 */
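	/*
	 * The shrinker accounts in page-sized objects while we reclaim whole
	 * btree nodes of c->btree_pages pages each, so scale the request down
	 * and the result back up.  For example (illustrative numbers only):
	 * with btree_pages == 16, a request to scan 128 objects examines
	 * 128 / 16 = 8 nodes, and freeing all of them is reported as
	 * 8 * 16 = 128 objects freed.
	 */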
727 nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));
731
732 i = 0;
733 btree_cache_used = c->btree_cache_used;
734 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
735 if (nr <= 0)
736 goto out;
737
738 if (++i > 3 &&
739 !mca_reap(b, 0, false)) {
740 mca_data_free(b);
741 rw_unlock(true, b);
742 freed++;
743 }
744 nr--;
745 }
746
747 for (; (nr--) && i < btree_cache_used; i++) {
748 if (list_empty(&c->btree_cache))
749 goto out;
750
751 b = list_first_entry(&c->btree_cache, struct btree, list);
752 list_rotate_left(&c->btree_cache);
753
754 if (!b->accessed &&
755 !mca_reap(b, 0, false)) {
756 mca_bucket_free(b);
757 mca_data_free(b);
758 rw_unlock(true, b);
759 freed++;
760 } else
761 b->accessed = 0;
762 }
763out:
764 mutex_unlock(&c->bucket_lock);
765 return freed * c->btree_pages;
766}
767
768static unsigned long bch_mca_count(struct shrinker *shrink,
769 struct shrink_control *sc)
770{
771 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
772
773 if (c->shrinker_disabled)
774 return 0;
775
776 if (c->btree_cache_alloc_lock)
777 return 0;
778
779 return mca_can_free(c) * c->btree_pages;
780}
781
782void bch_btree_cache_free(struct cache_set *c)
783{
784 struct btree *b;
785 struct closure cl;
786
787 closure_init_stack(&cl);
788
789 if (c->shrink.list.next)
790 unregister_shrinker(&c->shrink);
791
792 mutex_lock(&c->bucket_lock);
793
794#ifdef CONFIG_BCACHE_DEBUG
795 if (c->verify_data)
796 list_move(&c->verify_data->list, &c->btree_cache);
797
798 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
799#endif
800
801 list_splice(&c->btree_cache_freeable,
802 &c->btree_cache);
803
804 while (!list_empty(&c->btree_cache)) {
805 b = list_first_entry(&c->btree_cache, struct btree, list);
806
		/*
		 * This function is called by cache_set_free(); there is no
		 * I/O request on the cache at this point, so it is
		 * unnecessary to acquire b->write_lock before clearing
		 * BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
817 }
818
819 while (!list_empty(&c->btree_cache_freed)) {
820 b = list_first_entry(&c->btree_cache_freed,
821 struct btree, list);
822 list_del(&b->list);
823 cancel_delayed_work_sync(&b->work);
824 kfree(b);
825 }
826
827 mutex_unlock(&c->bucket_lock);
828}
829
830int bch_btree_cache_alloc(struct cache_set *c)
831{
832 unsigned int i;
833
834 for (i = 0; i < mca_reserve(c); i++)
835 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
836 return -ENOMEM;
837
838 list_splice_init(&c->btree_cache,
839 &c->btree_cache_freeable);
840
841#ifdef CONFIG_BCACHE_DEBUG
842 mutex_init(&c->verify_lock);
843
844 c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c)));

847 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
848
849 if (c->verify_data &&
850 c->verify_data->keys.set->data)
851 list_del_init(&c->verify_data->list);
852 else
853 c->verify_data = NULL;
854#endif
855
856 c->shrink.count_objects = bch_mca_count;
857 c->shrink.scan_objects = bch_mca_scan;
858 c->shrink.seeks = 4;
859 c->shrink.batch = c->btree_pages * 2;
860
861 if (register_shrinker(&c->shrink))
862 pr_warn("bcache: %s: could not register shrinker",
863 __func__);
864
865 return 0;
866}
867
868/* Btree in memory cache - hash table */
869
870static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
871{
872 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
873}
874
875static struct btree *mca_find(struct cache_set *c, struct bkey *k)
876{
877 struct btree *b;
878
879 rcu_read_lock();
880 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
881 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
882 goto out;
883 b = NULL;
884out:
885 rcu_read_unlock();
886 return b;
887}
888
889static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
890{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

903 return 0;
904}
905
906static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
907 struct bkey *k)
908{
909 struct btree *b;
910
911 trace_bcache_btree_cache_cannibalize(c);
912
913 if (mca_cannibalize_lock(c, op))
914 return ERR_PTR(-EINTR);
915
916 list_for_each_entry_reverse(b, &c->btree_cache, list)
917 if (!mca_reap(b, btree_order(k), false))
918 return b;
919
920 list_for_each_entry_reverse(b, &c->btree_cache, list)
921 if (!mca_reap(b, btree_order(k), true))
922 return b;
923
924 WARN(1, "btree cache cannibalize failed\n");
925 return ERR_PTR(-ENOMEM);
926}
927
928/*
929 * We can only have one thread cannibalizing other cached btree nodes at a time,
930 * or we'll deadlock. We use an open coded mutex to ensure that, which a
931 * cannibalize_bucket() will take. This means every time we unlock the root of
932 * the btree, we need to release this lock if we have it held.
933 */
934static void bch_cannibalize_unlock(struct cache_set *c)
935{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}
943
944static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
945 struct bkey *k, int level)
946{
947 struct btree *b;
948
949 BUG_ON(current->bio_list);
950
951 lockdep_assert_held(&c->bucket_lock);
952
953 if (mca_find(c, k))
954 return NULL;
955
	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there are any freed nodes there:
	 */
959 list_for_each_entry(b, &c->btree_cache_freeable, list)
960 if (!mca_reap(b, btree_order(k), false))
961 goto out;
962
963 /* We never free struct btree itself, just the memory that holds the on
964 * disk node. Check the freed list before allocating a new one:
965 */
966 list_for_each_entry(b, &c->btree_cache_freed, list)
967 if (!mca_reap(b, 0, false)) {
968 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
969 if (!b->keys.set[0].data)
970 goto err;
971 else
972 goto out;
973 }
974
975 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
976 if (!b)
977 goto err;
978
979 BUG_ON(!down_write_trylock(&b->lock));
980 if (!b->keys.set->data)
981 goto err;
982out:
983 BUG_ON(b->io_mutex.count != 1);
984
985 bkey_copy(&b->key, k);
986 list_move(&b->list, &c->btree_cache);
987 hlist_del_init_rcu(&b->hash);
988 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
989
990 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
991 b->parent = (void *) ~0UL;
992 b->flags = 0;
993 b->written = 0;
994 b->level = level;
995
996 if (!b->level)
997 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
998 &b->c->expensive_debug_checks);
999 else
1000 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
1001 &b->c->expensive_debug_checks);
1002
1003 return b;
1004err:
1005 if (b)
1006 rw_unlock(true, b);
1007
1008 b = mca_cannibalize(c, op, k);
1009 if (!IS_ERR(b))
1010 goto out;
1011
1012 return b;
1013}
1014
1015/*
1016 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
1017 * in from disk if necessary.
1018 *
1019 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
1020 *
1021 * The btree node will have either a read or a write lock held, depending on
1022 * level and op->lock.
1023 */
1024struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
1025 struct bkey *k, int level, bool write,
1026 struct btree *parent)
1027{
1028 int i = 0;
1029 struct btree *b;
1030
1031 BUG_ON(level < 0);
1032retry:
1033 b = mca_find(c, k);
1034
1035 if (!b) {
1036 if (current->bio_list)
1037 return ERR_PTR(-EAGAIN);
1038
1039 mutex_lock(&c->bucket_lock);
1040 b = mca_alloc(c, op, k, level);
1041 mutex_unlock(&c->bucket_lock);
1042
1043 if (!b)
1044 goto retry;
1045 if (IS_ERR(b))
1046 return b;
1047
1048 bch_btree_node_read(b);
1049
1050 if (!write)
1051 downgrade_write(&b->lock);
1052 } else {
1053 rw_lock(write, b, level);
1054 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1055 rw_unlock(write, b);
1056 goto retry;
1057 }
1058 BUG_ON(b->level != level);
1059 }
1060
1061 if (btree_node_io_error(b)) {
1062 rw_unlock(write, b);
1063 return ERR_PTR(-EIO);
1064 }
1065
1066 BUG_ON(!b->written);
1067
1068 b->parent = parent;
1069 b->accessed = 1;
1070
1071 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1072 prefetch(b->keys.set[i].tree);
1073 prefetch(b->keys.set[i].data);
1074 }
1075
1076 for (; i <= b->keys.nsets; i++)
1077 prefetch(b->keys.set[i].data);
1078
1079 return b;
1080}
1081
1082static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1083{
1084 struct btree *b;
1085
1086 mutex_lock(&parent->c->bucket_lock);
1087 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1088 mutex_unlock(&parent->c->bucket_lock);
1089
1090 if (!IS_ERR_OR_NULL(b)) {
1091 b->parent = parent;
1092 bch_btree_node_read(b);
1093 rw_unlock(true, b);
1094 }
1095}
1096
1097/* Btree alloc */
1098
1099static void btree_node_free(struct btree *b)
1100{
1101 trace_bcache_btree_node_free(b);
1102
1103 BUG_ON(b == b->c->root);
1104
retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and being flushed in
	 * btree_flush_write(), delay and retry until the
	 * BTREE_NODE_journal_flush bit is cleared; only then is it safe to
	 * free the btree node here. Otherwise this btree node would be freed
	 * while the journal code is still flushing it (a race condition).
	 */
	if (btree_node_journal_flush(b)) {
		mutex_unlock(&b->write_lock);
		pr_debug("bnode %p journal_flush set, retry", b);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b)) {
		btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);
	}

1125 mutex_unlock(&b->write_lock);
1126
1127 cancel_delayed_work(&b->work);
1128
1129 mutex_lock(&b->c->bucket_lock);
1130 bch_bucket_free(b->c, &b->key);
1131 mca_bucket_free(b);
1132 mutex_unlock(&b->c->bucket_lock);
1133}
1134
1135struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1136 int level, bool wait,
1137 struct btree *parent)
1138{
1139 BKEY_PADDED(key) k;
1140 struct btree *b = ERR_PTR(-EAGAIN);
1141
1142 mutex_lock(&c->bucket_lock);
1143retry:
1144 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1145 goto err;
1146
1147 bkey_put(c, &k.key);
1148 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1149
1150 b = mca_alloc(c, op, &k.key, level);
1151 if (IS_ERR(b))
1152 goto err_free;
1153
1154 if (!b) {
1155 cache_bug(c,
1156 "Tried to allocate bucket that was in btree cache");
1157 goto retry;
1158 }
1159
1160 b->accessed = 1;
1161 b->parent = parent;
1162 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1163
1164 mutex_unlock(&c->bucket_lock);
1165
1166 trace_bcache_btree_node_alloc(b);
1167 return b;
1168err_free:
1169 bch_bucket_free(c, &k.key);
1170err:
1171 mutex_unlock(&c->bucket_lock);
1172
1173 trace_bcache_btree_node_alloc_fail(c);
1174 return b;
1175}
1176
1177static struct btree *bch_btree_node_alloc(struct cache_set *c,
1178 struct btree_op *op, int level,
1179 struct btree *parent)
1180{
1181 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1182}
1183
1184static struct btree *btree_node_alloc_replacement(struct btree *b,
1185 struct btree_op *op)
1186{
1187 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1188
1189 if (!IS_ERR_OR_NULL(n)) {
1190 mutex_lock(&n->write_lock);
1191 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1192 bkey_copy_key(&n->key, &b->key);
1193 mutex_unlock(&n->write_lock);
1194 }
1195
1196 return n;
1197}
1198
1199static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1200{
1201 unsigned int i;
1202
1203 mutex_lock(&b->c->bucket_lock);
1204
1205 atomic_inc(&b->c->prio_blocked);
1206
1207 bkey_copy(k, &b->key);
1208 bkey_copy_key(k, &ZERO_KEY);
1209
1210 for (i = 0; i < KEY_PTRS(k); i++)
1211 SET_PTR_GEN(k, i,
1212 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1213 PTR_BUCKET(b->c, &b->key, i)));
1214
1215 mutex_unlock(&b->c->bucket_lock);
1216}
1217
1218static int btree_check_reserve(struct btree *b, struct btree_op *op)
1219{
1220 struct cache_set *c = b->c;
1221 struct cache *ca;
1222 unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1223
1224 mutex_lock(&c->bucket_lock);
1225
1226 for_each_cache(ca, c, i)
1227 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1228 if (op)
1229 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1230 TASK_UNINTERRUPTIBLE);
1231 mutex_unlock(&c->bucket_lock);
1232 return -EINTR;
1233 }
1234
1235 mutex_unlock(&c->bucket_lock);
1236
1237 return mca_cannibalize_lock(b->c, op);
1238}
1239
1240/* Garbage collection */
1241
1242static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1243 struct bkey *k)
1244{
1245 uint8_t stale = 0;
1246 unsigned int i;
1247 struct bucket *g;
1248
	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes
	 * as freed, but since ptr_bad() returns true we'll never actually use
	 * them for anything, and thus we don't want to mark their pointers
	 * here.
	 */
1254 if (!bkey_cmp(k, &ZERO_KEY))
1255 return stale;
1256
1257 for (i = 0; i < KEY_PTRS(k); i++) {
1258 if (!ptr_available(c, k, i))
1259 continue;
1260
1261 g = PTR_BUCKET(c, k, i);
1262
1263 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1264 g->last_gc = PTR_GEN(k, i);
1265
1266 if (ptr_stale(c, k, i)) {
1267 stale = max(stale, ptr_stale(c, k, i));
1268 continue;
1269 }
1270
1271 cache_bug_on(GC_MARK(g) &&
1272 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1273 c, "inconsistent ptrs: mark = %llu, level = %i",
1274 GC_MARK(g), level);
1275
1276 if (level)
1277 SET_GC_MARK(g, GC_MARK_METADATA);
1278 else if (KEY_DIRTY(k))
1279 SET_GC_MARK(g, GC_MARK_DIRTY);
1280 else if (!GC_MARK(g))
1281 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1282
1283 /* guard against overflow */
1284 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1285 GC_SECTORS_USED(g) + KEY_SIZE(k),
1286 MAX_GC_SECTORS_USED));
1287
1288 BUG_ON(!GC_SECTORS_USED(g));
1289 }
1290
1291 return stale;
1292}
1293
1294#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1295
1296void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1297{
1298 unsigned int i;
1299
1300 for (i = 0; i < KEY_PTRS(k); i++)
1301 if (ptr_available(c, k, i) &&
1302 !ptr_stale(c, k, i)) {
1303 struct bucket *b = PTR_BUCKET(c, k, i);
1304
1305 b->gen = PTR_GEN(k, i);
1306
1307 if (level && bkey_cmp(k, &ZERO_KEY))
1308 b->prio = BTREE_PRIO;
1309 else if (!level && b->prio == BTREE_PRIO)
1310 b->prio = INITIAL_PRIO;
1311 }
1312
1313 __bch_btree_mark_key(c, level, k);
1314}
1315
1316void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1317{
1318 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1319}
1320
1321static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1322{
1323 uint8_t stale = 0;
1324 unsigned int keys = 0, good_keys = 0;
1325 struct bkey *k;
1326 struct btree_iter iter;
1327 struct bset_tree *t;
1328
1329 gc->nodes++;
1330
1331 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1332 stale = max(stale, btree_mark_key(b, k));
1333 keys++;
1334
1335 if (bch_ptr_bad(&b->keys, k))
1336 continue;
1337
1338 gc->key_bytes += bkey_u64s(k);
1339 gc->nkeys++;
1340 good_keys++;
1341
1342 gc->data += KEY_SIZE(k);
1343 }
1344
1345 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1346 btree_bug_on(t->size &&
1347 bset_written(&b->keys, t) &&
1348 bkey_cmp(&b->key, &t->end) < 0,
1349 b, "found short btree key in gc");
1350
1351 if (b->c->gc_always_rewrite)
1352 return true;
1353
1354 if (stale > 10)
1355 return true;
1356
1357 if ((keys - good_keys) * 2 > keys)
1358 return true;
1359
1360 return false;
1361}
1362
1363#define GC_MERGE_NODES 4U
1364
1365struct gc_merge_info {
1366 struct btree *b;
1367 unsigned int keys;
1368};
1369
1370static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1371 struct keylist *insert_keys,
1372 atomic_t *journal_ref,
1373 struct bkey *replace_key);
1374
1375static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1376 struct gc_stat *gc, struct gc_merge_info *r)
1377{
1378 unsigned int i, nodes = 0, keys = 0, blocks;
1379 struct btree *new_nodes[GC_MERGE_NODES];
1380 struct keylist keylist;
1381 struct closure cl;
1382 struct bkey *k;
1383
1384 bch_keylist_init(&keylist);
1385
1386 if (btree_check_reserve(b, NULL))
1387 return 0;
1388
1389 memset(new_nodes, 0, sizeof(new_nodes));
1390 closure_init_stack(&cl);
1391
1392 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1393 keys += r[nodes++].keys;
1394
1395 blocks = btree_default_blocks(b->c) * 2 / 3;
1396
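	/*
	 * Only coalesce if all the keys gathered from 'nodes' nodes would fit
	 * into (nodes - 1) nodes, each filled to at most ~2/3 of its blocks.
	 */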
1397 if (nodes < 2 ||
1398 __set_blocks(b->keys.set[0].data, keys,
1399 block_bytes(b->c)) > blocks * (nodes - 1))
1400 return 0;
1401
1402 for (i = 0; i < nodes; i++) {
1403 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1404 if (IS_ERR_OR_NULL(new_nodes[i]))
1405 goto out_nocoalesce;
1406 }
1407
1408 /*
1409 * We have to check the reserve here, after we've allocated our new
1410 * nodes, to make sure the insert below will succeed - we also check
1411 * before as an optimization to potentially avoid a bunch of expensive
1412 * allocs/sorts
1413 */
1414 if (btree_check_reserve(b, NULL))
1415 goto out_nocoalesce;
1416
1417 for (i = 0; i < nodes; i++)
1418 mutex_lock(&new_nodes[i]->write_lock);
1419
1420 for (i = nodes - 1; i > 0; --i) {
1421 struct bset *n1 = btree_bset_first(new_nodes[i]);
1422 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1423 struct bkey *k, *last = NULL;
1424
1425 keys = 0;
1426
1427 if (i > 1) {
1428 for (k = n2->start;
1429 k < bset_bkey_last(n2);
1430 k = bkey_next(k)) {
1431 if (__set_blocks(n1, n1->keys + keys +
1432 bkey_u64s(k),
1433 block_bytes(b->c)) > blocks)
1434 break;
1435
1436 last = k;
1437 keys += bkey_u64s(k);
1438 }
1439 } else {
1440 /*
1441 * Last node we're not getting rid of - we're getting
1442 * rid of the node at r[0]. Have to try and fit all of
1443 * the remaining keys into this node; we can't ensure
1444 * they will always fit due to rounding and variable
1445 * length keys (shouldn't be possible in practice,
1446 * though)
1447 */
1448 if (__set_blocks(n1, n1->keys + n2->keys,
1449 block_bytes(b->c)) >
1450 btree_blocks(new_nodes[i]))
				goto out_unlock_nocoalesce;

1453 keys = n2->keys;
1454 /* Take the key of the node we're getting rid of */
1455 last = &r->b->key;
1456 }
1457
1458 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1459 btree_blocks(new_nodes[i]));
1460
1461 if (last)
1462 bkey_copy_key(&new_nodes[i]->key, last);
1463
1464 memcpy(bset_bkey_last(n1),
1465 n2->start,
1466 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1467
1468 n1->keys += keys;
1469 r[i].keys = n1->keys;
1470
1471 memmove(n2->start,
1472 bset_bkey_idx(n2, keys),
1473 (void *) bset_bkey_last(n2) -
1474 (void *) bset_bkey_idx(n2, keys));
1475
1476 n2->keys -= keys;
1477
1478 if (__bch_keylist_realloc(&keylist,
1479 bkey_u64s(&new_nodes[i]->key)))
			goto out_unlock_nocoalesce;

1482 bch_btree_node_write(new_nodes[i], &cl);
1483 bch_keylist_add(&keylist, &new_nodes[i]->key);
1484 }
1485
1486 for (i = 0; i < nodes; i++)
1487 mutex_unlock(&new_nodes[i]->write_lock);
1488
1489 closure_sync(&cl);
1490
1491 /* We emptied out this node */
1492 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1493 btree_node_free(new_nodes[0]);
1494 rw_unlock(true, new_nodes[0]);
1495 new_nodes[0] = NULL;
1496
1497 for (i = 0; i < nodes; i++) {
1498 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1499 goto out_nocoalesce;
1500
1501 make_btree_freeing_key(r[i].b, keylist.top);
1502 bch_keylist_push(&keylist);
1503 }
1504
1505 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1506 BUG_ON(!bch_keylist_empty(&keylist));
1507
1508 for (i = 0; i < nodes; i++) {
1509 btree_node_free(r[i].b);
1510 rw_unlock(true, r[i].b);
1511
1512 r[i].b = new_nodes[i];
1513 }
1514
1515 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1516 r[nodes - 1].b = ERR_PTR(-EINTR);
1517
1518 trace_bcache_btree_gc_coalesce(nodes);
1519 gc->nodes--;
1520
1521 bch_keylist_free(&keylist);
1522
1523 /* Invalidated our iterator */
1524 return -EINTR;
1525
out_unlock_nocoalesce:
	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);
	bch_keylist_free(&keylist);

1538 for (i = 0; i < nodes; i++)
1539 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1540 btree_node_free(new_nodes[i]);
1541 rw_unlock(true, new_nodes[i]);
1542 }
1543 return 0;
1544}
1545
1546static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1547 struct btree *replace)
1548{
1549 struct keylist keys;
1550 struct btree *n;
1551
1552 if (btree_check_reserve(b, NULL))
1553 return 0;
1554
1555 n = btree_node_alloc_replacement(replace, NULL);
1556
1557 /* recheck reserve after allocating replacement node */
1558 if (btree_check_reserve(b, NULL)) {
1559 btree_node_free(n);
1560 rw_unlock(true, n);
1561 return 0;
1562 }
1563
1564 bch_btree_node_write_sync(n);
1565
1566 bch_keylist_init(&keys);
1567 bch_keylist_add(&keys, &n->key);
1568
1569 make_btree_freeing_key(replace, keys.top);
1570 bch_keylist_push(&keys);
1571
1572 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1573 BUG_ON(!bch_keylist_empty(&keys));
1574
1575 btree_node_free(replace);
1576 rw_unlock(true, n);
1577
1578 /* Invalidated our iterator */
1579 return -EINTR;
1580}
1581
1582static unsigned int btree_gc_count_keys(struct btree *b)
1583{
1584 struct bkey *k;
1585 struct btree_iter iter;
1586 unsigned int ret = 0;
1587
1588 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1589 ret += bkey_u64s(k);
1590
1591 return ret;
1592}
1593
1594static size_t btree_gc_min_nodes(struct cache_set *c)
1595{
1596 size_t min_nodes;
1597
	/*
	 * Incremental GC pauses for 100ms whenever front-end I/O arrives.
	 * If GC processed only a constant number (100) of nodes per pass,
	 * a large btree would make GC take a very long time, and the
	 * front-end I/Os could run out of buckets (no new bucket can be
	 * allocated during GC) and stall again.  So instead of a constant,
	 * the per-pass node count scales with the size of the btree: GC is
	 * divided into a constant number (MAX_GC_TIMES, 100) of passes, so
	 * the more btree nodes there are, the more nodes GC processes per
	 * pass, but never fewer than MIN_GC_NODES.
	 */
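	/*
	 * Illustrative numbers: with 40000 btree nodes,
	 * 40000 / 100 (MAX_GC_TIMES) = 400 nodes per pass; with only 5000
	 * nodes, 5000 / 100 = 50 would fall below MIN_GC_NODES, so 100 is
	 * used instead.
	 */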
1612 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1613 if (min_nodes < MIN_GC_NODES)
1614 min_nodes = MIN_GC_NODES;
1615
1616 return min_nodes;
1617}
1618
1619
1620static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1621 struct closure *writes, struct gc_stat *gc)
1622{
1623 int ret = 0;
1624 bool should_rewrite;
1625 struct bkey *k;
1626 struct btree_iter iter;
1627 struct gc_merge_info r[GC_MERGE_NODES];
1628 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1629
1630 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1631
1632 for (i = r; i < r + ARRAY_SIZE(r); i++)
1633 i->b = ERR_PTR(-EINTR);
1634
1635 while (1) {
1636 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1637 if (k) {
1638 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1639 true, b);
1640 if (IS_ERR(r->b)) {
1641 ret = PTR_ERR(r->b);
1642 break;
1643 }
1644
1645 r->keys = btree_gc_count_keys(r->b);
1646
1647 ret = btree_gc_coalesce(b, op, gc, r);
1648 if (ret)
1649 break;
1650 }
1651
1652 if (!last->b)
1653 break;
1654
1655 if (!IS_ERR(last->b)) {
1656 should_rewrite = btree_gc_mark_node(last->b, gc);
1657 if (should_rewrite) {
1658 ret = btree_gc_rewrite_node(b, op, last->b);
1659 if (ret)
1660 break;
1661 }
1662
1663 if (last->b->level) {
1664 ret = btree_gc_recurse(last->b, op, writes, gc);
1665 if (ret)
1666 break;
1667 }
1668
1669 bkey_copy_key(&b->c->gc_done, &last->b->key);
1670
1671 /*
1672 * Must flush leaf nodes before gc ends, since replace
1673 * operations aren't journalled
1674 */
1675 mutex_lock(&last->b->write_lock);
1676 if (btree_node_dirty(last->b))
1677 bch_btree_node_write(last->b, writes);
1678 mutex_unlock(&last->b->write_lock);
1679 rw_unlock(true, last->b);
1680 }
1681
1682 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1683 r->b = NULL;
1684
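		/*
		 * If front-end I/O is in flight and this GC pass has already
		 * covered its share of nodes, return -EAGAIN so the caller
		 * can sleep for GC_SLEEP_MS and resume incrementally.
		 */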
1685 if (atomic_read(&b->c->search_inflight) &&
1686 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1687 gc->nodes_pre = gc->nodes;
1688 ret = -EAGAIN;
1689 break;
1690 }
1691
1692 if (need_resched()) {
1693 ret = -EAGAIN;
1694 break;
1695 }
1696 }
1697
1698 for (i = r; i < r + ARRAY_SIZE(r); i++)
1699 if (!IS_ERR_OR_NULL(i->b)) {
1700 mutex_lock(&i->b->write_lock);
1701 if (btree_node_dirty(i->b))
1702 bch_btree_node_write(i->b, writes);
1703 mutex_unlock(&i->b->write_lock);
1704 rw_unlock(true, i->b);
1705 }
1706
1707 return ret;
1708}
1709
1710static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1711 struct closure *writes, struct gc_stat *gc)
1712{
1713 struct btree *n = NULL;
1714 int ret = 0;
1715 bool should_rewrite;
1716
1717 should_rewrite = btree_gc_mark_node(b, gc);
1718 if (should_rewrite) {
1719 n = btree_node_alloc_replacement(b, NULL);
1720
1721 if (!IS_ERR_OR_NULL(n)) {
1722 bch_btree_node_write_sync(n);
1723
1724 bch_btree_set_root(n);
1725 btree_node_free(b);
1726 rw_unlock(true, n);
1727
1728 return -EINTR;
1729 }
1730 }
1731
1732 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1733
1734 if (b->level) {
1735 ret = btree_gc_recurse(b, op, writes, gc);
1736 if (ret)
1737 return ret;
1738 }
1739
1740 bkey_copy_key(&b->c->gc_done, &b->key);
1741
1742 return ret;
1743}
1744
1745static void btree_gc_start(struct cache_set *c)
1746{
1747 struct cache *ca;
1748 struct bucket *b;
1749 unsigned int i;
1750
1751 if (!c->gc_mark_valid)
1752 return;
1753
1754 mutex_lock(&c->bucket_lock);
1755
1756 c->gc_mark_valid = 0;
1757 c->gc_done = ZERO_KEY;
1758
1759 for_each_cache(ca, c, i)
1760 for_each_bucket(b, ca) {
1761 b->last_gc = b->gen;
1762 if (!atomic_read(&b->pin)) {
1763 SET_GC_MARK(b, 0);
1764 SET_GC_SECTORS_USED(b, 0);
1765 }
1766 }
1767
1768 mutex_unlock(&c->bucket_lock);
1769}
1770
1771static void bch_btree_gc_finish(struct cache_set *c)
1772{
1773 struct bucket *b;
1774 struct cache *ca;
1775 unsigned int i;
1776
1777 mutex_lock(&c->bucket_lock);
1778
1779 set_gc_sectors(c);
1780 c->gc_mark_valid = 1;
1781 c->need_gc = 0;
1782
1783 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1784 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1785 GC_MARK_METADATA);
1786
1787 /* don't reclaim buckets to which writeback keys point */
1788 rcu_read_lock();
1789 for (i = 0; i < c->devices_max_used; i++) {
1790 struct bcache_device *d = c->devices[i];
1791 struct cached_dev *dc;
1792 struct keybuf_key *w, *n;
1793 unsigned int j;
1794
1795 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1796 continue;
1797 dc = container_of(d, struct cached_dev, disk);
1798
1799 spin_lock(&dc->writeback_keys.lock);
1800 rbtree_postorder_for_each_entry_safe(w, n,
1801 &dc->writeback_keys.keys, node)
1802 for (j = 0; j < KEY_PTRS(&w->key); j++)
1803 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1804 GC_MARK_DIRTY);
1805 spin_unlock(&dc->writeback_keys.lock);
1806 }
1807 rcu_read_unlock();
1808
1809 c->avail_nbuckets = 0;
1810 for_each_cache(ca, c, i) {
1811 uint64_t *i;
1812
1813 ca->invalidate_needs_gc = 0;
1814
1815 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1816 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1817
1818 for (i = ca->prio_buckets;
1819 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1820 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1821
1822 for_each_bucket(b, ca) {
1823 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1824
1825 if (atomic_read(&b->pin))
1826 continue;
1827
1828 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1829
1830 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1831 c->avail_nbuckets++;
1832 }
1833 }
1834
1835 mutex_unlock(&c->bucket_lock);
1836}
1837
1838static void bch_btree_gc(struct cache_set *c)
1839{
1840 int ret;
1841 struct gc_stat stats;
1842 struct closure writes;
1843 struct btree_op op;
1844 uint64_t start_time = local_clock();
1845
1846 trace_bcache_gc_start(c);
1847
1848 memset(&stats, 0, sizeof(struct gc_stat));
1849 closure_init_stack(&writes);
1850 bch_btree_op_init(&op, SHRT_MAX);
1851
1852 btree_gc_start(c);
1853
1854 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1855 do {
1856 ret = btree_root(gc_root, c, &op, &writes, &stats);
1857 closure_sync(&writes);
1858 cond_resched();
1859
1860 if (ret == -EAGAIN)
1861 schedule_timeout_interruptible(msecs_to_jiffies
1862 (GC_SLEEP_MS));
1863 else if (ret)
1864 pr_warn("gc failed!");
1865 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1866
1867 bch_btree_gc_finish(c);
1868 wake_up_allocators(c);
1869
1870 bch_time_stats_update(&c->btree_gc_time, start_time);
1871
1872 stats.key_bytes *= sizeof(uint64_t);
1873 stats.data <<= 9;
1874 bch_update_bucket_in_use(c, &stats);
1875 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1876
1877 trace_bcache_gc_end(c);
1878
1879 bch_moving_gc(c);
1880}
1881
1882static bool gc_should_run(struct cache_set *c)
1883{
1884 struct cache *ca;
1885 unsigned int i;
1886
1887 for_each_cache(ca, c, i)
1888 if (ca->invalidate_needs_gc)
1889 return true;
1890
1891 if (atomic_read(&c->sectors_to_gc) < 0)
1892 return true;
1893
1894 return false;
1895}
1896
1897static int bch_gc_thread(void *arg)
1898{
1899 struct cache_set *c = arg;
1900
1901 while (1) {
1902 wait_event_interruptible(c->gc_wait,
1903 kthread_should_stop() ||
1904 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1905 gc_should_run(c));
1906
1907 if (kthread_should_stop() ||
1908 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1909 break;
1910
1911 set_gc_sectors(c);
1912 bch_btree_gc(c);
1913 }
1914
1915 wait_for_kthread_stop();
1916 return 0;
1917}
1918
1919int bch_gc_thread_start(struct cache_set *c)
1920{
1921 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1922 return PTR_ERR_OR_ZERO(c->gc_thread);
1923}
1924
1925/* Initial partial gc */
1926
1927static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1928{
1929 int ret = 0;
1930 struct bkey *k, *p = NULL;
1931 struct btree_iter iter;
1932
1933 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1934 bch_initial_mark_key(b->c, b->level, k);
1935
1936 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1937
1938 if (b->level) {
1939 bch_btree_iter_init(&b->keys, &iter, NULL);
1940
1941 do {
1942 k = bch_btree_iter_next_filter(&iter, &b->keys,
1943 bch_ptr_bad);
1944 if (k) {
1945 btree_node_prefetch(b, k);
1946 /*
				 * initialize c->gc_stats.nodes
1948 * for incremental GC
1949 */
1950 b->c->gc_stats.nodes++;
1951 }
1952
1953 if (p)
1954 ret = btree(check_recurse, p, b, op);
1955
1956 p = k;
1957 } while (p && !ret);
1958 }
1959
1960 return ret;
1961}
1962
1963int bch_btree_check(struct cache_set *c)
1964{
1965 struct btree_op op;
1966
1967 bch_btree_op_init(&op, SHRT_MAX);
1968
1969 return btree_root(check_recurse, c, &op);
1970}
1971
1972void bch_initial_gc_finish(struct cache_set *c)
1973{
1974 struct cache *ca;
1975 struct bucket *b;
1976 unsigned int i;
1977
1978 bch_btree_gc_finish(c);
1979
1980 mutex_lock(&c->bucket_lock);
1981
1982 /*
1983 * We need to put some unused buckets directly on the prio freelist in
1984 * order to get the allocator thread started - it needs freed buckets in
1985 * order to rewrite the prios and gens, and it needs to rewrite prios
1986 * and gens in order to free buckets.
1987 *
1988 * This is only safe for buckets that have no live data in them, which
1989 * there should always be some of.
1990 */
1991 for_each_cache(ca, c, i) {
1992 for_each_bucket(b, ca) {
1993 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1994 fifo_full(&ca->free[RESERVE_BTREE]))
1995 break;
1996
1997 if (bch_can_invalidate_bucket(ca, b) &&
1998 !GC_MARK(b)) {
1999 __bch_invalidate_one_bucket(ca, b);
2000 if (!fifo_push(&ca->free[RESERVE_PRIO],
2001 b - ca->buckets))
2002 fifo_push(&ca->free[RESERVE_BTREE],
2003 b - ca->buckets);
2004 }
2005 }
2006 }
2007
2008 mutex_unlock(&c->bucket_lock);
2009}
2010
2011/* Btree insertion */
2012
2013static bool btree_insert_key(struct btree *b, struct bkey *k,
2014 struct bkey *replace_key)
2015{
2016 unsigned int status;
2017
2018 BUG_ON(bkey_cmp(k, &b->key) > 0);
2019
2020 status = bch_btree_insert_key(&b->keys, k, replace_key);
2021 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2022 bch_check_keys(&b->keys, "%u for %s", status,
2023 replace_key ? "replace" : "insert");
2024
2025 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2026 status);
2027 return true;
2028 } else
2029 return false;
2030}
2031
2032static size_t insert_u64s_remaining(struct btree *b)
2033{
2034 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2035
2036 /*
2037 * Might land in the middle of an existing extent and have to split it
2038 */
2039 if (b->keys.ops->is_extents)
2040 ret -= KEY_MAX_U64S;
2041
2042 return max(ret, 0L);
2043}
2044
2045static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2046 struct keylist *insert_keys,
2047 struct bkey *replace_key)
2048{
2049 bool ret = false;
2050 int oldsize = bch_count_data(&b->keys);
2051
2052 while (!bch_keylist_empty(insert_keys)) {
2053 struct bkey *k = insert_keys->keys;
2054
2055 if (bkey_u64s(k) > insert_u64s_remaining(b))
2056 break;
2057
2058 if (bkey_cmp(k, &b->key) <= 0) {
2059 if (!b->level)
2060 bkey_put(b->c, k);
2061
2062 ret |= btree_insert_key(b, k, replace_key);
2063 bch_keylist_pop_front(insert_keys);
2064 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2065 BKEY_PADDED(key) temp;
2066 bkey_copy(&temp.key, insert_keys->keys);
2067
2068 bch_cut_back(&b->key, &temp.key);
2069 bch_cut_front(&b->key, insert_keys->keys);
2070
2071 ret |= btree_insert_key(b, &temp.key, replace_key);
2072 break;
2073 } else {
2074 break;
2075 }
2076 }
2077
2078 if (!ret)
2079 op->insert_collision = true;
2080
2081 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2082
2083 BUG_ON(bch_count_data(&b->keys) < oldsize);
2084 return ret;
2085}
2086
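/*
 * Rewrite node @b with @insert_keys added, splitting it if the result
 * would fill more than ~4/5 of the node.  On a split n1 keeps roughly
 * the first 3/5 of the keys and n2 the rest; if @b was the root, a new
 * root n3 one level up is allocated to point at n1 and n2.  Keys for the
 * parent are gathered in parent_keys and inserted with
 * bch_btree_insert_node(); without enough reserve, a leaf split bails
 * out early with -EINTR.
 */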
2087static int btree_split(struct btree *b, struct btree_op *op,
2088 struct keylist *insert_keys,
2089 struct bkey *replace_key)
2090{
2091 bool split;
2092 struct btree *n1, *n2 = NULL, *n3 = NULL;
2093 uint64_t start_time = local_clock();
2094 struct closure cl;
2095 struct keylist parent_keys;
2096
2097 closure_init_stack(&cl);
2098 bch_keylist_init(&parent_keys);
2099
2100 if (btree_check_reserve(b, op)) {
2101 if (!b->level)
2102 return -EINTR;
2103 else
2104 WARN(1, "insufficient reserve for split\n");
2105 }
2106
2107 n1 = btree_node_alloc_replacement(b, op);
2108 if (IS_ERR(n1))
2109 goto err;
2110
2111 split = set_blocks(btree_bset_first(n1),
2112 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2113
2114 if (split) {
2115 unsigned int keys = 0;
2116
2117 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2118
2119 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2120 if (IS_ERR(n2))
2121 goto err_free1;
2122
2123 if (!b->parent) {
2124 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2125 if (IS_ERR(n3))
2126 goto err_free2;
2127 }
2128
2129 mutex_lock(&n1->write_lock);
2130 mutex_lock(&n2->write_lock);
2131
2132 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2133
2134 /*
2135 * Has to be a linear search because we don't have an auxiliary
2136 * search tree yet
2137 */
2138
2139 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2140 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2141 keys));
2142
2143 bkey_copy_key(&n1->key,
2144 bset_bkey_idx(btree_bset_first(n1), keys));
2145 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2146
2147 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2148 btree_bset_first(n1)->keys = keys;
2149
2150 memcpy(btree_bset_first(n2)->start,
2151 bset_bkey_last(btree_bset_first(n1)),
2152 btree_bset_first(n2)->keys * sizeof(uint64_t));
2153
2154 bkey_copy_key(&n2->key, &b->key);
2155
2156 bch_keylist_add(&parent_keys, &n2->key);
2157 bch_btree_node_write(n2, &cl);
2158 mutex_unlock(&n2->write_lock);
2159 rw_unlock(true, n2);
2160 } else {
2161 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2162
2163 mutex_lock(&n1->write_lock);
2164 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2165 }
2166
2167 bch_keylist_add(&parent_keys, &n1->key);
2168 bch_btree_node_write(n1, &cl);
2169 mutex_unlock(&n1->write_lock);
2170
2171 if (n3) {
2172 /* Depth increases, make a new root */
2173 mutex_lock(&n3->write_lock);
2174 bkey_copy_key(&n3->key, &MAX_KEY);
2175 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2176 bch_btree_node_write(n3, &cl);
2177 mutex_unlock(&n3->write_lock);
2178
2179 closure_sync(&cl);
2180 bch_btree_set_root(n3);
2181 rw_unlock(true, n3);
2182 } else if (!b->parent) {
2183 /* Root filled up but didn't need to be split */
2184 closure_sync(&cl);
2185 bch_btree_set_root(n1);
2186 } else {
2187 /* Split a non root node */
2188 closure_sync(&cl);
2189 make_btree_freeing_key(b, parent_keys.top);
2190 bch_keylist_push(&parent_keys);
2191
2192 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2193 BUG_ON(!bch_keylist_empty(&parent_keys));
2194 }
2195
2196 btree_node_free(b);
2197 rw_unlock(true, n1);
2198
2199 bch_time_stats_update(&b->c->btree_split_time, start_time);
2200
2201 return 0;
2202err_free2:
2203 bkey_put(b->c, &n2->key);
2204 btree_node_free(n2);
2205 rw_unlock(true, n2);
2206err_free1:
2207 bkey_put(b->c, &n1->key);
2208 btree_node_free(n1);
2209 rw_unlock(true, n1);
2210err:
2211 WARN(1, "bcache: btree split failed (level %u)", b->level);
2212
2213 if (n3 == ERR_PTR(-EAGAIN) ||
2214 n2 == ERR_PTR(-EAGAIN) ||
2215 n1 == ERR_PTR(-EAGAIN))
2216 return -EAGAIN;
2217
2218 return -ENOMEM;
2219}
2220
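/*
 * Insert keys into a single node, splitting it when they do not fit.
 * Returns 0 on success.  -EAGAIN means current->bio_list is set, so the
 * split is deferred to a context where it is safe to block; -EINTR means
 * the traversal has to be retried, either with a write lock higher up
 * (op->lock is bumped past the root) or because a split invalidated the
 * caller's iterators while keys remain on the list.
 */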
2221static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2222 struct keylist *insert_keys,
2223 atomic_t *journal_ref,
2224 struct bkey *replace_key)
2225{
2226 struct closure cl;
2227
2228 BUG_ON(b->level && replace_key);
2229
2230 closure_init_stack(&cl);
2231
2232 mutex_lock(&b->write_lock);
2233
2234 if (write_block(b) != btree_bset_last(b) &&
2235 b->keys.last_set_unwritten)
2236 bch_btree_init_next(b); /* just wrote a set */
2237
2238 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2239 mutex_unlock(&b->write_lock);
2240 goto split;
2241 }
2242
2243 BUG_ON(write_block(b) != btree_bset_last(b));
2244
2245 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2246 if (!b->level)
2247 bch_btree_leaf_dirty(b, journal_ref);
2248 else
2249 bch_btree_node_write(b, &cl);
2250 }
2251
2252 mutex_unlock(&b->write_lock);
2253
2254 /* wait for btree node write if necessary, after unlock */
2255 closure_sync(&cl);
2256
2257 return 0;
2258split:
2259 if (current->bio_list) {
2260 op->lock = b->c->root->level + 1;
2261 return -EAGAIN;
2262 } else if (op->lock <= b->c->root->level) {
2263 op->lock = b->c->root->level + 1;
2264 return -EINTR;
2265 } else {
2266 /* Invalidated all iterators */
2267 int ret = btree_split(b, op, insert_keys, replace_key);
2268
2269 if (bch_keylist_empty(insert_keys))
2270 return 0;
2271 else if (!ret)
2272 return -EINTR;
2273 return ret;
2274 }
2275}
2276
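/*
 * Insert @check_key as a single random pointer on the special
 * PTR_CHECK_DEV device.  If the node was only read-locked (op->lock ==
 * -1) the lock is upgraded to a write lock first; -EINTR is returned if
 * the node's key pointer or sequence number show that it changed while
 * the lock was dropped.
 */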
2277int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2278 struct bkey *check_key)
2279{
2280 int ret = -EINTR;
2281 uint64_t btree_ptr = b->key.ptr[0];
2282 unsigned long seq = b->seq;
2283 struct keylist insert;
2284 bool upgrade = op->lock == -1;
2285
2286 bch_keylist_init(&insert);
2287
2288 if (upgrade) {
2289 rw_unlock(false, b);
2290 rw_lock(true, b, b->level);
2291
2292 if (b->key.ptr[0] != btree_ptr ||
2293 b->seq != seq + 1) {
2294 op->lock = b->level;
2295 goto out;
2296 }
2297 }
2298
2299 SET_KEY_PTRS(check_key, 1);
2300 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2301
2302 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2303
2304 bch_keylist_add(&insert, check_key);
2305
2306 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2307
2308 BUG_ON(!ret && !bch_keylist_empty(&insert));
2309out:
2310 if (upgrade)
2311 downgrade_write(&b->lock);
2312 return ret;
2313}
2314
2315struct btree_insert_op {
2316 struct btree_op op;
2317 struct keylist *keys;
2318 atomic_t *journal_ref;
2319 struct bkey *replace_key;
2320};
2321
2322static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2323{
2324 struct btree_insert_op *op = container_of(b_op,
2325 struct btree_insert_op, op);
2326
2327 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2328 op->journal_ref, op->replace_key);
2329 if (ret && !bch_keylist_empty(op->keys))
2330 return ret;
2331 else
2332 return MAP_DONE;
2333}
2334
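/*
 * Top-level insert: map to the leaf covering START_KEY() of the first
 * key and insert from there, restarting the traversal until the keylist
 * drains or an error occurs.  On error the remaining keys are dropped;
 * -ESRCH reports op.insert_collision, i.e. a node accepted none of the
 * keys offered to it (typically a replace that found nothing to
 * replace).
 */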
2335int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2336 atomic_t *journal_ref, struct bkey *replace_key)
2337{
2338 struct btree_insert_op op;
2339 int ret = 0;
2340
2341 BUG_ON(current->bio_list);
2342 BUG_ON(bch_keylist_empty(keys));
2343
2344 bch_btree_op_init(&op.op, 0);
2345 op.keys = keys;
2346 op.journal_ref = journal_ref;
2347 op.replace_key = replace_key;
2348
2349 while (!ret && !bch_keylist_empty(keys)) {
2350 op.op.lock = 0;
2351 ret = bch_btree_map_leaf_nodes(&op.op, c,
2352 &START_KEY(keys->keys),
2353 btree_insert_fn);
2354 }
2355
2356 if (ret) {
2357 struct bkey *k;
2358
2359 		pr_err("error %i\n", ret);
2360
2361 while ((k = bch_keylist_pop(keys)))
2362 bkey_put(c, k);
2363 } else if (op.op.insert_collision)
2364 ret = -ESRCH;
2365
2366 return ret;
2367}
2368
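/*
 * Point the cache set at a new root node and journal the change,
 * waiting for the journal write to complete.  The node must already be
 * written out and all of its buckets must be btree buckets (BTREE_PRIO).
 */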
2369void bch_btree_set_root(struct btree *b)
2370{
2371 unsigned int i;
2372 struct closure cl;
2373
2374 closure_init_stack(&cl);
2375
2376 trace_bcache_btree_set_root(b);
2377
2378 BUG_ON(!b->written);
2379
2380 for (i = 0; i < KEY_PTRS(&b->key); i++)
2381 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2382
2383 mutex_lock(&b->c->bucket_lock);
2384 list_del_init(&b->list);
2385 mutex_unlock(&b->c->bucket_lock);
2386
2387 b->c->root = b;
2388
2389 bch_journal_meta(b->c, &cl);
2390 closure_sync(&cl);
2391}
2392
2393/* Map across nodes or keys */
2394
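/*
 * The map helpers walk the btree in key order, calling @fn on every node
 * (bch_btree_map_nodes) or key (bch_btree_map_keys).  @fn returns
 * MAP_CONTINUE to keep going or MAP_DONE to stop.  With MAP_ALL_NODES
 * the node walk visits interior nodes as well as leaves; with
 * MAP_END_KEY the key walk also calls @fn on a zero-size key at the end
 * of each leaf.  refill_keybuf_fn() below is an example of a
 * btree_map_keys_fn.
 */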
2395static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2396 struct bkey *from,
2397 btree_map_nodes_fn *fn, int flags)
2398{
2399 int ret = MAP_CONTINUE;
2400
2401 if (b->level) {
2402 struct bkey *k;
2403 struct btree_iter iter;
2404
2405 bch_btree_iter_init(&b->keys, &iter, from);
2406
2407 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2408 bch_ptr_bad))) {
2409 ret = btree(map_nodes_recurse, k, b,
2410 op, from, fn, flags);
2411 from = NULL;
2412
2413 if (ret != MAP_CONTINUE)
2414 return ret;
2415 }
2416 }
2417
2418 if (!b->level || flags == MAP_ALL_NODES)
2419 ret = fn(op, b);
2420
2421 return ret;
2422}
2423
2424int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2425 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2426{
2427 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2428}
2429
2430static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2431 struct bkey *from, btree_map_keys_fn *fn,
2432 int flags)
2433{
2434 int ret = MAP_CONTINUE;
2435 struct bkey *k;
2436 struct btree_iter iter;
2437
2438 bch_btree_iter_init(&b->keys, &iter, from);
2439
2440 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2441 ret = !b->level
2442 ? fn(op, b, k)
2443 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2444 from = NULL;
2445
2446 if (ret != MAP_CONTINUE)
2447 return ret;
2448 }
2449
2450 if (!b->level && (flags & MAP_END_KEY))
2451 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2452 KEY_OFFSET(&b->key), 0));
2453
2454 return ret;
2455}
2456
2457int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2458 struct bkey *from, btree_map_keys_fn *fn, int flags)
2459{
2460 return btree_root(map_keys_recurse, c, op, from, fn, flags);
2461}
2462
2463/* Keybuf code */
2464
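/*
 * A keybuf collects keys matching a predicate out of the btree (see
 * bch_refill_keybuf()).  Keys sit in an rbtree ordered by keybuf_cmp(),
 * which treats overlapping keys as equal so duplicates never get
 * inserted, and are handed out one at a time by bch_keybuf_next().
 */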
2465static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2466{
2467 /* Overlapping keys compare equal */
2468 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2469 return -1;
2470 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2471 return 1;
2472 return 0;
2473}
2474
2475static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2476 struct keybuf_key *r)
2477{
2478 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2479}
2480
2481struct refill {
2482 struct btree_op op;
2483 unsigned int nr_found;
2484 struct keybuf *buf;
2485 struct bkey *end;
2486 keybuf_pred_fn *pred;
2487};
2488
2489static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2490 struct bkey *k)
2491{
2492 struct refill *refill = container_of(op, struct refill, op);
2493 struct keybuf *buf = refill->buf;
2494 int ret = MAP_CONTINUE;
2495
2496 if (bkey_cmp(k, refill->end) > 0) {
2497 ret = MAP_DONE;
2498 goto out;
2499 }
2500
2501 if (!KEY_SIZE(k)) /* end key */
2502 goto out;
2503
2504 if (refill->pred(buf, k)) {
2505 struct keybuf_key *w;
2506
2507 spin_lock(&buf->lock);
2508
2509 w = array_alloc(&buf->freelist);
2510 if (!w) {
2511 spin_unlock(&buf->lock);
2512 return MAP_DONE;
2513 }
2514
2515 w->private = NULL;
2516 bkey_copy(&w->key, k);
2517
2518 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2519 array_free(&buf->freelist, w);
2520 else
2521 refill->nr_found++;
2522
2523 if (array_freelist_empty(&buf->freelist))
2524 ret = MAP_DONE;
2525
2526 spin_unlock(&buf->lock);
2527 }
2528out:
2529 buf->last_scanned = *k;
2530 return ret;
2531}
2532
2533void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2534 struct bkey *end, keybuf_pred_fn *pred)
2535{
2536 struct bkey start = buf->last_scanned;
2537 struct refill refill;
2538
2539 cond_resched();
2540
2541 bch_btree_op_init(&refill.op, -1);
2542 refill.nr_found = 0;
2543 refill.buf = buf;
2544 refill.end = end;
2545 refill.pred = pred;
2546
2547 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2548 refill_keybuf_fn, MAP_END_KEY);
2549
2550 trace_bcache_keyscan(refill.nr_found,
2551 KEY_INODE(&start), KEY_OFFSET(&start),
2552 KEY_INODE(&buf->last_scanned),
2553 KEY_OFFSET(&buf->last_scanned));
2554
2555 spin_lock(&buf->lock);
2556
2557 if (!RB_EMPTY_ROOT(&buf->keys)) {
2558 struct keybuf_key *w;
2559
2560 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2561 buf->start = START_KEY(&w->key);
2562
2563 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2564 buf->end = w->key;
2565 } else {
2566 buf->start = MAX_KEY;
2567 buf->end = MAX_KEY;
2568 }
2569
2570 spin_unlock(&buf->lock);
2571}
2572
2573static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2574{
2575 rb_erase(&w->node, &buf->keys);
2576 array_free(&buf->freelist, w);
2577}
2578
2579void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2580{
2581 spin_lock(&buf->lock);
2582 __bch_keybuf_del(buf, w);
2583 spin_unlock(&buf->lock);
2584}
2585
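/*
 * Return true if any key in the buffer overlapping [start, end) is
 * still in use (w->private set); overlapping keys that are not in use
 * are dropped from the buffer as a side effect.
 */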
2586bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2587 struct bkey *end)
2588{
2589 bool ret = false;
2590 struct keybuf_key *p, *w, s;
2591
2592 s.key = *start;
2593
2594 if (bkey_cmp(end, &buf->start) <= 0 ||
2595 bkey_cmp(start, &buf->end) >= 0)
2596 return false;
2597
2598 spin_lock(&buf->lock);
2599 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2600
2601 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2602 p = w;
2603 w = RB_NEXT(w, node);
2604
2605 if (p->private)
2606 ret = true;
2607 else
2608 __bch_keybuf_del(buf, p);
2609 }
2610
2611 spin_unlock(&buf->lock);
2612 return ret;
2613}
2614
2615struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2616{
2617 struct keybuf_key *w;
2618
2619 spin_lock(&buf->lock);
2620
2621 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2622
2623 while (w && w->private)
2624 w = RB_NEXT(w, node);
2625
2626 if (w)
2627 w->private = ERR_PTR(-EINTR);
2628
2629 spin_unlock(&buf->lock);
2630 return w;
2631}
2632
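/*
 * Like bch_keybuf_next(), but refill the buffer from the btree (up to
 * @end) whenever it runs dry; returns NULL once last_scanned has reached
 * @end and nothing is left in the buffer.
 */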
2633struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2634 struct keybuf *buf,
2635 struct bkey *end,
2636 keybuf_pred_fn *pred)
2637{
2638 struct keybuf_key *ret;
2639
2640 while (1) {
2641 ret = bch_keybuf_next(buf);
2642 if (ret)
2643 break;
2644
2645 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2646 			pr_debug("scan finished\n");
2647 break;
2648 }
2649
2650 bch_refill_keybuf(c, buf, end, pred);
2651 }
2652
2653 return ret;
2654}
2655
2656void bch_keybuf_init(struct keybuf *buf)
2657{
2658 buf->last_scanned = MAX_KEY;
2659 buf->keys = RB_ROOT;
2660
2661 spin_lock_init(&buf->lock);
2662 array_allocator_init(&buf->freelist);
2663}
2664
2665void bch_btree_exit(void)
2666{
2667 if (btree_io_wq)
2668 destroy_workqueue(btree_io_wq);
2669}
2670
2671int __init bch_btree_init(void)
2672{
2673 btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
2674 if (!btree_io_wq)
2675 return -ENOMEM;
2676
2677 return 0;
2678}