// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
#define MAX_WRITEBACK_JOBS 0
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
#define AUTOCOMMIT_BLOCKS_PMEM 64
#define AUTOCOMMIT_MSEC 1000

#define BITMAP_GRANULARITY 65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

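/*
 * pmem_assign() stores a value and immediately flushes it from the CPU
 * cache with memcpy_flushcache() so it reaches persistent media; without
 * pmem support it degenerates to a plain assignment.
 */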
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src) \
do { \
	typeof(dest) uniq = (src); \
	memcpy_flushcache(&(dest), &uniq, sizeof(dest)); \
} while (0)
#else
#define pmem_assign(dest, src) ((dest) = (src))
#endif

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC 0x23489321
#define MEMORY_SUPERBLOCK_VERSION 1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};

struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc) ((wc)->pmem_mode)
#define WC_MODE_FUA(wc) ((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc) false
#define WC_MODE_FUA(wc) false
#endif
#define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc))

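/*
 * Per-target state.  The "tree" rb-tree indexes cached blocks by their
 * original sector and "lru" tracks them in write order.  Free entries sit
 * either on a plain list (pmem mode) or on an address-sorted rb-tree
 * (SSD mode, see WC_MODE_SORT_FREELIST above).
 */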
struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	sector_t data_device_sectors;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool start_sector_set:1;
	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;

	unsigned high_wm_percent_value;
	unsigned low_wm_percent_value;
	unsigned autocommit_time_value;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE 16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

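/*
 * Map the cache device through DAX so its contents can be accessed
 * directly as memory.  If dax_direct_access() cannot provide one
 * contiguous mapping, the individual pages are collected and vmap()ed.
 */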
#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;
	sector_t offset;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	offset = get_start_sect(wc->ssd_dev->bdev);
	if (offset & (PAGE_SIZE / 512 - 1)) {
		r = -EINVAL;
		goto err1;
	}
	offset >>= PAGE_SHIFT - 9;

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
				if (!(i & 15))
					cond_resched();
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	return -EOPNOTSUPP;
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
	       ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...) \
do { \
	if (!cmpxchg(&(wc)->error, 0, err)) \
		DMERR(msg, ##arg); \
	wake_up(&(wc)->freelist_wait); \
} while (0)

#define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

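/*
 * SSD mode only: write every metadata region marked in the dirty bitmap
 * back to the cache device, optionally wait for in-flight writes and
 * finish with a disk flush so the metadata is durable.
 */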
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING 1
#define WFE_LOWEST_SEQ 2

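/*
 * Look up the cache entry for @block.  With WFE_RETURN_FOLLOWING a miss
 * returns the next higher entry instead of NULL; with WFE_LOWEST_SEQ the
 * oldest of several entries for the same block is returned, otherwise the
 * newest.
 */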
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

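/*
 * Commit all uncommitted entries: flush their metadata (and data in pmem
 * mode), bump the superblock sequence count, then free older entries that
 * the newly committed ones supersede.
 */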
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
	writecache_commit_flushed(wc, false);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			if (!writecache_entry_is_committed(wc, e))
				wc->uncommitted_blocks--;
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
		cond_resched();
	}

	return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = wc->start_sector;
	region.count = n_sectors;
	req.bi_op = REQ_OP_READ;
	req.bi_op_flags = REQ_SYNC;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	return dm_io(&req, 1, &region, NULL);
}

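/*
 * Rebuild the in-memory rb-tree and freelist from the on-media metadata
 * when the target is resumed.  Stale or duplicate entries are detected by
 * comparing sequence counts and cleaned up before I/O is allowed again.
 */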
static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (WC_MODE_PMEM(wc)) {
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
	} else {
		r = writecache_read_metadata(wc, wc->metadata_sectors);
		if (r) {
			size_t sb_entries_offset;
			writecache_error(wc, r, "unable to read metadata: %d", r);
			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
			memset((char *)wc->memory_map + sb_entries_offset, -1,
			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
		}
	}

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
		cond_resched();
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

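/*
 * Main bio mapping routine.  Flushes and discards are handled inline in
 * pmem mode or offloaded to the flush thread in SSD mode.  Reads are
 * served from the cache when an entry exists and otherwise remapped to
 * the origin; writes always land in the cache, taking a free block when
 * the sector is not cached yet.
 */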
static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
			}
			e = writecache_pop_from_freelist(wc);
			if (unlikely(!e)) {
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}


static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					 "write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);

	if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
		return true;

	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else if (unlikely(!bio_sectors(bio))) {
			bio->bi_status = BLK_STS_OK;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

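/*
 * SSD-mode writeback: each run of contiguous cache entries is copied back
 * to the origin device with a single dm-kcopyd request, clipped to the
 * size of the origin device.
 */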
static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
			if (to.sector >= wc->data_device_sectors) {
				writecache_copy_endio(0, 0, c);
				continue;
			}
			from.count = to.count = wc->data_device_sectors - to.sector;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

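/*
 * Writeback worker: walk the lru list (or the whole tree when
 * writeback_all is set), batch runs of contiguous committed entries into
 * a writeback list and hand it to the pmem or SSD writeback path.
 */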
static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *g, *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
				     read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't do any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

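/*
 * Work out how many cache blocks and metadata blocks fit on the cache
 * device for a given block size, leaving room for the superblock and one
 * wc_memory_entry per block.
 */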
static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}

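/*
 * Format a fresh cache: initialize the superblock fields, mark every entry
 * as unused and only then write the magic number, so a partially
 * initialized cache is never mistaken for a valid one.
 */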
static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++) {
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
		cond_resched();
	}

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->entries)
		vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}

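/*
 * Constructor.  The table line starts with the mode ("p" for persistent
 * memory, "s" for SSD), followed by the origin device, the cache device,
 * the cache block size and the optional feature arguments parsed below.
 */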
static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_writecache *wc;
	struct dm_arg_set as;
	const char *string;
	unsigned opt_params;
	size_t offset, data_size;
	int i, r;
	char dummy;
	int high_wm_percent = HIGH_WATERMARK;
	int low_wm_percent = LOW_WATERMARK;
	uint64_t x;
	struct wc_memory_superblock s;

	static struct dm_arg _args[] = {
		{0, 16, "Invalid number of feature args"},
	};

	as.argc = argc;
	as.argv = argv;

	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
	if (!wc) {
		ti->error = "Cannot allocate writecache structure";
		r = -ENOMEM;
		goto bad;
	}
	ti->private = wc;
	wc->ti = ti;

	mutex_init(&wc->lock);
	writecache_poison_lists(wc);
	init_waitqueue_head(&wc->freelist_wait);
	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);

	for (i = 0; i < 2; i++) {
		atomic_set(&wc->bio_in_progress[i], 0);
		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
	}

	wc->dm_io = dm_io_client_create();
	if (IS_ERR(wc->dm_io)) {
		r = PTR_ERR(wc->dm_io);
		ti->error = "Unable to allocate dm-io client";
		wc->dm_io = NULL;
		goto bad;
	}

	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
	if (!wc->writeback_wq) {
		r = -ENOMEM;
		ti->error = "Could not allocate writeback workqueue";
		goto bad;
	}
	INIT_WORK(&wc->writeback_work, writecache_writeback);
	INIT_WORK(&wc->flush_work, writecache_flush_work);

	raw_spin_lock_init(&wc->endio_list_lock);
	INIT_LIST_HEAD(&wc->endio_list);
	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
	if (IS_ERR(wc->endio_thread)) {
		r = PTR_ERR(wc->endio_thread);
		wc->endio_thread = NULL;
		ti->error = "Couldn't spawn endio thread";
		goto bad;
	}
	wake_up_process(wc->endio_thread);

	/*
	 * Parse the mode (pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	if (!strcasecmp(string, "s")) {
		wc->pmem_mode = false;
	} else if (!strcasecmp(string, "p")) {
#ifdef DM_WRITECACHE_HAS_PMEM
		wc->pmem_mode = true;
		wc->writeback_fua = true;
#else
		/*
		 * If the architecture doesn't support persistent memory or
		 * the kernel doesn't support any DAX drivers, this driver can
		 * only be used in SSD-only mode.
		 */
		r = -EOPNOTSUPP;
		ti->error = "Persistent memory or DAX not supported on this system";
		goto bad;
#endif
	} else {
		goto bad_arguments;
	}

	if (WC_MODE_PMEM(wc)) {
		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
				offsetof(struct writeback_struct, bio),
				BIOSET_NEED_BVECS);
		if (r) {
			ti->error = "Could not allocate bio set";
			goto bad;
		}
	} else {
		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
		if (r) {
			ti->error = "Could not allocate mempool";
			goto bad;
		}
	}

	/*
	 * Parse the origin data device
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
	if (r) {
		ti->error = "Origin data device lookup failed";
		goto bad;
	}

	/*
	 * Parse cache data device (be it pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
	if (r) {
		ti->error = "Cache data device lookup failed";
		goto bad;
	}
	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

	/*
	 * Parse the cache block size
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
	    (wc->block_size & (wc->block_size - 1))) {
		r = -EINVAL;
		ti->error = "Invalid block size";
		goto bad;
	}
	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
		r = -EINVAL;
		ti->error = "Block size is smaller than device logical block size";
		goto bad;
	}
	wc->block_size_bits = __ffs(wc->block_size);
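	/*
	 * Since the block size was just verified to be a power of two, the
	 * __ffs() above is simply log2(block_size); e.g. a 4096-byte block
	 * size gives block_size_bits = 12, so shifts by block_size_bits
	 * convert between block counts and byte offsets.
	 */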

	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);

	/*
	 * Parse optional arguments
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		goto bad;

	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			wc->start_sector_set = true;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_value = high_wm_percent;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_value = low_wm_percent;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_value = autocommit_msecs;
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}
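	/*
	 * Each recognized option in the loop above also records a *_set flag
	 * (and, where the stored value is converted, the original *_value);
	 * writecache_status() uses these below to report only the options
	 * that were actually given on the table line, in the form the user
	 * supplied them.
	 */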

	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
			r = -EOPNOTSUPP;
			ti->error = "Asynchronous persistent memory not supported as pmem cache";
			goto bad;
		}

		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
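		/*
		 * SSD mode: the metadata area is kept as a shadow copy in
		 * vmalloc()ed memory (wc->memory_map below) and persisted
		 * back to the cache device via the dm-io client set up above;
		 * the dirty bitmap tracks which BITMAP_GRANULARITY-sized
		 * regions of that copy still need to be written out.
		 */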
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is a limitation of the test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
		if (r) {
			ti->error = "Unable to read first block of metadata";
			goto bad;
		}
	}

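	/*
	 * memcpy_mcsafe() is used instead of a plain memcpy() so that a
	 * hardware memory error (e.g. a poisoned pmem page) is reported as a
	 * non-zero return value here instead of crashing the machine; on
	 * configurations without that support it behaves like memcpy() and
	 * returns 0.
	 */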
	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}
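	/*
	 * A superblock whose magic and version are both zero is taken to mean
	 * a device that has never been formatted, so it was just initialized
	 * in place by init_memory() and re-read; anything else that fails the
	 * checks below is rejected as foreign or damaged metadata.
	 */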

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

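	/*
	 * Recompute the on-device layout from the values read back from the
	 * superblock: the superblock and the per-block metadata entries come
	 * first, rounded up to a block boundary, followed by the data blocks.
	 * Each step is checked for integer overflow before the result is
	 * compared against the size of the cache device.
	 */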
	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;
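	/*
	 * The watermarks are given as "percent of the cache in use", but the
	 * freelist thresholds count free blocks, hence the "100 - percent"
	 * inversion with rounding to the nearest block.  Illustrative numbers
	 * only: with 100000 blocks, high_watermark 50 and low_watermark 45,
	 * freelist_high_watermark becomes 50000 free blocks and
	 * freelist_low_watermark becomes 55000.
	 */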

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
		       wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector_set)
			extra_args += 2;
		if (wc->high_wm_percent_set)
			extra_args += 2;
		if (wc->low_wm_percent_set)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector_set)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
		if (wc->high_wm_percent_set)
			DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
		if (wc->low_wm_percent_set)
			DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}
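/*
 * Illustrative output only (the numbers are made up): "dmsetup status" on a
 * healthy target prints the STATUSTYPE_INFO line above as, for example,
 * "0 261123 118250 4096", i.e. no error, 261123 cache blocks, 118250 of them
 * free and 4096 currently being written back, while "dmsetup table" prints
 * the STATUSTYPE_TABLE line, which reconstructs the constructor arguments.
 */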

static struct target_type writecache_target = {
	.name = "writecache",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = writecache_ctr,
	.dtr = writecache_dtr,
	.status = writecache_status,
	.postsuspend = writecache_suspend,
	.resume = writecache_resume,
	.message = writecache_message,
	.map = writecache_map,
	.end_io = writecache_end_io,
	.iterate_devices = writecache_iterate_devices,
	.io_hints = writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");