Update Linux to v5.4.2

Bring drivers/md/dm-snap.c up to its v5.4.2 state. For this file the
update pulls in the upstream snapshot scalability and discard work:

- Replace the per-snapshot mutex with a rw_semaphore and protect the
  exception hash tables with per-bucket hlist_bl bit spinlocks.
- Throttle the number of in-flight chunk copies via the new
  snapshot_cow_threshold module parameter (default 2048).
- Add the optional discard_zeroes_cow and discard_passdown_origin
  features, with io_hints splitting discards on chunk boundaries.
- Drop the origin_dax_direct_access() stub and bump the target versions
  (snapshot to 1.16.0, snapshot-merge to 1.5.0).

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ae4b33d..4fb1a40 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1,6 +1,4 @@
/*
- * dm-snapshot.c
- *
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
@@ -13,6 +11,7 @@
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -43,11 +42,11 @@
struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
- struct list_head *table;
+ struct hlist_bl_head *table;
};
struct dm_snapshot {
- struct mutex lock;
+ struct rw_semaphore lock;
struct dm_dev *origin;
struct dm_dev *cow;
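
For orientation before the hunks that follow: the single per-snapshot mutex becomes a rw_semaphore (held for read on the fast path), and the exception hash tables gain one lock per bucket by switching the buckets from list_head to hlist_bl_head. An hlist_bl_head is a single pointer whose least-significant bit doubles as a spin lock; a condensed sketch of the primitive, abridged from <linux/list_bl.h>:

	struct hlist_bl_head {
		struct hlist_bl_node *first;	/* bit 0 serves as the bucket lock */
	};

	static inline void hlist_bl_lock(struct hlist_bl_head *b)
	{
		bit_spin_lock(0, (unsigned long *)b);	/* spin on bit 0 of ->first */
	}

	static inline void hlist_bl_unlock(struct hlist_bl_head *b)
	{
		__bit_spin_unlock(0, (unsigned long *)b);
	}
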
@@ -75,7 +74,9 @@
atomic_t pending_exceptions_count;
- /* Protected by "lock" */
+ spinlock_t pe_allocation_lock;
+
+ /* Protected by "pe_allocation_lock" */
sector_t exception_start_sequence;
/* Protected by kcopyd single-threaded callback */
@@ -105,6 +106,9 @@
/* The on disk metadata handler */
struct dm_exception_store *store;
+ unsigned in_progress;
+ struct wait_queue_head in_progress_wait;
+
struct dm_kcopyd_client *kcopyd_client;
/* Wait for events based on state_bits */
@@ -127,7 +131,10 @@
* - I/O error while merging
* => stop merging; set merge_failed; process I/O normally.
*/
- int merge_failed;
+ bool merge_failed:1;
+
+ bool discard_zeroes_cow:1;
+ bool discard_passdown_origin:1;
/*
* Incoming bios that overlap with chunks being merged must wait
@@ -145,6 +152,19 @@
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+/*
+ * Maximum number of chunks being copied on write.
+ *
+ * The value was decided experimentally as a trade-off between memory
+ * consumption, stalling the kernel's workqueues and maintaining a high enough
+ * throughput.
+ */
+#define DEFAULT_COW_THRESHOLD 2048
+
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
+MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -440,9 +460,9 @@
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
active = s->active;
- mutex_unlock(&s->lock);
+ up_read(&s->lock);
if (active) {
if (snap_src)
@@ -601,6 +621,36 @@
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
+
+/* Lock to protect access to the completed and pending exception hash tables. */
+struct dm_exception_table_lock {
+ struct hlist_bl_head *complete_slot;
+ struct hlist_bl_head *pending_slot;
+};
+
+static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
+ struct dm_exception_table_lock *lock)
+{
+ struct dm_exception_table *complete = &s->complete;
+ struct dm_exception_table *pending = &s->pending;
+
+ lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
+ lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
+}
+
+static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_lock(lock->complete_slot);
+ hlist_bl_lock(lock->pending_slot);
+}
+
+static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_unlock(lock->pending_slot);
+ hlist_bl_unlock(lock->complete_slot);
+}
+
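
A minimal usage sketch, mirroring what snapshot_map() does later in this patch (s and chunk as in that function): both bucket heads are resolved once, and every caller takes the complete slot before the pending slot via this helper, so the lock ordering stays globally consistent:

	struct dm_exception_table_lock lock;
	struct dm_exception *e;

	dm_exception_table_lock_init(s, chunk, &lock);	/* resolve both bucket heads */

	dm_exception_table_lock(&lock);
	e = dm_lookup_exception(&s->complete, chunk);	/* bucket is locked, lookup is safe */
	dm_exception_table_unlock(&lock);
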
static int dm_exception_table_init(struct dm_exception_table *et,
uint32_t size, unsigned hash_shift)
{
@@ -608,12 +658,12 @@
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = dm_vcalloc(size, sizeof(struct list_head));
+ et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
if (!et->table)
return -ENOMEM;
for (i = 0; i < size; i++)
- INIT_LIST_HEAD(et->table + i);
+ INIT_HLIST_BL_HEAD(et->table + i);
return 0;
}
@@ -621,15 +671,16 @@
static void dm_exception_table_exit(struct dm_exception_table *et,
struct kmem_cache *mem)
{
- struct list_head *slot;
- struct dm_exception *ex, *next;
+ struct hlist_bl_head *slot;
+ struct dm_exception *ex;
+ struct hlist_bl_node *pos, *n;
int i, size;
size = et->hash_mask + 1;
for (i = 0; i < size; i++) {
slot = et->table + i;
- list_for_each_entry_safe (ex, next, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
kmem_cache_free(mem, ex);
}
@@ -643,7 +694,7 @@
static void dm_remove_exception(struct dm_exception *e)
{
- list_del(&e->hash_list);
+ hlist_bl_del(&e->hash_list);
}
/*
@@ -653,11 +704,12 @@
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
chunk_t chunk)
{
- struct list_head *slot;
+ struct hlist_bl_head *slot;
+ struct hlist_bl_node *pos;
struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
- list_for_each_entry (e, slot, hash_list)
+ hlist_bl_for_each_entry(e, pos, slot, hash_list)
if (chunk >= e->old_chunk &&
chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
return e;
@@ -704,7 +756,8 @@
static void dm_insert_exception(struct dm_exception_table *eh,
struct dm_exception *new_e)
{
- struct list_head *l;
+ struct hlist_bl_head *l;
+ struct hlist_bl_node *pos;
struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -714,7 +767,7 @@
goto out;
/* List is ordered by old_chunk */
- list_for_each_entry_reverse(e, l, hash_list) {
+ hlist_bl_for_each_entry(e, pos, l, hash_list) {
/* Insert after an existing chunk? */
if (new_e->old_chunk == (e->old_chunk +
dm_consecutive_chunk_count(e) + 1) &&
@@ -735,12 +788,24 @@
return;
}
- if (new_e->old_chunk > e->old_chunk)
+ if (new_e->old_chunk < e->old_chunk)
break;
}
out:
- list_add(&new_e->hash_list, e ? &e->hash_list : l);
+ if (!e) {
+ /*
+ * Either the table doesn't support consecutive chunks or slot
+ * l is empty.
+ */
+ hlist_bl_add_head(&new_e->hash_list, l);
+ } else if (new_e->old_chunk < e->old_chunk) {
+ /* Add before an existing exception */
+ hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
+ } else {
+ /* Add to l's tail: e is the last exception in this slot */
+ hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
+ }
}
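
Note the flipped iteration direction in this hunk: the old code walked the ordered slot in reverse and could list_add() after the element it stopped at, but hlist_bl is singly linked and has no reverse iterator. The rewrite therefore walks forward to the first entry with a larger old_chunk, and the out: block picks between add-at-head (empty slot or no match), hlist_bl_add_before() and hlist_bl_add_behind() — helpers added to <linux/list_bl.h> alongside this conversion.
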
/*
@@ -749,6 +814,7 @@
*/
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
+ struct dm_exception_table_lock lock;
struct dm_snapshot *s = context;
struct dm_exception *e;
@@ -761,7 +827,17 @@
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
+ /*
+ * Although there is no need to lock access to the exception tables
+ * here, if we don't then hlist_bl_add_head(), called by
+ * dm_insert_exception(), will complain about accessing the
+ * corresponding list without locking it first.
+ */
+ dm_exception_table_lock_init(s, old, &lock);
+
+ dm_exception_table_lock(&lock);
dm_insert_exception(&s->complete, e);
+ dm_exception_table_unlock(&lock);
return 0;
}
@@ -790,7 +866,7 @@
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
- mem /= sizeof(struct list_head);
+ mem /= sizeof(struct hlist_bl_head);
return mem;
}
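
A quick sanity check on the arithmetic, assuming a 64-bit build:

	/* old: sizeof(struct list_head)     == 16  ->  2 MiB / 16 == 131072 slots
	 * new: sizeof(struct hlist_bl_head) ==  8  ->  2 MiB /  8 == 262144 slots
	 * (the caller is expected to round the result down to a power of two)
	 */

so the fixed 2 MB budget mentioned in the comment now buys twice as many buckets.
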
@@ -910,7 +986,7 @@
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -925,7 +1001,7 @@
b = __release_queued_bios_after_merge(s);
out:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
if (b)
flush_bios(b);
@@ -984,9 +1060,9 @@
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
goto shut;
}
@@ -1027,10 +1103,10 @@
previous_count = read_pending_exceptions_done_count();
}
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1072,10 +1148,10 @@
return;
shut:
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1097,12 +1173,64 @@
clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
+static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
+ struct dm_target *ti)
+{
+ int r;
+ unsigned argc;
+ const char *arg_name;
+
+ static const struct dm_arg _args[] = {
+ {0, 2, "Invalid number of feature arguments"},
+ };
+
+ /*
+ * No feature arguments supplied.
+ */
+ if (!as->argc)
+ return 0;
+
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
+ if (r)
+ return -EINVAL;
+
+ while (argc && !r) {
+ arg_name = dm_shift_arg(as);
+ argc--;
+
+ if (!strcasecmp(arg_name, "discard_zeroes_cow"))
+ s->discard_zeroes_cow = true;
+
+ else if (!strcasecmp(arg_name, "discard_passdown_origin"))
+ s->discard_passdown_origin = true;
+
+ else {
+ ti->error = "Unrecognised feature requested";
+ r = -EINVAL;
+ break;
+ }
+ }
+
+ if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
+ /*
+ * TODO: really these are disjoint... but ti->num_discard_bios
+ * and dm_bio_get_target_bio_nr() require rigid constraints.
+ */
+ ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
/*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
+ * Construct a snapshot mapping:
+ * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
*/
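
For illustration only, a hypothetical table line exercising both new features (the device names and 4 GiB length are made up; P selects the persistent exception store and 8 is the chunk size in 512-byte sectors):

	0 8388608 snapshot /dev/vg0/base /dev/vg0/base-cow P 8 2 discard_zeroes_cow discard_passdown_origin
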
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_snapshot *s;
+ struct dm_arg_set as;
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
@@ -1110,8 +1238,8 @@
unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
- if (argc != 4) {
- ti->error = "requires exactly 4 arguments";
+ if (argc < 4) {
+ ti->error = "requires 4 or more arguments";
r = -EINVAL;
goto bad;
}
@@ -1128,6 +1256,13 @@
goto bad;
}
+ as.argc = argc;
+ as.argv = argv;
+ dm_consume_args(&as, 4);
+ r = parse_snapshot_features(&as, s, ti);
+ if (r)
+ goto bad_features;
+
origin_path = argv[0];
argv++;
argc--;
@@ -1171,10 +1306,11 @@
s->snapshot_overflowed = 0;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ spin_lock_init(&s->pe_allocation_lock);
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
s->out_of_order_tree = RB_ROOT;
- mutex_init(&s->lock);
+ init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1190,6 +1326,8 @@
goto bad_hash_tables;
}
+ init_waitqueue_head(&s->in_progress_wait);
+
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
r = PTR_ERR(s->kcopyd_client);
@@ -1210,6 +1348,8 @@
ti->private = s;
ti->num_flush_bios = num_flush_bios;
+ if (s->discard_zeroes_cow)
+ ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
@@ -1257,29 +1397,22 @@
bad_read_metadata:
unregister_snapshot(s);
-
bad_load_and_register:
mempool_exit(&s->pending_pool);
-
bad_pending_pool:
dm_kcopyd_client_destroy(s->kcopyd_client);
-
bad_kcopyd:
dm_exception_table_exit(&s->pending, pending_cache);
dm_exception_table_exit(&s->complete, exception_cache);
-
bad_hash_tables:
dm_exception_store_destroy(s->store);
-
bad_store:
dm_put_device(ti, s->cow);
-
bad_cow:
dm_put_device(ti, s->origin);
-
bad_origin:
+bad_features:
kfree(s);
-
bad:
return r;
}
@@ -1338,9 +1471,9 @@
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- mutex_lock(&snap_dest->lock);
+ down_write(&snap_dest->lock);
snap_dest->valid = 0;
- mutex_unlock(&snap_dest->lock);
+ up_write(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1371,15 +1504,60 @@
dm_exception_store_destroy(s->store);
- mutex_destroy(&s->lock);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
+ WARN_ON(s->in_progress);
+
kfree(s);
}
+static void account_start_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ s->in_progress++;
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static void account_end_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ BUG_ON(!s->in_progress);
+ s->in_progress--;
+ if (likely(s->in_progress <= cow_threshold) &&
+ unlikely(waitqueue_active(&s->in_progress_wait)))
+ wake_up_locked(&s->in_progress_wait);
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+ if (unlikely(s->in_progress > cow_threshold)) {
+ spin_lock(&s->in_progress_wait.lock);
+ if (likely(s->in_progress > cow_threshold)) {
+ /*
+ * NOTE: this throttle doesn't account for whether
+ * the caller is servicing an IO that will trigger a COW
+ * so excess throttling may result for chunks not required
+ * to be COW'd. But if cow_threshold was reached, extra
+ * throttling is unlikely to negatively impact performance.
+ */
+ DECLARE_WAITQUEUE(wait, current);
+ __add_wait_queue(&s->in_progress_wait, &wait);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&s->in_progress_wait.lock);
+ if (unlock_origins)
+ up_read(&_origins_lock);
+ io_schedule();
+ remove_wait_queue(&s->in_progress_wait, &wait);
+ return false;
+ }
+ spin_unlock(&s->in_progress_wait.lock);
+ }
+ return true;
+}
+
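
A false return from wait_for_in_progress() means the caller slept and must re-check: the wakeup in account_end_copy() fires once in_progress falls back to the threshold, but there is no fairness guarantee, so another writer may push the count over again first. The two call sites below handle this slightly differently; roughly:

	/* snapshot_map(): no outer lock held, just loop */
	while (unlikely(!wait_for_in_progress(s, false)))
		;	/* slept; re-test the counter */

	/* do_origin(): _origins_lock is dropped for us before sleeping,
	 * so the lookup has to be restarted from scratch */
	if (unlikely(!wait_for_in_progress(s, true)))
		goto again;

Passing unlock_origins=true is what keeps a throttled origin write from sleeping while holding _origins_lock.
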
/*
* Flush a list of buffers.
*/
@@ -1395,7 +1573,7 @@
}
}
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@@ -1408,7 +1586,7 @@
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- r = do_origin(s->origin, bio);
+ r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@@ -1448,6 +1626,13 @@
dm_table_event(s->ti->table);
}
+static void invalidate_snapshot(struct dm_snapshot *s, int err)
+{
+ down_write(&s->lock);
+ __invalidate_snapshot(s, err);
+ up_write(&s->lock);
+}
+
static void pending_complete(void *context, int success)
{
struct dm_snap_pending_exception *pe = context;
@@ -1456,43 +1641,63 @@
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
struct bio *full_bio = NULL;
+ struct dm_exception_table_lock lock;
int error = 0;
+ dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
+
if (!success) {
/* Read/write error - snapshot is unusable */
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -EIO);
+ invalidate_snapshot(s, -EIO);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -ENOMEM);
+ invalidate_snapshot(s, -ENOMEM);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
*e = pe->e;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid) {
+ up_read(&s->lock);
free_completed_exception(e);
error = 1;
+
goto out;
}
- /* Check for conflicting reads */
- __check_for_conflicting_io(s, pe->e.old_chunk);
-
/*
- * Add a proper exception, and remove the
- * in-flight exception from the list.
+ * Add a proper exception. After inserting the completed exception all
+ * subsequent snapshot reads to this chunk will be redirected to the
+ * COW device. This ensures that we do not starve. Moreover, as long
+ * as the pending exception exists, neither origin writes nor snapshot
+ * merging can overwrite the chunk in origin.
*/
dm_insert_exception(&s->complete, e);
+ up_read(&s->lock);
+
+ /* Wait for conflicting reads to drain */
+ if (__chunk_is_tracked(s, pe->e.old_chunk)) {
+ dm_exception_table_unlock(&lock);
+ __check_for_conflicting_io(s, pe->e.old_chunk);
+ dm_exception_table_lock(&lock);
+ }
out:
+ /* Remove the in-flight exception from the list */
dm_remove_exception(&pe->e);
+
+ dm_exception_table_unlock(&lock);
+
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
full_bio = pe->full_bio;
@@ -1500,8 +1705,6 @@
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- mutex_unlock(&s->lock);
-
/* Submit any pending write bios */
if (error) {
if (full_bio)
@@ -1575,6 +1778,7 @@
rb_link_node(&pe->out_of_order_node, parent, p);
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
+ account_end_copy(s);
}
/*
@@ -1598,6 +1802,7 @@
dest.count = src.count;
/* Hand over to kcopyd */
+ account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@@ -1617,6 +1822,7 @@
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
+ account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@@ -1638,12 +1844,43 @@
}
/*
+ * Inserts a pending exception into the pending table.
+ *
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
+ */
+static struct dm_snap_pending_exception *
+__insert_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
+{
+ pe->e.old_chunk = chunk;
+ bio_list_init(&pe->origin_bios);
+ bio_list_init(&pe->snapshot_bios);
+ pe->started = 0;
+ pe->full_bio = NULL;
+
+ spin_lock(&s->pe_allocation_lock);
+ if (s->store->type->prepare_exception(s->store, &pe->e)) {
+ spin_unlock(&s->pe_allocation_lock);
+ free_pending_exception(pe);
+ return NULL;
+ }
+
+ pe->exception_sequence = s->exception_start_sequence++;
+ spin_unlock(&s->pe_allocation_lock);
+
+ dm_insert_exception(&s->pending, &pe->e);
+
+ return pe;
+}
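
Now that the big snapshot lock no longer serializes this path, pe_allocation_lock is what keeps prepare_exception() and the exception_start_sequence++ increment atomic with respect to each other, so exception sequence numbers still match the order in which COW chunks are allocated; the completion side (exception_complete_sequence and the out-of-order tree) depends on that ordering to commit exceptions in sequence.
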
+
+/*
* Looks to see if this snapshot already has a pending exception
* for this chunk, otherwise it allocates a new one and inserts
* it into the pending table.
*
- * NOTE: a write lock must be held on snap->lock before calling
- * this.
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
*/
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
@@ -1657,22 +1894,7 @@
return pe2;
}
- pe->e.old_chunk = chunk;
- bio_list_init(&pe->origin_bios);
- bio_list_init(&pe->snapshot_bios);
- pe->started = 0;
- pe->full_bio = NULL;
-
- if (s->store->type->prepare_exception(s->store, &pe->e)) {
- free_pending_exception(pe);
- return NULL;
- }
-
- pe->exception_sequence = s->exception_start_sequence++;
-
- dm_insert_exception(&s->pending, &pe->e);
-
- return pe;
+ return __insert_pending_exception(s, pe, chunk);
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
@@ -1685,6 +1907,37 @@
(bio->bi_iter.bi_sector & s->store->chunk_mask);
}
+static void zero_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct bio *bio = context;
+ struct dm_snapshot *s = bio->bi_private;
+
+ account_end_copy(s);
+ bio->bi_status = write_err ? BLK_STS_IOERR : 0;
+ bio_endio(bio);
+}
+
+static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
+ struct bio *bio, chunk_t chunk)
+{
+ struct dm_io_region dest;
+
+ dest.bdev = s->cow->bdev;
+ dest.sector = bio->bi_iter.bi_sector;
+ dest.count = s->store->chunk_size;
+
+ account_start_copy(s);
+ WARN_ON_ONCE(bio->bi_private);
+ bio->bi_private = s;
+ dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
+}
+
+static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
+{
+ return bio->bi_iter.bi_size ==
+ (s->store->chunk_size << SECTOR_SHIFT);
+}
+
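
This helper pairs with snapshot_io_hints() further down, which sets both discard_granularity and max_discard_sectors to the chunk size: by the time a discard reaches snapshot_map() it should cover exactly one chunk, so a discard of an already-remapped chunk can be completed by dm_kcopyd_zero()-ing the corresponding COW chunk instead of being passed down.
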
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
@@ -1692,6 +1945,7 @@
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ struct dm_exception_table_lock lock;
init_tracked_chunk(bio);
@@ -1701,13 +1955,20 @@
}
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
+ dm_exception_table_lock_init(s, chunk, &lock);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
if (!s->valid)
return DM_MAPIO_KILL;
- mutex_lock(&s->lock);
+ if (bio_data_dir(bio) == WRITE) {
+ while (unlikely(!wait_for_in_progress(s, false)))
+ ; /* wait_for_in_progress() has slept */
+ }
+
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1715,10 +1976,43 @@
goto out_unlock;
}
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
+ /*
+ * passdown discard to origin (without triggering
+ * snapshot exceptions via do_origin; doing so would
+ * defeat the goal of freeing space in origin that is
+ * implied by the "discard_passdown_origin" feature)
+ */
+ bio_set_dev(bio, s->origin->bdev);
+ track_chunk(s, bio, chunk);
+ goto out_unlock;
+ }
+ /* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
+ }
+
/* If the block is already remapped - use that, else remap it */
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
remap_exception(s, e, bio, chunk);
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
+ io_overlaps_chunk(s, bio)) {
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+ zero_exception(s, e, bio, chunk);
+ r = DM_MAPIO_SUBMITTED; /* discard is not issued */
+ goto out;
+ }
+ goto out_unlock;
+ }
+
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ /*
+ * If no exception exists, complete the discard immediately;
+ * otherwise it'll trigger copy-out.
+ */
+ bio_endio(bio);
+ r = DM_MAPIO_SUBMITTED;
goto out_unlock;
}
@@ -1730,15 +2024,9 @@
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
pe = alloc_pending_exception(s);
- mutex_lock(&s->lock);
-
- if (!s->valid || s->snapshot_overflowed) {
- free_pending_exception(pe);
- r = DM_MAPIO_KILL;
- goto out_unlock;
- }
+ dm_exception_table_lock(&lock);
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
@@ -1749,13 +2037,22 @@
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
+ down_write(&s->lock);
+
if (s->store->userspace_supports_overflow) {
- s->snapshot_overflowed = 1;
- DMERR("Snapshot overflowed: Unable to allocate exception.");
+ if (s->valid && !s->snapshot_overflowed) {
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
+ }
} else
__invalidate_snapshot(s, -ENOMEM);
+ up_write(&s->lock);
+
r = DM_MAPIO_KILL;
- goto out_unlock;
+ goto out;
}
}
@@ -1763,11 +2060,12 @@
r = DM_MAPIO_SUBMITTED;
- if (!pe->started &&
- bio->bi_iter.bi_size ==
- (s->store->chunk_size << SECTOR_SHIFT)) {
+ if (!pe->started && io_overlaps_chunk(s, bio)) {
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_full_bio(pe, bio);
goto out;
}
@@ -1775,9 +2073,12 @@
bio_list_add(&pe->snapshot_bios, bio);
if (!pe->started) {
- /* this is protected by snap->lock */
+ /* this is protected by the exception table lock */
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_copy(pe);
goto out;
}
@@ -1787,7 +2088,8 @@
}
out_unlock:
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
out:
return r;
}
@@ -1821,9 +2123,15 @@
return DM_MAPIO_REMAPPED;
}
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ /* Once merging, discards no longer effect change */
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1854,12 +2162,12 @@
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- mutex_unlock(&s->lock);
- return do_origin(s->origin, bio);
+ up_write(&s->lock);
+ return do_origin(s->origin, bio, false);
}
out_unlock:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return r;
}
@@ -1891,7 +2199,7 @@
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
+ down_read(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1901,7 +2209,7 @@
"source is suspended.");
r = -EINVAL;
}
- mutex_unlock(&snap_src->lock);
+ up_read(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1947,11 +2255,11 @@
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
- mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- mutex_unlock(&snap_dest->lock);
- mutex_unlock(&snap_src->lock);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1966,9 +2274,9 @@
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->active = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -2004,11 +2312,12 @@
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
+ unsigned num_features;
switch (type) {
case STATUSTYPE_INFO:
- mutex_lock(&snap->lock);
+ down_write(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2033,7 +2342,7 @@
DMEMIT("Unknown");
}
- mutex_unlock(&snap->lock);
+ up_write(&snap->lock);
break;
@@ -2044,8 +2353,16 @@
* make sense.
*/
DMEMIT("%s %s", snap->origin->name, snap->cow->name);
- snap->store->type->status(snap->store, type, result + sz,
- maxlen - sz);
+ sz += snap->store->type->status(snap->store, type, result + sz,
+ maxlen - sz);
+ num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
+ if (num_features) {
+ DMEMIT(" %u", num_features);
+ if (snap->discard_zeroes_cow)
+ DMEMIT(" discard_zeroes_cow");
+ if (snap->discard_passdown_origin)
+ DMEMIT(" discard_passdown_origin");
+ }
break;
}
}
@@ -2064,6 +2381,26 @@
return r;
}
+static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct dm_snapshot *snap = ti->private;
+
+ if (snap->discard_zeroes_cow) {
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+
+ (void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest)
+ snap = snap_src;
+
+ /* All discards are split on chunk_size boundary */
+ limits->discard_granularity = snap->store->chunk_size;
+ limits->max_discard_sectors = snap->store->chunk_size;
+
+ up_read(&_origins_lock);
+ }
+}
/*-----------------------------------------------------------------
* Origin methods
@@ -2085,9 +2422,10 @@
int r = DM_MAPIO_REMAPPED;
struct dm_snapshot *snap;
struct dm_exception *e;
- struct dm_snap_pending_exception *pe;
+ struct dm_snap_pending_exception *pe, *pe2;
struct dm_snap_pending_exception *pe_to_start_now = NULL;
struct dm_snap_pending_exception *pe_to_start_last = NULL;
+ struct dm_exception_table_lock lock;
chunk_t chunk;
/* Do all the snapshots on this origin */
@@ -2099,52 +2437,59 @@
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- mutex_lock(&snap->lock);
-
- /* Only deal with valid and active snapshots */
- if (!snap->valid || !snap->active)
- goto next_snapshot;
-
/* Nothing to do if writing beyond end of snapshot */
if (sector >= dm_table_get_size(snap->ti->table))
- goto next_snapshot;
+ continue;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
chunk = sector_to_chunk(snap->store, sector);
+ dm_exception_table_lock_init(snap, chunk, &lock);
- /*
- * Check exception table to see if block
- * is already remapped in this snapshot
- * and trigger an exception if not.
- */
- e = dm_lookup_exception(&snap->complete, chunk);
- if (e)
+ down_read(&snap->lock);
+ dm_exception_table_lock(&lock);
+
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
goto next_snapshot;
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- mutex_unlock(&snap->lock);
- pe = alloc_pending_exception(snap);
- mutex_lock(&snap->lock);
-
- if (!snap->valid) {
- free_pending_exception(pe);
- goto next_snapshot;
- }
-
+ /*
+ * Check exception table to see if block is already
+ * remapped in this snapshot and trigger an exception
+ * if not.
+ */
e = dm_lookup_exception(&snap->complete, chunk);
- if (e) {
- free_pending_exception(pe);
+ if (e)
goto next_snapshot;
- }
- pe = __find_pending_exception(snap, pe, chunk);
- if (!pe) {
- __invalidate_snapshot(snap, -ENOMEM);
- goto next_snapshot;
+ dm_exception_table_unlock(&lock);
+ pe = alloc_pending_exception(snap);
+ dm_exception_table_lock(&lock);
+
+ pe2 = __lookup_pending_exception(snap, chunk);
+
+ if (!pe2) {
+ e = dm_lookup_exception(&snap->complete, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ goto next_snapshot;
+ }
+
+ pe = __insert_pending_exception(snap, pe, chunk);
+ if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
+
+ invalidate_snapshot(snap, -ENOMEM);
+ continue;
+ }
+ } else {
+ free_pending_exception(pe);
+ pe = pe2;
}
}
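
The dance above is the same optimistic pattern used in snapshot_map(): the bucket lock cannot be held across alloc_pending_exception(), which may sleep, so after retaking the lock the code must re-check both the pending table (pe2) and the completed table before trusting its freshly allocated exception, freeing it if someone else won the race.
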
@@ -2171,7 +2516,8 @@
}
next_snapshot:
- mutex_unlock(&snap->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
@@ -2192,15 +2538,24 @@
/*
* Called on a write from the origin driver.
*/
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
+again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
- if (o)
+ if (o) {
+ if (limit) {
+ struct dm_snapshot *s;
+ list_for_each_entry(s, &o->snapshots, list)
+ if (unlikely(!wait_for_in_progress(s, true)))
+ goto again;
+ }
+
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+ }
up_read(&_origins_lock);
return r;
@@ -2313,14 +2668,7 @@
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
- return do_origin(o->dev, bio);
-}
-
-static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
-{
- DMWARN("device does not support dax.");
- return -EIO;
+ return do_origin(o->dev, bio, true);
}
/*
@@ -2382,12 +2730,11 @@
.postsuspend = origin_postsuspend,
.status = origin_status,
.iterate_devices = origin_iterate_devices,
- .direct_access = origin_dax_direct_access,
};
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 15, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2397,11 +2744,12 @@
.resume = snapshot_resume,
.status = snapshot_status,
.iterate_devices = snapshot_iterate_devices,
+ .io_hints = snapshot_io_hints,
};
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2412,6 +2760,7 @@
.resume = snapshot_merge_resume,
.status = snapshot_status,
.iterate_devices = snapshot_iterate_devices,
+ .io_hints = snapshot_io_hints,
};
static int __init dm_snapshot_init(void)