Update Linux to v5.4.2
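
Notable changes picked up in drivers/md/dm-zoned-target.c:

- add the SPDX license identifier
- convert the dmz_bioctx and dm_chunk_work reference counters from
  atomic_t to refcount_t
- handle error pointers returned by dmz_get_chunk_buffer()
- fail BIOs with -EIO when the backing device is marked dying, and add
  dmz_bdev_is_dying() so the BIO mapping and device pass-through paths
  can bail out early
- let dmz_queue_chunk_work() return an error and requeue the BIO on
  failure
- log metadata flush failures
- use blkdev_nr_zones() for the zone count and cast to sector_t when
  computing the zone-aligned capacity
- drop the removed split_discard_bios flag
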
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd

diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 85fb2ba..d3bcc41 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
@@ -19,7 +20,7 @@
struct dmz_target *target;
struct dm_zone *zone;
struct bio *bio;
- atomic_t ref;
+ refcount_t ref;
};
/*
@@ -27,7 +28,7 @@
*/
struct dm_chunk_work {
struct work_struct work;
- atomic_t refcount;
+ refcount_t refcount;
struct dmz_target *target;
unsigned int chunk;
struct bio_list bio_list;
@@ -80,7 +81,7 @@
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
- if (atomic_dec_and_test(&bioctx->ref)) {
+ if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
if (zone) {
@@ -131,7 +132,7 @@
bio_advance(bio, clone->bi_iter.bi_size);
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(clone);
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
@@ -277,8 +278,8 @@
/* Get the buffer zone. One will be allocated if needed */
bzone = dmz_get_chunk_buffer(zmd, zone);
- if (!bzone)
- return -ENOSPC;
+ if (IS_ERR(bzone))
+ return PTR_ERR(bzone);
if (dmz_is_readonly(bzone))
return -EROFS;
@@ -389,6 +390,11 @@
dmz_lock_metadata(zmd);
+ if (dmz->dev->flags & DMZ_BDEV_DYING) {
+ ret = -EIO;
+ goto out;
+ }
+
/*
* Get the data zone mapping the chunk. There may be no
* mapping for read and discard. If a mapping is obtained,
@@ -441,7 +447,7 @@
*/
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
- atomic_inc(&cw->refcount);
+ refcount_inc(&cw->refcount);
}
/*
@@ -450,7 +456,7 @@
*/
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
- if (atomic_dec_and_test(&cw->refcount)) {
+ if (refcount_dec_and_test(&cw->refcount)) {
WARN_ON(!bio_list_empty(&cw->bio_list));
radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
kfree(cw);
@@ -493,6 +499,8 @@
/* Flush dirty metadata blocks */
ret = dmz_flush_metadata(dmz->metadata);
+ if (ret)
+ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
/* Process queued flush requests */
while (1) {
@@ -513,25 +521,27 @@
* Get a chunk work and start it to process a new BIO.
* If the BIO chunk has no work yet, create one.
*/
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
struct dm_chunk_work *cw;
+ int ret = 0;
mutex_lock(&dmz->chunk_lock);
/* Get the BIO chunk work. If one is not active yet, create one */
cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
if (!cw) {
- int ret;
/* Create a new chunk work */
cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
- if (!cw)
+ if (unlikely(!cw)) {
+ ret = -ENOMEM;
goto out;
+ }
INIT_WORK(&cw->work, dmz_chunk_work);
- atomic_set(&cw->refcount, 0);
+ refcount_set(&cw->refcount, 0);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
@@ -539,7 +549,6 @@
ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
if (unlikely(ret)) {
kfree(cw);
- cw = NULL;
goto out;
}
}
@@ -547,10 +556,38 @@
bio_list_add(&cw->bio_list, bio);
dmz_get_chunk_work(cw);
+ dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
out:
mutex_unlock(&dmz->chunk_lock);
+ return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+ struct gendisk *disk;
+
+ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+ disk = dmz_dev->bdev->bd_disk;
+ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+ dmz_dev_warn(dmz_dev, "Backing device queue dying");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ } else if (disk->fops->check_events) {
+ if (disk->fops->check_events(disk, 0) &
+ DISK_EVENT_MEDIA_CHANGE) {
+ dmz_dev_warn(dmz_dev, "Backing device offline");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ }
+ }
+ }
+
+ return dmz_dev->flags & DMZ_BDEV_DYING;
}
/*
@@ -564,6 +601,10 @@
sector_t sector = bio->bi_iter.bi_sector;
unsigned int nr_sectors = bio_sectors(bio);
sector_t chunk_sector;
+ int ret;
+
+ if (dmz_bdev_is_dying(dmz->dev))
+ return DM_MAPIO_KILL;
dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -584,7 +625,7 @@
bioctx->target = dmz;
bioctx->zone = NULL;
bioctx->bio = bio;
- atomic_set(&bioctx->ref, 1);
+ refcount_set(&bioctx->ref, 1);
/* Set the BIO pending in the flush list */
if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -601,8 +642,14 @@
dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
/* Now ready to handle this BIO */
- dmz_reclaim_bio_acc(dmz->reclaim);
- dmz_queue_chunk_work(dmz, bio);
+ ret = dmz_queue_chunk_work(dmz, bio);
+ if (ret) {
+ dmz_dev_debug(dmz->dev,
+ "BIO op %d, can't process chunk %llu, err %i\n",
+ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+ ret);
+ return DM_MAPIO_REQUEUE;
+ }
return DM_MAPIO_SUBMITTED;
}
@@ -643,7 +690,8 @@
q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+ aligned_capacity = dev->capacity &
+ ~((sector_t)blk_queue_zone_sectors(q) - 1);
if (ti->begin ||
((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported";
@@ -657,8 +705,7 @@
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
- dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
- >> dev->zone_nr_sectors_shift;
+ dev->nr_zones = blkdev_nr_zones(dev->bdev);
dmz->dev = dev;
@@ -728,7 +775,6 @@
ti->per_io_data_size = sizeof(struct dmz_bioctx);
ti->flush_supported = true;
ti->discards_supported = true;
- ti->split_discard_bios = true;
/* The exposed capacity is the number of chunks that can be mapped */
ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
@@ -856,6 +902,9 @@
{
struct dmz_target *dmz = ti->private;
+ if (dmz_bdev_is_dying(dmz->dev))
+ return -ENODEV;
+
*bdev = dmz->dev->bdev;
return 0;