Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 761d438..cc38765 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -58,18 +58,16 @@
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
+#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
+#include <linux/part_stat.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"
-#ifndef MODULE
-static void autostart_arrays(int part);
-#endif
-
/* pers_list is a list of registered personalities protected
* by pers_lock.
* pers_lock does extra service to protect accesses to
@@ -87,6 +85,7 @@
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
+static struct workqueue_struct *md_rdev_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
@@ -98,6 +97,8 @@
* count by 2 for every hour elapsed between read errors.
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
+/* Default safemode delay: 200 msec */
+#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 + 1)
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
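Note: the new DEFAULT_SAFEMODE_DELAY macro names the 200 msec default that md_run() previously open-coded (see the safemode_delay hunk further down); the "+ 1" rounds up by one tick so the delay never truncates to zero jiffies. A quick standalone check of the arithmetic, assuming HZ == 250 (the HZ value here is illustrative):

    /* Sanity check of the jiffies math, under an assumed HZ of 250. */
    #define HZ 250
    #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 + 1)
    _Static_assert(DEFAULT_SAFEMODE_DELAY == 51,
                   "200 msec is 50 ticks at HZ=250, plus one for rounding");
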
@@ -125,76 +126,167 @@
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static int rdev_init_wb(struct md_rdev *rdev)
+static void rdev_uninit_serial(struct md_rdev *rdev)
{
- if (rdev->bdev->bd_queue->nr_hw_queues == 1)
+ if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
+ return;
+
+ kvfree(rdev->serial);
+ rdev->serial = NULL;
+}
+
+static void rdevs_uninit_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev)
+ rdev_uninit_serial(rdev);
+}
+
+static int rdev_init_serial(struct md_rdev *rdev)
+{
+ /* serial_nums equals BARRIER_BUCKETS_NR */
+ int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
+ struct serial_in_rdev *serial = NULL;
+
+ if (test_bit(CollisionCheck, &rdev->flags))
return 0;
- spin_lock_init(&rdev->wb_list_lock);
- INIT_LIST_HEAD(&rdev->wb_list);
- init_waitqueue_head(&rdev->wb_io_wait);
- set_bit(WBCollisionCheck, &rdev->flags);
+ serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+ GFP_KERNEL);
+ if (!serial)
+ return -ENOMEM;
- return 1;
+ for (i = 0; i < serial_nums; i++) {
+ struct serial_in_rdev *serial_tmp = &serial[i];
+
+ spin_lock_init(&serial_tmp->serial_lock);
+ serial_tmp->serial_rb = RB_ROOT_CACHED;
+ init_waitqueue_head(&serial_tmp->serial_io_wait);
+ }
+
+ rdev->serial = serial;
+ set_bit(CollisionCheck, &rdev->flags);
+
+ return 0;
+}
+
+static int rdevs_init_serial(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ int ret = 0;
+
+ rdev_for_each(rdev, mddev) {
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ break;
+ }
+
+ /* Free all resources if the pool does not exist */
+ if (ret && !mddev->serial_info_pool)
+ rdevs_uninit_serial(mddev);
+
+ return ret;
}
/*
- * Create wb_info_pool if rdev is the first multi-queue device flaged
- * with writemostly, also write-behind mode is enabled.
+ * An rdev needs the serialization machinery if it meets both conditions:
+ * 1. it is a multi-queue device flagged with writemostly.
+ * 2. write-behind mode is enabled.
*/
-void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
- bool is_suspend)
+static int rdev_need_serial(struct md_rdev *rdev)
{
- if (mddev->bitmap_info.max_write_behind == 0)
+ return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
+ rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
+ test_bit(WriteMostly, &rdev->flags));
+}
+
+/*
+ * Init resources for the rdev(s), then create serial_info_pool if:
+ * 1. rdev is the first device which returns true from rdev_need_serial.
+ * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
+ */
+void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
+{
+ int ret = 0;
+
+ if (rdev && !rdev_need_serial(rdev) &&
+ !test_bit(CollisionCheck, &rdev->flags))
return;
- if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
+ if (!is_suspend)
+ mddev_suspend(mddev);
+
+ if (!rdev)
+ ret = rdevs_init_serial(mddev);
+ else
+ ret = rdev_init_serial(rdev);
+ if (ret)
+ goto abort;
+
+ if (mddev->serial_info_pool == NULL) {
+ /*
+ * we are already in a memalloc noio scope,
+ * entered by mddev_suspend()
+ */
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
+ if (!mddev->serial_info_pool) {
+ rdevs_uninit_serial(mddev);
+ pr_err("can't alloc memory pool for serialization\n");
+ }
+ }
+
+abort:
+ if (!is_suspend)
+ mddev_resume(mddev);
+}
+
+/*
+ * Free resources from the rdev(s), and destroy serial_info_pool when:
+ * 1. rdev is the last device flagged with CollisionCheck.
+ * 2. the bitmap is destroyed while the policy is not enabled.
+ * 3. the policy is being disabled and no rdev still needs the pool.
+ */
+void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
+ bool is_suspend)
+{
+ if (rdev && !test_bit(CollisionCheck, &rdev->flags))
return;
- if (mddev->wb_info_pool == NULL) {
- unsigned int noio_flag;
+ if (mddev->serial_info_pool) {
+ struct md_rdev *temp;
+ int num = 0; /* used to track if other rdevs need the pool */
if (!is_suspend)
mddev_suspend(mddev);
- noio_flag = memalloc_noio_save();
- mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
- memalloc_noio_restore(noio_flag);
- if (!mddev->wb_info_pool)
- pr_err("can't alloc memory pool for writemostly\n");
+ rdev_for_each(temp, mddev) {
+ if (!rdev) {
+ if (!mddev->serialize_policy ||
+ !rdev_need_serial(temp))
+ rdev_uninit_serial(temp);
+ else
+ num++;
+ } else if (temp != rdev &&
+ test_bit(CollisionCheck, &temp->flags))
+ num++;
+ }
+
+ if (rdev)
+ rdev_uninit_serial(rdev);
+
+ if (num)
+ pr_info("The mempool could be used by other devices\n");
+ else {
+ mempool_destroy(mddev->serial_info_pool);
+ mddev->serial_info_pool = NULL;
+ }
if (!is_suspend)
mddev_resume(mddev);
}
}
-EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
-
-/*
- * destroy wb_info_pool if rdev is the last device flaged with WBCollisionCheck.
- */
-static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
-{
- if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
- return;
-
- if (mddev->wb_info_pool) {
- struct md_rdev *temp;
- int num = 0;
-
- /*
- * Check if other rdevs need wb_info_pool.
- */
- rdev_for_each(temp, mddev)
- if (temp != rdev &&
- test_bit(WBCollisionCheck, &temp->flags))
- num++;
- if (!num) {
- mddev_suspend(rdev->mddev);
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
- mddev_resume(rdev->mddev);
- }
- }
-}
static struct ctl_table_header *raid_table_header;
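Note: the create/destroy pair above is meant to run with the array quiesced. With is_suspend == false the helpers suspend and resume the array themselves; is_suspend == true means the caller already owns that transition. A minimal caller sketch (a hypothetical helper, mirroring serialize_policy_store() later in this patch; it assumes the struct mddev declarations from md.h and that reconfig_mutex is held, as mddev_suspend() requires):

    /* Toggle request-ordering serialization for every member at once
     * (rdev == NULL selects all rdevs). Hypothetical helper. */
    static void example_toggle_serialization(struct mddev *mddev, bool on)
    {
            mddev_suspend(mddev);
            if (on)
                    mddev_create_serial_pool(mddev, NULL, true);
            else
                    mddev_destroy_serial_pool(mddev, NULL, true);
            mddev->serialize_policy = on;
            mddev_resume(mddev);
    }
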
@@ -236,8 +328,6 @@
{ }
};
-static const struct block_device_operations md_fops;
-
static int start_readonly;
/*
@@ -369,11 +459,11 @@
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
const int sgrp = op_stat_group(bio_op(bio));
- struct mddev *mddev = q->queuedata;
+ struct mddev *mddev = bio->bi_disk->private_data;
unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL) {
@@ -386,7 +476,7 @@
return BLK_QC_T_NONE;
}
- blk_queue_split(q, &bio);
+ blk_queue_split(&bio);
if (mddev->ro == 1 && unlikely(rw == WRITE)) {
if (bio_sectors(bio) != 0)
@@ -435,11 +525,15 @@
wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
del_timer_sync(&mddev->safemode_timer);
+ /* restrict memory-reclaim I/O while the raid array is suspended */
+ mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
+ /* leave the memalloc scope entered in mddev_suspend() */
+ memalloc_noio_restore(mddev->noio_flag);
lockdep_assert_held(&mddev->reconfig_mutex);
if (--mddev->suspended)
return;
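Note: the suspend/resume hunks above turn the whole suspended window into a memalloc NOIO scope, so any allocation made while the array is quiesced is implicitly GFP_NOIO and reclaim cannot recurse into I/O against the array (this is also why mddev_create_serial_pool() no longer saves the flag itself). The noio_flag member lands in struct mddev via the matching md.h hunk, which is not shown here. The pattern in isolation, as a sketch:

    #include <linux/sched/mm.h>

    /* Every allocation between save and restore implicitly gets
     * GFP_NOIO, preventing reclaim from issuing I/O. */
    static void example_noio_scope(void)
    {
            unsigned int noio_flag = memalloc_noio_save();
            /* ... allocate freely here ... */
            memalloc_noio_restore(noio_flag);
    }
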
@@ -452,26 +546,6 @@
}
EXPORT_SYMBOL_GPL(mddev_resume);
-int mddev_congested(struct mddev *mddev, int bits)
-{
- struct md_personality *pers = mddev->pers;
- int ret = 0;
-
- rcu_read_lock();
- if (mddev->suspended)
- ret = 1;
- else if (pers && pers->congested)
- ret = pers->congested(mddev, bits);
- rcu_read_unlock();
- return ret;
-}
-EXPORT_SYMBOL_GPL(mddev_congested);
-static int md_congested(void *data, int bits)
-{
- struct mddev *mddev = data;
- return mddev_congested(mddev, bits);
-}
-
/*
* Generic flush handling for md
*/
@@ -773,7 +847,13 @@
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
+ if (mddev->sysfs_completed)
+ sysfs_put(mddev->sysfs_completed);
+ if (mddev->sysfs_degraded)
+ sysfs_put(mddev->sysfs_degraded);
mddev->sysfs_action = NULL;
+ mddev->sysfs_completed = NULL;
+ mddev->sysfs_degraded = NULL;
}
}
mddev->sysfs_active = 0;
@@ -875,7 +955,8 @@
struct mddev *mddev = rdev->mddev;
if (bio->bi_status) {
- pr_err("md: super_written gets error=%d\n", bio->bi_status);
+ pr_err("md: %s gets error=%d\n", __func__,
+ blk_status_to_errno(bio->bi_status));
md_error(mddev, rdev);
if (!test_bit(Faulty, &rdev->flags)
&& (bio->bi_opf & MD_FAILFAST)) {
@@ -2090,6 +2171,24 @@
sb->sb_csum = calc_sb_1_csum(sb);
}
+static sector_t super_1_choose_bm_space(sector_t dev_size)
+{
+ sector_t bm_space;
+
+ /* If the device is bigger than 8GiB, save 64K for bitmap
+ * usage; if bigger than 200GiB, save 128K
+ */
+ if (dev_size < 64*2)
+ bm_space = 0;
+ else if (dev_size - 64*2 >= 200*1024*1024*2)
+ bm_space = 128*2;
+ else if (dev_size - 4*2 > 8*1024*1024*2)
+ bm_space = 64*2;
+ else
+ bm_space = 4*2;
+ return bm_space;
+}
+
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
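The thresholds in super_1_choose_bm_space() are easier to see with concrete numbers: all values are 512-byte sectors, so 2 sectors = 1 KiB and 2*1024*1024 sectors = 1 GiB. A standalone userspace restatement of the helper, for checking the arithmetic (not kernel code):

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Mirror of super_1_choose_bm_space(); units are 512-byte sectors. */
    static sector_t choose_bm_space(sector_t dev_size)
    {
            if (dev_size < 64*2)
                    return 0;               /* tiny device: no reserve */
            if (dev_size - 64*2 >= 200ULL*1024*1024*2)
                    return 128*2;           /* > 200 GiB: reserve 128K */
            if (dev_size - 4*2 > 8ULL*1024*1024*2)
                    return 64*2;            /* > 8 GiB: reserve 64K */
            return 4*2;                     /* otherwise: reserve 4K */
    }

    int main(void)
    {
            assert(choose_bm_space(100) == 0);
            assert(choose_bm_space(16ULL*1024*1024*2) == 64*2);   /* 16 GiB */
            assert(choose_bm_space(300ULL*1024*1024*2) == 128*2); /* 300 GiB */
            return 0;
    }
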
@@ -2110,10 +2209,20 @@
return 0;
} else {
/* minor version 0; superblock after data */
- sector_t sb_start;
- sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
+ sector_t sb_start, bm_space;
+ sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
+
+ /* 8K is for superblock */
+ sb_start = dev_size - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
- max_sectors = rdev->sectors + sb_start - rdev->sb_start;
+
+ bm_space = super_1_choose_bm_space(dev_size);
+
+ /* The space usable for data must exclude the superblock,
+ * the bitmap space, and the bad-block space (4K)
+ */
+ max_sectors = sb_start - bm_space - 4*2;
+
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
@@ -2211,8 +2320,7 @@
test_bit(Journal, &rdev2->flags) ||
rdev2->raid_disk == -1)
continue;
- if (rdev->bdev->bd_contains ==
- rdev2->bdev->bd_contains) {
+ if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
rcu_read_unlock();
return 1;
}
@@ -2362,15 +2470,19 @@
pr_debug("md: bind<%s>\n", b);
if (mddev->raid_disks)
- mddev_create_wb_pool(mddev, rdev, false);
+ mddev_create_serial_pool(mddev, rdev, false);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
- if (sysfs_create_link(&rdev->kobj, ko, "block"))
- /* failure here is OK */;
+ /* failure here is OK */
+ err = sysfs_create_link(&rdev->kobj, ko, "block");
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
+ rdev->sysfs_unack_badblocks =
+ sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
+ rdev->sysfs_badblocks =
+ sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
@@ -2386,7 +2498,7 @@
return err;
}
-static void md_delayed_delete(struct work_struct *ws)
+static void rdev_delayed_delete(struct work_struct *ws)
{
struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
kobject_del(&rdev->kobj);
@@ -2400,20 +2512,24 @@
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
+ sysfs_put(rdev->sysfs_unack_badblocks);
+ sysfs_put(rdev->sysfs_badblocks);
rdev->sysfs_state = NULL;
+ rdev->sysfs_unack_badblocks = NULL;
+ rdev->sysfs_badblocks = NULL;
rdev->badblocks.count = 0;
/* We need to delay this, otherwise we can deadlock when
* writing to 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
*/
synchronize_rcu();
- INIT_WORK(&rdev->del_work, md_delayed_delete);
+ INIT_WORK(&rdev->del_work, rdev_delayed_delete);
kobject_get(&rdev->kobj);
- queue_work(md_misc_wq, &rdev->del_work);
+ queue_work(md_rdev_misc_wq, &rdev->del_work);
}
/*
@@ -2425,12 +2541,12 @@
{
int err = 0;
struct block_device *bdev;
- char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
- pr_warn("md: could not open %s.\n", __bdevname(dev, b));
+ pr_warn("md: could not open device unknown-block(%u,%u).\n",
+ MAJOR(dev), MINOR(dev));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
@@ -2749,7 +2865,7 @@
goto repeat;
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
rdev_for_each(rdev, mddev) {
if (test_and_clear_bit(FaultRecorded, &rdev->flags))
@@ -2882,7 +2998,11 @@
* -write_error - clears WriteErrorSeen
* {,-}failfast - set/clear FailFast
*/
+
+ struct mddev *mddev = rdev->mddev;
int err = -EINVAL;
+ bool need_update_sb = false;
+
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
@@ -2897,7 +3017,6 @@
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
- struct mddev *mddev = rdev->mddev;
err = 0;
if (mddev_is_clustered(mddev))
err = md_cluster_ops->remove_disk(mddev, rdev);
@@ -2913,11 +3032,13 @@
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
- mddev_create_wb_pool(rdev->mddev, rdev, false);
+ mddev_create_serial_pool(rdev->mddev, rdev, false);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
- mddev_destroy_wb_pool(rdev->mddev, rdev);
+ mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
@@ -2943,9 +3064,11 @@
err = 0;
} else if (cmd_match(buf, "failfast")) {
set_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-failfast")) {
clear_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
@@ -3024,6 +3147,8 @@
clear_bit(ExternalBbl, &rdev->flags);
err = 0;
}
+ if (need_update_sb)
+ md_update_sb(mddev, 1);
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
@@ -3123,15 +3248,14 @@
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
- err = rdev->mddev->pers->
- hot_add_disk(rdev->mddev, rdev);
+ err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
if (err) {
rdev->raid_disk = -1;
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
- if (sysfs_link_rdev(rdev->mddev, rdev))
- /* failure here is OK */;
+ /* failure here is OK */
+ sysfs_link_rdev(rdev->mddev, rdev);
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
@@ -3956,6 +4080,8 @@
pr_warn("md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
}
if (oldpers->sync_request != NULL &&
pers->sync_request == NULL) {
@@ -4003,7 +4129,7 @@
mddev_resume(mddev);
if (!mddev->thread)
md_update_sb(mddev, 1);
- sysfs_notify(&mddev->kobj, NULL, "level");
+ sysfs_notify_dirent_safe(mddev->sysfs_level);
md_new_event(mddev);
rv = len;
out_unlock:
@@ -4116,6 +4242,14 @@
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
+uuid_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%pU\n", mddev->uuid);
+}
+static struct md_sysfs_entry md_uuid =
+__ATTR(uuid, S_IRUGO, uuid_show, NULL);
+
+static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
@@ -4300,7 +4434,6 @@
static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
-static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
@@ -4446,6 +4579,20 @@
return -EINVAL;
}
+/* need to ensure rdev_delayed_delete() has completed */
+static void flush_rdev_wq(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (work_pending(&rdev->del_work)) {
+ flush_workqueue(md_rdev_misc_wq);
+ break;
+ }
+ rcu_read_unlock();
+}
+
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
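Note: flush_workqueue() waits for every item on the queue, so a single flush after the first pending hit drains everything; that is why flush_rdev_wq() breaks out of the rcu loop immediately. The shape of the pattern in isolation:

    #include <linux/workqueue.h>

    /* Only pay for draining the shared queue when this object
     * actually has work outstanding. */
    static void flush_if_pending(struct work_struct *work,
                                 struct workqueue_struct *wq)
    {
            if (work_pending(work))
                    flush_workqueue(wq);
    }
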
@@ -4473,8 +4620,7 @@
minor != MINOR(dev))
return -EOVERFLOW;
- flush_workqueue(md_misc_wq);
-
+ flush_rdev_wq(mddev);
err = mddev_lock(mddev);
if (err)
return err;
@@ -4712,7 +4858,8 @@
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
mddev_lock(mddev) == 0) {
- flush_workqueue(md_misc_wq);
+ if (work_pending(&mddev->del_work))
+ flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
@@ -4742,7 +4889,7 @@
}
if (err)
return err;
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -5217,7 +5364,7 @@
mddev->array_sectors = sectors;
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
}
}
mddev_unlock(mddev);
@@ -5302,10 +5449,62 @@
__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
fail_last_dev_store);
+static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
+{
+ if (mddev->pers == NULL || (mddev->pers->level != 1))
+ return sprintf(page, "n/a\n");
+ else
+ return sprintf(page, "%d\n", mddev->serialize_policy);
+}
+
+/*
+ * Setting serialize_policy to true enforces that write I/O is not
+ * reordered for raid1.
+ */
+static ssize_t
+serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ int err;
+ bool value;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ if (value == mddev->serialize_policy)
+ return len;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+ pr_err("md: serialize_policy is only effective for raid1\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ mddev_suspend(mddev);
+ if (value)
+ mddev_create_serial_pool(mddev, NULL, true);
+ else
+ mddev_destroy_serial_pool(mddev, NULL, true);
+ mddev->serialize_policy = value;
+ mddev_resume(mddev);
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
+}
+
+static struct md_sysfs_entry md_serialize_policy =
+__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
+ serialize_policy_store);
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
+ &md_uuid.attr,
&md_chunk_size.attr,
&md_size.attr,
&md_resync_start.attr,
@@ -5319,6 +5518,7 @@
&max_corr_read_errors.attr,
&md_consistency_policy.attr,
&md_fail_last_dev.attr,
+ &md_serialize_policy.attr,
NULL,
};
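With md_serialize_policy wired into md_default_attrs above, the knob is an ordinary sysfs file under the array's md directory. A minimal sketch of enabling it from userspace, assuming an array named md0 (the path and value are illustrative):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/block/md0/md/serialize_policy", "w");
            if (!f)
                    return 1;
            fputs("1\n", f);  /* enforce in-order writes on this raid1 */
            return fclose(f) ? 1 : 0;
    }
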
@@ -5396,6 +5596,8 @@
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
+ if (mddev->sysfs_level)
+ sysfs_put(mddev->sysfs_level);
if (mddev->gendisk)
del_gendisk(mddev->gendisk);
@@ -5503,12 +5705,10 @@
mddev->hold_active = UNTIL_STOP;
error = -ENOMEM;
- mddev->queue = blk_alloc_queue(GFP_KERNEL);
+ mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
if (!mddev->queue)
goto abort;
- mddev->queue->queuedata = mddev;
- blk_queue_make_request(mddev->queue, md_make_request);
blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
@@ -5534,11 +5734,8 @@
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
- /* As soon as we call add_disk(), another thread could get
- * through to md_open, so make sure it doesn't get too far
- */
- mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
@@ -5553,12 +5750,12 @@
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
pr_debug("pointless warning\n");
- mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
+ mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
}
mddev_put(mddev);
return error;
@@ -5742,8 +5939,8 @@
rdev_for_each(rdev, mddev)
rdev_for_each(rdev2, mddev) {
if (rdev < rdev2 &&
- rdev->bdev->bd_contains ==
- rdev2->bdev->bd_contains) {
+ rdev->bdev->bd_disk ==
+ rdev2->bdev->bd_disk) {
pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
mdname(mddev),
bdevname(rdev->bdev,b),
@@ -5794,18 +5991,18 @@
goto bitmap_abort;
if (mddev->bitmap_info.max_write_behind > 0) {
- bool creat_pool = false;
+ bool create_pool = false;
rdev_for_each(rdev, mddev) {
if (test_bit(WriteMostly, &rdev->flags) &&
- rdev_init_wb(rdev))
- creat_pool = true;
+ rdev_init_serial(rdev))
+ create_pool = true;
}
- if (creat_pool && mddev->wb_info_pool == NULL) {
- mddev->wb_info_pool =
- mempool_create_kmalloc_pool(NR_WB_INFOS,
- sizeof(struct wb_info));
- if (!mddev->wb_info_pool) {
+ if (create_pool && mddev->serial_info_pool == NULL) {
+ mddev->serial_info_pool =
+ mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
+ sizeof(struct serial_info));
+ if (!mddev->serial_info_pool) {
err = -ENOMEM;
goto bitmap_abort;
}
@@ -5828,8 +6025,6 @@
blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
- mddev->queue->backing_dev_info->congested_data = mddev;
- mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5837,6 +6032,8 @@
pr_warn("md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
+ mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -5846,7 +6043,7 @@
if (mddev_is_clustered(mddev))
mddev->safemode_delay = 0;
else
- mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
+ mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
mddev->in_sync = 1;
smp_wmb();
spin_lock(&mddev->lock);
@@ -5883,7 +6080,7 @@
}
EXPORT_SYMBOL_GPL(md_run);
-static int do_md_run(struct mddev *mddev)
+int do_md_run(struct mddev *mddev)
{
int err;
@@ -5907,13 +6104,13 @@
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
clear_bit(MD_NOT_READY, &mddev->flags);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
out:
clear_bit(MD_NOT_READY, &mddev->flags);
return err;
@@ -6028,7 +6225,8 @@
static void __md_stop_writes(struct mddev *mddev)
{
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- flush_workqueue(md_misc_wq);
+ if (work_pending(&mddev->del_work))
+ flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
@@ -6050,8 +6248,9 @@
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
- mempool_destroy(mddev->wb_info_pool);
- mddev->wb_info_pool = NULL;
+ /* disable the policy so rdevs free their serialization resources */
+ mddev->serialize_policy = 0;
+ mddev_destroy_serial_pool(mddev, NULL, true);
}
void md_stop_writes(struct mddev *mddev)
@@ -6080,7 +6279,8 @@
md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
- flush_workqueue(md_misc_wq);
+ if (mddev->event_work.func)
+ flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
@@ -6211,7 +6411,6 @@
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -6223,7 +6422,7 @@
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
- revalidate_disk(disk);
+ revalidate_disk_size(disk, true);
if (mddev->ro)
mddev->ro = 0;
@@ -6514,7 +6713,7 @@
return 0;
}
-static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
+int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
@@ -6560,7 +6759,7 @@
}
/*
- * add_new_disk can be used once the array is assembled
+ * md_add_new_disk can be used once the array is assembled
* to add "hot spares". They must already have a superblock
* written
*/
@@ -6673,7 +6872,7 @@
return err;
}
- /* otherwise, add_new_disk is only allowed
+ /* otherwise, md_add_new_disk is only allowed
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
@@ -6920,7 +7119,7 @@
}
/*
- * set_array_info is used two different ways
+ * md_set_array_info is used two different ways
* The original usage is when creating a new array.
* In this usage, raid_disks is > 0 and it together with
* level, size, not_persistent,layout,chunksize determine the
@@ -6932,9 +7131,8 @@
* The minor and patch _version numbers are also kept incase the
* super_block handler wishes to interpret them.
*/
-static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
+int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
{
-
if (info->raid_disks == 0) {
/* just setting version number for superblock loading */
if (info->major_version < 0 ||
@@ -7056,7 +7254,7 @@
md_cluster_ops->update_size(mddev, old_dev_sectors);
else if (mddev->queue) {
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
}
}
return rv;
@@ -7223,6 +7421,8 @@
mddev->bitmap_info.nodes = 0;
md_cluster_ops->leave(mddev);
+ module_put(md_cluster_mod);
+ mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
mddev_suspend(mddev);
md_bitmap_destroy(mddev);
@@ -7283,7 +7483,6 @@
case GET_DISK_INFO:
case HOT_ADD_DISK:
case HOT_REMOVE_DISK:
- case RAID_AUTORUN:
case RAID_VERSION:
case RESTART_ARRAY_RW:
case RUN_ARRAY:
@@ -7329,13 +7528,6 @@
case RAID_VERSION:
err = get_version(argp);
goto out;
-
-#ifndef MODULE
- case RAID_AUTORUN:
- err = 0;
- autostart_arrays(arg);
- goto out;
-#endif
default:;
}
@@ -7376,9 +7568,8 @@
}
- if (cmd == ADD_NEW_DISK)
- /* need to ensure md_delayed_delete() has completed */
- flush_workqueue(md_misc_wq);
+ if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
+ flush_rdev_wq(mddev);
if (cmd == HOT_REMOVE_DISK)
/* need to ensure recovery thread has run */
@@ -7438,7 +7629,7 @@
err = -EBUSY;
goto unlock;
}
- err = set_array_info(mddev, &info);
+ err = md_set_array_info(mddev, &info);
if (err) {
pr_warn("md: couldn't set array info. %d\n", err);
goto unlock;
@@ -7492,7 +7683,7 @@
/* Need to clear read-only for this */
break;
else
- err = add_new_disk(mddev, &info);
+ err = md_add_new_disk(mddev, &info);
goto unlock;
}
break;
@@ -7560,7 +7751,7 @@
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else
- err = add_new_disk(mddev, &info);
+ err = md_add_new_disk(mddev, &info);
goto unlock;
}
@@ -7655,7 +7846,7 @@
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_change(bdev);
+ bdev_check_media_change(bdev);
out:
if (err)
mddev_put(mddev);
@@ -7671,23 +7862,21 @@
mddev_put(mddev);
}
-static int md_media_changed(struct gendisk *disk)
+static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
{
struct mddev *mddev = disk->private_data;
+ unsigned int ret = 0;
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
-{
- struct mddev *mddev = disk->private_data;
-
+ if (mddev->changed)
+ ret = DISK_EVENT_MEDIA_CHANGE;
mddev->changed = 0;
- return 0;
+ return ret;
}
-static const struct block_device_operations md_fops =
+
+const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
+ .submit_bio = md_submit_bio,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
@@ -7695,8 +7884,7 @@
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
+ .check_events = md_check_events,
};
static int md_thread(void *arg)
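The removed ->media_changed/->revalidate_disk pair gives way to the polling model: md_open() above now calls bdev_check_media_change(), which invokes ->check_events and invalidates the block device's cached pages when DISK_EVENT_MEDIA_CHANGE is reported (hence the DISK_EVENT_MEDIA_CHANGE bit newly set on the gendisk in md_alloc()). A sketch of that consumer half, not md code, just the shape of the v5.10 API:

    #include <linux/blkdev.h>

    /* Hypothetical opener using the polling API this hunk adopts. */
    static int example_open(struct block_device *bdev, fmode_t mode)
    {
            if (bdev_check_media_change(bdev))
                    pr_debug("example: media changed\n");
            return 0;
    }
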
@@ -8168,13 +8356,12 @@
return mask;
}
-static const struct file_operations md_seq_fops = {
- .owner = THIS_MODULE,
- .open = md_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
- .poll = mdstat_poll,
+static const struct proc_ops mdstat_proc_ops = {
+ .proc_open = md_seq_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+ .proc_poll = mdstat_poll,
};
int register_md_personality(struct md_personality *p)
@@ -8225,6 +8412,7 @@
int md_setup_cluster(struct mddev *mddev, int nodes)
{
+ int ret;
if (!md_cluster_ops)
request_module("md-cluster");
spin_lock(&pers_lock);
@@ -8236,7 +8424,10 @@
}
spin_unlock(&pers_lock);
- return md_cluster_ops->join(mddev, nodes);
+ ret = md_cluster_ops->join(mddev, nodes);
+ if (!ret)
+ mddev->safemode_delay = 0;
+ return ret;
}
void md_cluster_stop(struct mddev *mddev)
@@ -8256,7 +8447,7 @@
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
- struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+ struct gendisk *disk = rdev->bdev->bd_disk;
curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
@@ -8637,7 +8828,7 @@
} else
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
md_new_event(mddev);
update_time = jiffies;
@@ -8665,7 +8856,7 @@
mddev->recovery_cp = j;
update_time = jiffies;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
while (j >= mddev->resync_max &&
@@ -8772,7 +8963,7 @@
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
- sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
@@ -8829,7 +9020,7 @@
mddev_unlock(mddev);
if (!mddev_is_clustered(mddev)) {
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
}
}
@@ -8902,7 +9093,7 @@
}
if (removed && mddev->kobj.sd)
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
if (this && removed)
goto no_add;
@@ -8929,10 +9120,9 @@
rdev->recovery_offset = 0;
}
- if (mddev->pers->
- hot_add_disk(mddev, rdev) == 0) {
- if (sysfs_link_rdev(mddev, rdev))
- /* failure here is OK */;
+ if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
+ /* failure here is OK */
+ sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
md_new_event(mddev);
@@ -9186,8 +9376,7 @@
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
- sysfs_notify(&mddev->kobj, NULL,
- "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@ -9277,8 +9466,7 @@
if (rv == 0) {
/* Make sure they get written out promptly */
if (test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify(&rdev->kobj, NULL,
- "unacknowledged_bad_blocks");
+ sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
@@ -9299,7 +9487,7 @@
s += rdev->data_offset;
rv = badblocks_clear(&rdev->badblocks, s, sectors);
if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+ sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
@@ -9343,7 +9531,7 @@
{
pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
- proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
+ proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
}
static int __init md_init(void)
@@ -9358,6 +9546,10 @@
if (!md_misc_wq)
goto err_misc_wq;
+ md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
+ if (!md_rdev_misc_wq)
+ goto err_rdev_misc_wq;
+
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
goto err_md;
@@ -9379,6 +9571,8 @@
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
+ destroy_workqueue(md_rdev_misc_wq);
+err_rdev_misc_wq:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
@@ -9526,7 +9720,7 @@
if (rdev->recovery_offset == MaxSector &&
!test_bit(In_sync, &rdev->flags) &&
mddev->pers->spare_active(mddev))
- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ sysfs_notify_dirent_safe(mddev->sysfs_degraded);
put_page(swapout);
return 0;
@@ -9589,7 +9783,7 @@
}
}
-static void autostart_arrays(int part)
+void md_autostart_arrays(int part)
{
struct md_rdev *rdev;
struct detected_devices_node *node_detected_dev;
@@ -9668,6 +9862,7 @@
* destroy_workqueue() below will wait for that to complete.
*/
}
+ destroy_workqueue(md_rdev_misc_wq);
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
}
@@ -9677,7 +9872,7 @@
static int get_ro(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "%d", start_readonly);
+ return sprintf(buffer, "%d\n", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{