1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 md.c : Multiple Devices driver for Linux
4 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5
6 completely rewritten, based on the MD driver code from Marc Zyngier
7
8 Changes:
9
10 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14 - kmod support by: Cyrus Durgin
15 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17
18 - lots of fixes and improvements to the RAID1/RAID5 and generic
19 RAID code (such as request based resynchronization):
20
21 Neil Brown <neilb@cse.unsw.edu.au>.
22
23 - persistent bitmap code
24 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25
26
27 Errors, Warnings, etc.
28 Please use:
29 pr_crit() for error conditions that risk data loss
30 pr_err() for error conditions that are unexpected, like an IO error
31 or internal inconsistency
32 pr_warn() for error conditions that could have been predicted, like
33 adding a device to an array when it has incompatible metadata
34 pr_info() for interesting, very rare events, like an array starting
35 or stopping, or resync starting or stopping
36 pr_debug() for everything else.
37
38*/
39
40#include <linux/sched/mm.h>
41#include <linux/sched/signal.h>
42#include <linux/kthread.h>
43#include <linux/blkdev.h>
44#include <linux/badblocks.h>
45#include <linux/sysctl.h>
46#include <linux/seq_file.h>
47#include <linux/fs.h>
48#include <linux/poll.h>
49#include <linux/ctype.h>
50#include <linux/string.h>
51#include <linux/hdreg.h>
52#include <linux/proc_fs.h>
53#include <linux/random.h>
54#include <linux/module.h>
55#include <linux/reboot.h>
56#include <linux/file.h>
57#include <linux/compat.h>
58#include <linux/delay.h>
59#include <linux/raid/md_p.h>
60#include <linux/raid/md_u.h>
61#include <linux/slab.h>
62#include <linux/percpu-refcount.h>
63
64#include <trace/events/block.h>
65#include "md.h"
66#include "md-bitmap.h"
67#include "md-cluster.h"
68
69#ifndef MODULE
70static void autostart_arrays(int part);
71#endif
72
73/* pers_list is a list of registered personalities protected
74 * by pers_lock.
75 * pers_lock also serves to protect accesses to
76 * mddev->thread when the mutex cannot be held.
77 */
78static LIST_HEAD(pers_list);
79static DEFINE_SPINLOCK(pers_lock);
80
81static struct kobj_type md_ktype;
82
83struct md_cluster_operations *md_cluster_ops;
84EXPORT_SYMBOL(md_cluster_ops);
85static struct module *md_cluster_mod;
86
87static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
88static struct workqueue_struct *md_wq;
89static struct workqueue_struct *md_misc_wq;
90
91static int remove_and_add_spares(struct mddev *mddev,
92 struct md_rdev *this);
93static void mddev_detach(struct mddev *mddev);
94
95/*
96 * Default number of read corrections we'll attempt on an rdev
97 * before ejecting it from the array. We divide the read error
98 * count by 2 for every hour elapsed between read errors.
99 */
100#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
101/*
102 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
103 * is 1000 KB/sec, so the extra system load does not show up that much.
104 * Increase it if you want to have more _guaranteed_ speed. Note that
105 * the RAID driver will use the maximum available bandwidth if the IO
106 * subsystem is idle. There is also an 'absolute maximum' reconstruction
107 * speed limit - in case reconstruction slows down your system despite
108 * idle IO detection.
109 *
110 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
111 * or /sys/block/mdX/md/sync_speed_{min,max}
112 */
113
114static int sysctl_speed_limit_min = 1000;
115static int sysctl_speed_limit_max = 200000;
116static inline int speed_min(struct mddev *mddev)
117{
118 return mddev->sync_speed_min ?
119 mddev->sync_speed_min : sysctl_speed_limit_min;
120}
121
122static inline int speed_max(struct mddev *mddev)
123{
124 return mddev->sync_speed_max ?
125 mddev->sync_speed_max : sysctl_speed_limit_max;
126}
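/*
 * Illustrative sketch only (not code in this file): a resync loop can use
 * the helpers above to throttle itself, roughly
 *
 *	if (currspeed > speed_max(mddev))
 *		msleep(500);
 *	else if (currspeed > speed_min(mddev) && io_is_otherwise_busy)
 *		msleep(250);
 *
 * where "currspeed" and "io_is_otherwise_busy" are placeholders; the real
 * throttling logic lives in md_do_sync() further down in this file.
 */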
127
128static int rdev_init_wb(struct md_rdev *rdev)
129{
130 if (rdev->bdev->bd_queue->nr_hw_queues == 1)
131 return 0;
132
133 spin_lock_init(&rdev->wb_list_lock);
134 INIT_LIST_HEAD(&rdev->wb_list);
135 init_waitqueue_head(&rdev->wb_io_wait);
136 set_bit(WBCollisionCheck, &rdev->flags);
137
138 return 1;
139}
140
141/*
142 * Create wb_info_pool if rdev is the first multi-queue device flagged
143 * with WriteMostly while write-behind mode is enabled.
144 */
145void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
146 bool is_suspend)
147{
148 if (mddev->bitmap_info.max_write_behind == 0)
149 return;
150
151 if (!test_bit(WriteMostly, &rdev->flags) || !rdev_init_wb(rdev))
152 return;
153
154 if (mddev->wb_info_pool == NULL) {
155 unsigned int noio_flag;
156
157 if (!is_suspend)
158 mddev_suspend(mddev);
159 noio_flag = memalloc_noio_save();
160 mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS,
161 sizeof(struct wb_info));
162 memalloc_noio_restore(noio_flag);
163 if (!mddev->wb_info_pool)
164 pr_err("can't alloc memory pool for writemostly\n");
165 if (!is_suspend)
166 mddev_resume(mddev);
167 }
168}
169EXPORT_SYMBOL_GPL(mddev_create_wb_pool);
170
171/*
172 * Destroy wb_info_pool if rdev is the last device flagged with WBCollisionCheck.
173 */
174static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev)
175{
176 if (!test_and_clear_bit(WBCollisionCheck, &rdev->flags))
177 return;
178
179 if (mddev->wb_info_pool) {
180 struct md_rdev *temp;
181 int num = 0;
182
183 /*
184 * Check if other rdevs need wb_info_pool.
185 */
186 rdev_for_each(temp, mddev)
187 if (temp != rdev &&
188 test_bit(WBCollisionCheck, &temp->flags))
189 num++;
190 if (!num) {
191 mddev_suspend(rdev->mddev);
192 mempool_destroy(mddev->wb_info_pool);
193 mddev->wb_info_pool = NULL;
194 mddev_resume(rdev->mddev);
195 }
196 }
197}
198
199static struct ctl_table_header *raid_table_header;
200
201static struct ctl_table raid_table[] = {
202 {
203 .procname = "speed_limit_min",
204 .data = &sysctl_speed_limit_min,
205 .maxlen = sizeof(int),
206 .mode = S_IRUGO|S_IWUSR,
207 .proc_handler = proc_dointvec,
208 },
209 {
210 .procname = "speed_limit_max",
211 .data = &sysctl_speed_limit_max,
212 .maxlen = sizeof(int),
213 .mode = S_IRUGO|S_IWUSR,
214 .proc_handler = proc_dointvec,
215 },
216 { }
217};
218
219static struct ctl_table raid_dir_table[] = {
220 {
221 .procname = "raid",
222 .maxlen = 0,
223 .mode = S_IRUGO|S_IXUGO,
224 .child = raid_table,
225 },
226 { }
227};
228
229static struct ctl_table raid_root_table[] = {
230 {
231 .procname = "dev",
232 .maxlen = 0,
233 .mode = 0555,
234 .child = raid_dir_table,
235 },
236 { }
237};
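/*
 * The nested tables above surface as /proc/sys/dev/raid/speed_limit_min and
 * /proc/sys/dev/raid/speed_limit_max.  They are registered later (in
 * md_init()), roughly as:
 *
 *	raid_table_header = register_sysctl_table(raid_root_table);
 */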
238
239static const struct block_device_operations md_fops;
240
241static int start_readonly;
242
243/*
244 * The original mechanism for creating an md device is to create
245 * a device node in /dev and to open it. This causes races with device-close.
246 * The preferred method is to write to the "new_array" module parameter.
247 * This can avoid races.
248 * Setting create_on_open to false disables the original mechanism
249 * so all the races disappear.
250 */
251static bool create_on_open = true;
252
253struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
254 struct mddev *mddev)
255{
256 if (!mddev || !bioset_initialized(&mddev->bio_set))
257 return bio_alloc(gfp_mask, nr_iovecs);
258
259 return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
260}
261EXPORT_SYMBOL_GPL(bio_alloc_mddev);
262
263static struct bio *md_bio_alloc_sync(struct mddev *mddev)
264{
265 if (!mddev || !bioset_initialized(&mddev->sync_set))
266 return bio_alloc(GFP_NOIO, 1);
267
268 return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
269}
270
271/*
272 * We have a system wide 'event count' that is incremented
273 * on any 'interesting' event, and readers of /proc/mdstat
274 * can use 'poll' or 'select' to find out when the event
275 * count increases.
276 *
277 * Events are:
278 * start array, stop array, error, add device, remove device,
279 * start build, activate spare
280 */
281static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
282static atomic_t md_event_count;
283void md_new_event(struct mddev *mddev)
284{
285 atomic_inc(&md_event_count);
286 wake_up(&md_event_waiters);
287}
288EXPORT_SYMBOL_GPL(md_new_event);
289
290/*
291 * Enables iteration over all existing md arrays.
292 * all_mddevs_lock protects this list.
293 */
294static LIST_HEAD(all_mddevs);
295static DEFINE_SPINLOCK(all_mddevs_lock);
296
297/*
298 * iterates through all used mddevs in the system.
299 * We take care to grab the all_mddevs_lock whenever navigating
300 * the list, and to always hold a refcount when unlocked.
301 * Any code which breaks out of this loop while owning
302 * a reference to the current mddev must mddev_put it.
303 */
304#define for_each_mddev(_mddev,_tmp) \
305 \
306 for (({ spin_lock(&all_mddevs_lock); \
307 _tmp = all_mddevs.next; \
308 _mddev = NULL;}); \
309 ({ if (_tmp != &all_mddevs) \
310 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
311 spin_unlock(&all_mddevs_lock); \
312 if (_mddev) mddev_put(_mddev); \
313 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
314 _tmp != &all_mddevs;}); \
315 ({ spin_lock(&all_mddevs_lock); \
316 _tmp = _tmp->next;}) \
317 )
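/*
 * Sketch of the intended use of the iterator above (the second argument is
 * the struct list_head cursor the macro advances):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("md: found %s\n", mdname(mddev));
 *
 * A caller that breaks out of the loop early still owns a reference and is
 * responsible for the matching mddev_put().
 */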
318
319/* Rather than calling directly into the personality make_request function,
320 * IO requests come here first so that we can check if the device is
321 * being suspended pending a reconfiguration.
322 * We hold a refcount over the call to ->make_request. By the time that
323 * call has finished, the bio has been linked into some internal structure
324 * and so is visible to ->quiesce(), so we don't need the refcount any more.
325 */
326static bool is_suspended(struct mddev *mddev, struct bio *bio)
327{
328 if (mddev->suspended)
329 return true;
330 if (bio_data_dir(bio) != WRITE)
331 return false;
332 if (mddev->suspend_lo >= mddev->suspend_hi)
333 return false;
334 if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
335 return false;
336 if (bio_end_sector(bio) < mddev->suspend_lo)
337 return false;
338 return true;
339}
340
341void md_handle_request(struct mddev *mddev, struct bio *bio)
342{
343check_suspended:
344 rcu_read_lock();
345 if (is_suspended(mddev, bio)) {
346 DEFINE_WAIT(__wait);
347 for (;;) {
348 prepare_to_wait(&mddev->sb_wait, &__wait,
349 TASK_UNINTERRUPTIBLE);
350 if (!is_suspended(mddev, bio))
351 break;
352 rcu_read_unlock();
353 schedule();
354 rcu_read_lock();
355 }
356 finish_wait(&mddev->sb_wait, &__wait);
357 }
358 atomic_inc(&mddev->active_io);
359 rcu_read_unlock();
360
361 if (!mddev->pers->make_request(mddev, bio)) {
362 atomic_dec(&mddev->active_io);
363 wake_up(&mddev->sb_wait);
364 goto check_suspended;
365 }
366
367 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
368 wake_up(&mddev->sb_wait);
369}
370EXPORT_SYMBOL(md_handle_request);
371
372static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
373{
374 const int rw = bio_data_dir(bio);
375 const int sgrp = op_stat_group(bio_op(bio));
376 struct mddev *mddev = q->queuedata;
377 unsigned int sectors;
378
379 if (mddev == NULL || mddev->pers == NULL) {
380 bio_io_error(bio);
381 return BLK_QC_T_NONE;
382 }
383
384 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
385 bio_io_error(bio);
386 return BLK_QC_T_NONE;
387 }
388
389 blk_queue_split(q, &bio);
390
391 if (mddev->ro == 1 && unlikely(rw == WRITE)) {
392 if (bio_sectors(bio) != 0)
393 bio->bi_status = BLK_STS_IOERR;
394 bio_endio(bio);
395 return BLK_QC_T_NONE;
396 }
397
398 /*
399 * save the sectors now since our bio can
400 * go away inside make_request
401 */
402 sectors = bio_sectors(bio);
403 /* bio could be mergeable after passing to underlayer */
404 bio->bi_opf &= ~REQ_NOMERGE;
405
406 md_handle_request(mddev, bio);
407
408 part_stat_lock();
409 part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
410 part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
411 part_stat_unlock();
412
413 return BLK_QC_T_NONE;
414}
415
416/* mddev_suspend makes sure no new requests are submitted
417 * to the device, and that any requests that have been submitted
418 * are completely handled.
419 * Once mddev_detach() is called and completes, the module will be
420 * completely unused.
421 */
422void mddev_suspend(struct mddev *mddev)
423{
424 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
425 lockdep_assert_held(&mddev->reconfig_mutex);
426 if (mddev->suspended++)
427 return;
428 synchronize_rcu();
429 wake_up(&mddev->sb_wait);
430 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
431 smp_mb__after_atomic();
432 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
433 mddev->pers->quiesce(mddev, 1);
434 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
435 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
436
437 del_timer_sync(&mddev->safemode_timer);
438}
439EXPORT_SYMBOL_GPL(mddev_suspend);
440
441void mddev_resume(struct mddev *mddev)
442{
443 lockdep_assert_held(&mddev->reconfig_mutex);
444 if (--mddev->suspended)
445 return;
446 wake_up(&mddev->sb_wait);
447 mddev->pers->quiesce(mddev, 0);
448
449 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
450 md_wakeup_thread(mddev->thread);
451 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
452}
453EXPORT_SYMBOL_GPL(mddev_resume);
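/*
 * Sketch of the expected calling pattern (illustrative only): both helpers
 * need reconfig_mutex held, and suspends nest via the ->suspended counter:
 *
 *	if (mddev_lock(mddev) == 0) {
 *		mddev_suspend(mddev);
 *		...change state that in-flight I/O must not observe...
 *		mddev_resume(mddev);
 *		mddev_unlock(mddev);
 *	}
 */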
454
455int mddev_congested(struct mddev *mddev, int bits)
456{
457 struct md_personality *pers = mddev->pers;
458 int ret = 0;
459
460 rcu_read_lock();
461 if (mddev->suspended)
462 ret = 1;
463 else if (pers && pers->congested)
464 ret = pers->congested(mddev, bits);
465 rcu_read_unlock();
466 return ret;
467}
468EXPORT_SYMBOL_GPL(mddev_congested);
469static int md_congested(void *data, int bits)
470{
471 struct mddev *mddev = data;
472 return mddev_congested(mddev, bits);
473}
474
475/*
476 * Generic flush handling for md
477 */
478
479static void md_end_flush(struct bio *bio)
480{
481 struct md_rdev *rdev = bio->bi_private;
482 struct mddev *mddev = rdev->mddev;
483
484 rdev_dec_pending(rdev, mddev);
485
486 if (atomic_dec_and_test(&mddev->flush_pending)) {
487 /* The pre-request flush has finished */
488 queue_work(md_wq, &mddev->flush_work);
489 }
490 bio_put(bio);
491}
492
493static void md_submit_flush_data(struct work_struct *ws);
494
495static void submit_flushes(struct work_struct *ws)
496{
497 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
498 struct md_rdev *rdev;
499
500 mddev->start_flush = ktime_get_boottime();
501 INIT_WORK(&mddev->flush_work, md_submit_flush_data);
502 atomic_set(&mddev->flush_pending, 1);
503 rcu_read_lock();
504 rdev_for_each_rcu(rdev, mddev)
505 if (rdev->raid_disk >= 0 &&
506 !test_bit(Faulty, &rdev->flags)) {
507 /* Take two references, one is dropped
508 * when request finishes, one after
509 * we reclaim rcu_read_lock
510 */
511 struct bio *bi;
512 atomic_inc(&rdev->nr_pending);
513 atomic_inc(&rdev->nr_pending);
514 rcu_read_unlock();
515 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
516 bi->bi_end_io = md_end_flush;
517 bi->bi_private = rdev;
518 bio_set_dev(bi, rdev->bdev);
519 bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
520 atomic_inc(&mddev->flush_pending);
521 submit_bio(bi);
522 rcu_read_lock();
523 rdev_dec_pending(rdev, mddev);
524 }
525 rcu_read_unlock();
526 if (atomic_dec_and_test(&mddev->flush_pending))
527 queue_work(md_wq, &mddev->flush_work);
528}
529
530static void md_submit_flush_data(struct work_struct *ws)
531{
532 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
533 struct bio *bio = mddev->flush_bio;
534
535 /*
536 * must reset flush_bio before calling into md_handle_request to avoid a
537 * deadlock: other bios that already passed the md_handle_request suspend
538 * check could be waiting for this flush, while the md_handle_request call
539 * below could wait for those bios because of that same suspend check
540 */
541 spin_lock_irq(&mddev->lock);
542 mddev->last_flush = mddev->start_flush;
543 mddev->flush_bio = NULL;
544 spin_unlock_irq(&mddev->lock);
545 wake_up(&mddev->sb_wait);
546
547 if (bio->bi_iter.bi_size == 0) {
548 /* an empty barrier - all done */
549 bio_endio(bio);
550 } else {
551 bio->bi_opf &= ~REQ_PREFLUSH;
552 md_handle_request(mddev, bio);
553 }
554}
555
556/*
557 * Manages consolidation of flushes and submitting any flushes needed for
558 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
559 * being finished in another context. Returns false if the flushing is
560 * complete but still needs the I/O portion of the bio to be processed.
561 */
562bool md_flush_request(struct mddev *mddev, struct bio *bio)
David Brazdil0f672f62019-12-10 10:32:29 +0000563{
564 ktime_t start = ktime_get_boottime();
565 spin_lock_irq(&mddev->lock);
566 wait_event_lock_irq(mddev->sb_wait,
567 !mddev->flush_bio ||
568 ktime_after(mddev->last_flush, start),
569 mddev->lock);
570 if (!ktime_after(mddev->last_flush, start)) {
571 WARN_ON(mddev->flush_bio);
572 mddev->flush_bio = bio;
573 bio = NULL;
574 }
575 spin_unlock_irq(&mddev->lock);
576
577 if (!bio) {
578 INIT_WORK(&mddev->flush_work, submit_flushes);
579 queue_work(md_wq, &mddev->flush_work);
580 } else {
581 /* flush was performed for some other bio while we waited. */
582 if (bio->bi_iter.bi_size == 0)
583 /* an empty barrier - all done */
584 bio_endio(bio);
585 else {
586 bio->bi_opf &= ~REQ_PREFLUSH;
587 return false;
588 }
589 }
590 return true;
591}
592EXPORT_SYMBOL(md_flush_request);
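/*
 * Sketch of how a personality's ->make_request() is expected to consume
 * this helper ("example_make_request" is a placeholder, but the pattern
 * mirrors the RAID personalities):
 *
 *	static bool example_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *		    && md_flush_request(mddev, bio))
 *			return true;
 *		...handle the data portion of the bio...
 *		return true;
 *	}
 */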
593
594static inline struct mddev *mddev_get(struct mddev *mddev)
595{
596 atomic_inc(&mddev->active);
597 return mddev;
598}
599
600static void mddev_delayed_delete(struct work_struct *ws);
601
602static void mddev_put(struct mddev *mddev)
603{
604 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
605 return;
606 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
607 mddev->ctime == 0 && !mddev->hold_active) {
608 /* Array is not configured at all, and not held active,
609 * so destroy it */
610 list_del_init(&mddev->all_mddevs);
611
612 /*
613 * Call queue_work inside the spinlock so that
614 * flush_workqueue() after mddev_find will succeed in waiting
615 * for the work to be done.
616 */
617 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
618 queue_work(md_misc_wq, &mddev->del_work);
619 }
620 spin_unlock(&all_mddevs_lock);
621}
622
623static void md_safemode_timeout(struct timer_list *t);
624
625void mddev_init(struct mddev *mddev)
626{
627 kobject_init(&mddev->kobj, &md_ktype);
628 mutex_init(&mddev->open_mutex);
629 mutex_init(&mddev->reconfig_mutex);
630 mutex_init(&mddev->bitmap_info.mutex);
631 INIT_LIST_HEAD(&mddev->disks);
632 INIT_LIST_HEAD(&mddev->all_mddevs);
633 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
634 atomic_set(&mddev->active, 1);
635 atomic_set(&mddev->openers, 0);
636 atomic_set(&mddev->active_io, 0);
637 spin_lock_init(&mddev->lock);
638 atomic_set(&mddev->flush_pending, 0);
639 init_waitqueue_head(&mddev->sb_wait);
640 init_waitqueue_head(&mddev->recovery_wait);
641 mddev->reshape_position = MaxSector;
642 mddev->reshape_backwards = 0;
643 mddev->last_sync_action = "none";
644 mddev->resync_min = 0;
645 mddev->resync_max = MaxSector;
646 mddev->level = LEVEL_NONE;
647}
648EXPORT_SYMBOL_GPL(mddev_init);
649
650static struct mddev *mddev_find_locked(dev_t unit)
651{
652 struct mddev *mddev;
653
654 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
655 if (mddev->unit == unit)
656 return mddev;
657
658 return NULL;
659}
660
661static struct mddev *mddev_find(dev_t unit)
662{
663 struct mddev *mddev;
664
665 if (MAJOR(unit) != MD_MAJOR)
666 unit &= ~((1 << MdpMinorShift) - 1);
667
668 spin_lock(&all_mddevs_lock);
669 mddev = mddev_find_locked(unit);
670 if (mddev)
671 mddev_get(mddev);
672 spin_unlock(&all_mddevs_lock);
673
674 return mddev;
675}
676
677static struct mddev *mddev_find_or_alloc(dev_t unit)
678{
679 struct mddev *mddev, *new = NULL;
680
681 if (unit && MAJOR(unit) != MD_MAJOR)
682 unit &= ~((1<<MdpMinorShift)-1);
683
684 retry:
685 spin_lock(&all_mddevs_lock);
686
687 if (unit) {
688 mddev = mddev_find_locked(unit);
689 if (mddev) {
690 mddev_get(mddev);
691 spin_unlock(&all_mddevs_lock);
692 kfree(new);
693 return mddev;
694 }
695
696 if (new) {
697 list_add(&new->all_mddevs, &all_mddevs);
698 spin_unlock(&all_mddevs_lock);
699 new->hold_active = UNTIL_IOCTL;
700 return new;
701 }
702 } else if (new) {
703 /* find an unused unit number */
704 static int next_minor = 512;
705 int start = next_minor;
706 int is_free = 0;
707 int dev = 0;
708 while (!is_free) {
709 dev = MKDEV(MD_MAJOR, next_minor);
710 next_minor++;
711 if (next_minor > MINORMASK)
712 next_minor = 0;
713 if (next_minor == start) {
714 /* Oh dear, all in use. */
715 spin_unlock(&all_mddevs_lock);
716 kfree(new);
717 return NULL;
718 }
719
720 is_free = !mddev_find_locked(dev);
721 }
722 new->unit = dev;
723 new->md_minor = MINOR(dev);
724 new->hold_active = UNTIL_STOP;
725 list_add(&new->all_mddevs, &all_mddevs);
726 spin_unlock(&all_mddevs_lock);
727 return new;
728 }
729 spin_unlock(&all_mddevs_lock);
730
731 new = kzalloc(sizeof(*new), GFP_KERNEL);
732 if (!new)
733 return NULL;
734
735 new->unit = unit;
736 if (MAJOR(unit) == MD_MAJOR)
737 new->md_minor = MINOR(unit);
738 else
739 new->md_minor = MINOR(unit) >> MdpMinorShift;
740
741 mddev_init(new);
742
743 goto retry;
744}
745
746static struct attribute_group md_redundancy_group;
747
748void mddev_unlock(struct mddev *mddev)
749{
750 if (mddev->to_remove) {
751 /* These cannot be removed under reconfig_mutex as
752 * an access to the files will try to take reconfig_mutex
753 * while holding the file unremovable, which leads to
754 * a deadlock.
755 * So we set sysfs_active while the remove is happening,
756 * and anything else which might set ->to_remove or may
757 * otherwise change the sysfs namespace will fail with
758 * -EBUSY if sysfs_active is still set.
759 * We set sysfs_active under reconfig_mutex and elsewhere
760 * test it under the same mutex to ensure its correct value
761 * is seen.
762 */
763 struct attribute_group *to_remove = mddev->to_remove;
764 mddev->to_remove = NULL;
765 mddev->sysfs_active = 1;
766 mutex_unlock(&mddev->reconfig_mutex);
767
768 if (mddev->kobj.sd) {
769 if (to_remove != &md_redundancy_group)
770 sysfs_remove_group(&mddev->kobj, to_remove);
771 if (mddev->pers == NULL ||
772 mddev->pers->sync_request == NULL) {
773 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
774 if (mddev->sysfs_action)
775 sysfs_put(mddev->sysfs_action);
776 mddev->sysfs_action = NULL;
777 }
778 }
779 mddev->sysfs_active = 0;
780 } else
781 mutex_unlock(&mddev->reconfig_mutex);
782
783 /* As we've dropped the mutex we need a spinlock to
784 * make sure the thread doesn't disappear
785 */
786 spin_lock(&pers_lock);
787 md_wakeup_thread(mddev->thread);
788 wake_up(&mddev->sb_wait);
789 spin_unlock(&pers_lock);
790}
791EXPORT_SYMBOL_GPL(mddev_unlock);
792
793struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
794{
795 struct md_rdev *rdev;
796
797 rdev_for_each_rcu(rdev, mddev)
798 if (rdev->desc_nr == nr)
799 return rdev;
800
801 return NULL;
802}
803EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
804
805static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
806{
807 struct md_rdev *rdev;
808
809 rdev_for_each(rdev, mddev)
810 if (rdev->bdev->bd_dev == dev)
811 return rdev;
812
813 return NULL;
814}
815
816struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
817{
818 struct md_rdev *rdev;
819
820 rdev_for_each_rcu(rdev, mddev)
821 if (rdev->bdev->bd_dev == dev)
822 return rdev;
823
824 return NULL;
825}
826EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
827
828static struct md_personality *find_pers(int level, char *clevel)
829{
830 struct md_personality *pers;
831 list_for_each_entry(pers, &pers_list, list) {
832 if (level != LEVEL_NONE && pers->level == level)
833 return pers;
834 if (strcmp(pers->name, clevel)==0)
835 return pers;
836 }
837 return NULL;
838}
839
840/* return the offset of the super block in 512byte sectors */
841static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
842{
843 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
844 return MD_NEW_SIZE_SECTORS(num_sectors);
845}
846
847static int alloc_disk_sb(struct md_rdev *rdev)
848{
849 rdev->sb_page = alloc_page(GFP_KERNEL);
850 if (!rdev->sb_page)
851 return -ENOMEM;
852 return 0;
853}
854
855void md_rdev_clear(struct md_rdev *rdev)
856{
857 if (rdev->sb_page) {
858 put_page(rdev->sb_page);
859 rdev->sb_loaded = 0;
860 rdev->sb_page = NULL;
861 rdev->sb_start = 0;
862 rdev->sectors = 0;
863 }
864 if (rdev->bb_page) {
865 put_page(rdev->bb_page);
866 rdev->bb_page = NULL;
867 }
868 badblocks_exit(&rdev->badblocks);
869}
870EXPORT_SYMBOL_GPL(md_rdev_clear);
871
872static void super_written(struct bio *bio)
873{
874 struct md_rdev *rdev = bio->bi_private;
875 struct mddev *mddev = rdev->mddev;
876
877 if (bio->bi_status) {
878 pr_err("md: super_written gets error=%d\n", bio->bi_status);
879 md_error(mddev, rdev);
880 if (!test_bit(Faulty, &rdev->flags)
881 && (bio->bi_opf & MD_FAILFAST)) {
882 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
883 set_bit(LastDev, &rdev->flags);
884 }
885 } else
886 clear_bit(LastDev, &rdev->flags);
887
888 if (atomic_dec_and_test(&mddev->pending_writes))
889 wake_up(&mddev->sb_wait);
890 rdev_dec_pending(rdev, mddev);
891 bio_put(bio);
892}
893
894void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
895 sector_t sector, int size, struct page *page)
896{
897 /* write first size bytes of page to sector of rdev
898 * Increment mddev->pending_writes before returning
899 * and decrement it on completion, waking up sb_wait
900 * if zero is reached.
901 * If an error occurred, call md_error
902 */
903 struct bio *bio;
904 int ff = 0;
905
906 if (!page)
907 return;
908
909 if (test_bit(Faulty, &rdev->flags))
910 return;
911
912 bio = md_bio_alloc_sync(mddev);
913
914 atomic_inc(&rdev->nr_pending);
915
916 bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
917 bio->bi_iter.bi_sector = sector;
918 bio_add_page(bio, page, size, 0);
919 bio->bi_private = rdev;
920 bio->bi_end_io = super_written;
921
922 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
923 test_bit(FailFast, &rdev->flags) &&
924 !test_bit(LastDev, &rdev->flags))
925 ff = MD_FAILFAST;
926 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
927
928 atomic_inc(&mddev->pending_writes);
929 submit_bio(bio);
930}
931
932int md_super_wait(struct mddev *mddev)
933{
934 /* wait for all superblock writes that were scheduled to complete */
935 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
936 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
937 return -EAGAIN;
938 return 0;
939}
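/*
 * Sketch of the usual pairing (illustrative only): a superblock update
 * queues one write per device and then waits for the whole batch:
 *
 *	rdev_for_each(rdev, mddev)
 *		md_super_write(mddev, rdev, rdev->sb_start,
 *			       rdev->sb_size, rdev->sb_page);
 *	if (md_super_wait(mddev) < 0)
 *		...a failfast write failed and must be retried...
 */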
940
941int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
942 struct page *page, int op, int op_flags, bool metadata_op)
943{
944 struct bio *bio = md_bio_alloc_sync(rdev->mddev);
945 int ret;
946
947 if (metadata_op && rdev->meta_bdev)
948 bio_set_dev(bio, rdev->meta_bdev);
949 else
950 bio_set_dev(bio, rdev->bdev);
951 bio_set_op_attrs(bio, op, op_flags);
952 if (metadata_op)
953 bio->bi_iter.bi_sector = sector + rdev->sb_start;
954 else if (rdev->mddev->reshape_position != MaxSector &&
955 (rdev->mddev->reshape_backwards ==
956 (sector >= rdev->mddev->reshape_position)))
957 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
958 else
959 bio->bi_iter.bi_sector = sector + rdev->data_offset;
960 bio_add_page(bio, page, size, 0);
961
962 submit_bio_wait(bio);
963
964 ret = !bio->bi_status;
965 bio_put(bio);
966 return ret;
967}
968EXPORT_SYMBOL_GPL(sync_page_io);
969
970static int read_disk_sb(struct md_rdev *rdev, int size)
971{
972 char b[BDEVNAME_SIZE];
973
974 if (rdev->sb_loaded)
975 return 0;
976
977 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
978 goto fail;
979 rdev->sb_loaded = 1;
980 return 0;
981
982fail:
983 pr_err("md: disabled device %s, could not read superblock.\n",
984 bdevname(rdev->bdev,b));
985 return -EINVAL;
986}
987
988static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
989{
990 return sb1->set_uuid0 == sb2->set_uuid0 &&
991 sb1->set_uuid1 == sb2->set_uuid1 &&
992 sb1->set_uuid2 == sb2->set_uuid2 &&
993 sb1->set_uuid3 == sb2->set_uuid3;
994}
995
996static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
997{
998 int ret;
999 mdp_super_t *tmp1, *tmp2;
1000
1001 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
1002 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
1003
1004 if (!tmp1 || !tmp2) {
1005 ret = 0;
1006 goto abort;
1007 }
1008
1009 *tmp1 = *sb1;
1010 *tmp2 = *sb2;
1011
1012 /*
1013 * nr_disks is not constant
1014 */
1015 tmp1->nr_disks = 0;
1016 tmp2->nr_disks = 0;
1017
1018 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1019abort:
1020 kfree(tmp1);
1021 kfree(tmp2);
1022 return ret;
1023}
1024
1025static u32 md_csum_fold(u32 csum)
1026{
1027 csum = (csum & 0xffff) + (csum >> 16);
1028 return (csum & 0xffff) + (csum >> 16);
1029}
1030
1031static unsigned int calc_sb_csum(mdp_super_t *sb)
1032{
1033 u64 newcsum = 0;
1034 u32 *sb32 = (u32*)sb;
1035 int i;
1036 unsigned int disk_csum, csum;
1037
1038 disk_csum = sb->sb_csum;
1039 sb->sb_csum = 0;
1040
1041 for (i = 0; i < MD_SB_BYTES/4 ; i++)
1042 newcsum += sb32[i];
1043 csum = (newcsum & 0xffffffff) + (newcsum>>32);
1044
1045#ifdef CONFIG_ALPHA
1046 /* This used to use csum_partial, which was wrong for several
1047 * reasons including that different results are returned on
1048 * different architectures. It isn't critical that we get exactly
1049 * the same return value as before (we always csum_fold before
1050 * testing, and that removes any differences). However as we
1051 * know that csum_partial always returned a 16bit value on
1052 * alphas, do a fold to maximise conformity to previous behaviour.
1053 */
1054 sb->sb_csum = md_csum_fold(disk_csum);
1055#else
1056 sb->sb_csum = disk_csum;
1057#endif
1058 return csum;
1059}
1060
1061/*
1062 * Handle superblock details.
1063 * We want to be able to handle multiple superblock formats
1064 * so we have a common interface to them all, and an array of
1065 * different handlers.
1066 * We rely on user-space to write the initial superblock, and support
1067 * reading and updating of superblocks.
1068 * Interface methods are:
1069 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1070 * loads and validates a superblock on dev.
1071 * if refdev != NULL, compare superblocks on both devices
1072 * Return:
1073 * 0 - dev has a superblock that is compatible with refdev
1074 * 1 - dev has a superblock that is compatible and newer than refdev
1075 * so dev should be used as the refdev in future
1076 * -EINVAL superblock incompatible or invalid
1077 * -othererror e.g. -EIO
1078 *
1079 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
1080 * Verify that dev is acceptable into mddev.
1081 * The first time, mddev->raid_disks will be 0, and data from
1082 * dev should be merged in. Subsequent calls check that dev
1083 * is new enough. Return 0 or -EINVAL
1084 *
1085 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
1086 * Update the superblock for rdev with data in mddev
1087 * This does not write to disc.
1088 *
1089 */
1090
1091struct super_type {
1092 char *name;
1093 struct module *owner;
1094 int (*load_super)(struct md_rdev *rdev,
1095 struct md_rdev *refdev,
1096 int minor_version);
1097 int (*validate_super)(struct mddev *mddev,
1098 struct md_rdev *rdev);
1099 void (*sync_super)(struct mddev *mddev,
1100 struct md_rdev *rdev);
1101 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
1102 sector_t num_sectors);
1103 int (*allow_new_offset)(struct md_rdev *rdev,
1104 unsigned long long new_offset);
1105};
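/*
 * The handlers are collected in a table indexed by mddev->major_version,
 * defined further down in this file; its shape is roughly:
 *
 *	static struct super_type super_types[] = {
 *		[0] = { .name = "0.90.0", .load_super = super_90_load, ... },
 *		[1] = { .name = "md-1",   .load_super = super_1_load,  ... },
 *	};
 */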
1106
1107/*
1108 * Check that the given mddev has no bitmap.
1109 *
1110 * This function is called from the run method of all personalities that do not
1111 * support bitmaps. It prints an error message and returns non-zero if mddev
1112 * has a bitmap. Otherwise, it returns 0.
1113 *
1114 */
1115int md_check_no_bitmap(struct mddev *mddev)
1116{
1117 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1118 return 0;
1119 pr_warn("%s: bitmaps are not supported for %s\n",
1120 mdname(mddev), mddev->pers->name);
1121 return 1;
1122}
1123EXPORT_SYMBOL(md_check_no_bitmap);
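/*
 * Illustrative call site (sketch; "example_run" is a placeholder): a
 * personality without bitmap support rejects the array early in its run()
 * method:
 *
 *	static int example_run(struct mddev *mddev)
 *	{
 *		if (md_check_no_bitmap(mddev))
 *			return -EINVAL;
 *		...
 *	}
 */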
1124
1125/*
1126 * load_super for 0.90.0
1127 */
1128static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1129{
1130 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1131 mdp_super_t *sb;
1132 int ret;
1133 bool spare_disk = true;
1134
1135 /*
1136 * Calculate the position of the superblock (512byte sectors),
1137 * it's at the end of the disk.
1138 *
1139 * It also happens to be a multiple of 4Kb.
1140 */
1141 rdev->sb_start = calc_dev_sboffset(rdev);
1142
1143 ret = read_disk_sb(rdev, MD_SB_BYTES);
1144 if (ret)
1145 return ret;
1146
1147 ret = -EINVAL;
1148
1149 bdevname(rdev->bdev, b);
1150 sb = page_address(rdev->sb_page);
1151
1152 if (sb->md_magic != MD_SB_MAGIC) {
1153 pr_warn("md: invalid raid superblock magic on %s\n", b);
1154 goto abort;
1155 }
1156
1157 if (sb->major_version != 0 ||
1158 sb->minor_version < 90 ||
1159 sb->minor_version > 91) {
1160 pr_warn("Bad version number %d.%d on %s\n",
1161 sb->major_version, sb->minor_version, b);
1162 goto abort;
1163 }
1164
1165 if (sb->raid_disks <= 0)
1166 goto abort;
1167
1168 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1169 pr_warn("md: invalid superblock checksum on %s\n", b);
1170 goto abort;
1171 }
1172
1173 rdev->preferred_minor = sb->md_minor;
1174 rdev->data_offset = 0;
1175 rdev->new_data_offset = 0;
1176 rdev->sb_size = MD_SB_BYTES;
1177 rdev->badblocks.shift = -1;
1178
1179 if (sb->level == LEVEL_MULTIPATH)
1180 rdev->desc_nr = -1;
1181 else
1182 rdev->desc_nr = sb->this_disk.number;
1183
1184 /* not spare disk, or LEVEL_MULTIPATH */
1185 if (sb->level == LEVEL_MULTIPATH ||
1186 (rdev->desc_nr >= 0 &&
1187 rdev->desc_nr < MD_SB_DISKS &&
1188 sb->disks[rdev->desc_nr].state &
1189 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1190 spare_disk = false;
1191
1192 if (!refdev) {
1193 if (!spare_disk)
1194 ret = 1;
1195 else
1196 ret = 0;
1197 } else {
1198 __u64 ev1, ev2;
1199 mdp_super_t *refsb = page_address(refdev->sb_page);
1200 if (!md_uuid_equal(refsb, sb)) {
1201 pr_warn("md: %s has different UUID to %s\n",
1202 b, bdevname(refdev->bdev,b2));
1203 goto abort;
1204 }
1205 if (!md_sb_equal(refsb, sb)) {
1206 pr_warn("md: %s has same UUID but different superblock to %s\n",
1207 b, bdevname(refdev->bdev, b2));
1208 goto abort;
1209 }
1210 ev1 = md_event(sb);
1211 ev2 = md_event(refsb);
1212
1213 if (!spare_disk && ev1 > ev2)
1214 ret = 1;
1215 else
1216 ret = 0;
1217 }
1218 rdev->sectors = rdev->sb_start;
1219 /* Limit to 4TB as metadata cannot record more than that.
1220 * (not needed for Linear and RAID0 as metadata doesn't
1221 * record this size)
1222 */
1223 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1224 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1225
1226 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1227 /* "this cannot possibly happen" ... */
1228 ret = -EINVAL;
1229
1230 abort:
1231 return ret;
1232}
1233
1234/*
1235 * validate_super for 0.90.0
1236 */
1237static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1238{
1239 mdp_disk_t *desc;
1240 mdp_super_t *sb = page_address(rdev->sb_page);
1241 __u64 ev1 = md_event(sb);
1242
1243 rdev->raid_disk = -1;
1244 clear_bit(Faulty, &rdev->flags);
1245 clear_bit(In_sync, &rdev->flags);
1246 clear_bit(Bitmap_sync, &rdev->flags);
1247 clear_bit(WriteMostly, &rdev->flags);
1248
1249 if (mddev->raid_disks == 0) {
1250 mddev->major_version = 0;
1251 mddev->minor_version = sb->minor_version;
1252 mddev->patch_version = sb->patch_version;
1253 mddev->external = 0;
1254 mddev->chunk_sectors = sb->chunk_size >> 9;
1255 mddev->ctime = sb->ctime;
1256 mddev->utime = sb->utime;
1257 mddev->level = sb->level;
1258 mddev->clevel[0] = 0;
1259 mddev->layout = sb->layout;
1260 mddev->raid_disks = sb->raid_disks;
1261 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1262 mddev->events = ev1;
1263 mddev->bitmap_info.offset = 0;
1264 mddev->bitmap_info.space = 0;
1265 /* bitmap can use 60 K after the 4K superblocks */
1266 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1267 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1268 mddev->reshape_backwards = 0;
1269
1270 if (mddev->minor_version >= 91) {
1271 mddev->reshape_position = sb->reshape_position;
1272 mddev->delta_disks = sb->delta_disks;
1273 mddev->new_level = sb->new_level;
1274 mddev->new_layout = sb->new_layout;
1275 mddev->new_chunk_sectors = sb->new_chunk >> 9;
1276 if (mddev->delta_disks < 0)
1277 mddev->reshape_backwards = 1;
1278 } else {
1279 mddev->reshape_position = MaxSector;
1280 mddev->delta_disks = 0;
1281 mddev->new_level = mddev->level;
1282 mddev->new_layout = mddev->layout;
1283 mddev->new_chunk_sectors = mddev->chunk_sectors;
1284 }
1285 if (mddev->level == 0)
1286 mddev->layout = -1;
1287
1288 if (sb->state & (1<<MD_SB_CLEAN))
1289 mddev->recovery_cp = MaxSector;
1290 else {
1291 if (sb->events_hi == sb->cp_events_hi &&
1292 sb->events_lo == sb->cp_events_lo) {
1293 mddev->recovery_cp = sb->recovery_cp;
1294 } else
1295 mddev->recovery_cp = 0;
1296 }
1297
1298 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1299 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1300 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1301 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1302
1303 mddev->max_disks = MD_SB_DISKS;
1304
1305 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1306 mddev->bitmap_info.file == NULL) {
1307 mddev->bitmap_info.offset =
1308 mddev->bitmap_info.default_offset;
1309 mddev->bitmap_info.space =
1310 mddev->bitmap_info.default_space;
1311 }
1312
1313 } else if (mddev->pers == NULL) {
1314 /* Insist on a good event counter while assembling, except
1315 * for spares (which don't need an event count) */
1316 ++ev1;
1317 if (sb->disks[rdev->desc_nr].state & (
1318 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1319 if (ev1 < mddev->events)
1320 return -EINVAL;
1321 } else if (mddev->bitmap) {
1322 /* if adding to array with a bitmap, then we can accept an
1323 * older device ... but not too old.
1324 */
1325 if (ev1 < mddev->bitmap->events_cleared)
1326 return 0;
1327 if (ev1 < mddev->events)
1328 set_bit(Bitmap_sync, &rdev->flags);
1329 } else {
1330 if (ev1 < mddev->events)
1331 /* just a hot-add of a new device, leave raid_disk at -1 */
1332 return 0;
1333 }
1334
1335 if (mddev->level != LEVEL_MULTIPATH) {
1336 desc = sb->disks + rdev->desc_nr;
1337
1338 if (desc->state & (1<<MD_DISK_FAULTY))
1339 set_bit(Faulty, &rdev->flags);
1340 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1341 desc->raid_disk < mddev->raid_disks */) {
1342 set_bit(In_sync, &rdev->flags);
1343 rdev->raid_disk = desc->raid_disk;
1344 rdev->saved_raid_disk = desc->raid_disk;
1345 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1346 /* active but not in sync implies recovery up to
1347 * reshape position. We don't know exactly where
1348 * that is, so set to zero for now */
1349 if (mddev->minor_version >= 91) {
1350 rdev->recovery_offset = 0;
1351 rdev->raid_disk = desc->raid_disk;
1352 }
1353 }
1354 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1355 set_bit(WriteMostly, &rdev->flags);
1356 if (desc->state & (1<<MD_DISK_FAILFAST))
1357 set_bit(FailFast, &rdev->flags);
1358 } else /* MULTIPATH are always insync */
1359 set_bit(In_sync, &rdev->flags);
1360 return 0;
1361}
1362
1363/*
1364 * sync_super for 0.90.0
1365 */
1366static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1367{
1368 mdp_super_t *sb;
1369 struct md_rdev *rdev2;
1370 int next_spare = mddev->raid_disks;
1371
1372 /* make rdev->sb match mddev data..
1373 *
1374 * 1/ zero out disks
1375 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1376 * 3/ any empty disks < next_spare become removed
1377 *
1378 * disks[0] gets initialised to REMOVED because
1379 * we cannot be sure from other fields if it has
1380 * been initialised or not.
1381 */
1382 int i;
1383 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1384
1385 rdev->sb_size = MD_SB_BYTES;
1386
1387 sb = page_address(rdev->sb_page);
1388
1389 memset(sb, 0, sizeof(*sb));
1390
1391 sb->md_magic = MD_SB_MAGIC;
1392 sb->major_version = mddev->major_version;
1393 sb->patch_version = mddev->patch_version;
1394 sb->gvalid_words = 0; /* ignored */
1395 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1396 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1397 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1398 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1399
1400 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1401 sb->level = mddev->level;
1402 sb->size = mddev->dev_sectors / 2;
1403 sb->raid_disks = mddev->raid_disks;
1404 sb->md_minor = mddev->md_minor;
1405 sb->not_persistent = 0;
1406 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1407 sb->state = 0;
1408 sb->events_hi = (mddev->events>>32);
1409 sb->events_lo = (u32)mddev->events;
1410
1411 if (mddev->reshape_position == MaxSector)
1412 sb->minor_version = 90;
1413 else {
1414 sb->minor_version = 91;
1415 sb->reshape_position = mddev->reshape_position;
1416 sb->new_level = mddev->new_level;
1417 sb->delta_disks = mddev->delta_disks;
1418 sb->new_layout = mddev->new_layout;
1419 sb->new_chunk = mddev->new_chunk_sectors << 9;
1420 }
1421 mddev->minor_version = sb->minor_version;
1422 if (mddev->in_sync)
1423 {
1424 sb->recovery_cp = mddev->recovery_cp;
1425 sb->cp_events_hi = (mddev->events>>32);
1426 sb->cp_events_lo = (u32)mddev->events;
1427 if (mddev->recovery_cp == MaxSector)
1428 sb->state = (1<< MD_SB_CLEAN);
1429 } else
1430 sb->recovery_cp = 0;
1431
1432 sb->layout = mddev->layout;
1433 sb->chunk_size = mddev->chunk_sectors << 9;
1434
1435 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1436 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1437
1438 sb->disks[0].state = (1<<MD_DISK_REMOVED);
1439 rdev_for_each(rdev2, mddev) {
1440 mdp_disk_t *d;
1441 int desc_nr;
1442 int is_active = test_bit(In_sync, &rdev2->flags);
1443
1444 if (rdev2->raid_disk >= 0 &&
1445 sb->minor_version >= 91)
1446 /* we have nowhere to store the recovery_offset,
1447 * but if it is not below the reshape_position,
1448 * we can piggy-back on that.
1449 */
1450 is_active = 1;
1451 if (rdev2->raid_disk < 0 ||
1452 test_bit(Faulty, &rdev2->flags))
1453 is_active = 0;
1454 if (is_active)
1455 desc_nr = rdev2->raid_disk;
1456 else
1457 desc_nr = next_spare++;
1458 rdev2->desc_nr = desc_nr;
1459 d = &sb->disks[rdev2->desc_nr];
1460 nr_disks++;
1461 d->number = rdev2->desc_nr;
1462 d->major = MAJOR(rdev2->bdev->bd_dev);
1463 d->minor = MINOR(rdev2->bdev->bd_dev);
1464 if (is_active)
1465 d->raid_disk = rdev2->raid_disk;
1466 else
1467 d->raid_disk = rdev2->desc_nr; /* compatibility */
1468 if (test_bit(Faulty, &rdev2->flags))
1469 d->state = (1<<MD_DISK_FAULTY);
1470 else if (is_active) {
1471 d->state = (1<<MD_DISK_ACTIVE);
1472 if (test_bit(In_sync, &rdev2->flags))
1473 d->state |= (1<<MD_DISK_SYNC);
1474 active++;
1475 working++;
1476 } else {
1477 d->state = 0;
1478 spare++;
1479 working++;
1480 }
1481 if (test_bit(WriteMostly, &rdev2->flags))
1482 d->state |= (1<<MD_DISK_WRITEMOSTLY);
1483 if (test_bit(FailFast, &rdev2->flags))
1484 d->state |= (1<<MD_DISK_FAILFAST);
1485 }
1486 /* now set the "removed" and "faulty" bits on any missing devices */
1487 for (i=0 ; i < mddev->raid_disks ; i++) {
1488 mdp_disk_t *d = &sb->disks[i];
1489 if (d->state == 0 && d->number == 0) {
1490 d->number = i;
1491 d->raid_disk = i;
1492 d->state = (1<<MD_DISK_REMOVED);
1493 d->state |= (1<<MD_DISK_FAULTY);
1494 failed++;
1495 }
1496 }
1497 sb->nr_disks = nr_disks;
1498 sb->active_disks = active;
1499 sb->working_disks = working;
1500 sb->failed_disks = failed;
1501 sb->spare_disks = spare;
1502
1503 sb->this_disk = sb->disks[rdev->desc_nr];
1504 sb->sb_csum = calc_sb_csum(sb);
1505}
1506
1507/*
1508 * rdev_size_change for 0.90.0
1509 */
1510static unsigned long long
1511super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1512{
1513 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1514 return 0; /* component must fit device */
1515 if (rdev->mddev->bitmap_info.offset)
1516 return 0; /* can't move bitmap */
1517 rdev->sb_start = calc_dev_sboffset(rdev);
1518 if (!num_sectors || num_sectors > rdev->sb_start)
1519 num_sectors = rdev->sb_start;
1520 /* Limit to 4TB as metadata cannot record more than that.
1521 * 4TB == 2^32 KB, or 2*2^32 sectors.
1522 */
1523 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1524 num_sectors = (sector_t)(2ULL << 32) - 2;
1525 do {
1526 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1527 rdev->sb_page);
1528 } while (md_super_wait(rdev->mddev) < 0);
1529 return num_sectors;
1530}
1531
1532static int
1533super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1534{
1535 /* non-zero offset changes not possible with v0.90 */
1536 return new_offset == 0;
1537}
1538
1539/*
1540 * version 1 superblock
1541 */
1542
1543static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1544{
1545 __le32 disk_csum;
1546 u32 csum;
1547 unsigned long long newcsum;
1548 int size = 256 + le32_to_cpu(sb->max_dev)*2;
1549 __le32 *isuper = (__le32*)sb;
1550
1551 disk_csum = sb->sb_csum;
1552 sb->sb_csum = 0;
1553 newcsum = 0;
1554 for (; size >= 4; size -= 4)
1555 newcsum += le32_to_cpu(*isuper++);
1556
1557 if (size == 2)
1558 newcsum += le16_to_cpu(*(__le16*) isuper);
1559
1560 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1561 sb->sb_csum = disk_csum;
1562 return cpu_to_le32(csum);
1563}
1564
1565static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1566{
1567 struct mdp_superblock_1 *sb;
1568 int ret;
1569 sector_t sb_start;
1570 sector_t sectors;
1571 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1572 int bmask;
1573 bool spare_disk = true;
1574
1575 /*
1576 * Calculate the position of the superblock in 512byte sectors.
1577 * It is always aligned to a 4K boundary and
1578 * depending on minor_version, it can be:
1579 * 0: At least 8K, but less than 12K, from end of device
1580 * 1: At start of device
1581 * 2: 4K from start of device.
1582 */
1583 switch(minor_version) {
1584 case 0:
1585 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1586 sb_start -= 8*2;
1587 sb_start &= ~(sector_t)(4*2-1);
1588 break;
1589 case 1:
1590 sb_start = 0;
1591 break;
1592 case 2:
1593 sb_start = 8;
1594 break;
1595 default:
1596 return -EINVAL;
1597 }
1598 rdev->sb_start = sb_start;
1599
1600 /* superblock is rarely larger than 1K, but it can be larger,
1601 * and it is safe to read 4k, so we do that
1602 */
1603 ret = read_disk_sb(rdev, 4096);
1604 if (ret) return ret;
1605
1606 sb = page_address(rdev->sb_page);
1607
1608 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1609 sb->major_version != cpu_to_le32(1) ||
1610 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1611 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1612 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1613 return -EINVAL;
1614
1615 if (calc_sb_1_csum(sb) != sb->sb_csum) {
1616 pr_warn("md: invalid superblock checksum on %s\n",
1617 bdevname(rdev->bdev,b));
1618 return -EINVAL;
1619 }
1620 if (le64_to_cpu(sb->data_size) < 10) {
1621 pr_warn("md: data_size too small on %s\n",
1622 bdevname(rdev->bdev,b));
1623 return -EINVAL;
1624 }
1625 if (sb->pad0 ||
1626 sb->pad3[0] ||
1627 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1628 /* Some padding is non-zero, might be a new feature */
1629 return -EINVAL;
1630
1631 rdev->preferred_minor = 0xffff;
1632 rdev->data_offset = le64_to_cpu(sb->data_offset);
1633 rdev->new_data_offset = rdev->data_offset;
1634 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1635 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1636 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1637 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1638
1639 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1640 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1641 if (rdev->sb_size & bmask)
1642 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1643
1644 if (minor_version
1645 && rdev->data_offset < sb_start + (rdev->sb_size/512))
1646 return -EINVAL;
1647 if (minor_version
1648 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1649 return -EINVAL;
1650
1651 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1652 rdev->desc_nr = -1;
1653 else
1654 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1655
1656 if (!rdev->bb_page) {
1657 rdev->bb_page = alloc_page(GFP_KERNEL);
1658 if (!rdev->bb_page)
1659 return -ENOMEM;
1660 }
1661 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1662 rdev->badblocks.count == 0) {
1663 /* need to load the bad block list.
1664 * Currently we limit it to one page.
1665 */
1666 s32 offset;
1667 sector_t bb_sector;
1668 __le64 *bbp;
1669 int i;
1670 int sectors = le16_to_cpu(sb->bblog_size);
1671 if (sectors > (PAGE_SIZE / 512))
1672 return -EINVAL;
1673 offset = le32_to_cpu(sb->bblog_offset);
1674 if (offset == 0)
1675 return -EINVAL;
1676 bb_sector = (long long)offset;
1677 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1678 rdev->bb_page, REQ_OP_READ, 0, true))
1679 return -EIO;
1680 bbp = (__le64 *)page_address(rdev->bb_page);
1681 rdev->badblocks.shift = sb->bblog_shift;
1682 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1683 u64 bb = le64_to_cpu(*bbp);
1684 int count = bb & (0x3ff);
1685 u64 sector = bb >> 10;
1686 sector <<= sb->bblog_shift;
1687 count <<= sb->bblog_shift;
1688 if (bb + 1 == 0)
1689 break;
1690 if (badblocks_set(&rdev->badblocks, sector, count, 1))
1691 return -EINVAL;
1692 }
1693 } else if (sb->bblog_offset != 0)
1694 rdev->badblocks.shift = 0;
1695
1696 if ((le32_to_cpu(sb->feature_map) &
1697 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1698 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1699 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1700 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1701 }
1702
1703 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1704 sb->level != 0)
1705 return -EINVAL;
1706
1707 /* not spare disk, or LEVEL_MULTIPATH */
1708 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1709 (rdev->desc_nr >= 0 &&
1710 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1711 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1712 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1713 spare_disk = false;
1714
1715 if (!refdev) {
1716 if (!spare_disk)
1717 ret = 1;
1718 else
1719 ret = 0;
1720 } else {
1721 __u64 ev1, ev2;
1722 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1723
1724 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1725 sb->level != refsb->level ||
1726 sb->layout != refsb->layout ||
1727 sb->chunksize != refsb->chunksize) {
1728 pr_warn("md: %s has strangely different superblock to %s\n",
1729 bdevname(rdev->bdev,b),
1730 bdevname(refdev->bdev,b2));
1731 return -EINVAL;
1732 }
1733 ev1 = le64_to_cpu(sb->events);
1734 ev2 = le64_to_cpu(refsb->events);
1735
1736 if (!spare_disk && ev1 > ev2)
1737 ret = 1;
1738 else
1739 ret = 0;
1740 }
1741 if (minor_version) {
1742 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1743 sectors -= rdev->data_offset;
1744 } else
1745 sectors = rdev->sb_start;
1746 if (sectors < le64_to_cpu(sb->data_size))
1747 return -EINVAL;
1748 rdev->sectors = le64_to_cpu(sb->data_size);
1749 return ret;
1750}
1751
1752static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1753{
1754 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1755 __u64 ev1 = le64_to_cpu(sb->events);
1756
1757 rdev->raid_disk = -1;
1758 clear_bit(Faulty, &rdev->flags);
1759 clear_bit(In_sync, &rdev->flags);
1760 clear_bit(Bitmap_sync, &rdev->flags);
1761 clear_bit(WriteMostly, &rdev->flags);
1762
1763 if (mddev->raid_disks == 0) {
1764 mddev->major_version = 1;
1765 mddev->patch_version = 0;
1766 mddev->external = 0;
1767 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1768 mddev->ctime = le64_to_cpu(sb->ctime);
1769 mddev->utime = le64_to_cpu(sb->utime);
1770 mddev->level = le32_to_cpu(sb->level);
1771 mddev->clevel[0] = 0;
1772 mddev->layout = le32_to_cpu(sb->layout);
1773 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1774 mddev->dev_sectors = le64_to_cpu(sb->size);
1775 mddev->events = ev1;
1776 mddev->bitmap_info.offset = 0;
1777 mddev->bitmap_info.space = 0;
1778 /* Default location for bitmap is 1K after superblock
1779 * using 3K - total of 4K
1780 */
1781 mddev->bitmap_info.default_offset = 1024 >> 9;
1782 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1783 mddev->reshape_backwards = 0;
1784
1785 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1786 memcpy(mddev->uuid, sb->set_uuid, 16);
1787
1788 mddev->max_disks = (4096-256)/2;
1789
1790 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1791 mddev->bitmap_info.file == NULL) {
1792 mddev->bitmap_info.offset =
1793 (__s32)le32_to_cpu(sb->bitmap_offset);
1794 /* Metadata doesn't record how much space is available.
1795 * For 1.0, we assume we can use up to the superblock
1796 * if before, else to 4K beyond superblock.
1797 * For others, assume no change is possible.
1798 */
1799 if (mddev->minor_version > 0)
1800 mddev->bitmap_info.space = 0;
1801 else if (mddev->bitmap_info.offset > 0)
1802 mddev->bitmap_info.space =
1803 8 - mddev->bitmap_info.offset;
1804 else
1805 mddev->bitmap_info.space =
1806 -mddev->bitmap_info.offset;
1807 }
1808
1809 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1810 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1811 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1812 mddev->new_level = le32_to_cpu(sb->new_level);
1813 mddev->new_layout = le32_to_cpu(sb->new_layout);
1814 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1815 if (mddev->delta_disks < 0 ||
1816 (mddev->delta_disks == 0 &&
1817 (le32_to_cpu(sb->feature_map)
1818 & MD_FEATURE_RESHAPE_BACKWARDS)))
1819 mddev->reshape_backwards = 1;
1820 } else {
1821 mddev->reshape_position = MaxSector;
1822 mddev->delta_disks = 0;
1823 mddev->new_level = mddev->level;
1824 mddev->new_layout = mddev->layout;
1825 mddev->new_chunk_sectors = mddev->chunk_sectors;
1826 }
1827
1828	if (mddev->level == 0 &&
1829 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1830 mddev->layout = -1;
1831
1832	if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1833 set_bit(MD_HAS_JOURNAL, &mddev->flags);
1834
1835 if (le32_to_cpu(sb->feature_map) &
1836 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
1837 if (le32_to_cpu(sb->feature_map) &
1838 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1839 return -EINVAL;
1840 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1841 (le32_to_cpu(sb->feature_map) &
1842 MD_FEATURE_MULTIPLE_PPLS))
1843 return -EINVAL;
1844 set_bit(MD_HAS_PPL, &mddev->flags);
1845 }
1846 } else if (mddev->pers == NULL) {
1847	/* Insist on a good event counter while assembling, except for
1848 * spares (which don't need an event count) */
1849 ++ev1;
1850 if (rdev->desc_nr >= 0 &&
1851 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1852 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1853 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1854 if (ev1 < mddev->events)
1855 return -EINVAL;
1856 } else if (mddev->bitmap) {
1857 /* If adding to array with a bitmap, then we can accept an
1858 * older device, but not too old.
1859 */
1860 if (ev1 < mddev->bitmap->events_cleared)
1861 return 0;
1862 if (ev1 < mddev->events)
1863 set_bit(Bitmap_sync, &rdev->flags);
1864 } else {
1865 if (ev1 < mddev->events)
1866 /* just a hot-add of a new device, leave raid_disk at -1 */
1867 return 0;
1868 }
1869 if (mddev->level != LEVEL_MULTIPATH) {
1870 int role;
1871 if (rdev->desc_nr < 0 ||
1872 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1873 role = MD_DISK_ROLE_SPARE;
1874 rdev->desc_nr = -1;
1875 } else
1876 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1877 switch(role) {
1878 case MD_DISK_ROLE_SPARE: /* spare */
1879 break;
1880 case MD_DISK_ROLE_FAULTY: /* faulty */
1881 set_bit(Faulty, &rdev->flags);
1882 break;
1883 case MD_DISK_ROLE_JOURNAL: /* journal device */
1884 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1885 /* journal device without journal feature */
1886 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1887 return -EINVAL;
1888 }
1889 set_bit(Journal, &rdev->flags);
1890 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1891 rdev->raid_disk = 0;
1892 break;
1893 default:
1894 rdev->saved_raid_disk = role;
1895 if ((le32_to_cpu(sb->feature_map) &
1896 MD_FEATURE_RECOVERY_OFFSET)) {
1897 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1898 if (!(le32_to_cpu(sb->feature_map) &
1899 MD_FEATURE_RECOVERY_BITMAP))
1900 rdev->saved_raid_disk = -1;
1901	} else {
1902 /*
1903 * If the array is FROZEN, then the device can't
1904 * be in_sync with rest of array.
1905 */
1906 if (!test_bit(MD_RECOVERY_FROZEN,
1907 &mddev->recovery))
1908 set_bit(In_sync, &rdev->flags);
1909 }
1910	rdev->raid_disk = role;
1911 break;
1912 }
1913 if (sb->devflags & WriteMostly1)
1914 set_bit(WriteMostly, &rdev->flags);
1915 if (sb->devflags & FailFast1)
1916 set_bit(FailFast, &rdev->flags);
1917 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1918 set_bit(Replacement, &rdev->flags);
1919 } else /* MULTIPATH are always insync */
1920 set_bit(In_sync, &rdev->flags);
1921
1922 return 0;
1923}
1924
1925static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1926{
1927 struct mdp_superblock_1 *sb;
1928 struct md_rdev *rdev2;
1929 int max_dev, i;
1930 /* make rdev->sb match mddev and rdev data. */
1931
1932 sb = page_address(rdev->sb_page);
1933
1934 sb->feature_map = 0;
1935 sb->pad0 = 0;
1936 sb->recovery_offset = cpu_to_le64(0);
1937 memset(sb->pad3, 0, sizeof(sb->pad3));
1938
1939 sb->utime = cpu_to_le64((__u64)mddev->utime);
1940 sb->events = cpu_to_le64(mddev->events);
1941 if (mddev->in_sync)
1942 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1943 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1944 sb->resync_offset = cpu_to_le64(MaxSector);
1945 else
1946 sb->resync_offset = cpu_to_le64(0);
1947
1948 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1949
1950 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1951 sb->size = cpu_to_le64(mddev->dev_sectors);
1952 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1953 sb->level = cpu_to_le32(mddev->level);
1954 sb->layout = cpu_to_le32(mddev->layout);
1955 if (test_bit(FailFast, &rdev->flags))
1956 sb->devflags |= FailFast1;
1957 else
1958 sb->devflags &= ~FailFast1;
1959
1960 if (test_bit(WriteMostly, &rdev->flags))
1961 sb->devflags |= WriteMostly1;
1962 else
1963 sb->devflags &= ~WriteMostly1;
1964 sb->data_offset = cpu_to_le64(rdev->data_offset);
1965 sb->data_size = cpu_to_le64(rdev->sectors);
1966
1967 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1968 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1969 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1970 }
1971
1972 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1973 !test_bit(In_sync, &rdev->flags)) {
1974 sb->feature_map |=
1975 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1976 sb->recovery_offset =
1977 cpu_to_le64(rdev->recovery_offset);
1978 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1979 sb->feature_map |=
1980 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1981 }
1982 /* Note: recovery_offset and journal_tail share space */
1983 if (test_bit(Journal, &rdev->flags))
1984 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1985 if (test_bit(Replacement, &rdev->flags))
1986 sb->feature_map |=
1987 cpu_to_le32(MD_FEATURE_REPLACEMENT);
1988
1989 if (mddev->reshape_position != MaxSector) {
1990 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1991 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1992 sb->new_layout = cpu_to_le32(mddev->new_layout);
1993 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1994 sb->new_level = cpu_to_le32(mddev->new_level);
1995 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1996 if (mddev->delta_disks == 0 &&
1997 mddev->reshape_backwards)
1998 sb->feature_map
1999 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2000 if (rdev->new_data_offset != rdev->data_offset) {
2001 sb->feature_map
2002 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2003 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2004 - rdev->data_offset));
2005 }
2006 }
2007
2008 if (mddev_is_clustered(mddev))
2009 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2010
2011 if (rdev->badblocks.count == 0)
2012 /* Nothing to do for bad blocks*/ ;
2013 else if (sb->bblog_offset == 0)
2014 /* Cannot record bad blocks on this device */
2015 md_error(mddev, rdev);
2016 else {
2017 struct badblocks *bb = &rdev->badblocks;
2018	__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2019	u64 *p = bb->page;
2020 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
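	/*
	 * Copy the in-memory bad-block table into the superblock page
	 * under a seqlock read loop: if badblocks_set() raced with us,
	 * read_seqretry() fails and the copy is redone so the on-disk
	 * page never mixes old and new entries.
	 */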
2021 if (bb->changed) {
2022 unsigned seq;
2023
2024retry:
2025 seq = read_seqbegin(&bb->lock);
2026
2027 memset(bbp, 0xff, PAGE_SIZE);
2028
2029 for (i = 0 ; i < bb->count ; i++) {
2030 u64 internal_bb = p[i];
2031 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2032 | BB_LEN(internal_bb));
2033 bbp[i] = cpu_to_le64(store_bb);
2034 }
2035 bb->changed = 0;
2036 if (read_seqretry(&bb->lock, seq))
2037 goto retry;
2038
2039 bb->sector = (rdev->sb_start +
2040 (int)le32_to_cpu(sb->bblog_offset));
2041 bb->size = le16_to_cpu(sb->bblog_size);
2042 }
2043 }
2044
2045 max_dev = 0;
2046 rdev_for_each(rdev2, mddev)
2047 if (rdev2->desc_nr+1 > max_dev)
2048 max_dev = rdev2->desc_nr+1;
2049
2050 if (max_dev > le32_to_cpu(sb->max_dev)) {
2051 int bmask;
2052 sb->max_dev = cpu_to_le32(max_dev);
2053 rdev->sb_size = max_dev * 2 + 256;
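	/* round sb_size up to the device's logical block size */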
2054 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2055 if (rdev->sb_size & bmask)
2056 rdev->sb_size = (rdev->sb_size | bmask) + 1;
2057 } else
2058 max_dev = le32_to_cpu(sb->max_dev);
2059
2060 for (i=0; i<max_dev;i++)
2061 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2062
2063 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2064 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2065
2066 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2067 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2068 sb->feature_map |=
2069 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2070 else
2071 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2072 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2073 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2074 }
2075
2076 rdev_for_each(rdev2, mddev) {
2077 i = rdev2->desc_nr;
2078 if (test_bit(Faulty, &rdev2->flags))
2079 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2080 else if (test_bit(In_sync, &rdev2->flags))
2081 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2082 else if (test_bit(Journal, &rdev2->flags))
2083 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2084 else if (rdev2->raid_disk >= 0)
2085 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2086 else
2087 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2088 }
2089
2090 sb->sb_csum = calc_sb_1_csum(sb);
2091}
2092
2093static unsigned long long
2094super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2095{
2096 struct mdp_superblock_1 *sb;
2097 sector_t max_sectors;
2098 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2099 return 0; /* component must fit device */
2100 if (rdev->data_offset != rdev->new_data_offset)
2101 return 0; /* too confusing */
2102 if (rdev->sb_start < rdev->data_offset) {
2103 /* minor versions 1 and 2; superblock before data */
2104 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
2105 max_sectors -= rdev->data_offset;
2106 if (!num_sectors || num_sectors > max_sectors)
2107 num_sectors = max_sectors;
2108 } else if (rdev->mddev->bitmap_info.offset) {
2109 /* minor version 0 with bitmap we can't move */
2110 return 0;
2111 } else {
2112 /* minor version 0; superblock after data */
2113 sector_t sb_start;
2114 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
2115 sb_start &= ~(sector_t)(4*2 - 1);
2116 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
2117 if (!num_sectors || num_sectors > max_sectors)
2118 num_sectors = max_sectors;
2119 rdev->sb_start = sb_start;
2120 }
2121 sb = page_address(rdev->sb_page);
2122 sb->data_size = cpu_to_le64(num_sectors);
2123 sb->super_offset = cpu_to_le64(rdev->sb_start);
2124 sb->sb_csum = calc_sb_1_csum(sb);
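	/*
	 * Keep rewriting the superblock until md_super_wait() reports
	 * that the write completed and no retry is needed.
	 */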
2125 do {
2126 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2127 rdev->sb_page);
2128 } while (md_super_wait(rdev->mddev) < 0);
2129 return num_sectors;
2130
2131}
2132
2133static int
2134super_1_allow_new_offset(struct md_rdev *rdev,
2135 unsigned long long new_offset)
2136{
2137 /* All necessary checks on new >= old have been done */
2138 struct bitmap *bitmap;
2139 if (new_offset >= rdev->data_offset)
2140 return 1;
2141
2142 /* with 1.0 metadata, there is no metadata to tread on
2143 * so we can always move back */
2144 if (rdev->mddev->minor_version == 0)
2145 return 1;
2146
2147 /* otherwise we must be sure not to step on
2148 * any metadata, so stay:
2149 * 36K beyond start of superblock
2150 * beyond end of badblocks
2151 * beyond write-intent bitmap
2152 */
2153 if (rdev->sb_start + (32+4)*2 > new_offset)
2154 return 0;
2155 bitmap = rdev->mddev->bitmap;
2156 if (bitmap && !rdev->mddev->bitmap_info.file &&
2157 rdev->sb_start + rdev->mddev->bitmap_info.offset +
2158 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
2159 return 0;
2160 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2161 return 0;
2162
2163 return 1;
2164}
2165
2166static struct super_type super_types[] = {
2167 [0] = {
2168 .name = "0.90.0",
2169 .owner = THIS_MODULE,
2170 .load_super = super_90_load,
2171 .validate_super = super_90_validate,
2172 .sync_super = super_90_sync,
2173 .rdev_size_change = super_90_rdev_size_change,
2174 .allow_new_offset = super_90_allow_new_offset,
2175 },
2176 [1] = {
2177 .name = "md-1",
2178 .owner = THIS_MODULE,
2179 .load_super = super_1_load,
2180 .validate_super = super_1_validate,
2181 .sync_super = super_1_sync,
2182 .rdev_size_change = super_1_rdev_size_change,
2183 .allow_new_offset = super_1_allow_new_offset,
2184 },
2185};
2186
2187static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2188{
2189 if (mddev->sync_super) {
2190 mddev->sync_super(mddev, rdev);
2191 return;
2192 }
2193
2194 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2195
2196 super_types[mddev->major_version].sync_super(mddev, rdev);
2197}
2198
2199static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2200{
2201 struct md_rdev *rdev, *rdev2;
2202
2203 rcu_read_lock();
2204 rdev_for_each_rcu(rdev, mddev1) {
2205 if (test_bit(Faulty, &rdev->flags) ||
2206 test_bit(Journal, &rdev->flags) ||
2207 rdev->raid_disk == -1)
2208 continue;
2209 rdev_for_each_rcu(rdev2, mddev2) {
2210 if (test_bit(Faulty, &rdev2->flags) ||
2211 test_bit(Journal, &rdev2->flags) ||
2212 rdev2->raid_disk == -1)
2213 continue;
2214 if (rdev->bdev->bd_contains ==
2215 rdev2->bdev->bd_contains) {
2216 rcu_read_unlock();
2217 return 1;
2218 }
2219 }
2220 }
2221 rcu_read_unlock();
2222 return 0;
2223}
2224
2225static LIST_HEAD(pending_raid_disks);
2226
2227/*
2228 * Try to register data integrity profile for an mddev
2229 *
2230 * This is called when an array is started and after a disk has been kicked
2231 * from the array. It only succeeds if all working and active component devices
2232 * are integrity capable with matching profiles.
2233 */
2234int md_integrity_register(struct mddev *mddev)
2235{
2236 struct md_rdev *rdev, *reference = NULL;
2237
2238 if (list_empty(&mddev->disks))
2239 return 0; /* nothing to do */
2240 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2241 return 0; /* shouldn't register, or already is */
2242 rdev_for_each(rdev, mddev) {
2243 /* skip spares and non-functional disks */
2244 if (test_bit(Faulty, &rdev->flags))
2245 continue;
2246 if (rdev->raid_disk < 0)
2247 continue;
2248 if (!reference) {
2249 /* Use the first rdev as the reference */
2250 reference = rdev;
2251 continue;
2252 }
2253 /* does this rdev's profile match the reference profile? */
2254 if (blk_integrity_compare(reference->bdev->bd_disk,
2255 rdev->bdev->bd_disk) < 0)
2256 return -EINVAL;
2257 }
2258 if (!reference || !bdev_get_integrity(reference->bdev))
2259 return 0;
2260 /*
2261 * All component devices are integrity capable and have matching
2262 * profiles, register the common profile for the md device.
2263 */
2264 blk_integrity_register(mddev->gendisk,
2265 bdev_get_integrity(reference->bdev));
2266
2267 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2268 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
2269 pr_err("md: failed to create integrity pool for %s\n",
2270 mdname(mddev));
2271 return -EINVAL;
2272 }
2273 return 0;
2274}
2275EXPORT_SYMBOL(md_integrity_register);
2276
2277/*
2278 * Attempt to add an rdev, but only if it is consistent with the current
2279 * integrity profile
2280 */
2281int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2282{
2283	struct blk_integrity *bi_mddev;
2284 char name[BDEVNAME_SIZE];
2285
2286 if (!mddev->gendisk)
2287 return 0;
2288
2289	bi_mddev = blk_get_integrity(mddev->gendisk);
2290
2291 if (!bi_mddev) /* nothing to do */
2292 return 0;
2293
2294 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2295 pr_err("%s: incompatible integrity profile for %s\n",
2296 mdname(mddev), bdevname(rdev->bdev, name));
2297 return -ENXIO;
2298 }
2299
2300 return 0;
2301}
2302EXPORT_SYMBOL(md_integrity_add_rdev);
2303
2304static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2305{
2306 char b[BDEVNAME_SIZE];
2307 struct kobject *ko;
2308 int err;
2309
2310 /* prevent duplicates */
2311 if (find_rdev(mddev, rdev->bdev->bd_dev))
2312 return -EEXIST;
2313
2314 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2315 mddev->pers)
2316 return -EROFS;
2317
2318 /* make sure rdev->sectors exceeds mddev->dev_sectors */
2319 if (!test_bit(Journal, &rdev->flags) &&
2320 rdev->sectors &&
2321 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2322 if (mddev->pers) {
2323 /* Cannot change size, so fail
2324 * If mddev->level <= 0, then we don't care
2325 * about aligning sizes (e.g. linear)
2326 */
2327 if (mddev->level > 0)
2328 return -ENOSPC;
2329 } else
2330 mddev->dev_sectors = rdev->sectors;
2331 }
2332
2333 /* Verify rdev->desc_nr is unique.
2334 * If it is -1, assign a free number, else
2335 * check number is not in use
2336 */
2337 rcu_read_lock();
2338 if (rdev->desc_nr < 0) {
2339 int choice = 0;
2340 if (mddev->pers)
2341 choice = mddev->raid_disks;
2342 while (md_find_rdev_nr_rcu(mddev, choice))
2343 choice++;
2344 rdev->desc_nr = choice;
2345 } else {
2346 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2347 rcu_read_unlock();
2348 return -EBUSY;
2349 }
2350 }
2351 rcu_read_unlock();
2352 if (!test_bit(Journal, &rdev->flags) &&
2353 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2354 pr_warn("md: %s: array is limited to %d devices\n",
2355 mdname(mddev), mddev->max_disks);
2356 return -EBUSY;
2357 }
2358 bdevname(rdev->bdev,b);
2359 strreplace(b, '/', '!');
2360
2361 rdev->mddev = mddev;
2362 pr_debug("md: bind<%s>\n", b);
2363
2364	if (mddev->raid_disks)
2365 mddev_create_wb_pool(mddev, rdev, false);
2366
2367	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2368 goto fail;
2369
2370 ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2371 if (sysfs_create_link(&rdev->kobj, ko, "block"))
2372 /* failure here is OK */;
2373 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2374
2375 list_add_rcu(&rdev->same_set, &mddev->disks);
2376 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2377
2378 /* May as well allow recovery to be retried once */
2379 mddev->recovery_disabled++;
2380
2381 return 0;
2382
2383 fail:
2384 pr_warn("md: failed to register dev-%s for %s\n",
2385 b, mdname(mddev));
2386 return err;
2387}
2388
2389static void md_delayed_delete(struct work_struct *ws)
2390{
2391 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2392 kobject_del(&rdev->kobj);
2393 kobject_put(&rdev->kobj);
2394}
2395
2396static void unbind_rdev_from_array(struct md_rdev *rdev)
2397{
2398 char b[BDEVNAME_SIZE];
2399
2400 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2401 list_del_rcu(&rdev->same_set);
2402 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2403	mddev_destroy_wb_pool(rdev->mddev, rdev);
2404	rdev->mddev = NULL;
2405 sysfs_remove_link(&rdev->kobj, "block");
2406 sysfs_put(rdev->sysfs_state);
2407 rdev->sysfs_state = NULL;
2408 rdev->badblocks.count = 0;
2409 /* We need to delay this, otherwise we can deadlock when
2410	 * writing 'remove' to "dev/state". We also need
2411 * to delay it due to rcu usage.
2412 */
2413 synchronize_rcu();
2414 INIT_WORK(&rdev->del_work, md_delayed_delete);
2415 kobject_get(&rdev->kobj);
2416 queue_work(md_misc_wq, &rdev->del_work);
2417}
2418
2419/*
2420 * prevent the device from being mounted, repartitioned or
2421 * otherwise reused by a RAID array (or any other kernel
2422 * subsystem), by bd_claiming the device.
2423 */
2424static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2425{
2426 int err = 0;
2427 struct block_device *bdev;
2428 char b[BDEVNAME_SIZE];
2429
2430 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2431 shared ? (struct md_rdev *)lock_rdev : rdev);
2432 if (IS_ERR(bdev)) {
2433 pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2434 return PTR_ERR(bdev);
2435 }
2436 rdev->bdev = bdev;
2437 return err;
2438}
2439
2440static void unlock_rdev(struct md_rdev *rdev)
2441{
2442 struct block_device *bdev = rdev->bdev;
2443 rdev->bdev = NULL;
2444 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2445}
2446
2447void md_autodetect_dev(dev_t dev);
2448
2449static void export_rdev(struct md_rdev *rdev)
2450{
2451 char b[BDEVNAME_SIZE];
2452
2453 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2454 md_rdev_clear(rdev);
2455#ifndef MODULE
2456 if (test_bit(AutoDetected, &rdev->flags))
2457 md_autodetect_dev(rdev->bdev->bd_dev);
2458#endif
2459 unlock_rdev(rdev);
2460 kobject_put(&rdev->kobj);
2461}
2462
2463void md_kick_rdev_from_array(struct md_rdev *rdev)
2464{
2465 unbind_rdev_from_array(rdev);
2466 export_rdev(rdev);
2467}
2468EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2469
2470static void export_array(struct mddev *mddev)
2471{
2472 struct md_rdev *rdev;
2473
2474 while (!list_empty(&mddev->disks)) {
2475 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2476 same_set);
2477 md_kick_rdev_from_array(rdev);
2478 }
2479 mddev->raid_disks = 0;
2480 mddev->major_version = 0;
2481}
2482
2483static bool set_in_sync(struct mddev *mddev)
2484{
2485 lockdep_assert_held(&mddev->lock);
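	/*
	 * Switch writes_pending to atomic mode so percpu_ref_is_zero()
	 * gives an exact answer; sync_checkers counts concurrent callers
	 * so the ref only drops back to percpu mode when the last one is
	 * done checking.
	 */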
2486 if (!mddev->in_sync) {
2487 mddev->sync_checkers++;
2488 spin_unlock(&mddev->lock);
2489 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2490 spin_lock(&mddev->lock);
2491 if (!mddev->in_sync &&
2492 percpu_ref_is_zero(&mddev->writes_pending)) {
2493 mddev->in_sync = 1;
2494 /*
2495 * Ensure ->in_sync is visible before we clear
2496 * ->sync_checkers.
2497 */
2498 smp_mb();
2499 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2500 sysfs_notify_dirent_safe(mddev->sysfs_state);
2501 }
2502 if (--mddev->sync_checkers == 0)
2503 percpu_ref_switch_to_percpu(&mddev->writes_pending);
2504 }
2505 if (mddev->safemode == 1)
2506 mddev->safemode = 0;
2507 return mddev->in_sync;
2508}
2509
2510static void sync_sbs(struct mddev *mddev, int nospares)
2511{
2512 /* Update each superblock (in-memory image), but
2513 * if we are allowed to, skip spares which already
2514 * have the right event counter, or have one earlier
2515 * (which would mean they aren't being marked as dirty
2516 * with the rest of the array)
2517 */
2518 struct md_rdev *rdev;
2519 rdev_for_each(rdev, mddev) {
2520 if (rdev->sb_events == mddev->events ||
2521 (nospares &&
2522 rdev->raid_disk < 0 &&
2523 rdev->sb_events+1 == mddev->events)) {
2524 /* Don't update this superblock */
2525 rdev->sb_loaded = 2;
2526 } else {
2527 sync_super(mddev, rdev);
2528 rdev->sb_loaded = 1;
2529 }
2530 }
2531}
2532
2533static bool does_sb_need_changing(struct mddev *mddev)
2534{
2535 struct md_rdev *rdev;
2536 struct mdp_superblock_1 *sb;
2537 int role;
2538
2539 /* Find a good rdev */
2540 rdev_for_each(rdev, mddev)
2541 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2542 break;
2543
2544 /* No good device found. */
2545 if (!rdev)
2546 return false;
2547
2548 sb = page_address(rdev->sb_page);
2549 /* Check if a device has become faulty or a spare become active */
2550 rdev_for_each(rdev, mddev) {
2551 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
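		/*
		 * Raw role values: 0xffff is a spare, 0xfffe faulty,
		 * 0xfffd a journal device; anything smaller is an
		 * active data slot.
		 */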
2552 /* Device activated? */
2553 if (role == 0xffff && rdev->raid_disk >=0 &&
2554 !test_bit(Faulty, &rdev->flags))
2555 return true;
2556 /* Device turned faulty? */
2557 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2558 return true;
2559 }
2560
2561 /* Check if any mddev parameters have changed */
2562 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2563 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2564 (mddev->layout != le32_to_cpu(sb->layout)) ||
2565 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2566 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2567 return true;
2568
2569 return false;
2570}
2571
2572void md_update_sb(struct mddev *mddev, int force_change)
2573{
2574 struct md_rdev *rdev;
2575 int sync_req;
2576 int nospares = 0;
2577 int any_badblocks_changed = 0;
2578 int ret = -1;
2579
2580 if (mddev->ro) {
2581 if (force_change)
2582 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2583 return;
2584 }
2585
2586repeat:
2587 if (mddev_is_clustered(mddev)) {
2588 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2589 force_change = 1;
2590 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2591 nospares = 1;
2592 ret = md_cluster_ops->metadata_update_start(mddev);
2593	/* Has someone else updated the sb? */
2594 if (!does_sb_need_changing(mddev)) {
2595 if (ret == 0)
2596 md_cluster_ops->metadata_update_cancel(mddev);
2597 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2598 BIT(MD_SB_CHANGE_DEVS) |
2599 BIT(MD_SB_CHANGE_CLEAN));
2600 return;
2601 }
2602 }
2603
2604 /*
2605 * First make sure individual recovery_offsets are correct
2606 * curr_resync_completed can only be used during recovery.
2607 * During reshape/resync it might use array-addresses rather
2608	 * than device addresses.
2609 */
2610 rdev_for_each(rdev, mddev) {
2611 if (rdev->raid_disk >= 0 &&
2612 mddev->delta_disks >= 0 &&
2613 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2614 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2615 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2616 !test_bit(Journal, &rdev->flags) &&
2617 !test_bit(In_sync, &rdev->flags) &&
2618 mddev->curr_resync_completed > rdev->recovery_offset)
2619 rdev->recovery_offset = mddev->curr_resync_completed;
2620
2621 }
2622 if (!mddev->persistent) {
2623 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2624 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2625 if (!mddev->external) {
2626 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2627 rdev_for_each(rdev, mddev) {
2628 if (rdev->badblocks.changed) {
2629 rdev->badblocks.changed = 0;
2630 ack_all_badblocks(&rdev->badblocks);
2631 md_error(mddev, rdev);
2632 }
2633 clear_bit(Blocked, &rdev->flags);
2634 clear_bit(BlockedBadBlocks, &rdev->flags);
2635 wake_up(&rdev->blocked_wait);
2636 }
2637 }
2638 wake_up(&mddev->sb_wait);
2639 return;
2640 }
2641
2642 spin_lock(&mddev->lock);
2643
2644 mddev->utime = ktime_get_real_seconds();
2645
2646 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2647 force_change = 1;
2648 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2649	/* just a clean <-> dirty transition, possibly leave spares alone,
2650 * though if events isn't the right even/odd, we will have to do
2651 * spares after all
2652 */
2653 nospares = 1;
2654 if (force_change)
2655 nospares = 0;
2656 if (mddev->degraded)
2657 /* If the array is degraded, then skipping spares is both
2658 * dangerous and fairly pointless.
2659 * Dangerous because a device that was removed from the array
2660	 * might have an event_count that still looks up-to-date,
2661 * so it can be re-added without a resync.
2662 * Pointless because if there are any spares to skip,
2663 * then a recovery will happen and soon that array won't
2664 * be degraded any more and the spare can go back to sleep then.
2665 */
2666 nospares = 0;
2667
2668 sync_req = mddev->in_sync;
2669
2670 /* If this is just a dirty<->clean transition, and the array is clean
2671 * and 'events' is odd, we can roll back to the previous clean state */
2672 if (nospares
2673 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2674 && mddev->can_decrease_events
2675 && mddev->events != 1) {
2676 mddev->events--;
2677 mddev->can_decrease_events = 0;
2678 } else {
2679 /* otherwise we have to go forward and ... */
2680 mddev->events ++;
2681 mddev->can_decrease_events = nospares;
2682 }
2683
2684 /*
2685 * This 64-bit counter should never wrap.
2686 * Either we are in around ~1 trillion A.C., assuming
2687 * 1 reboot per second, or we have a bug...
2688 */
2689 WARN_ON(mddev->events == 0);
2690
2691 rdev_for_each(rdev, mddev) {
2692 if (rdev->badblocks.changed)
2693 any_badblocks_changed++;
2694 if (test_bit(Faulty, &rdev->flags))
2695 set_bit(FaultRecorded, &rdev->flags);
2696 }
2697
2698 sync_sbs(mddev, nospares);
2699 spin_unlock(&mddev->lock);
2700
2701 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2702 mdname(mddev), mddev->in_sync);
2703
2704 if (mddev->queue)
2705 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2706rewrite:
2707 md_bitmap_update_sb(mddev->bitmap);
2708 rdev_for_each(rdev, mddev) {
2709 char b[BDEVNAME_SIZE];
2710
2711 if (rdev->sb_loaded != 1)
2712 continue; /* no noise on spare devices */
2713
2714 if (!test_bit(Faulty, &rdev->flags)) {
2715 md_super_write(mddev,rdev,
2716 rdev->sb_start, rdev->sb_size,
2717 rdev->sb_page);
2718 pr_debug("md: (write) %s's sb offset: %llu\n",
2719 bdevname(rdev->bdev, b),
2720 (unsigned long long)rdev->sb_start);
2721 rdev->sb_events = mddev->events;
2722 if (rdev->badblocks.size) {
2723 md_super_write(mddev, rdev,
2724 rdev->badblocks.sector,
2725 rdev->badblocks.size << 9,
2726 rdev->bb_page);
2727 rdev->badblocks.size = 0;
2728 }
2729
2730 } else
2731 pr_debug("md: %s (skipping faulty)\n",
2732 bdevname(rdev->bdev, b));
2733
2734 if (mddev->level == LEVEL_MULTIPATH)
2735 /* only need to write one superblock... */
2736 break;
2737 }
2738 if (md_super_wait(mddev) < 0)
2739 goto rewrite;
2740 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2741
2742 if (mddev_is_clustered(mddev) && ret == 0)
2743 md_cluster_ops->metadata_update_finish(mddev);
2744
2745 if (mddev->in_sync != sync_req ||
2746 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2747 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2748 /* have to write it out again */
2749 goto repeat;
2750 wake_up(&mddev->sb_wait);
2751 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2752 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2753
2754 rdev_for_each(rdev, mddev) {
2755 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2756 clear_bit(Blocked, &rdev->flags);
2757
2758 if (any_badblocks_changed)
2759 ack_all_badblocks(&rdev->badblocks);
2760 clear_bit(BlockedBadBlocks, &rdev->flags);
2761 wake_up(&rdev->blocked_wait);
2762 }
2763}
2764EXPORT_SYMBOL(md_update_sb);
2765
2766static int add_bound_rdev(struct md_rdev *rdev)
2767{
2768 struct mddev *mddev = rdev->mddev;
2769 int err = 0;
2770 bool add_journal = test_bit(Journal, &rdev->flags);
2771
2772 if (!mddev->pers->hot_remove_disk || add_journal) {
2773 /* If there is hot_add_disk but no hot_remove_disk
2774 * then added disks for geometry changes,
2775 * and should be added immediately.
2776 */
2777 super_types[mddev->major_version].
2778 validate_super(mddev, rdev);
2779 if (add_journal)
2780 mddev_suspend(mddev);
2781 err = mddev->pers->hot_add_disk(mddev, rdev);
2782 if (add_journal)
2783 mddev_resume(mddev);
2784 if (err) {
2785 md_kick_rdev_from_array(rdev);
2786 return err;
2787 }
2788 }
2789 sysfs_notify_dirent_safe(rdev->sysfs_state);
2790
2791 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2792 if (mddev->degraded)
2793 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2794 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2795 md_new_event(mddev);
2796 md_wakeup_thread(mddev->thread);
2797 return 0;
2798}
2799
2800/* words written to sysfs files may, or may not, be \n terminated.
2801	 * We want to accept either case. For this we use cmd_match.
2802 */
2803static int cmd_match(const char *cmd, const char *str)
2804{
2805 /* See if cmd, written into a sysfs file, matches
2806 * str. They must either be the same, or cmd can
2807 * have a trailing newline
2808 */
2809 while (*cmd && *str && *cmd == *str) {
2810 cmd++;
2811 str++;
2812 }
2813 if (*cmd == '\n')
2814 cmd++;
2815 if (*str || *cmd)
2816 return 0;
2817 return 1;
2818}
2819
2820struct rdev_sysfs_entry {
2821 struct attribute attr;
2822 ssize_t (*show)(struct md_rdev *, char *);
2823 ssize_t (*store)(struct md_rdev *, const char *, size_t);
2824};
2825
2826static ssize_t
2827state_show(struct md_rdev *rdev, char *page)
2828{
2829 char *sep = ",";
2830 size_t len = 0;
2831 unsigned long flags = READ_ONCE(rdev->flags);
2832
2833 if (test_bit(Faulty, &flags) ||
2834 (!test_bit(ExternalBbl, &flags) &&
2835 rdev->badblocks.unacked_exist))
2836 len += sprintf(page+len, "faulty%s", sep);
2837 if (test_bit(In_sync, &flags))
2838 len += sprintf(page+len, "in_sync%s", sep);
2839 if (test_bit(Journal, &flags))
2840 len += sprintf(page+len, "journal%s", sep);
2841 if (test_bit(WriteMostly, &flags))
2842 len += sprintf(page+len, "write_mostly%s", sep);
2843 if (test_bit(Blocked, &flags) ||
2844 (rdev->badblocks.unacked_exist
2845 && !test_bit(Faulty, &flags)))
2846 len += sprintf(page+len, "blocked%s", sep);
2847 if (!test_bit(Faulty, &flags) &&
2848 !test_bit(Journal, &flags) &&
2849 !test_bit(In_sync, &flags))
2850 len += sprintf(page+len, "spare%s", sep);
2851 if (test_bit(WriteErrorSeen, &flags))
2852 len += sprintf(page+len, "write_error%s", sep);
2853 if (test_bit(WantReplacement, &flags))
2854 len += sprintf(page+len, "want_replacement%s", sep);
2855 if (test_bit(Replacement, &flags))
2856 len += sprintf(page+len, "replacement%s", sep);
2857 if (test_bit(ExternalBbl, &flags))
2858 len += sprintf(page+len, "external_bbl%s", sep);
2859 if (test_bit(FailFast, &flags))
2860 len += sprintf(page+len, "failfast%s", sep);
2861
2862 if (len)
2863 len -= strlen(sep);
2864
2865 return len+sprintf(page+len, "\n");
2866}
2867
2868static ssize_t
2869state_store(struct md_rdev *rdev, const char *buf, size_t len)
2870{
2871 /* can write
2872 * faulty - simulates an error
2873 * remove - disconnects the device
2874 * writemostly - sets write_mostly
2875 * -writemostly - clears write_mostly
2876	 * blocked - sets the Blocked flag
2877	 * -blocked - clears the Blocked flag and possibly simulates an error
2878	 * insync - sets Insync provided the device isn't active
2879	 * -insync - clears Insync for a device with a slot assigned,
2880 * so that it gets rebuilt based on bitmap
2881 * write_error - sets WriteErrorSeen
2882 * -write_error - clears WriteErrorSeen
2883 * {,-}failfast - set/clear FailFast
2884 */
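	/*
	 * Example from userspace (hypothetical array/device names):
	 *   echo want_replacement > /sys/block/md0/md/dev-sdb1/state
	 */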
2885 int err = -EINVAL;
2886 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2887 md_error(rdev->mddev, rdev);
2888 if (test_bit(Faulty, &rdev->flags))
2889 err = 0;
2890 else
2891 err = -EBUSY;
2892 } else if (cmd_match(buf, "remove")) {
2893 if (rdev->mddev->pers) {
2894 clear_bit(Blocked, &rdev->flags);
2895 remove_and_add_spares(rdev->mddev, rdev);
2896 }
2897 if (rdev->raid_disk >= 0)
2898 err = -EBUSY;
2899 else {
2900 struct mddev *mddev = rdev->mddev;
2901 err = 0;
2902 if (mddev_is_clustered(mddev))
2903 err = md_cluster_ops->remove_disk(mddev, rdev);
2904
2905 if (err == 0) {
2906 md_kick_rdev_from_array(rdev);
2907 if (mddev->pers) {
2908 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2909 md_wakeup_thread(mddev->thread);
2910 }
2911 md_new_event(mddev);
2912 }
2913 }
2914 } else if (cmd_match(buf, "writemostly")) {
2915 set_bit(WriteMostly, &rdev->flags);
2916	mddev_create_wb_pool(rdev->mddev, rdev, false);
2917	err = 0;
2918 } else if (cmd_match(buf, "-writemostly")) {
2919	mddev_destroy_wb_pool(rdev->mddev, rdev);
2920	clear_bit(WriteMostly, &rdev->flags);
2921 err = 0;
2922 } else if (cmd_match(buf, "blocked")) {
2923 set_bit(Blocked, &rdev->flags);
2924 err = 0;
2925 } else if (cmd_match(buf, "-blocked")) {
2926 if (!test_bit(Faulty, &rdev->flags) &&
2927 !test_bit(ExternalBbl, &rdev->flags) &&
2928 rdev->badblocks.unacked_exist) {
2929 /* metadata handler doesn't understand badblocks,
2930 * so we need to fail the device
2931 */
2932 md_error(rdev->mddev, rdev);
2933 }
2934 clear_bit(Blocked, &rdev->flags);
2935 clear_bit(BlockedBadBlocks, &rdev->flags);
2936 wake_up(&rdev->blocked_wait);
2937 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2938 md_wakeup_thread(rdev->mddev->thread);
2939
2940 err = 0;
2941 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2942 set_bit(In_sync, &rdev->flags);
2943 err = 0;
2944 } else if (cmd_match(buf, "failfast")) {
2945 set_bit(FailFast, &rdev->flags);
2946 err = 0;
2947 } else if (cmd_match(buf, "-failfast")) {
2948 clear_bit(FailFast, &rdev->flags);
2949 err = 0;
2950 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2951 !test_bit(Journal, &rdev->flags)) {
2952 if (rdev->mddev->pers == NULL) {
2953 clear_bit(In_sync, &rdev->flags);
2954 rdev->saved_raid_disk = rdev->raid_disk;
2955 rdev->raid_disk = -1;
2956 err = 0;
2957 }
2958 } else if (cmd_match(buf, "write_error")) {
2959 set_bit(WriteErrorSeen, &rdev->flags);
2960 err = 0;
2961 } else if (cmd_match(buf, "-write_error")) {
2962 clear_bit(WriteErrorSeen, &rdev->flags);
2963 err = 0;
2964 } else if (cmd_match(buf, "want_replacement")) {
2965 /* Any non-spare device that is not a replacement can
2966 * become want_replacement at any time, but we then need to
2967 * check if recovery is needed.
2968 */
2969 if (rdev->raid_disk >= 0 &&
2970 !test_bit(Journal, &rdev->flags) &&
2971 !test_bit(Replacement, &rdev->flags))
2972 set_bit(WantReplacement, &rdev->flags);
2973 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2974 md_wakeup_thread(rdev->mddev->thread);
2975 err = 0;
2976 } else if (cmd_match(buf, "-want_replacement")) {
2977 /* Clearing 'want_replacement' is always allowed.
2978	 * Once replacement starts it is too late, though.
2979 */
2980 err = 0;
2981 clear_bit(WantReplacement, &rdev->flags);
2982 } else if (cmd_match(buf, "replacement")) {
2983 /* Can only set a device as a replacement when array has not
2984 * yet been started. Once running, replacement is automatic
2985 * from spares, or by assigning 'slot'.
2986 */
2987 if (rdev->mddev->pers)
2988 err = -EBUSY;
2989 else {
2990 set_bit(Replacement, &rdev->flags);
2991 err = 0;
2992 }
2993 } else if (cmd_match(buf, "-replacement")) {
2994 /* Similarly, can only clear Replacement before start */
2995 if (rdev->mddev->pers)
2996 err = -EBUSY;
2997 else {
2998 clear_bit(Replacement, &rdev->flags);
2999 err = 0;
3000 }
3001 } else if (cmd_match(buf, "re-add")) {
3002	if (!rdev->mddev->pers)
3003 err = -EINVAL;
3004 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3005 rdev->saved_raid_disk >= 0) {
3006	/* clear_bit is performed _after_ all the devices
3007 * have their local Faulty bit cleared. If any writes
3008 * happen in the meantime in the local node, they
3009 * will land in the local bitmap, which will be synced
3010 * by this node eventually
3011 */
3012 if (!mddev_is_clustered(rdev->mddev) ||
3013 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3014 clear_bit(Faulty, &rdev->flags);
3015 err = add_bound_rdev(rdev);
3016 }
3017 } else
3018 err = -EBUSY;
3019 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3020 set_bit(ExternalBbl, &rdev->flags);
3021 rdev->badblocks.shift = 0;
3022 err = 0;
3023 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3024 clear_bit(ExternalBbl, &rdev->flags);
3025 err = 0;
3026 }
3027 if (!err)
3028 sysfs_notify_dirent_safe(rdev->sysfs_state);
3029 return err ? err : len;
3030}
3031static struct rdev_sysfs_entry rdev_state =
3032__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
3033
3034static ssize_t
3035errors_show(struct md_rdev *rdev, char *page)
3036{
3037 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3038}
3039
3040static ssize_t
3041errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3042{
3043 unsigned int n;
3044 int rv;
3045
3046 rv = kstrtouint(buf, 10, &n);
3047 if (rv < 0)
3048 return rv;
3049 atomic_set(&rdev->corrected_errors, n);
3050 return len;
3051}
3052static struct rdev_sysfs_entry rdev_errors =
3053__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3054
3055static ssize_t
3056slot_show(struct md_rdev *rdev, char *page)
3057{
3058 if (test_bit(Journal, &rdev->flags))
3059 return sprintf(page, "journal\n");
3060 else if (rdev->raid_disk < 0)
3061 return sprintf(page, "none\n");
3062 else
3063 return sprintf(page, "%d\n", rdev->raid_disk);
3064}
3065
3066static ssize_t
3067slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3068{
3069 int slot;
3070 int err;
3071
3072 if (test_bit(Journal, &rdev->flags))
3073 return -EBUSY;
3074 if (strncmp(buf, "none", 4)==0)
3075 slot = -1;
3076 else {
3077 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3078 if (err < 0)
3079 return err;
3080 }
3081 if (rdev->mddev->pers && slot == -1) {
3082 /* Setting 'slot' on an active array requires also
3083 * updating the 'rd%d' link, and communicating
3084 * with the personality with ->hot_*_disk.
3085 * For now we only support removing
3086 * failed/spare devices. This normally happens automatically,
3087 * but not when the metadata is externally managed.
3088 */
3089 if (rdev->raid_disk == -1)
3090 return -EEXIST;
3091 /* personality does all needed checks */
3092 if (rdev->mddev->pers->hot_remove_disk == NULL)
3093 return -EINVAL;
3094 clear_bit(Blocked, &rdev->flags);
3095 remove_and_add_spares(rdev->mddev, rdev);
3096 if (rdev->raid_disk >= 0)
3097 return -EBUSY;
3098 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3099 md_wakeup_thread(rdev->mddev->thread);
3100 } else if (rdev->mddev->pers) {
3101 /* Activating a spare .. or possibly reactivating
3102 * if we ever get bitmaps working here.
3103 */
3104 int err;
3105
3106 if (rdev->raid_disk != -1)
3107 return -EBUSY;
3108
3109 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3110 return -EBUSY;
3111
3112 if (rdev->mddev->pers->hot_add_disk == NULL)
3113 return -EINVAL;
3114
3115 if (slot >= rdev->mddev->raid_disks &&
3116 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3117 return -ENOSPC;
3118
3119 rdev->raid_disk = slot;
3120 if (test_bit(In_sync, &rdev->flags))
3121 rdev->saved_raid_disk = slot;
3122 else
3123 rdev->saved_raid_disk = -1;
3124 clear_bit(In_sync, &rdev->flags);
3125 clear_bit(Bitmap_sync, &rdev->flags);
3126 err = rdev->mddev->pers->
3127 hot_add_disk(rdev->mddev, rdev);
3128 if (err) {
3129 rdev->raid_disk = -1;
3130 return err;
3131 } else
3132 sysfs_notify_dirent_safe(rdev->sysfs_state);
3133 if (sysfs_link_rdev(rdev->mddev, rdev))
3134 /* failure here is OK */;
3135 /* don't wakeup anyone, leave that to userspace. */
3136 } else {
3137 if (slot >= rdev->mddev->raid_disks &&
3138 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3139 return -ENOSPC;
3140 rdev->raid_disk = slot;
3141 /* assume it is working */
3142 clear_bit(Faulty, &rdev->flags);
3143 clear_bit(WriteMostly, &rdev->flags);
3144 set_bit(In_sync, &rdev->flags);
3145 sysfs_notify_dirent_safe(rdev->sysfs_state);
3146 }
3147 return len;
3148}
3149
3150static struct rdev_sysfs_entry rdev_slot =
3151__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
3152
3153static ssize_t
3154offset_show(struct md_rdev *rdev, char *page)
3155{
3156 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3157}
3158
3159static ssize_t
3160offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3161{
3162 unsigned long long offset;
3163 if (kstrtoull(buf, 10, &offset) < 0)
3164 return -EINVAL;
3165 if (rdev->mddev->pers && rdev->raid_disk >= 0)
3166 return -EBUSY;
3167 if (rdev->sectors && rdev->mddev->external)
3168 /* Must set offset before size, so overlap checks
3169 * can be sane */
3170 return -EBUSY;
3171 rdev->data_offset = offset;
3172 rdev->new_data_offset = offset;
3173 return len;
3174}
3175
3176static struct rdev_sysfs_entry rdev_offset =
3177__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3178
3179static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3180{
3181 return sprintf(page, "%llu\n",
3182 (unsigned long long)rdev->new_data_offset);
3183}
3184
3185static ssize_t new_offset_store(struct md_rdev *rdev,
3186 const char *buf, size_t len)
3187{
3188 unsigned long long new_offset;
3189 struct mddev *mddev = rdev->mddev;
3190
3191 if (kstrtoull(buf, 10, &new_offset) < 0)
3192 return -EINVAL;
3193
3194 if (mddev->sync_thread ||
3195 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
3196 return -EBUSY;
3197 if (new_offset == rdev->data_offset)
3198 /* reset is always permitted */
3199 ;
3200 else if (new_offset > rdev->data_offset) {
3201 /* must not push array size beyond rdev_sectors */
3202 if (new_offset - rdev->data_offset
3203 + mddev->dev_sectors > rdev->sectors)
3204 return -E2BIG;
3205 }
3206 /* Metadata worries about other space details. */
3207
3208 /* decreasing the offset is inconsistent with a backwards
3209 * reshape.
3210 */
3211 if (new_offset < rdev->data_offset &&
3212 mddev->reshape_backwards)
3213 return -EINVAL;
3214 /* Increasing offset is inconsistent with forwards
3215 * reshape. reshape_direction should be set to
3216 * 'backwards' first.
3217 */
3218 if (new_offset > rdev->data_offset &&
3219 !mddev->reshape_backwards)
3220 return -EINVAL;
3221
3222 if (mddev->pers && mddev->persistent &&
3223 !super_types[mddev->major_version]
3224 .allow_new_offset(rdev, new_offset))
3225 return -E2BIG;
3226 rdev->new_data_offset = new_offset;
3227 if (new_offset > rdev->data_offset)
3228 mddev->reshape_backwards = 1;
3229 else if (new_offset < rdev->data_offset)
3230 mddev->reshape_backwards = 0;
3231
3232 return len;
3233}
3234static struct rdev_sysfs_entry rdev_new_offset =
3235__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3236
3237static ssize_t
3238rdev_size_show(struct md_rdev *rdev, char *page)
3239{
3240 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3241}
3242
3243static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3244{
3245 /* check if two start/length pairs overlap */
3246 if (s1+l1 <= s2)
3247 return 0;
3248 if (s2+l2 <= s1)
3249 return 0;
3250 return 1;
3251}
3252
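/* Convert a size written to sysfs in units of 1K blocks into 512-byte
 * sectors, rejecting values that would overflow sector_t.
 */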
3253static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3254{
3255 unsigned long long blocks;
3256 sector_t new;
3257
3258 if (kstrtoull(buf, 10, &blocks) < 0)
3259 return -EINVAL;
3260
3261 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3262 return -EINVAL; /* sector conversion overflow */
3263
3264 new = blocks * 2;
3265 if (new != blocks * 2)
3266 return -EINVAL; /* unsigned long long to sector_t overflow */
3267
3268 *sectors = new;
3269 return 0;
3270}
3271
3272static ssize_t
3273rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3274{
3275 struct mddev *my_mddev = rdev->mddev;
3276 sector_t oldsectors = rdev->sectors;
3277 sector_t sectors;
3278
3279 if (test_bit(Journal, &rdev->flags))
3280 return -EBUSY;
3281 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3282 return -EINVAL;
3283 if (rdev->data_offset != rdev->new_data_offset)
3284 return -EINVAL; /* too confusing */
3285 if (my_mddev->pers && rdev->raid_disk >= 0) {
3286 if (my_mddev->persistent) {
3287 sectors = super_types[my_mddev->major_version].
3288 rdev_size_change(rdev, sectors);
3289 if (!sectors)
3290 return -EBUSY;
3291 } else if (!sectors)
3292 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3293 rdev->data_offset;
3294 if (!my_mddev->pers->resize)
3295 /* Cannot change size for RAID0 or Linear etc */
3296 return -EINVAL;
3297 }
3298 if (sectors < my_mddev->dev_sectors)
3299 return -EINVAL; /* component must fit device */
3300
3301 rdev->sectors = sectors;
3302 if (sectors > oldsectors && my_mddev->external) {
3303 /* Need to check that all other rdevs with the same
3304 * ->bdev do not overlap. 'rcu' is sufficient to walk
3305 * the rdev lists safely.
3306 * This check does not provide a hard guarantee, it
3307 * just helps avoid dangerous mistakes.
3308 */
3309 struct mddev *mddev;
3310 int overlap = 0;
3311 struct list_head *tmp;
3312
3313 rcu_read_lock();
3314 for_each_mddev(mddev, tmp) {
3315 struct md_rdev *rdev2;
3316
3317 rdev_for_each(rdev2, mddev)
3318 if (rdev->bdev == rdev2->bdev &&
3319 rdev != rdev2 &&
3320 overlaps(rdev->data_offset, rdev->sectors,
3321 rdev2->data_offset,
3322 rdev2->sectors)) {
3323 overlap = 1;
3324 break;
3325 }
3326 if (overlap) {
3327 mddev_put(mddev);
3328 break;
3329 }
3330 }
3331 rcu_read_unlock();
3332 if (overlap) {
3333 /* Someone else could have slipped in a size
3334 * change here, but doing so is just silly.
3335 * We put oldsectors back because we *know* it is
3336 * safe, and trust userspace not to race with
3337 * itself
3338 */
3339 rdev->sectors = oldsectors;
3340 return -EBUSY;
3341 }
3342 }
3343 return len;
3344}
3345
3346static struct rdev_sysfs_entry rdev_size =
3347__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3348
3349static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3350{
3351 unsigned long long recovery_start = rdev->recovery_offset;
3352
3353 if (test_bit(In_sync, &rdev->flags) ||
3354 recovery_start == MaxSector)
3355 return sprintf(page, "none\n");
3356
3357 return sprintf(page, "%llu\n", recovery_start);
3358}
3359
3360static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3361{
3362 unsigned long long recovery_start;
3363
3364 if (cmd_match(buf, "none"))
3365 recovery_start = MaxSector;
3366 else if (kstrtoull(buf, 10, &recovery_start))
3367 return -EINVAL;
3368
3369 if (rdev->mddev->pers &&
3370 rdev->raid_disk >= 0)
3371 return -EBUSY;
3372
3373 rdev->recovery_offset = recovery_start;
3374 if (recovery_start == MaxSector)
3375 set_bit(In_sync, &rdev->flags);
3376 else
3377 clear_bit(In_sync, &rdev->flags);
3378 return len;
3379}
3380
3381static struct rdev_sysfs_entry rdev_recovery_start =
3382__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3383
3384/* sysfs access to bad-blocks list.
3385 * We present two files.
3386 * 'bad-blocks' lists sector numbers and lengths of ranges that
3387 * are recorded as bad. The list is truncated to fit within
3388 * the one-page limit of sysfs.
3389 * Writing "sector length" to this file adds an acknowledged
3390	 *    bad block to the list.
3391 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3392 * been acknowledged. Writing to this file adds bad blocks
3393 * without acknowledging them. This is largely for testing.
3394 */
3395static ssize_t bb_show(struct md_rdev *rdev, char *page)
3396{
3397 return badblocks_show(&rdev->badblocks, page, 0);
3398}
3399static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3400{
3401 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3402 /* Maybe that ack was all we needed */
3403 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3404 wake_up(&rdev->blocked_wait);
3405 return rv;
3406}
3407static struct rdev_sysfs_entry rdev_bad_blocks =
3408__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3409
3410static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3411{
3412 return badblocks_show(&rdev->badblocks, page, 1);
3413}
3414static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3415{
3416 return badblocks_store(&rdev->badblocks, page, len, 1);
3417}
3418static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3419__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3420
3421static ssize_t
3422ppl_sector_show(struct md_rdev *rdev, char *page)
3423{
3424 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3425}
3426
3427static ssize_t
3428ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3429{
3430 unsigned long long sector;
3431
3432 if (kstrtoull(buf, 10, &sector) < 0)
3433 return -EINVAL;
3434 if (sector != (sector_t)sector)
3435 return -EINVAL;
3436
3437 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3438 rdev->raid_disk >= 0)
3439 return -EBUSY;
3440
3441 if (rdev->mddev->persistent) {
3442 if (rdev->mddev->major_version == 0)
3443 return -EINVAL;
3444 if ((sector > rdev->sb_start &&
3445 sector - rdev->sb_start > S16_MAX) ||
3446 (sector < rdev->sb_start &&
3447 rdev->sb_start - sector > -S16_MIN))
3448 return -EINVAL;
3449 rdev->ppl.offset = sector - rdev->sb_start;
3450 } else if (!rdev->mddev->external) {
3451 return -EBUSY;
3452 }
3453 rdev->ppl.sector = sector;
3454 return len;
3455}
3456
3457static struct rdev_sysfs_entry rdev_ppl_sector =
3458__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3459
3460static ssize_t
3461ppl_size_show(struct md_rdev *rdev, char *page)
3462{
3463 return sprintf(page, "%u\n", rdev->ppl.size);
3464}
3465
3466static ssize_t
3467ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3468{
3469 unsigned int size;
3470
3471 if (kstrtouint(buf, 10, &size) < 0)
3472 return -EINVAL;
3473
3474 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3475 rdev->raid_disk >= 0)
3476 return -EBUSY;
3477
3478 if (rdev->mddev->persistent) {
3479 if (rdev->mddev->major_version == 0)
3480 return -EINVAL;
3481 if (size > U16_MAX)
3482 return -EINVAL;
3483 } else if (!rdev->mddev->external) {
3484 return -EBUSY;
3485 }
3486 rdev->ppl.size = size;
3487 return len;
3488}
3489
3490static struct rdev_sysfs_entry rdev_ppl_size =
3491__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3492
3493static struct attribute *rdev_default_attrs[] = {
3494 &rdev_state.attr,
3495 &rdev_errors.attr,
3496 &rdev_slot.attr,
3497 &rdev_offset.attr,
3498 &rdev_new_offset.attr,
3499 &rdev_size.attr,
3500 &rdev_recovery_start.attr,
3501 &rdev_bad_blocks.attr,
3502 &rdev_unack_bad_blocks.attr,
3503 &rdev_ppl_sector.attr,
3504 &rdev_ppl_size.attr,
3505 NULL,
3506};
3507static ssize_t
3508rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3509{
3510 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3511 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3512
3513 if (!entry->show)
3514 return -EIO;
3515 if (!rdev->mddev)
3516	return -ENODEV;
3517	return entry->show(rdev, page);
3518}
3519
3520static ssize_t
3521rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3522 const char *page, size_t length)
3523{
3524 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3525 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3526 ssize_t rv;
3527 struct mddev *mddev = rdev->mddev;
3528
3529 if (!entry->store)
3530 return -EIO;
3531 if (!capable(CAP_SYS_ADMIN))
3532 return -EACCES;
3533	rv = mddev ? mddev_lock(mddev) : -ENODEV;
3534	if (!rv) {
3535 if (rdev->mddev == NULL)
3536			rv = -ENODEV;
3537		else
3538 rv = entry->store(rdev, page, length);
3539 mddev_unlock(mddev);
3540 }
3541 return rv;
3542}
3543
3544static void rdev_free(struct kobject *ko)
3545{
3546 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3547 kfree(rdev);
3548}
3549static const struct sysfs_ops rdev_sysfs_ops = {
3550 .show = rdev_attr_show,
3551 .store = rdev_attr_store,
3552};
3553static struct kobj_type rdev_ktype = {
3554 .release = rdev_free,
3555 .sysfs_ops = &rdev_sysfs_ops,
3556 .default_attrs = rdev_default_attrs,
3557};
3558
3559int md_rdev_init(struct md_rdev *rdev)
3560{
3561 rdev->desc_nr = -1;
3562 rdev->saved_raid_disk = -1;
3563 rdev->raid_disk = -1;
3564 rdev->flags = 0;
3565 rdev->data_offset = 0;
3566 rdev->new_data_offset = 0;
3567 rdev->sb_events = 0;
3568 rdev->last_read_error = 0;
3569 rdev->sb_loaded = 0;
3570 rdev->bb_page = NULL;
3571 atomic_set(&rdev->nr_pending, 0);
3572 atomic_set(&rdev->read_errors, 0);
3573 atomic_set(&rdev->corrected_errors, 0);
3574
3575 INIT_LIST_HEAD(&rdev->same_set);
3576 init_waitqueue_head(&rdev->blocked_wait);
3577
3578 /* Add space to store bad block list.
3579 * This reserves the space even on arrays where it cannot
3580 * be used - I wonder if that matters
3581 */
3582 return badblocks_init(&rdev->badblocks, 0);
3583}
3584EXPORT_SYMBOL_GPL(md_rdev_init);
3585/*
3586 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3587 *
3588 * mark the device faulty if:
3589 *
3590 * - the device is nonexistent (zero size)
3591 * - the device has no valid superblock
3592 *
3593 * a faulty rdev _never_ has rdev->sb set.
3594 */
3595static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3596{
3597 char b[BDEVNAME_SIZE];
3598 int err;
3599 struct md_rdev *rdev;
3600 sector_t size;
3601
3602 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3603 if (!rdev)
3604 return ERR_PTR(-ENOMEM);
3605
3606 err = md_rdev_init(rdev);
3607 if (err)
3608 goto abort_free;
3609 err = alloc_disk_sb(rdev);
3610 if (err)
3611 goto abort_free;
3612
3613 err = lock_rdev(rdev, newdev, super_format == -2);
3614 if (err)
3615 goto abort_free;
3616
3617 kobject_init(&rdev->kobj, &rdev_ktype);
3618
3619 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3620 if (!size) {
3621 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3622 bdevname(rdev->bdev,b));
3623 err = -EINVAL;
3624 goto abort_free;
3625 }
3626
3627 if (super_format >= 0) {
3628 err = super_types[super_format].
3629 load_super(rdev, NULL, super_minor);
3630 if (err == -EINVAL) {
3631 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3632 bdevname(rdev->bdev,b),
3633 super_format, super_minor);
3634 goto abort_free;
3635 }
3636 if (err < 0) {
3637 pr_warn("md: could not read %s's sb, not importing!\n",
3638 bdevname(rdev->bdev,b));
3639 goto abort_free;
3640 }
3641 }
3642
3643 return rdev;
3644
3645abort_free:
3646 if (rdev->bdev)
3647 unlock_rdev(rdev);
3648 md_rdev_clear(rdev);
3649 kfree(rdev);
3650 return ERR_PTR(err);
3651}
3652
3653/*
3654 * Check a full RAID array for plausibility
3655 */
3656
3657static int analyze_sbs(struct mddev *mddev)
3658{
3659 int i;
3660 struct md_rdev *rdev, *freshest, *tmp;
3661 char b[BDEVNAME_SIZE];
3662
3663 freshest = NULL;
3664 rdev_for_each_safe(rdev, tmp, mddev)
3665 switch (super_types[mddev->major_version].
3666 load_super(rdev, freshest, mddev->minor_version)) {
3667 case 1:
3668 freshest = rdev;
3669 break;
3670 case 0:
3671 break;
3672 default:
3673 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3674 bdevname(rdev->bdev,b));
3675 md_kick_rdev_from_array(rdev);
3676 }
3677
3678	/* Cannot find a valid fresh disk */
3679 if (!freshest) {
3680 pr_warn("md: cannot find a valid disk\n");
3681 return -EINVAL;
3682 }
3683
3684	super_types[mddev->major_version].
3685 validate_super(mddev, freshest);
3686
3687 i = 0;
3688 rdev_for_each_safe(rdev, tmp, mddev) {
3689 if (mddev->max_disks &&
3690 (rdev->desc_nr >= mddev->max_disks ||
3691 i > mddev->max_disks)) {
3692 pr_warn("md: %s: %s: only %d devices permitted\n",
3693 mdname(mddev), bdevname(rdev->bdev, b),
3694 mddev->max_disks);
3695 md_kick_rdev_from_array(rdev);
3696 continue;
3697 }
3698 if (rdev != freshest) {
3699 if (super_types[mddev->major_version].
3700 validate_super(mddev, rdev)) {
3701 pr_warn("md: kicking non-fresh %s from array!\n",
3702 bdevname(rdev->bdev,b));
3703 md_kick_rdev_from_array(rdev);
3704 continue;
3705 }
3706 }
3707 if (mddev->level == LEVEL_MULTIPATH) {
3708 rdev->desc_nr = i++;
3709 rdev->raid_disk = rdev->desc_nr;
3710 set_bit(In_sync, &rdev->flags);
3711 } else if (rdev->raid_disk >=
3712 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3713 !test_bit(Journal, &rdev->flags)) {
3714 rdev->raid_disk = -1;
3715 clear_bit(In_sync, &rdev->flags);
3716 }
3717 }
3718
3719 return 0;
3720}
3721
3722/* Read a fixed-point number.
3723 * Numbers in sysfs attributes should be in "standard" units where
3724 * possible, so time should be in seconds.
3725 * However we internally use a much smaller unit such as
3726 * milliseconds or jiffies.
3727 * This function takes a decimal number with a possible fractional
3728 * component, and produces an integer which is the result of
3729 * multiplying that number by 10^'scale', all without any
3730 * floating-point arithmetic.
3731 */
3732int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3733{
3734 unsigned long result = 0;
3735 long decimals = -1;
3736 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3737 if (*cp == '.')
3738 decimals = 0;
3739 else if (decimals < scale) {
3740 unsigned int value;
3741 value = *cp - '0';
3742 result = result * 10 + value;
3743 if (decimals >= 0)
3744 decimals++;
3745 }
3746 cp++;
3747 }
3748 if (*cp == '\n')
3749 cp++;
3750 if (*cp)
3751 return -EINVAL;
3752 if (decimals < 0)
3753 decimals = 0;
3754	*res = result * int_pow(10, scale - decimals);
3755	return 0;
3756}
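/*
 * Worked example of the conversion above (a sketch, not extra behaviour):
 * with scale == 3, "0.20" accumulates result = 20 with decimals = 2, so
 * *res = 20 * 10^(3 - 2) = 200, while a plain "2" yields 2 * 10^3 = 2000.
 * safe_delay_store() below relies on exactly this to turn a value given in
 * seconds into milliseconds.
 */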
3757
3758static ssize_t
3759safe_delay_show(struct mddev *mddev, char *page)
3760{
3761 int msec = (mddev->safemode_delay*1000)/HZ;
3762 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3763}
3764static ssize_t
3765safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3766{
3767 unsigned long msec;
3768
3769 if (mddev_is_clustered(mddev)) {
3770 pr_warn("md: Safemode is disabled for clustered mode\n");
3771 return -EINVAL;
3772 }
3773
3774 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3775 return -EINVAL;
3776 if (msec == 0)
3777 mddev->safemode_delay = 0;
3778 else {
3779 unsigned long old_delay = mddev->safemode_delay;
3780 unsigned long new_delay = (msec*HZ)/1000;
3781
3782 if (new_delay == 0)
3783 new_delay = 1;
3784 mddev->safemode_delay = new_delay;
3785 if (new_delay < old_delay || old_delay == 0)
3786 mod_timer(&mddev->safemode_timer, jiffies+1);
3787 }
3788 return len;
3789}
3790static struct md_sysfs_entry md_safe_delay =
3791__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3792
3793static ssize_t
3794level_show(struct mddev *mddev, char *page)
3795{
3796 struct md_personality *p;
3797 int ret;
3798 spin_lock(&mddev->lock);
3799 p = mddev->pers;
3800 if (p)
3801 ret = sprintf(page, "%s\n", p->name);
3802 else if (mddev->clevel[0])
3803 ret = sprintf(page, "%s\n", mddev->clevel);
3804 else if (mddev->level != LEVEL_NONE)
3805 ret = sprintf(page, "%d\n", mddev->level);
3806 else
3807 ret = 0;
3808 spin_unlock(&mddev->lock);
3809 return ret;
3810}
3811
3812static ssize_t
3813level_store(struct mddev *mddev, const char *buf, size_t len)
3814{
3815 char clevel[16];
3816 ssize_t rv;
3817 size_t slen = len;
3818 struct md_personality *pers, *oldpers;
3819 long level;
3820 void *priv, *oldpriv;
3821 struct md_rdev *rdev;
3822
3823 if (slen == 0 || slen >= sizeof(clevel))
3824 return -EINVAL;
3825
3826 rv = mddev_lock(mddev);
3827 if (rv)
3828 return rv;
3829
3830 if (mddev->pers == NULL) {
3831 strncpy(mddev->clevel, buf, slen);
3832 if (mddev->clevel[slen-1] == '\n')
3833 slen--;
3834 mddev->clevel[slen] = 0;
3835 mddev->level = LEVEL_NONE;
3836 rv = len;
3837 goto out_unlock;
3838 }
3839 rv = -EROFS;
3840 if (mddev->ro)
3841 goto out_unlock;
3842
3843 /* request to change the personality. Need to ensure:
3844 * - array is not engaged in resync/recovery/reshape
3845 * - old personality can be suspended
3846 * - new personality will access other array.
3847 */
3848
3849 rv = -EBUSY;
3850 if (mddev->sync_thread ||
3851 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3852 mddev->reshape_position != MaxSector ||
3853 mddev->sysfs_active)
3854 goto out_unlock;
3855
3856 rv = -EINVAL;
3857 if (!mddev->pers->quiesce) {
3858 pr_warn("md: %s: %s does not support online personality change\n",
3859 mdname(mddev), mddev->pers->name);
3860 goto out_unlock;
3861 }
3862
3863 /* Now find the new personality */
3864 strncpy(clevel, buf, slen);
3865 if (clevel[slen-1] == '\n')
3866 slen--;
3867 clevel[slen] = 0;
3868 if (kstrtol(clevel, 10, &level))
3869 level = LEVEL_NONE;
3870
3871 if (request_module("md-%s", clevel) != 0)
3872 request_module("md-level-%s", clevel);
3873 spin_lock(&pers_lock);
3874 pers = find_pers(level, clevel);
3875 if (!pers || !try_module_get(pers->owner)) {
3876 spin_unlock(&pers_lock);
3877 pr_warn("md: personality %s not loaded\n", clevel);
3878 rv = -EINVAL;
3879 goto out_unlock;
3880 }
3881 spin_unlock(&pers_lock);
3882
3883 if (pers == mddev->pers) {
3884 /* Nothing to do! */
3885 module_put(pers->owner);
3886 rv = len;
3887 goto out_unlock;
3888 }
3889 if (!pers->takeover) {
3890 module_put(pers->owner);
3891 pr_warn("md: %s: %s does not support personality takeover\n",
3892 mdname(mddev), clevel);
3893 rv = -EINVAL;
3894 goto out_unlock;
3895 }
3896
3897 rdev_for_each(rdev, mddev)
3898 rdev->new_raid_disk = rdev->raid_disk;
3899
3900 /* ->takeover must set new_* and/or delta_disks
3901 * if it succeeds, and may set them when it fails.
3902 */
3903 priv = pers->takeover(mddev);
3904 if (IS_ERR(priv)) {
3905 mddev->new_level = mddev->level;
3906 mddev->new_layout = mddev->layout;
3907 mddev->new_chunk_sectors = mddev->chunk_sectors;
3908 mddev->raid_disks -= mddev->delta_disks;
3909 mddev->delta_disks = 0;
3910 mddev->reshape_backwards = 0;
3911 module_put(pers->owner);
3912 pr_warn("md: %s: %s would not accept array\n",
3913 mdname(mddev), clevel);
3914 rv = PTR_ERR(priv);
3915 goto out_unlock;
3916 }
3917
3918 /* Looks like we have a winner */
3919 mddev_suspend(mddev);
3920 mddev_detach(mddev);
3921
3922 spin_lock(&mddev->lock);
3923 oldpers = mddev->pers;
3924 oldpriv = mddev->private;
3925 mddev->pers = pers;
3926 mddev->private = priv;
3927 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3928 mddev->level = mddev->new_level;
3929 mddev->layout = mddev->new_layout;
3930 mddev->chunk_sectors = mddev->new_chunk_sectors;
3931 mddev->delta_disks = 0;
3932 mddev->reshape_backwards = 0;
3933 mddev->degraded = 0;
3934 spin_unlock(&mddev->lock);
3935
3936 if (oldpers->sync_request == NULL &&
3937 mddev->external) {
3938 /* We are converting from a no-redundancy array
3939 * to a redundancy array and metadata is managed
3940 * externally so we need to be sure that writes
3941 * won't block due to a need to transition
3942 * clean->dirty
3943 * until external management is started.
3944 */
3945 mddev->in_sync = 0;
3946 mddev->safemode_delay = 0;
3947 mddev->safemode = 0;
3948 }
3949
3950 oldpers->free(mddev, oldpriv);
3951
3952 if (oldpers->sync_request == NULL &&
3953 pers->sync_request != NULL) {
3954 /* need to add the md_redundancy_group */
3955 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3956 pr_warn("md: cannot register extra attributes for %s\n",
3957 mdname(mddev));
3958 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3959 }
3960 if (oldpers->sync_request != NULL &&
3961 pers->sync_request == NULL) {
3962 /* need to remove the md_redundancy_group */
3963 if (mddev->to_remove == NULL)
3964 mddev->to_remove = &md_redundancy_group;
3965 }
3966
3967 module_put(oldpers->owner);
3968
3969 rdev_for_each(rdev, mddev) {
3970 if (rdev->raid_disk < 0)
3971 continue;
3972 if (rdev->new_raid_disk >= mddev->raid_disks)
3973 rdev->new_raid_disk = -1;
3974 if (rdev->new_raid_disk == rdev->raid_disk)
3975 continue;
3976 sysfs_unlink_rdev(mddev, rdev);
3977 }
3978 rdev_for_each(rdev, mddev) {
3979 if (rdev->raid_disk < 0)
3980 continue;
3981 if (rdev->new_raid_disk == rdev->raid_disk)
3982 continue;
3983 rdev->raid_disk = rdev->new_raid_disk;
3984 if (rdev->raid_disk < 0)
3985 clear_bit(In_sync, &rdev->flags);
3986 else {
3987 if (sysfs_link_rdev(mddev, rdev))
3988 pr_warn("md: cannot register rd%d for %s after level change\n",
3989 rdev->raid_disk, mdname(mddev));
3990 }
3991 }
3992
3993 if (pers->sync_request == NULL) {
3994 /* this is now an array without redundancy, so
3995 * it must always be in_sync
3996 */
3997 mddev->in_sync = 1;
3998 del_timer_sync(&mddev->safemode_timer);
3999 }
4000 blk_set_stacking_limits(&mddev->queue->limits);
4001 pers->run(mddev);
4002 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4003 mddev_resume(mddev);
4004 if (!mddev->thread)
4005 md_update_sb(mddev, 1);
4006 sysfs_notify(&mddev->kobj, NULL, "level");
4007 md_new_event(mddev);
4008 rv = len;
4009out_unlock:
4010 mddev_unlock(mddev);
4011 return rv;
4012}
4013
4014static struct md_sysfs_entry md_level =
4015__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
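/*
 * A minimal usage sketch (device and level names are examples only): on an
 * inactive array the string is simply recorded; on a running array
 * level_store() additionally requires that no resync/reshape is active and
 * that the new personality's ->takeover() accepts the current geometry.
 *
 *	echo raid5 > /sys/block/md0/md/level
 */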
4016
4017static ssize_t
4018layout_show(struct mddev *mddev, char *page)
4019{
4020 /* just a number, not meaningful for all levels */
4021 if (mddev->reshape_position != MaxSector &&
4022 mddev->layout != mddev->new_layout)
4023 return sprintf(page, "%d (%d)\n",
4024 mddev->new_layout, mddev->layout);
4025 return sprintf(page, "%d\n", mddev->layout);
4026}
4027
4028static ssize_t
4029layout_store(struct mddev *mddev, const char *buf, size_t len)
4030{
4031 unsigned int n;
4032 int err;
4033
4034 err = kstrtouint(buf, 10, &n);
4035 if (err < 0)
4036 return err;
4037 err = mddev_lock(mddev);
4038 if (err)
4039 return err;
4040
4041 if (mddev->pers) {
4042 if (mddev->pers->check_reshape == NULL)
4043 err = -EBUSY;
4044 else if (mddev->ro)
4045 err = -EROFS;
4046 else {
4047 mddev->new_layout = n;
4048 err = mddev->pers->check_reshape(mddev);
4049 if (err)
4050 mddev->new_layout = mddev->layout;
4051 }
4052 } else {
4053 mddev->new_layout = n;
4054 if (mddev->reshape_position == MaxSector)
4055 mddev->layout = n;
4056 }
4057 mddev_unlock(mddev);
4058 return err ?: len;
4059}
4060static struct md_sysfs_entry md_layout =
4061__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4062
4063static ssize_t
4064raid_disks_show(struct mddev *mddev, char *page)
4065{
4066 if (mddev->raid_disks == 0)
4067 return 0;
4068 if (mddev->reshape_position != MaxSector &&
4069 mddev->delta_disks != 0)
4070 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4071 mddev->raid_disks - mddev->delta_disks);
4072 return sprintf(page, "%d\n", mddev->raid_disks);
4073}
4074
4075static int update_raid_disks(struct mddev *mddev, int raid_disks);
4076
4077static ssize_t
4078raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4079{
4080 unsigned int n;
4081 int err;
4082
4083 err = kstrtouint(buf, 10, &n);
4084 if (err < 0)
4085 return err;
4086
4087 err = mddev_lock(mddev);
4088 if (err)
4089 return err;
4090 if (mddev->pers)
4091 err = update_raid_disks(mddev, n);
4092 else if (mddev->reshape_position != MaxSector) {
4093 struct md_rdev *rdev;
4094 int olddisks = mddev->raid_disks - mddev->delta_disks;
4095
4096 err = -EINVAL;
4097 rdev_for_each(rdev, mddev) {
4098 if (olddisks < n &&
4099 rdev->data_offset < rdev->new_data_offset)
4100 goto out_unlock;
4101 if (olddisks > n &&
4102 rdev->data_offset > rdev->new_data_offset)
4103 goto out_unlock;
4104 }
4105 err = 0;
4106 mddev->delta_disks = n - olddisks;
4107 mddev->raid_disks = n;
4108 mddev->reshape_backwards = (mddev->delta_disks < 0);
4109 } else
4110 mddev->raid_disks = n;
4111out_unlock:
4112 mddev_unlock(mddev);
4113 return err ? err : len;
4114}
4115static struct md_sysfs_entry md_raid_disks =
4116__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4117
4118static ssize_t
4119chunk_size_show(struct mddev *mddev, char *page)
4120{
4121 if (mddev->reshape_position != MaxSector &&
4122 mddev->chunk_sectors != mddev->new_chunk_sectors)
4123 return sprintf(page, "%d (%d)\n",
4124 mddev->new_chunk_sectors << 9,
4125 mddev->chunk_sectors << 9);
4126 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4127}
4128
4129static ssize_t
4130chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4131{
4132 unsigned long n;
4133 int err;
4134
4135 err = kstrtoul(buf, 10, &n);
4136 if (err < 0)
4137 return err;
4138
4139 err = mddev_lock(mddev);
4140 if (err)
4141 return err;
4142 if (mddev->pers) {
4143 if (mddev->pers->check_reshape == NULL)
4144 err = -EBUSY;
4145 else if (mddev->ro)
4146 err = -EROFS;
4147 else {
4148 mddev->new_chunk_sectors = n >> 9;
4149 err = mddev->pers->check_reshape(mddev);
4150 if (err)
4151 mddev->new_chunk_sectors = mddev->chunk_sectors;
4152 }
4153 } else {
4154 mddev->new_chunk_sectors = n >> 9;
4155 if (mddev->reshape_position == MaxSector)
4156 mddev->chunk_sectors = n >> 9;
4157 }
4158 mddev_unlock(mddev);
4159 return err ?: len;
4160}
4161static struct md_sysfs_entry md_chunk_size =
4162__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
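/*
 * A minimal usage sketch ("md0" is only an example name): the value is in
 * bytes and stored as sectors (n >> 9), so
 *
 *	echo 524288 > /sys/block/md0/md/chunk_size
 *
 * requests 512 KiB chunks (new_chunk_sectors = 1024); on a running array it
 * only takes effect if ->check_reshape() accepts it.
 */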
4163
4164static ssize_t
4165resync_start_show(struct mddev *mddev, char *page)
4166{
4167 if (mddev->recovery_cp == MaxSector)
4168 return sprintf(page, "none\n");
4169 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4170}
4171
4172static ssize_t
4173resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4174{
4175 unsigned long long n;
4176 int err;
4177
4178 if (cmd_match(buf, "none"))
4179 n = MaxSector;
4180 else {
4181 err = kstrtoull(buf, 10, &n);
4182 if (err < 0)
4183 return err;
4184 if (n != (sector_t)n)
4185 return -EINVAL;
4186 }
4187
4188 err = mddev_lock(mddev);
4189 if (err)
4190 return err;
4191 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4192 err = -EBUSY;
4193
4194 if (!err) {
4195 mddev->recovery_cp = n;
4196 if (mddev->pers)
4197 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4198 }
4199 mddev_unlock(mddev);
4200 return err ?: len;
4201}
4202static struct md_sysfs_entry md_resync_start =
4203__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4204 resync_start_show, resync_start_store);
4205
4206/*
4207 * The array state can be:
4208 *
4209 * clear
4210 * No devices, no size, no level
4211 * Equivalent to STOP_ARRAY ioctl
4212 * inactive
4213 * May have some settings, but array is not active
4214 * all IO results in error
4215 * When written, doesn't tear down array, but just stops it
4216 * suspended (not supported yet)
4217 * All IO requests will block. The array can be reconfigured.
4218 * Writing this, if accepted, will block until array is quiescent
4219 * readonly
4220 * no resync can happen. no superblocks get written.
4221 * write requests fail
4222 * read-auto
4223 * like readonly, but behaves like 'clean' on a write request.
4224 *
4225 * clean - no pending writes, but otherwise active.
4226 * When written to inactive array, starts without resync
4227 * If a write request arrives then
4228 * if metadata is known, mark 'dirty' and switch to 'active'.
4229 * if not known, block and switch to write-pending
4230 * If written to an active array that has pending writes, then fails.
4231 * active
4232 * fully active: IO and resync can be happening.
4233 * When written to inactive array, starts with resync
4234 *
4235 * write-pending
4236 * clean, but writes are blocked waiting for 'active' to be written.
4237 *
4238 * active-idle
4239 * like active, but no writes have been seen for a while (100msec).
4240 *
4241 * broken
4242 * RAID0/LINEAR-only: same as clean, but array is missing a member.
4243 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
4244 * when a member is gone, so this state will at least alert the
4245 * user that something is wrong.
4246 */
4247enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4248		   write_pending, active_idle, broken, bad_word};
4249static char *array_states[] = {
4250 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4251	"write-pending", "active-idle", "broken", NULL };
4252
4253static int match_word(const char *word, char **list)
4254{
4255 int n;
4256 for (n=0; list[n]; n++)
4257 if (cmd_match(word, list[n]))
4258 break;
4259 return n;
4260}
4261
4262static ssize_t
4263array_state_show(struct mddev *mddev, char *page)
4264{
4265 enum array_state st = inactive;
4266
4267	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4268		switch(mddev->ro) {
4269 case 1:
4270 st = readonly;
4271 break;
4272 case 2:
4273 st = read_auto;
4274 break;
4275 case 0:
4276 spin_lock(&mddev->lock);
4277 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4278 st = write_pending;
4279 else if (mddev->in_sync)
4280 st = clean;
4281 else if (mddev->safemode)
4282 st = active_idle;
4283 else
4284 st = active;
4285 spin_unlock(&mddev->lock);
4286 }
4287
4288 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4289 st = broken;
4290 } else {
4291		if (list_empty(&mddev->disks) &&
4292 mddev->raid_disks == 0 &&
4293 mddev->dev_sectors == 0)
4294 st = clear;
4295 else
4296 st = inactive;
4297 }
4298 return sprintf(page, "%s\n", array_states[st]);
4299}
4300
4301static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4302static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4303static int do_md_run(struct mddev *mddev);
4304static int restart_array(struct mddev *mddev);
4305
4306static ssize_t
4307array_state_store(struct mddev *mddev, const char *buf, size_t len)
4308{
4309 int err = 0;
4310 enum array_state st = match_word(buf, array_states);
4311
4312 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4313 /* don't take reconfig_mutex when toggling between
4314 * clean and active
4315 */
4316 spin_lock(&mddev->lock);
4317 if (st == active) {
4318 restart_array(mddev);
4319 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4320 md_wakeup_thread(mddev->thread);
4321 wake_up(&mddev->sb_wait);
4322 } else /* st == clean */ {
4323 restart_array(mddev);
4324 if (!set_in_sync(mddev))
4325 err = -EBUSY;
4326 }
4327 if (!err)
4328 sysfs_notify_dirent_safe(mddev->sysfs_state);
4329 spin_unlock(&mddev->lock);
4330 return err ?: len;
4331 }
4332 err = mddev_lock(mddev);
4333 if (err)
4334 return err;
4335 err = -EINVAL;
4336 switch(st) {
4337 case bad_word:
4338 break;
4339 case clear:
4340 /* stopping an active array */
4341 err = do_md_stop(mddev, 0, NULL);
4342 break;
4343 case inactive:
4344 /* stopping an active array */
4345 if (mddev->pers)
4346 err = do_md_stop(mddev, 2, NULL);
4347 else
4348 err = 0; /* already inactive */
4349 break;
4350 case suspended:
4351 break; /* not supported yet */
4352 case readonly:
4353 if (mddev->pers)
4354 err = md_set_readonly(mddev, NULL);
4355 else {
4356 mddev->ro = 1;
4357 set_disk_ro(mddev->gendisk, 1);
4358 err = do_md_run(mddev);
4359 }
4360 break;
4361 case read_auto:
4362 if (mddev->pers) {
4363 if (mddev->ro == 0)
4364 err = md_set_readonly(mddev, NULL);
4365 else if (mddev->ro == 1)
4366 err = restart_array(mddev);
4367 if (err == 0) {
4368 mddev->ro = 2;
4369 set_disk_ro(mddev->gendisk, 0);
4370 }
4371 } else {
4372 mddev->ro = 2;
4373 err = do_md_run(mddev);
4374 }
4375 break;
4376 case clean:
4377 if (mddev->pers) {
4378 err = restart_array(mddev);
4379 if (err)
4380 break;
4381 spin_lock(&mddev->lock);
4382 if (!set_in_sync(mddev))
4383 err = -EBUSY;
4384 spin_unlock(&mddev->lock);
4385 } else
4386 err = -EINVAL;
4387 break;
4388 case active:
4389 if (mddev->pers) {
4390 err = restart_array(mddev);
4391 if (err)
4392 break;
4393 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4394 wake_up(&mddev->sb_wait);
4395 err = 0;
4396 } else {
4397 mddev->ro = 0;
4398 set_disk_ro(mddev->gendisk, 0);
4399 err = do_md_run(mddev);
4400 }
4401 break;
4402 case write_pending:
4403 case active_idle:
4404	case broken:
4405		/* these cannot be set */
4406 break;
4407 }
4408
4409 if (!err) {
4410 if (mddev->hold_active == UNTIL_IOCTL)
4411 mddev->hold_active = 0;
4412 sysfs_notify_dirent_safe(mddev->sysfs_state);
4413 }
4414 mddev_unlock(mddev);
4415 return err ?: len;
4416}
4417static struct md_sysfs_entry md_array_state =
4418__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
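/*
 * A minimal usage sketch for the states documented above ("md0" is only an
 * example name):
 *
 *	cat /sys/block/md0/md/array_state          e.g. "clean" or "active"
 *	echo readonly > /sys/block/md0/md/array_state
 *
 * Writing "clean" or "active" to a running array takes the short spinlock
 * path in array_state_store(); "write-pending", "active-idle" and "broken"
 * are report-only and cannot be set.
 */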
4419
4420static ssize_t
4421max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4422 return sprintf(page, "%d\n",
4423 atomic_read(&mddev->max_corr_read_errors));
4424}
4425
4426static ssize_t
4427max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4428{
4429 unsigned int n;
4430 int rv;
4431
4432 rv = kstrtouint(buf, 10, &n);
4433 if (rv < 0)
4434 return rv;
4435 atomic_set(&mddev->max_corr_read_errors, n);
4436 return len;
4437}
4438
4439static struct md_sysfs_entry max_corr_read_errors =
4440__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4441 max_corrected_read_errors_store);
4442
4443static ssize_t
4444null_show(struct mddev *mddev, char *page)
4445{
4446 return -EINVAL;
4447}
4448
4449static ssize_t
4450new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4451{
4452 /* buf must be %d:%d\n? giving major and minor numbers */
4453 /* The new device is added to the array.
4454 * If the array has a persistent superblock, we read the
4455 * superblock to initialise info and check validity.
4456 * Otherwise, only checking done is that in bind_rdev_to_array,
4457 * which mainly checks size.
4458 */
4459 char *e;
4460 int major = simple_strtoul(buf, &e, 10);
4461 int minor;
4462 dev_t dev;
4463 struct md_rdev *rdev;
4464 int err;
4465
4466 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4467 return -EINVAL;
4468 minor = simple_strtoul(e+1, &e, 10);
4469 if (*e && *e != '\n')
4470 return -EINVAL;
4471 dev = MKDEV(major, minor);
4472 if (major != MAJOR(dev) ||
4473 minor != MINOR(dev))
4474 return -EOVERFLOW;
4475
4476 flush_workqueue(md_misc_wq);
4477
4478 err = mddev_lock(mddev);
4479 if (err)
4480 return err;
4481 if (mddev->persistent) {
4482 rdev = md_import_device(dev, mddev->major_version,
4483 mddev->minor_version);
4484 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4485 struct md_rdev *rdev0
4486 = list_entry(mddev->disks.next,
4487 struct md_rdev, same_set);
4488 err = super_types[mddev->major_version]
4489 .load_super(rdev, rdev0, mddev->minor_version);
4490 if (err < 0)
4491 goto out;
4492 }
4493 } else if (mddev->external)
4494 rdev = md_import_device(dev, -2, -1);
4495 else
4496 rdev = md_import_device(dev, -1, -1);
4497
4498 if (IS_ERR(rdev)) {
4499 mddev_unlock(mddev);
4500 return PTR_ERR(rdev);
4501 }
4502 err = bind_rdev_to_array(rdev, mddev);
4503 out:
4504 if (err)
4505 export_rdev(rdev);
4506 mddev_unlock(mddev);
4507 if (!err)
4508 md_new_event(mddev);
4509 return err ? err : len;
4510}
4511
4512static struct md_sysfs_entry md_new_device =
4513__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
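/*
 * A minimal usage sketch: the value is the "major:minor" pair of an
 * existing block device (8:32, commonly /dev/sdc, is used here purely as an
 * example), which is then imported and bound to the array as described in
 * new_dev_store() above.
 *
 *	echo 8:32 > /sys/block/md0/md/new_dev
 */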
4514
4515static ssize_t
4516bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4517{
4518 char *end;
4519 unsigned long chunk, end_chunk;
4520 int err;
4521
4522 err = mddev_lock(mddev);
4523 if (err)
4524 return err;
4525 if (!mddev->bitmap)
4526 goto out;
4527 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4528 while (*buf) {
4529 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4530 if (buf == end) break;
4531 if (*end == '-') { /* range */
4532 buf = end + 1;
4533 end_chunk = simple_strtoul(buf, &end, 0);
4534 if (buf == end) break;
4535 }
4536 if (*end && !isspace(*end)) break;
4537 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4538 buf = skip_spaces(end);
4539 }
4540 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4541out:
4542 mddev_unlock(mddev);
4543 return len;
4544}
4545
4546static struct md_sysfs_entry md_bitmap =
4547__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
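/*
 * A minimal usage sketch ("md0" is only an example name): single chunk
 * numbers or "first-last" ranges, separated by whitespace, are marked dirty
 * in an existing bitmap.
 *
 *	echo "0-127 4096" > /sys/block/md0/md/bitmap_set_bits
 */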
4548
4549static ssize_t
4550size_show(struct mddev *mddev, char *page)
4551{
4552 return sprintf(page, "%llu\n",
4553 (unsigned long long)mddev->dev_sectors / 2);
4554}
4555
4556static int update_size(struct mddev *mddev, sector_t num_sectors);
4557
4558static ssize_t
4559size_store(struct mddev *mddev, const char *buf, size_t len)
4560{
4561 /* If array is inactive, we can reduce the component size, but
4562 * not increase it (except from 0).
4563 * If array is active, we can try an on-line resize
4564 */
4565 sector_t sectors;
4566 int err = strict_blocks_to_sectors(buf, &sectors);
4567
4568 if (err < 0)
4569 return err;
4570 err = mddev_lock(mddev);
4571 if (err)
4572 return err;
4573 if (mddev->pers) {
4574 err = update_size(mddev, sectors);
4575 if (err == 0)
4576 md_update_sb(mddev, 1);
4577 } else {
4578 if (mddev->dev_sectors == 0 ||
4579 mddev->dev_sectors > sectors)
4580 mddev->dev_sectors = sectors;
4581 else
4582 err = -ENOSPC;
4583 }
4584 mddev_unlock(mddev);
4585 return err ? err : len;
4586}
4587
4588static struct md_sysfs_entry md_size =
4589__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4590
4591/* Metadata version.
4592 * This is one of
4593 * 'none' for arrays with no metadata (good luck...)
4594 * 'external' for arrays with externally managed metadata,
4595 * or N.M for internally known formats
4596 */
4597static ssize_t
4598metadata_show(struct mddev *mddev, char *page)
4599{
4600 if (mddev->persistent)
4601 return sprintf(page, "%d.%d\n",
4602 mddev->major_version, mddev->minor_version);
4603 else if (mddev->external)
4604 return sprintf(page, "external:%s\n", mddev->metadata_type);
4605 else
4606 return sprintf(page, "none\n");
4607}
4608
4609static ssize_t
4610metadata_store(struct mddev *mddev, const char *buf, size_t len)
4611{
4612 int major, minor;
4613 char *e;
4614 int err;
4615 /* Changing the details of 'external' metadata is
4616 * always permitted. Otherwise there must be
4617 * no devices attached to the array.
4618 */
4619
4620 err = mddev_lock(mddev);
4621 if (err)
4622 return err;
4623 err = -EBUSY;
4624 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4625 ;
4626 else if (!list_empty(&mddev->disks))
4627 goto out_unlock;
4628
4629 err = 0;
4630 if (cmd_match(buf, "none")) {
4631 mddev->persistent = 0;
4632 mddev->external = 0;
4633 mddev->major_version = 0;
4634 mddev->minor_version = 90;
4635 goto out_unlock;
4636 }
4637 if (strncmp(buf, "external:", 9) == 0) {
4638 size_t namelen = len-9;
4639 if (namelen >= sizeof(mddev->metadata_type))
4640 namelen = sizeof(mddev->metadata_type)-1;
4641 strncpy(mddev->metadata_type, buf+9, namelen);
4642 mddev->metadata_type[namelen] = 0;
4643 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4644 mddev->metadata_type[--namelen] = 0;
4645 mddev->persistent = 0;
4646 mddev->external = 1;
4647 mddev->major_version = 0;
4648 mddev->minor_version = 90;
4649 goto out_unlock;
4650 }
4651 major = simple_strtoul(buf, &e, 10);
4652 err = -EINVAL;
4653 if (e==buf || *e != '.')
4654 goto out_unlock;
4655 buf = e+1;
4656 minor = simple_strtoul(buf, &e, 10);
4657 if (e==buf || (*e && *e != '\n') )
4658 goto out_unlock;
4659 err = -ENOENT;
4660 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4661 goto out_unlock;
4662 mddev->major_version = major;
4663 mddev->minor_version = minor;
4664 mddev->persistent = 1;
4665 mddev->external = 0;
4666 err = 0;
4667out_unlock:
4668 mddev_unlock(mddev);
4669 return err ?: len;
4670}
4671
4672static struct md_sysfs_entry md_metadata =
4673__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
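/*
 * A minimal usage sketch ("md0" and "imsm" are examples only): accepted
 * values are "none", "external:<type>", or an internally known major.minor
 * version; anything other than an "external:..." update is refused once
 * devices are attached to the array.
 *
 *	echo 1.2 > /sys/block/md0/md/metadata_version
 *	echo external:imsm > /sys/block/md0/md/metadata_version
 */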
4674
4675static ssize_t
4676action_show(struct mddev *mddev, char *page)
4677{
4678 char *type = "idle";
4679 unsigned long recovery = mddev->recovery;
4680 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4681 type = "frozen";
4682 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4683 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4684 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4685 type = "reshape";
4686 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4687 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4688 type = "resync";
4689 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4690 type = "check";
4691 else
4692 type = "repair";
4693 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4694 type = "recover";
4695 else if (mddev->reshape_position != MaxSector)
4696 type = "reshape";
4697 }
4698 return sprintf(page, "%s\n", type);
4699}
4700
4701static ssize_t
4702action_store(struct mddev *mddev, const char *page, size_t len)
4703{
4704 if (!mddev->pers || !mddev->pers->sync_request)
4705 return -EINVAL;
4706
4707
4708 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4709 if (cmd_match(page, "frozen"))
4710 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4711 else
4712 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4713 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4714 mddev_lock(mddev) == 0) {
4715 flush_workqueue(md_misc_wq);
4716 if (mddev->sync_thread) {
4717 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4718 md_reap_sync_thread(mddev);
4719 }
4720 mddev_unlock(mddev);
4721 }
4722 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4723 return -EBUSY;
4724 else if (cmd_match(page, "resync"))
4725 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4726 else if (cmd_match(page, "recover")) {
4727 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4728 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4729 } else if (cmd_match(page, "reshape")) {
4730 int err;
4731 if (mddev->pers->start_reshape == NULL)
4732 return -EINVAL;
4733 err = mddev_lock(mddev);
4734 if (!err) {
4735 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4736 err = -EBUSY;
4737 else {
4738 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4739 err = mddev->pers->start_reshape(mddev);
4740 }
4741 mddev_unlock(mddev);
4742 }
4743 if (err)
4744 return err;
4745 sysfs_notify(&mddev->kobj, NULL, "degraded");
4746 } else {
4747 if (cmd_match(page, "check"))
4748 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4749 else if (!cmd_match(page, "repair"))
4750 return -EINVAL;
4751 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4752 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4753 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4754 }
4755 if (mddev->ro == 2) {
4756 /* A write to sync_action is enough to justify
4757 * canceling read-auto mode
4758 */
4759 mddev->ro = 0;
4760 md_wakeup_thread(mddev->sync_thread);
4761 }
4762 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4763 md_wakeup_thread(mddev->thread);
4764 sysfs_notify_dirent_safe(mddev->sysfs_action);
4765 return len;
4766}
4767
4768static struct md_sysfs_entry md_scan_mode =
4769__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
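/*
 * A minimal usage sketch ("md0" is only an example name): action_store()
 * accepts "idle", "frozen", "resync", "recover", "check", "repair" and
 * "reshape".  A typical scrub is started with
 *
 *	echo check > /sys/block/md0/md/sync_action
 *
 * after which mismatch_cnt (below) reflects any inconsistencies found.
 */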
4770
4771static ssize_t
4772last_sync_action_show(struct mddev *mddev, char *page)
4773{
4774 return sprintf(page, "%s\n", mddev->last_sync_action);
4775}
4776
4777static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4778
4779static ssize_t
4780mismatch_cnt_show(struct mddev *mddev, char *page)
4781{
4782 return sprintf(page, "%llu\n",
4783 (unsigned long long)
4784 atomic64_read(&mddev->resync_mismatches));
4785}
4786
4787static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4788
4789static ssize_t
4790sync_min_show(struct mddev *mddev, char *page)
4791{
4792 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4793 mddev->sync_speed_min ? "local": "system");
4794}
4795
4796static ssize_t
4797sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4798{
4799 unsigned int min;
4800 int rv;
4801
4802 if (strncmp(buf, "system", 6)==0) {
4803 min = 0;
4804 } else {
4805 rv = kstrtouint(buf, 10, &min);
4806 if (rv < 0)
4807 return rv;
4808 if (min == 0)
4809 return -EINVAL;
4810 }
4811 mddev->sync_speed_min = min;
4812 return len;
4813}
4814
4815static struct md_sysfs_entry md_sync_min =
4816__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4817
4818static ssize_t
4819sync_max_show(struct mddev *mddev, char *page)
4820{
4821 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4822 mddev->sync_speed_max ? "local": "system");
4823}
4824
4825static ssize_t
4826sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4827{
4828 unsigned int max;
4829 int rv;
4830
4831 if (strncmp(buf, "system", 6)==0) {
4832 max = 0;
4833 } else {
4834 rv = kstrtouint(buf, 10, &max);
4835 if (rv < 0)
4836 return rv;
4837 if (max == 0)
4838 return -EINVAL;
4839 }
4840 mddev->sync_speed_max = max;
4841 return len;
4842}
4843
4844static struct md_sysfs_entry md_sync_max =
4845__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
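/*
 * A minimal usage sketch ("md0" is only an example name): a positive
 * integer sets a per-array limit (reported as "local"), while "system"
 * reverts to the system-wide default (stored internally as 0).
 *
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 *	echo system > /sys/block/md0/md/sync_speed_max
 */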
4846
4847static ssize_t
4848degraded_show(struct mddev *mddev, char *page)
4849{
4850 return sprintf(page, "%d\n", mddev->degraded);
4851}
4852static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4853
4854static ssize_t
4855sync_force_parallel_show(struct mddev *mddev, char *page)
4856{
4857 return sprintf(page, "%d\n", mddev->parallel_resync);
4858}
4859
4860static ssize_t
4861sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4862{
4863 long n;
4864
4865 if (kstrtol(buf, 10, &n))
4866 return -EINVAL;
4867
4868 if (n != 0 && n != 1)
4869 return -EINVAL;
4870
4871 mddev->parallel_resync = n;
4872
4873 if (mddev->sync_thread)
4874 wake_up(&resync_wait);
4875
4876 return len;
4877}
4878
4879/* force parallel resync, even with shared block devices */
4880static struct md_sysfs_entry md_sync_force_parallel =
4881__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4882 sync_force_parallel_show, sync_force_parallel_store);
4883
4884static ssize_t
4885sync_speed_show(struct mddev *mddev, char *page)
4886{
4887 unsigned long resync, dt, db;
4888 if (mddev->curr_resync == 0)
4889 return sprintf(page, "none\n");
4890 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4891 dt = (jiffies - mddev->resync_mark) / HZ;
4892 if (!dt) dt++;
4893 db = resync - mddev->resync_mark_cnt;
4894 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4895}
4896
4897static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
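/*
 * Worked example of the arithmetic in sync_speed_show() above: if
 * resync_mark was taken 10 seconds ago and 1000000 sectors have completed
 * since, then db/dt/2 = 1000000 / 10 / 2 = 50000, i.e. 50000 KiB/s (the /2
 * converts 512-byte sectors to 1K blocks).
 */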
4898
4899static ssize_t
4900sync_completed_show(struct mddev *mddev, char *page)
4901{
4902 unsigned long long max_sectors, resync;
4903
4904 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4905 return sprintf(page, "none\n");
4906
4907 if (mddev->curr_resync == 1 ||
4908 mddev->curr_resync == 2)
4909 return sprintf(page, "delayed\n");
4910
4911 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4912 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4913 max_sectors = mddev->resync_max_sectors;
4914 else
4915 max_sectors = mddev->dev_sectors;
4916
4917 resync = mddev->curr_resync_completed;
4918 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4919}
4920
4921static struct md_sysfs_entry md_sync_completed =
4922 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4923
4924static ssize_t
4925min_sync_show(struct mddev *mddev, char *page)
4926{
4927 return sprintf(page, "%llu\n",
4928 (unsigned long long)mddev->resync_min);
4929}
4930static ssize_t
4931min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4932{
4933 unsigned long long min;
4934 int err;
4935
4936 if (kstrtoull(buf, 10, &min))
4937 return -EINVAL;
4938
4939 spin_lock(&mddev->lock);
4940 err = -EINVAL;
4941 if (min > mddev->resync_max)
4942 goto out_unlock;
4943
4944 err = -EBUSY;
4945 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4946 goto out_unlock;
4947
4948 /* Round down to multiple of 4K for safety */
4949 mddev->resync_min = round_down(min, 8);
4950 err = 0;
4951
4952out_unlock:
4953 spin_unlock(&mddev->lock);
4954 return err ?: len;
4955}
4956
4957static struct md_sysfs_entry md_min_sync =
4958__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4959
4960static ssize_t
4961max_sync_show(struct mddev *mddev, char *page)
4962{
4963 if (mddev->resync_max == MaxSector)
4964 return sprintf(page, "max\n");
4965 else
4966 return sprintf(page, "%llu\n",
4967 (unsigned long long)mddev->resync_max);
4968}
4969static ssize_t
4970max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4971{
4972 int err;
4973 spin_lock(&mddev->lock);
4974 if (strncmp(buf, "max", 3) == 0)
4975 mddev->resync_max = MaxSector;
4976 else {
4977 unsigned long long max;
4978 int chunk;
4979
4980 err = -EINVAL;
4981 if (kstrtoull(buf, 10, &max))
4982 goto out_unlock;
4983 if (max < mddev->resync_min)
4984 goto out_unlock;
4985
4986 err = -EBUSY;
4987 if (max < mddev->resync_max &&
4988 mddev->ro == 0 &&
4989 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4990 goto out_unlock;
4991
4992 /* Must be a multiple of chunk_size */
4993 chunk = mddev->chunk_sectors;
4994 if (chunk) {
4995 sector_t temp = max;
4996
4997 err = -EINVAL;
4998 if (sector_div(temp, chunk))
4999 goto out_unlock;
5000 }
5001 mddev->resync_max = max;
5002 }
5003 wake_up(&mddev->recovery_wait);
5004 err = 0;
5005out_unlock:
5006 spin_unlock(&mddev->lock);
5007 return err ?: len;
5008}
5009
5010static struct md_sysfs_entry md_max_sync =
5011__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
5012
5013static ssize_t
5014suspend_lo_show(struct mddev *mddev, char *page)
5015{
5016 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5017}
5018
5019static ssize_t
5020suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5021{
5022 unsigned long long new;
5023 int err;
5024
5025 err = kstrtoull(buf, 10, &new);
5026 if (err < 0)
5027 return err;
5028 if (new != (sector_t)new)
5029 return -EINVAL;
5030
5031 err = mddev_lock(mddev);
5032 if (err)
5033 return err;
5034 err = -EINVAL;
5035 if (mddev->pers == NULL ||
5036 mddev->pers->quiesce == NULL)
5037 goto unlock;
5038 mddev_suspend(mddev);
5039 mddev->suspend_lo = new;
5040 mddev_resume(mddev);
5041
5042 err = 0;
5043unlock:
5044 mddev_unlock(mddev);
5045 return err ?: len;
5046}
5047static struct md_sysfs_entry md_suspend_lo =
5048__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5049
5050static ssize_t
5051suspend_hi_show(struct mddev *mddev, char *page)
5052{
5053 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5054}
5055
5056static ssize_t
5057suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5058{
5059 unsigned long long new;
5060 int err;
5061
5062 err = kstrtoull(buf, 10, &new);
5063 if (err < 0)
5064 return err;
5065 if (new != (sector_t)new)
5066 return -EINVAL;
5067
5068 err = mddev_lock(mddev);
5069 if (err)
5070 return err;
5071 err = -EINVAL;
5072 if (mddev->pers == NULL)
5073 goto unlock;
5074
5075 mddev_suspend(mddev);
5076 mddev->suspend_hi = new;
5077 mddev_resume(mddev);
5078
5079 err = 0;
5080unlock:
5081 mddev_unlock(mddev);
5082 return err ?: len;
5083}
5084static struct md_sysfs_entry md_suspend_hi =
5085__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5086
5087static ssize_t
5088reshape_position_show(struct mddev *mddev, char *page)
5089{
5090 if (mddev->reshape_position != MaxSector)
5091 return sprintf(page, "%llu\n",
5092 (unsigned long long)mddev->reshape_position);
5093 strcpy(page, "none\n");
5094 return 5;
5095}
5096
5097static ssize_t
5098reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5099{
5100 struct md_rdev *rdev;
5101 unsigned long long new;
5102 int err;
5103
5104 err = kstrtoull(buf, 10, &new);
5105 if (err < 0)
5106 return err;
5107 if (new != (sector_t)new)
5108 return -EINVAL;
5109 err = mddev_lock(mddev);
5110 if (err)
5111 return err;
5112 err = -EBUSY;
5113 if (mddev->pers)
5114 goto unlock;
5115 mddev->reshape_position = new;
5116 mddev->delta_disks = 0;
5117 mddev->reshape_backwards = 0;
5118 mddev->new_level = mddev->level;
5119 mddev->new_layout = mddev->layout;
5120 mddev->new_chunk_sectors = mddev->chunk_sectors;
5121 rdev_for_each(rdev, mddev)
5122 rdev->new_data_offset = rdev->data_offset;
5123 err = 0;
5124unlock:
5125 mddev_unlock(mddev);
5126 return err ?: len;
5127}
5128
5129static struct md_sysfs_entry md_reshape_position =
5130__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5131 reshape_position_store);
5132
5133static ssize_t
5134reshape_direction_show(struct mddev *mddev, char *page)
5135{
5136 return sprintf(page, "%s\n",
5137 mddev->reshape_backwards ? "backwards" : "forwards");
5138}
5139
5140static ssize_t
5141reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5142{
5143 int backwards = 0;
5144 int err;
5145
5146 if (cmd_match(buf, "forwards"))
5147 backwards = 0;
5148 else if (cmd_match(buf, "backwards"))
5149 backwards = 1;
5150 else
5151 return -EINVAL;
5152 if (mddev->reshape_backwards == backwards)
5153 return len;
5154
5155 err = mddev_lock(mddev);
5156 if (err)
5157 return err;
5158 /* check if we are allowed to change */
5159 if (mddev->delta_disks)
5160 err = -EBUSY;
5161 else if (mddev->persistent &&
5162 mddev->major_version == 0)
5163 err = -EINVAL;
5164 else
5165 mddev->reshape_backwards = backwards;
5166 mddev_unlock(mddev);
5167 return err ?: len;
5168}
5169
5170static struct md_sysfs_entry md_reshape_direction =
5171__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5172 reshape_direction_store);
5173
5174static ssize_t
5175array_size_show(struct mddev *mddev, char *page)
5176{
5177 if (mddev->external_size)
5178 return sprintf(page, "%llu\n",
5179 (unsigned long long)mddev->array_sectors/2);
5180 else
5181 return sprintf(page, "default\n");
5182}
5183
5184static ssize_t
5185array_size_store(struct mddev *mddev, const char *buf, size_t len)
5186{
5187 sector_t sectors;
5188 int err;
5189
5190 err = mddev_lock(mddev);
5191 if (err)
5192 return err;
5193
5194	/* cluster raid doesn't support changing array_sectors */
5195 if (mddev_is_clustered(mddev)) {
5196 mddev_unlock(mddev);
5197 return -EINVAL;
5198 }
5199
5200 if (strncmp(buf, "default", 7) == 0) {
5201 if (mddev->pers)
5202 sectors = mddev->pers->size(mddev, 0, 0);
5203 else
5204 sectors = mddev->array_sectors;
5205
5206 mddev->external_size = 0;
5207 } else {
5208 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5209 err = -EINVAL;
5210 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5211 err = -E2BIG;
5212 else
5213 mddev->external_size = 1;
5214 }
5215
5216 if (!err) {
5217 mddev->array_sectors = sectors;
5218 if (mddev->pers) {
5219 set_capacity(mddev->gendisk, mddev->array_sectors);
5220 revalidate_disk(mddev->gendisk);
5221 }
5222 }
5223 mddev_unlock(mddev);
5224 return err ?: len;
5225}
5226
5227static struct md_sysfs_entry md_array_size =
5228__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5229 array_size_store);
5230
5231static ssize_t
5232consistency_policy_show(struct mddev *mddev, char *page)
5233{
5234 int ret;
5235
5236 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5237 ret = sprintf(page, "journal\n");
5238 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5239 ret = sprintf(page, "ppl\n");
5240 } else if (mddev->bitmap) {
5241 ret = sprintf(page, "bitmap\n");
5242 } else if (mddev->pers) {
5243 if (mddev->pers->sync_request)
5244 ret = sprintf(page, "resync\n");
5245 else
5246 ret = sprintf(page, "none\n");
5247 } else {
5248 ret = sprintf(page, "unknown\n");
5249 }
5250
5251 return ret;
5252}
5253
5254static ssize_t
5255consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5256{
5257 int err = 0;
5258
5259 if (mddev->pers) {
5260 if (mddev->pers->change_consistency_policy)
5261 err = mddev->pers->change_consistency_policy(mddev, buf);
5262 else
5263 err = -EBUSY;
5264 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5265 set_bit(MD_HAS_PPL, &mddev->flags);
5266 } else {
5267 err = -EINVAL;
5268 }
5269
5270 return err ? err : len;
5271}
5272
5273static struct md_sysfs_entry md_consistency_policy =
5274__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5275 consistency_policy_store);
5276
5277static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5278{
5279 return sprintf(page, "%d\n", mddev->fail_last_dev);
5280}
5281
5282/*
5283 * Setting fail_last_dev to true allows the last device to be forcibly
5284 * removed from RAID1/RAID10.
5285 */
5286static ssize_t
5287fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5288{
5289 int ret;
5290 bool value;
5291
5292 ret = kstrtobool(buf, &value);
5293 if (ret)
5294 return ret;
5295
5296 if (value != mddev->fail_last_dev)
5297 mddev->fail_last_dev = value;
5298
5299 return len;
5300}
5301static struct md_sysfs_entry md_fail_last_dev =
5302__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5303 fail_last_dev_store);
5304
5305static struct attribute *md_default_attrs[] = {
5306 &md_level.attr,
5307 &md_layout.attr,
5308 &md_raid_disks.attr,
5309 &md_chunk_size.attr,
5310 &md_size.attr,
5311 &md_resync_start.attr,
5312 &md_metadata.attr,
5313 &md_new_device.attr,
5314 &md_safe_delay.attr,
5315 &md_array_state.attr,
5316 &md_reshape_position.attr,
5317 &md_reshape_direction.attr,
5318 &md_array_size.attr,
5319 &max_corr_read_errors.attr,
5320 &md_consistency_policy.attr,
5321	&md_fail_last_dev.attr,
5322	NULL,
5323};
5324
5325static struct attribute *md_redundancy_attrs[] = {
5326 &md_scan_mode.attr,
5327 &md_last_scan_mode.attr,
5328 &md_mismatches.attr,
5329 &md_sync_min.attr,
5330 &md_sync_max.attr,
5331 &md_sync_speed.attr,
5332 &md_sync_force_parallel.attr,
5333 &md_sync_completed.attr,
5334 &md_min_sync.attr,
5335 &md_max_sync.attr,
5336 &md_suspend_lo.attr,
5337 &md_suspend_hi.attr,
5338 &md_bitmap.attr,
5339 &md_degraded.attr,
5340 NULL,
5341};
5342static struct attribute_group md_redundancy_group = {
5343 .name = NULL,
5344 .attrs = md_redundancy_attrs,
5345};
5346
5347static ssize_t
5348md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5349{
5350 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5351 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5352 ssize_t rv;
5353
5354 if (!entry->show)
5355 return -EIO;
5356 spin_lock(&all_mddevs_lock);
5357 if (list_empty(&mddev->all_mddevs)) {
5358 spin_unlock(&all_mddevs_lock);
5359 return -EBUSY;
5360 }
5361 mddev_get(mddev);
5362 spin_unlock(&all_mddevs_lock);
5363
5364 rv = entry->show(mddev, page);
5365 mddev_put(mddev);
5366 return rv;
5367}
5368
5369static ssize_t
5370md_attr_store(struct kobject *kobj, struct attribute *attr,
5371 const char *page, size_t length)
5372{
5373 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5374 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5375 ssize_t rv;
5376
5377 if (!entry->store)
5378 return -EIO;
5379 if (!capable(CAP_SYS_ADMIN))
5380 return -EACCES;
5381 spin_lock(&all_mddevs_lock);
5382 if (list_empty(&mddev->all_mddevs)) {
5383 spin_unlock(&all_mddevs_lock);
5384 return -EBUSY;
5385 }
5386 mddev_get(mddev);
5387 spin_unlock(&all_mddevs_lock);
5388 rv = entry->store(mddev, page, length);
5389 mddev_put(mddev);
5390 return rv;
5391}
5392
5393static void md_free(struct kobject *ko)
5394{
5395 struct mddev *mddev = container_of(ko, struct mddev, kobj);
5396
5397 if (mddev->sysfs_state)
5398 sysfs_put(mddev->sysfs_state);
5399
5400 if (mddev->gendisk)
5401 del_gendisk(mddev->gendisk);
5402 if (mddev->queue)
5403 blk_cleanup_queue(mddev->queue);
5404 if (mddev->gendisk)
5405 put_disk(mddev->gendisk);
5406 percpu_ref_exit(&mddev->writes_pending);
5407
5408 bioset_exit(&mddev->bio_set);
5409 bioset_exit(&mddev->sync_set);
5410 kfree(mddev);
5411}
5412
5413static const struct sysfs_ops md_sysfs_ops = {
5414 .show = md_attr_show,
5415 .store = md_attr_store,
5416};
5417static struct kobj_type md_ktype = {
5418 .release = md_free,
5419 .sysfs_ops = &md_sysfs_ops,
5420 .default_attrs = md_default_attrs,
5421};
5422
5423int mdp_major = 0;
5424
5425static void mddev_delayed_delete(struct work_struct *ws)
5426{
5427 struct mddev *mddev = container_of(ws, struct mddev, del_work);
5428
5429 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5430 kobject_del(&mddev->kobj);
5431 kobject_put(&mddev->kobj);
5432}
5433
5434static void no_op(struct percpu_ref *r) {}
5435
5436int mddev_init_writes_pending(struct mddev *mddev)
5437{
5438 if (mddev->writes_pending.percpu_count_ptr)
5439 return 0;
5440	if (percpu_ref_init(&mddev->writes_pending, no_op,
5441 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
5442		return -ENOMEM;
5443 /* We want to start with the refcount at zero */
5444 percpu_ref_put(&mddev->writes_pending);
5445 return 0;
5446}
5447EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5448
5449static int md_alloc(dev_t dev, char *name)
5450{
5451 /*
5452 * If dev is zero, name is the name of a device to allocate with
5453 * an arbitrary minor number. It will be "md_???"
5454 * If dev is non-zero it must be a device number with a MAJOR of
5455 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5456 * the device is being created by opening a node in /dev.
5457 * If "name" is not NULL, the device is being created by
5458 * writing to /sys/module/md_mod/parameters/new_array.
5459 */
5460 static DEFINE_MUTEX(disks_mutex);
5461	struct mddev *mddev = mddev_find_or_alloc(dev);
5462	struct gendisk *disk;
5463 int partitioned;
5464 int shift;
5465 int unit;
5466 int error;
5467
5468 if (!mddev)
5469 return -ENODEV;
5470
5471 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5472 shift = partitioned ? MdpMinorShift : 0;
5473 unit = MINOR(mddev->unit) >> shift;
5474
5475 /* wait for any previous instance of this device to be
5476 * completely removed (mddev_delayed_delete).
5477 */
5478 flush_workqueue(md_misc_wq);
5479
5480 mutex_lock(&disks_mutex);
5481 error = -EEXIST;
5482 if (mddev->gendisk)
5483 goto abort;
5484
5485 if (name && !dev) {
5486 /* Need to ensure that 'name' is not a duplicate.
5487 */
5488 struct mddev *mddev2;
5489 spin_lock(&all_mddevs_lock);
5490
5491 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5492 if (mddev2->gendisk &&
5493 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5494 spin_unlock(&all_mddevs_lock);
5495 goto abort;
5496 }
5497 spin_unlock(&all_mddevs_lock);
5498 }
5499 if (name && dev)
5500 /*
5501 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
5502 */
5503 mddev->hold_active = UNTIL_STOP;
5504
5505 error = -ENOMEM;
5506 mddev->queue = blk_alloc_queue(GFP_KERNEL);
5507 if (!mddev->queue)
5508 goto abort;
5509 mddev->queue->queuedata = mddev;
5510
5511 blk_queue_make_request(mddev->queue, md_make_request);
5512 blk_set_stacking_limits(&mddev->queue->limits);
5513
5514 disk = alloc_disk(1 << shift);
5515 if (!disk) {
5516 blk_cleanup_queue(mddev->queue);
5517 mddev->queue = NULL;
5518 goto abort;
5519 }
5520 disk->major = MAJOR(mddev->unit);
5521 disk->first_minor = unit << shift;
5522 if (name)
5523 strcpy(disk->disk_name, name);
5524 else if (partitioned)
5525 sprintf(disk->disk_name, "md_d%d", unit);
5526 else
5527 sprintf(disk->disk_name, "md%d", unit);
5528 disk->fops = &md_fops;
5529 disk->private_data = mddev;
5530 disk->queue = mddev->queue;
5531 blk_queue_write_cache(mddev->queue, true, true);
5532 /* Allow extended partitions. This makes the
5533 * 'mdp' device redundant, but we can't really
5534 * remove it now.
5535 */
5536 disk->flags |= GENHD_FL_EXT_DEVT;
5537 mddev->gendisk = disk;
5538 /* As soon as we call add_disk(), another thread could get
5539 * through to md_open, so make sure it doesn't get too far
5540 */
5541 mutex_lock(&mddev->open_mutex);
5542 add_disk(disk);
5543
5544 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
5545 if (error) {
5546 /* This isn't possible, but as kobject_init_and_add is marked
5547 * __must_check, we must do something with the result
5548 */
5549 pr_debug("md: cannot register %s/md - name in use\n",
5550 disk->disk_name);
5551 error = 0;
5552 }
5553 if (mddev->kobj.sd &&
5554 sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5555 pr_debug("pointless warning\n");
5556 mutex_unlock(&mddev->open_mutex);
5557 abort:
5558 mutex_unlock(&disks_mutex);
5559 if (!error && mddev->kobj.sd) {
5560 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5561 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5562 }
5563 mddev_put(mddev);
5564 return error;
5565}
5566
5567static struct kobject *md_probe(dev_t dev, int *part, void *data)
5568{
5569 if (create_on_open)
5570 md_alloc(dev, NULL);
5571 return NULL;
5572}
5573
5574static int add_named_array(const char *val, const struct kernel_param *kp)
5575{
5576 /*
5577 * val must be "md_*" or "mdNNN".
5578 * For "md_*" we allocate an array with a large free minor number, and
5579 * set the name to val. val must not already be an active name.
5580 * For "mdNNN" we allocate an array with the minor number NNN
5581 * which must not already be in use.
5582 */
5583 int len = strlen(val);
5584 char buf[DISK_NAME_LEN];
5585 unsigned long devnum;
5586
5587 while (len && val[len-1] == '\n')
5588 len--;
5589 if (len >= DISK_NAME_LEN)
5590 return -E2BIG;
5591 strlcpy(buf, val, len+1);
5592 if (strncmp(buf, "md_", 3) == 0)
5593 return md_alloc(0, buf);
5594 if (strncmp(buf, "md", 2) == 0 &&
5595 isdigit(buf[2]) &&
5596 kstrtoul(buf+2, 10, &devnum) == 0 &&
5597 devnum <= MINORMASK)
5598 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5599
5600 return -EINVAL;
5601}
5602
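/*
 * The safemode timer fires when no write has completed for a while:
 * flag the array so the md thread can mark it clean again, and let
 * user space know via sysfs when the metadata is externally managed.
 */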
5603static void md_safemode_timeout(struct timer_list *t)
5604{
5605 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5606
5607 mddev->safemode = 1;
5608 if (mddev->external)
5609 sysfs_notify_dirent_safe(mddev->sysfs_state);
5610
5611 md_wakeup_thread(mddev->thread);
5612}
5613
5614static int start_dirty_degraded;
5615
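/*
 * md_run() brings an assembled array on-line: validate the member
 * devices (no data/metadata overlap, force read-only if any member is
 * read-only), load and bind the personality, optionally create the
 * bitmap and write-behind pool, tune the request queue, and schedule
 * any recovery that is needed.
 */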
5616int md_run(struct mddev *mddev)
5617{
5618 int err;
5619 struct md_rdev *rdev;
5620 struct md_personality *pers;
5621
5622 if (list_empty(&mddev->disks))
5623 /* cannot run an array with no devices.. */
5624 return -EINVAL;
5625
5626 if (mddev->pers)
5627 return -EBUSY;
5628 /* Cannot run until previous stop completes properly */
5629 if (mddev->sysfs_active)
5630 return -EBUSY;
5631
5632 /*
5633 * Analyze all RAID superblock(s)
5634 */
5635 if (!mddev->raid_disks) {
5636 if (!mddev->persistent)
5637 return -EINVAL;
5638		err = analyze_sbs(mddev);
5639 if (err)
5640 return -EINVAL;
5641	}
5642
5643 if (mddev->level != LEVEL_NONE)
5644 request_module("md-level-%d", mddev->level);
5645 else if (mddev->clevel[0])
5646 request_module("md-%s", mddev->clevel);
5647
5648 /*
5649 * Drop all container device buffers, from now on
5650 * the only valid external interface is through the md
5651 * device.
5652 */
5653 mddev->has_superblocks = false;
5654 rdev_for_each(rdev, mddev) {
5655 if (test_bit(Faulty, &rdev->flags))
5656 continue;
5657 sync_blockdev(rdev->bdev);
5658 invalidate_bdev(rdev->bdev);
5659 if (mddev->ro != 1 &&
5660 (bdev_read_only(rdev->bdev) ||
5661 bdev_read_only(rdev->meta_bdev))) {
5662 mddev->ro = 1;
5663 if (mddev->gendisk)
5664 set_disk_ro(mddev->gendisk, 1);
5665 }
5666
5667 if (rdev->sb_page)
5668 mddev->has_superblocks = true;
5669
5670 /* perform some consistency tests on the device.
5671 * We don't want the data to overlap the metadata,
5672		 * We don't want the data to overlap the metadata.
5673 */
5674 if (rdev->meta_bdev) {
5675 /* Nothing to check */;
5676 } else if (rdev->data_offset < rdev->sb_start) {
5677 if (mddev->dev_sectors &&
5678 rdev->data_offset + mddev->dev_sectors
5679 > rdev->sb_start) {
5680 pr_warn("md: %s: data overlaps metadata\n",
5681 mdname(mddev));
5682 return -EINVAL;
5683 }
5684 } else {
5685 if (rdev->sb_start + rdev->sb_size/512
5686 > rdev->data_offset) {
5687 pr_warn("md: %s: metadata overlaps data\n",
5688 mdname(mddev));
5689 return -EINVAL;
5690 }
5691 }
5692 sysfs_notify_dirent_safe(rdev->sysfs_state);
5693 }
5694
5695 if (!bioset_initialized(&mddev->bio_set)) {
5696 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5697 if (err)
5698 return err;
5699 }
5700 if (!bioset_initialized(&mddev->sync_set)) {
5701 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5702 if (err)
5703 return err;
5704 }
5705
5706 spin_lock(&pers_lock);
5707 pers = find_pers(mddev->level, mddev->clevel);
5708 if (!pers || !try_module_get(pers->owner)) {
5709 spin_unlock(&pers_lock);
5710 if (mddev->level != LEVEL_NONE)
5711 pr_warn("md: personality for level %d is not loaded!\n",
5712 mddev->level);
5713 else
5714 pr_warn("md: personality for level %s is not loaded!\n",
5715 mddev->clevel);
5716 err = -EINVAL;
5717 goto abort;
5718 }
5719 spin_unlock(&pers_lock);
5720 if (mddev->level != pers->level) {
5721 mddev->level = pers->level;
5722 mddev->new_level = pers->level;
5723 }
5724 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5725
5726 if (mddev->reshape_position != MaxSector &&
5727 pers->start_reshape == NULL) {
5728 /* This personality cannot handle reshaping... */
5729 module_put(pers->owner);
5730 err = -EINVAL;
5731 goto abort;
5732 }
5733
5734 if (pers->sync_request) {
5735 /* Warn if this is a potentially silly
5736 * configuration.
5737 */
5738 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5739 struct md_rdev *rdev2;
5740 int warned = 0;
5741
5742 rdev_for_each(rdev, mddev)
5743 rdev_for_each(rdev2, mddev) {
5744 if (rdev < rdev2 &&
5745 rdev->bdev->bd_contains ==
5746 rdev2->bdev->bd_contains) {
5747 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5748 mdname(mddev),
5749 bdevname(rdev->bdev,b),
5750 bdevname(rdev2->bdev,b2));
5751 warned = 1;
5752 }
5753 }
5754
5755 if (warned)
5756 pr_warn("True protection against single-disk failure might be compromised.\n");
5757 }
5758
5759 mddev->recovery = 0;
5760 /* may be over-ridden by personality */
5761 mddev->resync_max_sectors = mddev->dev_sectors;
5762
5763 mddev->ok_start_degraded = start_dirty_degraded;
5764
5765 if (start_readonly && mddev->ro == 0)
5766 mddev->ro = 2; /* read-only, but switch on first write */
5767
5768 err = pers->run(mddev);
5769 if (err)
5770 pr_warn("md: pers->run() failed ...\n");
5771 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5772 WARN_ONCE(!mddev->external_size,
5773 "%s: default size too small, but 'external_size' not in effect?\n",
5774 __func__);
5775 pr_warn("md: invalid array_size %llu > default size %llu\n",
5776 (unsigned long long)mddev->array_sectors / 2,
5777 (unsigned long long)pers->size(mddev, 0, 0) / 2);
5778 err = -EINVAL;
5779 }
5780 if (err == 0 && pers->sync_request &&
5781 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5782 struct bitmap *bitmap;
5783
5784 bitmap = md_bitmap_create(mddev, -1);
5785 if (IS_ERR(bitmap)) {
5786 err = PTR_ERR(bitmap);
5787 pr_warn("%s: failed to create bitmap (%d)\n",
5788 mdname(mddev), err);
5789 } else
5790 mddev->bitmap = bitmap;
5791
5792 }
5793	if (err)
5794 goto bitmap_abort;
5795
5796 if (mddev->bitmap_info.max_write_behind > 0) {
5797 bool creat_pool = false;
5798
5799 rdev_for_each(rdev, mddev) {
5800 if (test_bit(WriteMostly, &rdev->flags) &&
5801 rdev_init_wb(rdev))
5802 creat_pool = true;
5803 }
5804 if (creat_pool && mddev->wb_info_pool == NULL) {
5805 mddev->wb_info_pool =
5806 mempool_create_kmalloc_pool(NR_WB_INFOS,
5807 sizeof(struct wb_info));
5808 if (!mddev->wb_info_pool) {
5809 err = -ENOMEM;
5810 goto bitmap_abort;
5811 }
5812 }
5813	}
5814
5815	if (mddev->queue) {
5816 bool nonrot = true;
5817
5818 rdev_for_each(rdev, mddev) {
5819 if (rdev->raid_disk >= 0 &&
5820 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5821 nonrot = false;
5822 break;
5823 }
5824 }
5825 if (mddev->degraded)
5826 nonrot = false;
5827 if (nonrot)
5828 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
5829 else
5830 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
5831 mddev->queue->backing_dev_info->congested_data = mddev;
5832 mddev->queue->backing_dev_info->congested_fn = md_congested;
5833 }
5834 if (pers->sync_request) {
5835 if (mddev->kobj.sd &&
5836 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5837 pr_warn("md: cannot register extra attributes for %s\n",
5838 mdname(mddev));
5839 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5840 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5841 mddev->ro = 0;
5842
5843 atomic_set(&mddev->max_corr_read_errors,
5844 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5845 mddev->safemode = 0;
5846 if (mddev_is_clustered(mddev))
5847 mddev->safemode_delay = 0;
5848 else
5849 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5850 mddev->in_sync = 1;
5851 smp_wmb();
5852 spin_lock(&mddev->lock);
5853 mddev->pers = pers;
5854 spin_unlock(&mddev->lock);
5855 rdev_for_each(rdev, mddev)
5856 if (rdev->raid_disk >= 0)
5857			sysfs_link_rdev(mddev, rdev); /* failure here is OK */
5858
5859 if (mddev->degraded && !mddev->ro)
5860 /* This ensures that recovering status is reported immediately
5861 * via sysfs - until a lack of spares is confirmed.
5862 */
5863 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5864 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5865
5866 if (mddev->sb_flags)
5867 md_update_sb(mddev, 0);
5868
5869 md_new_event(mddev);
5870	return 0;
5871
5872bitmap_abort:
5873 mddev_detach(mddev);
5874 if (mddev->private)
5875 pers->free(mddev, mddev->private);
5876 mddev->private = NULL;
5877 module_put(pers->owner);
5878 md_bitmap_destroy(mddev);
5879abort:
5880	bioset_exit(&mddev->bio_set);
5881 bioset_exit(&mddev->sync_set);
5882	return err;
5883}
5884EXPORT_SYMBOL_GPL(md_run);
5885
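/*
 * Top-level start-up path used by RUN_ARRAY and autorun: run the
 * array, load its bitmap and publish the final capacity.  MD_NOT_READY
 * is held across the whole sequence so the array is not reported as
 * active until set-up has completed.
 */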
5886static int do_md_run(struct mddev *mddev)
5887{
5888 int err;
5889
5890	set_bit(MD_NOT_READY, &mddev->flags);
5891	err = md_run(mddev);
5892 if (err)
5893 goto out;
5894 err = md_bitmap_load(mddev);
5895 if (err) {
5896 md_bitmap_destroy(mddev);
5897 goto out;
5898 }
5899
5900 if (mddev_is_clustered(mddev))
5901 md_allow_write(mddev);
5902
5903 /* run start up tasks that require md_thread */
5904 md_start(mddev);
5905
5906 md_wakeup_thread(mddev->thread);
5907 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5908
5909 set_capacity(mddev->gendisk, mddev->array_sectors);
5910 revalidate_disk(mddev->gendisk);
5911	clear_bit(MD_NOT_READY, &mddev->flags);
5912	mddev->changed = 1;
5913 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5914	sysfs_notify_dirent_safe(mddev->sysfs_state);
5915 sysfs_notify_dirent_safe(mddev->sysfs_action);
5916 sysfs_notify(&mddev->kobj, NULL, "degraded");
5917out:
5918	clear_bit(MD_NOT_READY, &mddev->flags);
5919	return err;
5920}
5921
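/*
 * Run any personality-specific start-up work (pers->start) with resync
 * held off via MD_RECOVERY_WAIT, then wake the sync thread.
 */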
5922int md_start(struct mddev *mddev)
5923{
5924 int ret = 0;
5925
5926 if (mddev->pers->start) {
5927 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
5928 md_wakeup_thread(mddev->thread);
5929 ret = mddev->pers->start(mddev);
5930 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
5931 md_wakeup_thread(mddev->sync_thread);
5932 }
5933 return ret;
5934}
5935EXPORT_SYMBOL_GPL(md_start);
5936
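/*
 * Switch a read-only array back to read-write.  Refuse if any member
 * device is read-only or if a required journal device is missing or
 * faulty; otherwise kick the md thread so recovery/resync can resume.
 */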
5937static int restart_array(struct mddev *mddev)
5938{
5939 struct gendisk *disk = mddev->gendisk;
5940 struct md_rdev *rdev;
5941 bool has_journal = false;
5942 bool has_readonly = false;
5943
5944 /* Complain if it has no devices */
5945 if (list_empty(&mddev->disks))
5946 return -ENXIO;
5947 if (!mddev->pers)
5948 return -EINVAL;
5949 if (!mddev->ro)
5950 return -EBUSY;
5951
5952 rcu_read_lock();
5953 rdev_for_each_rcu(rdev, mddev) {
5954 if (test_bit(Journal, &rdev->flags) &&
5955 !test_bit(Faulty, &rdev->flags))
5956 has_journal = true;
5957 if (bdev_read_only(rdev->bdev))
5958 has_readonly = true;
5959 }
5960 rcu_read_unlock();
5961 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
5962 /* Don't restart rw with journal missing/faulty */
5963 return -EINVAL;
5964 if (has_readonly)
5965 return -EROFS;
5966
5967 mddev->safemode = 0;
5968 mddev->ro = 0;
5969 set_disk_ro(disk, 0);
5970 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
5971 /* Kick recovery or resync if necessary */
5972 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5973 md_wakeup_thread(mddev->thread);
5974 md_wakeup_thread(mddev->sync_thread);
5975 sysfs_notify_dirent_safe(mddev->sysfs_state);
5976 return 0;
5977}
5978
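/*
 * Reset the mddev configuration to defaults after a final stop so the
 * structure can be reused for a new array.
 */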
5979static void md_clean(struct mddev *mddev)
5980{
5981 mddev->array_sectors = 0;
5982 mddev->external_size = 0;
5983 mddev->dev_sectors = 0;
5984 mddev->raid_disks = 0;
5985 mddev->recovery_cp = 0;
5986 mddev->resync_min = 0;
5987 mddev->resync_max = MaxSector;
5988 mddev->reshape_position = MaxSector;
5989 mddev->external = 0;
5990 mddev->persistent = 0;
5991 mddev->level = LEVEL_NONE;
5992 mddev->clevel[0] = 0;
5993 mddev->flags = 0;
5994 mddev->sb_flags = 0;
5995 mddev->ro = 0;
5996 mddev->metadata_type[0] = 0;
5997 mddev->chunk_sectors = 0;
5998 mddev->ctime = mddev->utime = 0;
5999 mddev->layout = 0;
6000 mddev->max_disks = 0;
6001 mddev->events = 0;
6002 mddev->can_decrease_events = 0;
6003 mddev->delta_disks = 0;
6004 mddev->reshape_backwards = 0;
6005 mddev->new_level = LEVEL_NONE;
6006 mddev->new_layout = 0;
6007 mddev->new_chunk_sectors = 0;
6008 mddev->curr_resync = 0;
6009 atomic64_set(&mddev->resync_mismatches, 0);
6010 mddev->suspend_lo = mddev->suspend_hi = 0;
6011 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6012 mddev->recovery = 0;
6013 mddev->in_sync = 0;
6014 mddev->changed = 0;
6015 mddev->degraded = 0;
6016 mddev->safemode = 0;
6017 mddev->private = NULL;
6018 mddev->cluster_info = NULL;
6019 mddev->bitmap_info.offset = 0;
6020 mddev->bitmap_info.default_offset = 0;
6021 mddev->bitmap_info.default_space = 0;
6022 mddev->bitmap_info.chunksize = 0;
6023 mddev->bitmap_info.daemon_sleep = 0;
6024 mddev->bitmap_info.max_write_behind = 0;
6025 mddev->bitmap_info.nodes = 0;
6026}
6027
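/*
 * Quiesce write activity: freeze and reap any resync/recovery thread,
 * flush the bitmap, and mark the superblock clean when this node is
 * responsible for doing so.
 */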
6028static void __md_stop_writes(struct mddev *mddev)
6029{
6030 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6031 flush_workqueue(md_misc_wq);
6032 if (mddev->sync_thread) {
6033 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6034 md_reap_sync_thread(mddev);
6035 }
6036
6037 del_timer_sync(&mddev->safemode_timer);
6038
6039 if (mddev->pers && mddev->pers->quiesce) {
6040 mddev->pers->quiesce(mddev, 1);
6041 mddev->pers->quiesce(mddev, 0);
6042 }
6043 md_bitmap_flush(mddev);
6044
6045 if (mddev->ro == 0 &&
6046 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6047 mddev->sb_flags)) {
6048 /* mark array as shutdown cleanly */
6049 if (!mddev_is_clustered(mddev))
6050 mddev->in_sync = 1;
6051 md_update_sb(mddev, 1);
6052 }
6053	mempool_destroy(mddev->wb_info_pool);
6054 mddev->wb_info_pool = NULL;
6055}
6056
6057void md_stop_writes(struct mddev *mddev)
6058{
6059 mddev_lock_nointr(mddev);
6060 __md_stop_writes(mddev);
6061 mddev_unlock(mddev);
6062}
6063EXPORT_SYMBOL_GPL(md_stop_writes);
6064
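/*
 * Detach the personality from the request path: wait for outstanding
 * write-behind I/O, quiesce the personality and stop the md thread.
 */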
6065static void mddev_detach(struct mddev *mddev)
6066{
6067 md_bitmap_wait_behind_writes(mddev);
6068	if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
6069		mddev->pers->quiesce(mddev, 1);
6070 mddev->pers->quiesce(mddev, 0);
6071 }
6072 md_unregister_thread(&mddev->thread);
6073 if (mddev->queue)
6074 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6075}
6076
6077static void __md_stop(struct mddev *mddev)
6078{
6079 struct md_personality *pers = mddev->pers;
6080 md_bitmap_destroy(mddev);
6081 mddev_detach(mddev);
6082 /* Ensure ->event_work is done */
6083 flush_workqueue(md_misc_wq);
6084 spin_lock(&mddev->lock);
6085 mddev->pers = NULL;
6086 spin_unlock(&mddev->lock);
6087 pers->free(mddev, mddev->private);
6088 mddev->private = NULL;
6089 if (pers->sync_request && mddev->to_remove == NULL)
6090 mddev->to_remove = &md_redundancy_group;
6091 module_put(pers->owner);
6092 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6093}
6094
6095void md_stop(struct mddev *mddev)
6096{
6097	/* stop the array and free any attached data structures.
6098	 * This is called from dm-raid.
6099 */
6100 __md_stop(mddev);
6101 bioset_exit(&mddev->bio_set);
6102 bioset_exit(&mddev->sync_set);
6103}
6104
6105EXPORT_SYMBOL_GPL(md_stop);
6106
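/*
 * STOP_ARRAY_RO: stop writes and mark the array and its gendisk
 * read-only.  Fails with -EBUSY while the device is open elsewhere or
 * a resync/recovery is still running.
 */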
6107static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6108{
6109 int err = 0;
6110 int did_freeze = 0;
6111
6112 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6113 did_freeze = 1;
6114 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6115 md_wakeup_thread(mddev->thread);
6116 }
6117 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6118 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6119 if (mddev->sync_thread)
6120 /* Thread might be blocked waiting for metadata update
6121 * which will now never happen */
6122 wake_up_process(mddev->sync_thread->tsk);
6123
6124 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6125 return -EBUSY;
6126 mddev_unlock(mddev);
6127 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6128 &mddev->recovery));
6129 wait_event(mddev->sb_wait,
6130 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6131 mddev_lock_nointr(mddev);
6132
6133 mutex_lock(&mddev->open_mutex);
6134 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6135 mddev->sync_thread ||
6136 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6137 pr_warn("md: %s still in use.\n",mdname(mddev));
6138 if (did_freeze) {
6139 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6140 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6141 md_wakeup_thread(mddev->thread);
6142 }
6143 err = -EBUSY;
6144 goto out;
6145 }
6146 if (mddev->pers) {
6147 __md_stop_writes(mddev);
6148
6149 err = -ENXIO;
6150 if (mddev->ro==1)
6151 goto out;
6152 mddev->ro = 1;
6153 set_disk_ro(mddev->gendisk, 1);
6154 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6155 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6156 md_wakeup_thread(mddev->thread);
6157 sysfs_notify_dirent_safe(mddev->sysfs_state);
6158 err = 0;
6159 }
6160out:
6161 mutex_unlock(&mddev->open_mutex);
6162 return err;
6163}
6164
6165/* mode:
6166 *   0 - completely stop and disassemble the array
6167 * 2 - stop but do not disassemble array
6168 */
6169static int do_md_stop(struct mddev *mddev, int mode,
6170 struct block_device *bdev)
6171{
6172 struct gendisk *disk = mddev->gendisk;
6173 struct md_rdev *rdev;
6174 int did_freeze = 0;
6175
6176 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6177 did_freeze = 1;
6178 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6179 md_wakeup_thread(mddev->thread);
6180 }
6181 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6182 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6183 if (mddev->sync_thread)
6184 /* Thread might be blocked waiting for metadata update
6185 * which will now never happen */
6186 wake_up_process(mddev->sync_thread->tsk);
6187
6188 mddev_unlock(mddev);
6189 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6190 !test_bit(MD_RECOVERY_RUNNING,
6191 &mddev->recovery)));
6192 mddev_lock_nointr(mddev);
6193
6194 mutex_lock(&mddev->open_mutex);
6195 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6196 mddev->sysfs_active ||
6197 mddev->sync_thread ||
6198 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6199 pr_warn("md: %s still in use.\n",mdname(mddev));
6200 mutex_unlock(&mddev->open_mutex);
6201 if (did_freeze) {
6202 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6203 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6204 md_wakeup_thread(mddev->thread);
6205 }
6206 return -EBUSY;
6207 }
6208 if (mddev->pers) {
6209 if (mddev->ro)
6210 set_disk_ro(disk, 0);
6211
6212 __md_stop_writes(mddev);
6213 __md_stop(mddev);
6214 mddev->queue->backing_dev_info->congested_fn = NULL;
6215
6216 /* tell userspace to handle 'inactive' */
6217 sysfs_notify_dirent_safe(mddev->sysfs_state);
6218
6219 rdev_for_each(rdev, mddev)
6220 if (rdev->raid_disk >= 0)
6221 sysfs_unlink_rdev(mddev, rdev);
6222
6223 set_capacity(disk, 0);
6224 mutex_unlock(&mddev->open_mutex);
6225 mddev->changed = 1;
6226 revalidate_disk(disk);
6227
6228 if (mddev->ro)
6229 mddev->ro = 0;
6230 } else
6231 mutex_unlock(&mddev->open_mutex);
6232 /*
6233 * Free resources if final stop
6234 */
6235 if (mode == 0) {
6236 pr_info("md: %s stopped.\n", mdname(mddev));
6237
6238 if (mddev->bitmap_info.file) {
6239 struct file *f = mddev->bitmap_info.file;
6240 spin_lock(&mddev->lock);
6241 mddev->bitmap_info.file = NULL;
6242 spin_unlock(&mddev->lock);
6243 fput(f);
6244 }
6245 mddev->bitmap_info.offset = 0;
6246
6247 export_array(mddev);
6248
6249 md_clean(mddev);
6250 if (mddev->hold_active == UNTIL_STOP)
6251 mddev->hold_active = 0;
6252 }
6253 md_new_event(mddev);
6254 sysfs_notify_dirent_safe(mddev->sysfs_state);
6255 return 0;
6256}
6257
6258#ifndef MODULE
6259static void autorun_array(struct mddev *mddev)
6260{
6261 struct md_rdev *rdev;
6262 int err;
6263
6264 if (list_empty(&mddev->disks))
6265 return;
6266
6267 pr_info("md: running: ");
6268
6269 rdev_for_each(rdev, mddev) {
6270 char b[BDEVNAME_SIZE];
6271 pr_cont("<%s>", bdevname(rdev->bdev,b));
6272 }
6273 pr_cont("\n");
6274
6275 err = do_md_run(mddev);
6276 if (err) {
6277 pr_warn("md: do_md_run() returned %d\n", err);
6278 do_md_stop(mddev, 0, NULL);
6279 }
6280}
6281
6282/*
6283 * let's try to run arrays based on all disks that have arrived
6284 * until now. (those are in pending_raid_disks)
6285 *
6286 * the method: pick the first pending disk, collect all disks with
6287 * the same UUID, remove all from the pending list and put them into
6288 * the 'same_array' list. Then order this list based on superblock
6289 * update time (freshest comes first), kick out 'old' disks and
6290 * compare superblocks. If everything's fine then run it.
6291 *
6292 * If "unit" is allocated, then bump its reference count
6293 */
6294static void autorun_devices(int part)
6295{
6296 struct md_rdev *rdev0, *rdev, *tmp;
6297 struct mddev *mddev;
6298 char b[BDEVNAME_SIZE];
6299
6300 pr_info("md: autorun ...\n");
6301 while (!list_empty(&pending_raid_disks)) {
6302 int unit;
6303 dev_t dev;
6304 LIST_HEAD(candidates);
6305 rdev0 = list_entry(pending_raid_disks.next,
6306 struct md_rdev, same_set);
6307
6308 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
6309 INIT_LIST_HEAD(&candidates);
6310 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6311 if (super_90_load(rdev, rdev0, 0) >= 0) {
6312 pr_debug("md: adding %s ...\n",
6313 bdevname(rdev->bdev,b));
6314 list_move(&rdev->same_set, &candidates);
6315 }
6316 /*
6317 * now we have a set of devices, with all of them having
6318 * mostly sane superblocks. It's time to allocate the
6319 * mddev.
6320 */
6321 if (part) {
6322 dev = MKDEV(mdp_major,
6323 rdev0->preferred_minor << MdpMinorShift);
6324 unit = MINOR(dev) >> MdpMinorShift;
6325 } else {
6326 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6327 unit = MINOR(dev);
6328 }
6329 if (rdev0->preferred_minor != unit) {
6330 pr_warn("md: unit number in %s is bad: %d\n",
6331 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
6332 break;
6333 }
6334
6335 md_probe(dev, NULL, NULL);
6336 mddev = mddev_find(dev);
6337		if (!mddev)
6338			break;
6339
6340		if (mddev_lock(mddev))
6341 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6342 else if (mddev->raid_disks || mddev->major_version
6343 || !list_empty(&mddev->disks)) {
6344 pr_warn("md: %s already running, cannot run %s\n",
6345 mdname(mddev), bdevname(rdev0->bdev,b));
6346 mddev_unlock(mddev);
6347 } else {
6348 pr_debug("md: created %s\n", mdname(mddev));
6349 mddev->persistent = 1;
6350 rdev_for_each_list(rdev, tmp, &candidates) {
6351 list_del_init(&rdev->same_set);
6352 if (bind_rdev_to_array(rdev, mddev))
6353 export_rdev(rdev);
6354 }
6355 autorun_array(mddev);
6356 mddev_unlock(mddev);
6357 }
6358 /* on success, candidates will be empty, on error
6359 * it won't...
6360 */
6361 rdev_for_each_list(rdev, tmp, &candidates) {
6362 list_del_init(&rdev->same_set);
6363 export_rdev(rdev);
6364 }
6365 mddev_put(mddev);
6366 }
6367 pr_info("md: ... autorun DONE.\n");
6368}
6369#endif /* !MODULE */
6370
6371static int get_version(void __user *arg)
6372{
6373 mdu_version_t ver;
6374
6375 ver.major = MD_MAJOR_VERSION;
6376 ver.minor = MD_MINOR_VERSION;
6377 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6378
6379 if (copy_to_user(arg, &ver, sizeof(ver)))
6380 return -EFAULT;
6381
6382 return 0;
6383}
6384
6385static int get_array_info(struct mddev *mddev, void __user *arg)
6386{
6387 mdu_array_info_t info;
6388 int nr,working,insync,failed,spare;
6389 struct md_rdev *rdev;
6390
6391 nr = working = insync = failed = spare = 0;
6392 rcu_read_lock();
6393 rdev_for_each_rcu(rdev, mddev) {
6394 nr++;
6395 if (test_bit(Faulty, &rdev->flags))
6396 failed++;
6397 else {
6398 working++;
6399 if (test_bit(In_sync, &rdev->flags))
6400 insync++;
6401 else if (test_bit(Journal, &rdev->flags))
6402 /* TODO: add journal count to md_u.h */
6403 ;
6404 else
6405 spare++;
6406 }
6407 }
6408 rcu_read_unlock();
6409
6410 info.major_version = mddev->major_version;
6411 info.minor_version = mddev->minor_version;
6412 info.patch_version = MD_PATCHLEVEL_VERSION;
6413 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6414 info.level = mddev->level;
6415 info.size = mddev->dev_sectors / 2;
6416 if (info.size != mddev->dev_sectors / 2) /* overflow */
6417 info.size = -1;
6418 info.nr_disks = nr;
6419 info.raid_disks = mddev->raid_disks;
6420 info.md_minor = mddev->md_minor;
6421 info.not_persistent= !mddev->persistent;
6422
6423 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6424 info.state = 0;
6425 if (mddev->in_sync)
6426 info.state = (1<<MD_SB_CLEAN);
6427 if (mddev->bitmap && mddev->bitmap_info.offset)
6428 info.state |= (1<<MD_SB_BITMAP_PRESENT);
6429 if (mddev_is_clustered(mddev))
6430 info.state |= (1<<MD_SB_CLUSTERED);
6431 info.active_disks = insync;
6432 info.working_disks = working;
6433 info.failed_disks = failed;
6434 info.spare_disks = spare;
6435
6436 info.layout = mddev->layout;
6437 info.chunk_size = mddev->chunk_sectors << 9;
6438
6439 if (copy_to_user(arg, &info, sizeof(info)))
6440 return -EFAULT;
6441
6442 return 0;
6443}
6444
6445static int get_bitmap_file(struct mddev *mddev, void __user * arg)
6446{
6447 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6448 char *ptr;
6449 int err;
6450
6451 file = kzalloc(sizeof(*file), GFP_NOIO);
6452 if (!file)
6453 return -ENOMEM;
6454
6455 err = 0;
6456 spin_lock(&mddev->lock);
6457 /* bitmap enabled */
6458 if (mddev->bitmap_info.file) {
6459 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6460 sizeof(file->pathname));
6461 if (IS_ERR(ptr))
6462 err = PTR_ERR(ptr);
6463 else
6464 memmove(file->pathname, ptr,
6465 sizeof(file->pathname)-(ptr-file->pathname));
6466 }
6467 spin_unlock(&mddev->lock);
6468
6469 if (err == 0 &&
6470 copy_to_user(arg, file, sizeof(*file)))
6471 err = -EFAULT;
6472
6473 kfree(file);
6474 return err;
6475}
6476
6477static int get_disk_info(struct mddev *mddev, void __user * arg)
6478{
6479 mdu_disk_info_t info;
6480 struct md_rdev *rdev;
6481
6482 if (copy_from_user(&info, arg, sizeof(info)))
6483 return -EFAULT;
6484
6485 rcu_read_lock();
6486 rdev = md_find_rdev_nr_rcu(mddev, info.number);
6487 if (rdev) {
6488 info.major = MAJOR(rdev->bdev->bd_dev);
6489 info.minor = MINOR(rdev->bdev->bd_dev);
6490 info.raid_disk = rdev->raid_disk;
6491 info.state = 0;
6492 if (test_bit(Faulty, &rdev->flags))
6493 info.state |= (1<<MD_DISK_FAULTY);
6494 else if (test_bit(In_sync, &rdev->flags)) {
6495 info.state |= (1<<MD_DISK_ACTIVE);
6496 info.state |= (1<<MD_DISK_SYNC);
6497 }
6498 if (test_bit(Journal, &rdev->flags))
6499 info.state |= (1<<MD_DISK_JOURNAL);
6500 if (test_bit(WriteMostly, &rdev->flags))
6501 info.state |= (1<<MD_DISK_WRITEMOSTLY);
6502 if (test_bit(FailFast, &rdev->flags))
6503 info.state |= (1<<MD_DISK_FAILFAST);
6504 } else {
6505 info.major = info.minor = 0;
6506 info.raid_disk = -1;
6507 info.state = (1<<MD_DISK_REMOVED);
6508 }
6509 rcu_read_unlock();
6510
6511 if (copy_to_user(arg, &info, sizeof(info)))
6512 return -EFAULT;
6513
6514 return 0;
6515}
6516
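/*
 * ADD_NEW_DISK handling, matching the three branches below: an array
 * still being assembled (the device must carry a superblock), a
 * running array (hot-add, with cluster coordination where needed),
 * and building a version-0.90 array where the ioctl supplies the
 * device details.
 */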
6517static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
6518{
6519 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6520 struct md_rdev *rdev;
6521 dev_t dev = MKDEV(info->major,info->minor);
6522
6523 if (mddev_is_clustered(mddev) &&
6524 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6525 pr_warn("%s: Cannot add to clustered mddev.\n",
6526 mdname(mddev));
6527 return -EINVAL;
6528 }
6529
6530 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6531 return -EOVERFLOW;
6532
6533 if (!mddev->raid_disks) {
6534 int err;
6535 /* expecting a device which has a superblock */
6536 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6537 if (IS_ERR(rdev)) {
6538 pr_warn("md: md_import_device returned %ld\n",
6539 PTR_ERR(rdev));
6540 return PTR_ERR(rdev);
6541 }
6542 if (!list_empty(&mddev->disks)) {
6543 struct md_rdev *rdev0
6544 = list_entry(mddev->disks.next,
6545 struct md_rdev, same_set);
6546 err = super_types[mddev->major_version]
6547 .load_super(rdev, rdev0, mddev->minor_version);
6548 if (err < 0) {
6549 pr_warn("md: %s has different UUID to %s\n",
6550 bdevname(rdev->bdev,b),
6551 bdevname(rdev0->bdev,b2));
6552 export_rdev(rdev);
6553 return -EINVAL;
6554 }
6555 }
6556 err = bind_rdev_to_array(rdev, mddev);
6557 if (err)
6558 export_rdev(rdev);
6559 return err;
6560 }
6561
6562 /*
6563 * add_new_disk can be used once the array is assembled
6564 * to add "hot spares". They must already have a superblock
6565 * written
6566 */
6567 if (mddev->pers) {
6568 int err;
6569 if (!mddev->pers->hot_add_disk) {
6570 pr_warn("%s: personality does not support diskops!\n",
6571 mdname(mddev));
6572 return -EINVAL;
6573 }
6574 if (mddev->persistent)
6575 rdev = md_import_device(dev, mddev->major_version,
6576 mddev->minor_version);
6577 else
6578 rdev = md_import_device(dev, -1, -1);
6579 if (IS_ERR(rdev)) {
6580 pr_warn("md: md_import_device returned %ld\n",
6581 PTR_ERR(rdev));
6582 return PTR_ERR(rdev);
6583 }
6584 /* set saved_raid_disk if appropriate */
6585 if (!mddev->persistent) {
6586 if (info->state & (1<<MD_DISK_SYNC) &&
6587 info->raid_disk < mddev->raid_disks) {
6588 rdev->raid_disk = info->raid_disk;
6589 set_bit(In_sync, &rdev->flags);
6590 clear_bit(Bitmap_sync, &rdev->flags);
6591 } else
6592 rdev->raid_disk = -1;
6593 rdev->saved_raid_disk = rdev->raid_disk;
6594 } else
6595 super_types[mddev->major_version].
6596 validate_super(mddev, rdev);
6597 if ((info->state & (1<<MD_DISK_SYNC)) &&
6598 rdev->raid_disk != info->raid_disk) {
6599			/* This was a hot-add request, but the events don't
6600 * match, so reject it.
6601 */
6602 export_rdev(rdev);
6603 return -EINVAL;
6604 }
6605
6606 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6607 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6608 set_bit(WriteMostly, &rdev->flags);
6609 else
6610 clear_bit(WriteMostly, &rdev->flags);
6611 if (info->state & (1<<MD_DISK_FAILFAST))
6612 set_bit(FailFast, &rdev->flags);
6613 else
6614 clear_bit(FailFast, &rdev->flags);
6615
6616 if (info->state & (1<<MD_DISK_JOURNAL)) {
6617 struct md_rdev *rdev2;
6618 bool has_journal = false;
6619
6620 /* make sure no existing journal disk */
6621 rdev_for_each(rdev2, mddev) {
6622 if (test_bit(Journal, &rdev2->flags)) {
6623 has_journal = true;
6624 break;
6625 }
6626 }
6627 if (has_journal || mddev->bitmap) {
6628 export_rdev(rdev);
6629 return -EBUSY;
6630 }
6631 set_bit(Journal, &rdev->flags);
6632 }
6633 /*
6634 * check whether the device shows up in other nodes
6635 */
6636 if (mddev_is_clustered(mddev)) {
6637 if (info->state & (1 << MD_DISK_CANDIDATE))
6638 set_bit(Candidate, &rdev->flags);
6639 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6640 /* --add initiated by this node */
6641 err = md_cluster_ops->add_new_disk(mddev, rdev);
6642 if (err) {
6643 export_rdev(rdev);
6644 return err;
6645 }
6646 }
6647 }
6648
6649 rdev->raid_disk = -1;
6650 err = bind_rdev_to_array(rdev, mddev);
6651
6652 if (err)
6653 export_rdev(rdev);
6654
6655 if (mddev_is_clustered(mddev)) {
6656 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6657 if (!err) {
6658 err = md_cluster_ops->new_disk_ack(mddev,
6659 err == 0);
6660 if (err)
6661 md_kick_rdev_from_array(rdev);
6662 }
6663 } else {
6664 if (err)
6665 md_cluster_ops->add_new_disk_cancel(mddev);
6666 else
6667 err = add_bound_rdev(rdev);
6668 }
6669
6670 } else if (!err)
6671 err = add_bound_rdev(rdev);
6672
6673 return err;
6674 }
6675
6676 /* otherwise, add_new_disk is only allowed
6677 * for major_version==0 superblocks
6678 */
6679 if (mddev->major_version != 0) {
6680 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6681 return -EINVAL;
6682 }
6683
6684 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6685 int err;
6686 rdev = md_import_device(dev, -1, 0);
6687 if (IS_ERR(rdev)) {
6688 pr_warn("md: error, md_import_device() returned %ld\n",
6689 PTR_ERR(rdev));
6690 return PTR_ERR(rdev);
6691 }
6692 rdev->desc_nr = info->number;
6693 if (info->raid_disk < mddev->raid_disks)
6694 rdev->raid_disk = info->raid_disk;
6695 else
6696 rdev->raid_disk = -1;
6697
6698 if (rdev->raid_disk < mddev->raid_disks)
6699 if (info->state & (1<<MD_DISK_SYNC))
6700 set_bit(In_sync, &rdev->flags);
6701
6702 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6703 set_bit(WriteMostly, &rdev->flags);
6704 if (info->state & (1<<MD_DISK_FAILFAST))
6705 set_bit(FailFast, &rdev->flags);
6706
6707 if (!mddev->persistent) {
6708 pr_debug("md: nonpersistent superblock ...\n");
6709 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6710 } else
6711 rdev->sb_start = calc_dev_sboffset(rdev);
6712 rdev->sectors = rdev->sb_start;
6713
6714 err = bind_rdev_to_array(rdev, mddev);
6715 if (err) {
6716 export_rdev(rdev);
6717 return err;
6718 }
6719 }
6720
6721 return 0;
6722}
6723
6724static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6725{
6726 char b[BDEVNAME_SIZE];
6727 struct md_rdev *rdev;
6728
6729 if (!mddev->pers)
6730 return -ENODEV;
6731
6732 rdev = find_rdev(mddev, dev);
6733 if (!rdev)
6734 return -ENXIO;
6735
6736 if (rdev->raid_disk < 0)
6737 goto kick_rdev;
6738
6739 clear_bit(Blocked, &rdev->flags);
6740 remove_and_add_spares(mddev, rdev);
6741
6742 if (rdev->raid_disk >= 0)
6743 goto busy;
6744
6745kick_rdev:
6746	if (mddev_is_clustered(mddev)) {
6747 if (md_cluster_ops->remove_disk(mddev, rdev))
6748 goto busy;
6749 }
6750
6751 md_kick_rdev_from_array(rdev);
6752 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6753 if (mddev->thread)
6754 md_wakeup_thread(mddev->thread);
6755 else
6756 md_update_sb(mddev, 1);
6757 md_new_event(mddev);
6758
6759 return 0;
6760busy:
6761 pr_debug("md: cannot remove active disk %s from %s ...\n",
6762 bdevname(rdev->bdev,b), mdname(mddev));
6763 return -EBUSY;
6764}
6765
6766static int hot_add_disk(struct mddev *mddev, dev_t dev)
6767{
6768 char b[BDEVNAME_SIZE];
6769 int err;
6770 struct md_rdev *rdev;
6771
6772 if (!mddev->pers)
6773 return -ENODEV;
6774
6775 if (mddev->major_version != 0) {
6776 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6777 mdname(mddev));
6778 return -EINVAL;
6779 }
6780 if (!mddev->pers->hot_add_disk) {
6781 pr_warn("%s: personality does not support diskops!\n",
6782 mdname(mddev));
6783 return -EINVAL;
6784 }
6785
6786 rdev = md_import_device(dev, -1, 0);
6787 if (IS_ERR(rdev)) {
6788 pr_warn("md: error, md_import_device() returned %ld\n",
6789 PTR_ERR(rdev));
6790 return -EINVAL;
6791 }
6792
6793 if (mddev->persistent)
6794 rdev->sb_start = calc_dev_sboffset(rdev);
6795 else
6796 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6797
6798 rdev->sectors = rdev->sb_start;
6799
6800 if (test_bit(Faulty, &rdev->flags)) {
6801 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
6802 bdevname(rdev->bdev,b), mdname(mddev));
6803 err = -EINVAL;
6804 goto abort_export;
6805 }
6806
6807 clear_bit(In_sync, &rdev->flags);
6808 rdev->desc_nr = -1;
6809 rdev->saved_raid_disk = -1;
6810 err = bind_rdev_to_array(rdev, mddev);
6811 if (err)
6812 goto abort_export;
6813
6814 /*
6815 * The rest should better be atomic, we can have disk failures
6816	 * The rest had better be atomic; disk failures can be
6817	 * noticed in interrupt context ...
6818
6819 rdev->raid_disk = -1;
6820
6821 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6822 if (!mddev->thread)
6823 md_update_sb(mddev, 1);
6824 /*
6825 * Kick recovery, maybe this spare has to be added to the
6826 * array immediately.
6827 */
6828 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6829 md_wakeup_thread(mddev->thread);
6830 md_new_event(mddev);
6831 return 0;
6832
6833abort_export:
6834 export_rdev(rdev);
6835 return err;
6836}
6837
6838static int set_bitmap_file(struct mddev *mddev, int fd)
6839{
6840 int err = 0;
6841
6842 if (mddev->pers) {
6843 if (!mddev->pers->quiesce || !mddev->thread)
6844 return -EBUSY;
6845 if (mddev->recovery || mddev->sync_thread)
6846 return -EBUSY;
6847 /* we should be able to change the bitmap.. */
6848 }
6849
6850 if (fd >= 0) {
6851 struct inode *inode;
6852 struct file *f;
6853
6854 if (mddev->bitmap || mddev->bitmap_info.file)
6855 return -EEXIST; /* cannot add when bitmap is present */
6856 f = fget(fd);
6857
6858 if (f == NULL) {
6859 pr_warn("%s: error: failed to get bitmap file\n",
6860 mdname(mddev));
6861 return -EBADF;
6862 }
6863
6864 inode = f->f_mapping->host;
6865 if (!S_ISREG(inode->i_mode)) {
6866 pr_warn("%s: error: bitmap file must be a regular file\n",
6867 mdname(mddev));
6868 err = -EBADF;
6869 } else if (!(f->f_mode & FMODE_WRITE)) {
6870 pr_warn("%s: error: bitmap file must open for write\n",
6871 mdname(mddev));
6872 err = -EBADF;
6873 } else if (atomic_read(&inode->i_writecount) != 1) {
6874 pr_warn("%s: error: bitmap file is already in use\n",
6875 mdname(mddev));
6876 err = -EBUSY;
6877 }
6878 if (err) {
6879 fput(f);
6880 return err;
6881 }
6882 mddev->bitmap_info.file = f;
6883 mddev->bitmap_info.offset = 0; /* file overrides offset */
6884 } else if (mddev->bitmap == NULL)
6885 return -ENOENT; /* cannot remove what isn't there */
6886 err = 0;
6887 if (mddev->pers) {
6888 if (fd >= 0) {
6889 struct bitmap *bitmap;
6890
6891 bitmap = md_bitmap_create(mddev, -1);
6892 mddev_suspend(mddev);
6893 if (!IS_ERR(bitmap)) {
6894 mddev->bitmap = bitmap;
6895 err = md_bitmap_load(mddev);
6896 } else
6897 err = PTR_ERR(bitmap);
6898 if (err) {
6899 md_bitmap_destroy(mddev);
6900 fd = -1;
6901 }
6902 mddev_resume(mddev);
6903 } else if (fd < 0) {
6904 mddev_suspend(mddev);
6905 md_bitmap_destroy(mddev);
6906 mddev_resume(mddev);
6907 }
6908 }
6909 if (fd < 0) {
6910 struct file *f = mddev->bitmap_info.file;
6911 if (f) {
6912 spin_lock(&mddev->lock);
6913 mddev->bitmap_info.file = NULL;
6914 spin_unlock(&mddev->lock);
6915 fput(f);
6916 }
6917 }
6918
6919 return err;
6920}
6921
6922/*
6923 * set_array_info is used in two different ways
6924 * The original usage is when creating a new array.
6925 * In this usage, raid_disks is > 0 and it together with
6926 * level, size, not_persistent, layout, chunksize determine the
6927 * shape of the array.
6928 * This will always create an array with a type-0.90.0 superblock.
6929 * The newer usage is when assembling an array.
6930 * In this case raid_disks will be 0, and the major_version field is
6931 * use to determine which style super-blocks are to be found on the devices.
6932 * used to determine which style super-blocks are to be found on the devices.
6933 * The minor and patch _version numbers are also kept in case the
6934 */
6935static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6936{
6937
6938 if (info->raid_disks == 0) {
6939 /* just setting version number for superblock loading */
6940 if (info->major_version < 0 ||
6941 info->major_version >= ARRAY_SIZE(super_types) ||
6942 super_types[info->major_version].name == NULL) {
6943 /* maybe try to auto-load a module? */
6944 pr_warn("md: superblock version %d not known\n",
6945 info->major_version);
6946 return -EINVAL;
6947 }
6948 mddev->major_version = info->major_version;
6949 mddev->minor_version = info->minor_version;
6950 mddev->patch_version = info->patch_version;
6951 mddev->persistent = !info->not_persistent;
6952 /* ensure mddev_put doesn't delete this now that there
6953 * is some minimal configuration.
6954 */
6955 mddev->ctime = ktime_get_real_seconds();
6956 return 0;
6957 }
6958 mddev->major_version = MD_MAJOR_VERSION;
6959 mddev->minor_version = MD_MINOR_VERSION;
6960 mddev->patch_version = MD_PATCHLEVEL_VERSION;
6961 mddev->ctime = ktime_get_real_seconds();
6962
6963 mddev->level = info->level;
6964 mddev->clevel[0] = 0;
6965 mddev->dev_sectors = 2 * (sector_t)info->size;
6966 mddev->raid_disks = info->raid_disks;
6967 /* don't set md_minor, it is determined by which /dev/md* was
6968 * openned
6969	 * opened
6970 if (info->state & (1<<MD_SB_CLEAN))
6971 mddev->recovery_cp = MaxSector;
6972 else
6973 mddev->recovery_cp = 0;
6974 mddev->persistent = ! info->not_persistent;
6975 mddev->external = 0;
6976
6977 mddev->layout = info->layout;
6978	if (mddev->level == 0)
6979 /* Cannot trust RAID0 layout info here */
6980 mddev->layout = -1;
6981	mddev->chunk_sectors = info->chunk_size >> 9;
6982
6983 if (mddev->persistent) {
6984 mddev->max_disks = MD_SB_DISKS;
6985 mddev->flags = 0;
6986 mddev->sb_flags = 0;
6987 }
6988 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6989
6990 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6991 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6992 mddev->bitmap_info.offset = 0;
6993
6994 mddev->reshape_position = MaxSector;
6995
6996 /*
6997 * Generate a 128 bit UUID
6998 */
6999 get_random_bytes(mddev->uuid, 16);
7000
7001 mddev->new_level = mddev->level;
7002 mddev->new_chunk_sectors = mddev->chunk_sectors;
7003 mddev->new_layout = mddev->layout;
7004 mddev->delta_disks = 0;
7005 mddev->reshape_backwards = 0;
7006
7007 return 0;
7008}
7009
7010void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7011{
7012 lockdep_assert_held(&mddev->reconfig_mutex);
7013
7014 if (mddev->external_size)
7015 return;
7016
7017 mddev->array_sectors = array_sectors;
7018}
7019EXPORT_SYMBOL(md_set_array_sectors);
7020
7021static int update_size(struct mddev *mddev, sector_t num_sectors)
7022{
7023 struct md_rdev *rdev;
7024 int rv;
7025 int fit = (num_sectors == 0);
7026 sector_t old_dev_sectors = mddev->dev_sectors;
7027
7028 if (mddev->pers->resize == NULL)
7029 return -EINVAL;
7030 /* The "num_sectors" is the number of sectors of each device that
7031 * is used. This can only make sense for arrays with redundancy.
7032 * linear and raid0 always use whatever space is available. We can only
7033 * consider changing this number if no resync or reconstruction is
7034 * happening, and if the new size is acceptable. It must fit before the
7035 * sb_start or, if that is <data_offset, it must fit before the size
7036 * of each device. If num_sectors is zero, we find the largest size
7037 * that fits.
7038 */
7039 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7040 mddev->sync_thread)
7041 return -EBUSY;
7042 if (mddev->ro)
7043 return -EROFS;
7044
7045 rdev_for_each(rdev, mddev) {
7046 sector_t avail = rdev->sectors;
7047
7048 if (fit && (num_sectors == 0 || num_sectors > avail))
7049 num_sectors = avail;
7050 if (avail < num_sectors)
7051 return -ENOSPC;
7052 }
7053 rv = mddev->pers->resize(mddev, num_sectors);
7054 if (!rv) {
7055 if (mddev_is_clustered(mddev))
7056 md_cluster_ops->update_size(mddev, old_dev_sectors);
7057 else if (mddev->queue) {
7058 set_capacity(mddev->gendisk, mddev->array_sectors);
7059 revalidate_disk(mddev->gendisk);
7060 }
7061 }
7062 return rv;
7063}
7064
7065static int update_raid_disks(struct mddev *mddev, int raid_disks)
7066{
7067 int rv;
7068 struct md_rdev *rdev;
7069 /* change the number of raid disks */
7070 if (mddev->pers->check_reshape == NULL)
7071 return -EINVAL;
7072 if (mddev->ro)
7073 return -EROFS;
7074 if (raid_disks <= 0 ||
7075 (mddev->max_disks && raid_disks >= mddev->max_disks))
7076 return -EINVAL;
7077 if (mddev->sync_thread ||
7078 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7079	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7080	    mddev->reshape_position != MaxSector)
7081 return -EBUSY;
7082
7083 rdev_for_each(rdev, mddev) {
7084 if (mddev->raid_disks < raid_disks &&
7085 rdev->data_offset < rdev->new_data_offset)
7086 return -EINVAL;
7087 if (mddev->raid_disks > raid_disks &&
7088 rdev->data_offset > rdev->new_data_offset)
7089 return -EINVAL;
7090 }
7091
7092 mddev->delta_disks = raid_disks - mddev->raid_disks;
7093 if (mddev->delta_disks < 0)
7094 mddev->reshape_backwards = 1;
7095 else if (mddev->delta_disks > 0)
7096 mddev->reshape_backwards = 0;
7097
7098 rv = mddev->pers->check_reshape(mddev);
7099 if (rv < 0) {
7100 mddev->delta_disks = 0;
7101 mddev->reshape_backwards = 0;
7102 }
7103 return rv;
7104}
7105
7106/*
7107 * update_array_info is used to change the configuration of an
7108 * on-line array.
7109 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
7110 * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
7111 * Any differences that cannot be handled will cause an error.
7112 * Normally, only one change can be managed at a time.
7113 */
7114static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7115{
7116 int rv = 0;
7117 int cnt = 0;
7118 int state = 0;
7119
7120	/* calculate expected state, ignoring low bits */
7121 if (mddev->bitmap && mddev->bitmap_info.offset)
7122 state |= (1 << MD_SB_BITMAP_PRESENT);
7123
7124 if (mddev->major_version != info->major_version ||
7125 mddev->minor_version != info->minor_version ||
7126/* mddev->patch_version != info->patch_version || */
7127 mddev->ctime != info->ctime ||
7128 mddev->level != info->level ||
7129/* mddev->layout != info->layout || */
7130 mddev->persistent != !info->not_persistent ||
7131 mddev->chunk_sectors != info->chunk_size >> 9 ||
7132 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7133 ((state^info->state) & 0xfffffe00)
7134 )
7135 return -EINVAL;
7136 /* Check there is only one change */
7137 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7138 cnt++;
7139 if (mddev->raid_disks != info->raid_disks)
7140 cnt++;
7141 if (mddev->layout != info->layout)
7142 cnt++;
7143 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7144 cnt++;
7145 if (cnt == 0)
7146 return 0;
7147 if (cnt > 1)
7148 return -EINVAL;
7149
7150 if (mddev->layout != info->layout) {
7151 /* Change layout
7152 * we don't need to do anything at the md level, the
7153 * personality will take care of it all.
7154 */
7155 if (mddev->pers->check_reshape == NULL)
7156 return -EINVAL;
7157 else {
7158 mddev->new_layout = info->layout;
7159 rv = mddev->pers->check_reshape(mddev);
7160 if (rv)
7161 mddev->new_layout = mddev->layout;
7162 return rv;
7163 }
7164 }
7165 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7166 rv = update_size(mddev, (sector_t)info->size * 2);
7167
7168 if (mddev->raid_disks != info->raid_disks)
7169 rv = update_raid_disks(mddev, info->raid_disks);
7170
7171 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7172 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7173 rv = -EINVAL;
7174 goto err;
7175 }
7176 if (mddev->recovery || mddev->sync_thread) {
7177 rv = -EBUSY;
7178 goto err;
7179 }
7180 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7181 struct bitmap *bitmap;
7182 /* add the bitmap */
7183 if (mddev->bitmap) {
7184 rv = -EEXIST;
7185 goto err;
7186 }
7187 if (mddev->bitmap_info.default_offset == 0) {
7188 rv = -EINVAL;
7189 goto err;
7190 }
7191 mddev->bitmap_info.offset =
7192 mddev->bitmap_info.default_offset;
7193 mddev->bitmap_info.space =
7194 mddev->bitmap_info.default_space;
7195 bitmap = md_bitmap_create(mddev, -1);
7196 mddev_suspend(mddev);
7197 if (!IS_ERR(bitmap)) {
7198 mddev->bitmap = bitmap;
7199 rv = md_bitmap_load(mddev);
7200 } else
7201 rv = PTR_ERR(bitmap);
7202 if (rv)
7203 md_bitmap_destroy(mddev);
7204 mddev_resume(mddev);
7205 } else {
7206 /* remove the bitmap */
7207 if (!mddev->bitmap) {
7208 rv = -ENOENT;
7209 goto err;
7210 }
7211 if (mddev->bitmap->storage.file) {
7212 rv = -EINVAL;
7213 goto err;
7214 }
7215 if (mddev->bitmap_info.nodes) {
7216 /* hold PW on all the bitmap lock */
7217 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7218 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
7219 rv = -EPERM;
7220 md_cluster_ops->unlock_all_bitmaps(mddev);
7221 goto err;
7222 }
7223
7224 mddev->bitmap_info.nodes = 0;
7225 md_cluster_ops->leave(mddev);
7226 }
7227 mddev_suspend(mddev);
7228 md_bitmap_destroy(mddev);
7229 mddev_resume(mddev);
7230 mddev->bitmap_info.offset = 0;
7231 }
7232 }
7233 md_update_sb(mddev, 1);
7234 return rv;
7235err:
7236 return rv;
7237}
7238
7239static int set_disk_faulty(struct mddev *mddev, dev_t dev)
7240{
7241 struct md_rdev *rdev;
7242 int err = 0;
7243
7244 if (mddev->pers == NULL)
7245 return -ENODEV;
7246
7247 rcu_read_lock();
7248 rdev = md_find_rdev_rcu(mddev, dev);
7249 if (!rdev)
7250 err = -ENODEV;
7251 else {
7252 md_error(mddev, rdev);
7253 if (!test_bit(Faulty, &rdev->flags))
7254 err = -EBUSY;
7255 }
7256 rcu_read_unlock();
7257 return err;
7258}
7259
7260/*
7261 * We have a problem here: there is no easy way to give a CHS
7262 * virtual geometry. We currently pretend that we have 2 heads and
7263 * 4 sectors (with a BIG number of cylinders...). This drives
7264 * dosfs just mad... ;-)
7265 */
7266static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7267{
7268 struct mddev *mddev = bdev->bd_disk->private_data;
7269
7270 geo->heads = 2;
7271 geo->sectors = 4;
7272 geo->cylinders = mddev->array_sectors / 8;
7273 return 0;
7274}
7275
7276static inline bool md_ioctl_valid(unsigned int cmd)
7277{
7278 switch (cmd) {
7279 case ADD_NEW_DISK:
7280 case BLKROSET:
7281 case GET_ARRAY_INFO:
7282 case GET_BITMAP_FILE:
7283 case GET_DISK_INFO:
7284 case HOT_ADD_DISK:
7285 case HOT_REMOVE_DISK:
7286 case RAID_AUTORUN:
7287 case RAID_VERSION:
7288 case RESTART_ARRAY_RW:
7289 case RUN_ARRAY:
7290 case SET_ARRAY_INFO:
7291 case SET_BITMAP_FILE:
7292 case SET_DISK_FAULTY:
7293 case STOP_ARRAY:
7294 case STOP_ARRAY_RO:
7295 case CLUSTERED_DISK_NACK:
7296 return true;
7297 default:
7298 return false;
7299 }
7300}
7301
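/*
 * Main ioctl entry point for md block devices.  A few informational
 * commands are handled without the reconfig mutex; everything else
 * runs under mddev_lock(), and state-changing commands on a read-only
 * array first switch it back to read-write where that is allowed.
 */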
7302static int md_ioctl(struct block_device *bdev, fmode_t mode,
7303 unsigned int cmd, unsigned long arg)
7304{
7305 int err = 0;
7306 void __user *argp = (void __user *)arg;
7307 struct mddev *mddev = NULL;
7308 int ro;
7309 bool did_set_md_closing = false;
7310
7311 if (!md_ioctl_valid(cmd))
7312 return -ENOTTY;
7313
7314 switch (cmd) {
7315 case RAID_VERSION:
7316 case GET_ARRAY_INFO:
7317 case GET_DISK_INFO:
7318 break;
7319 default:
7320 if (!capable(CAP_SYS_ADMIN))
7321 return -EACCES;
7322 }
7323
7324 /*
7325 * Commands dealing with the RAID driver but not any
7326 * particular array:
7327 */
7328 switch (cmd) {
7329 case RAID_VERSION:
7330 err = get_version(argp);
7331 goto out;
7332
7333#ifndef MODULE
7334 case RAID_AUTORUN:
7335 err = 0;
7336 autostart_arrays(arg);
7337 goto out;
7338#endif
7339 default:;
7340 }
7341
7342 /*
7343 * Commands creating/starting a new array:
7344 */
7345
7346 mddev = bdev->bd_disk->private_data;
7347
7348 if (!mddev) {
7349 BUG();
7350 goto out;
7351 }
7352
7353	/* Some actions do not require the mutex */
7354 switch (cmd) {
7355 case GET_ARRAY_INFO:
7356 if (!mddev->raid_disks && !mddev->external)
7357 err = -ENODEV;
7358 else
7359 err = get_array_info(mddev, argp);
7360 goto out;
7361
7362 case GET_DISK_INFO:
7363 if (!mddev->raid_disks && !mddev->external)
7364 err = -ENODEV;
7365 else
7366 err = get_disk_info(mddev, argp);
7367 goto out;
7368
7369 case SET_DISK_FAULTY:
7370 err = set_disk_faulty(mddev, new_decode_dev(arg));
7371 goto out;
7372
7373 case GET_BITMAP_FILE:
7374 err = get_bitmap_file(mddev, argp);
7375 goto out;
7376
7377 }
7378
7379 if (cmd == ADD_NEW_DISK)
7380 /* need to ensure md_delayed_delete() has completed */
7381 flush_workqueue(md_misc_wq);
7382
7383 if (cmd == HOT_REMOVE_DISK)
7384 /* need to ensure recovery thread has run */
7385 wait_event_interruptible_timeout(mddev->sb_wait,
7386 !test_bit(MD_RECOVERY_NEEDED,
7387 &mddev->recovery),
7388 msecs_to_jiffies(5000));
7389 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7390 /* Need to flush page cache, and ensure no-one else opens
7391 * and writes
7392 */
7393 mutex_lock(&mddev->open_mutex);
7394 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7395 mutex_unlock(&mddev->open_mutex);
7396 err = -EBUSY;
7397 goto out;
7398 }
7399		if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7400 mutex_unlock(&mddev->open_mutex);
7401 err = -EBUSY;
7402 goto out;
7403 }
7404		did_set_md_closing = true;
7405 mutex_unlock(&mddev->open_mutex);
7406 sync_blockdev(bdev);
7407 }
7408 err = mddev_lock(mddev);
7409 if (err) {
7410 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7411 err, cmd);
7412 goto out;
7413 }
7414
7415 if (cmd == SET_ARRAY_INFO) {
7416 mdu_array_info_t info;
7417 if (!arg)
7418 memset(&info, 0, sizeof(info));
7419 else if (copy_from_user(&info, argp, sizeof(info))) {
7420 err = -EFAULT;
7421 goto unlock;
7422 }
7423 if (mddev->pers) {
7424 err = update_array_info(mddev, &info);
7425 if (err) {
7426 pr_warn("md: couldn't update array info. %d\n", err);
7427 goto unlock;
7428 }
7429 goto unlock;
7430 }
7431 if (!list_empty(&mddev->disks)) {
7432 pr_warn("md: array %s already has disks!\n", mdname(mddev));
7433 err = -EBUSY;
7434 goto unlock;
7435 }
7436 if (mddev->raid_disks) {
7437 pr_warn("md: array %s already initialised!\n", mdname(mddev));
7438 err = -EBUSY;
7439 goto unlock;
7440 }
7441 err = set_array_info(mddev, &info);
7442 if (err) {
7443 pr_warn("md: couldn't set array info. %d\n", err);
7444 goto unlock;
7445 }
7446 goto unlock;
7447 }
7448
7449 /*
7450 * Commands querying/configuring an existing array:
7451 */
7452 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7453 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7454 if ((!mddev->raid_disks && !mddev->external)
7455 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7456 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7457 && cmd != GET_BITMAP_FILE) {
7458 err = -ENODEV;
7459 goto unlock;
7460 }
7461
7462 /*
7463 * Commands even a read-only array can execute:
7464 */
7465 switch (cmd) {
7466 case RESTART_ARRAY_RW:
7467 err = restart_array(mddev);
7468 goto unlock;
7469
7470 case STOP_ARRAY:
7471 err = do_md_stop(mddev, 0, bdev);
7472 goto unlock;
7473
7474 case STOP_ARRAY_RO:
7475 err = md_set_readonly(mddev, bdev);
7476 goto unlock;
7477
7478 case HOT_REMOVE_DISK:
7479 err = hot_remove_disk(mddev, new_decode_dev(arg));
7480 goto unlock;
7481
7482 case ADD_NEW_DISK:
7483 /* We can support ADD_NEW_DISK on read-only arrays
7484 * only if we are re-adding a preexisting device.
7485 * So require mddev->pers and MD_DISK_SYNC.
7486 */
7487 if (mddev->pers) {
7488 mdu_disk_info_t info;
7489 if (copy_from_user(&info, argp, sizeof(info)))
7490 err = -EFAULT;
7491 else if (!(info.state & (1<<MD_DISK_SYNC)))
7492 /* Need to clear read-only for this */
7493 break;
7494 else
7495 err = add_new_disk(mddev, &info);
7496 goto unlock;
7497 }
7498 break;
7499
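	/*
	 * mddev->ro: 0 == read-write, 1 == read-only, 2 == auto-read-only
	 * (reads allowed; switched back to read-write on the first write,
	 * see md_write_start()).
	 */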
7500 case BLKROSET:
7501 if (get_user(ro, (int __user *)(arg))) {
7502 err = -EFAULT;
7503 goto unlock;
7504 }
7505 err = -EINVAL;
7506
7507 /* if the bdev is going readonly the value of mddev->ro
7508 * does not matter, no writes are coming
7509 */
7510 if (ro)
7511 goto unlock;
7512
7513		/* are we already prepared for writes? */
7514 if (mddev->ro != 1)
7515 goto unlock;
7516
7517 /* transitioning to readauto need only happen for
7518 * arrays that call md_write_start
7519 */
7520 if (mddev->pers) {
7521 err = restart_array(mddev);
7522 if (err == 0) {
7523 mddev->ro = 2;
7524 set_disk_ro(mddev->gendisk, 0);
7525 }
7526 }
7527 goto unlock;
7528 }
7529
7530 /*
7531 * The remaining ioctls are changing the state of the
7532 * superblock, so we do not allow them on read-only arrays.
7533 */
7534 if (mddev->ro && mddev->pers) {
7535 if (mddev->ro == 2) {
7536 mddev->ro = 0;
7537 sysfs_notify_dirent_safe(mddev->sysfs_state);
7538 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7539 /* mddev_unlock will wake thread */
7540 /* If a device failed while we were read-only, we
7541 * need to make sure the metadata is updated now.
7542 */
7543 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7544 mddev_unlock(mddev);
7545 wait_event(mddev->sb_wait,
7546 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7547 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7548 mddev_lock_nointr(mddev);
7549 }
7550 } else {
7551 err = -EROFS;
7552 goto unlock;
7553 }
7554 }
7555
7556 switch (cmd) {
7557 case ADD_NEW_DISK:
7558 {
7559 mdu_disk_info_t info;
7560 if (copy_from_user(&info, argp, sizeof(info)))
7561 err = -EFAULT;
7562 else
7563 err = add_new_disk(mddev, &info);
7564 goto unlock;
7565 }
7566
7567 case CLUSTERED_DISK_NACK:
7568 if (mddev_is_clustered(mddev))
7569 md_cluster_ops->new_disk_ack(mddev, false);
7570 else
7571 err = -EINVAL;
7572 goto unlock;
7573
7574 case HOT_ADD_DISK:
7575 err = hot_add_disk(mddev, new_decode_dev(arg));
7576 goto unlock;
7577
7578 case RUN_ARRAY:
7579 err = do_md_run(mddev);
7580 goto unlock;
7581
7582 case SET_BITMAP_FILE:
7583 err = set_bitmap_file(mddev, (int)arg);
7584 goto unlock;
7585
7586 default:
7587 err = -EINVAL;
7588 goto unlock;
7589 }
7590
7591unlock:
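	/* Any outcome other than -EINVAL counts as this array having been
	 * "used", so an UNTIL_IOCTL hold can be dropped now.
	 */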
7592 if (mddev->hold_active == UNTIL_IOCTL &&
7593 err != -EINVAL)
7594 mddev->hold_active = 0;
7595 mddev_unlock(mddev);
7596out:
7597 if(did_set_md_closing)
7598 clear_bit(MD_CLOSING, &mddev->flags);
7599 return err;
7600}
7601#ifdef CONFIG_COMPAT
7602static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7603 unsigned int cmd, unsigned long arg)
7604{
7605 switch (cmd) {
7606 case HOT_REMOVE_DISK:
7607 case HOT_ADD_DISK:
7608 case SET_DISK_FAULTY:
7609 case SET_BITMAP_FILE:
7610		/* These take an integer arg, do not convert */
7611 break;
7612 default:
7613 arg = (unsigned long)compat_ptr(arg);
7614 break;
7615 }
7616
7617 return md_ioctl(bdev, mode, cmd, arg);
7618}
7619#endif /* CONFIG_COMPAT */
7620
7621static int md_open(struct block_device *bdev, fmode_t mode)
7622{
7623 /*
7624 * Succeed if we can lock the mddev, which confirms that
7625 * it isn't being stopped right now.
7626 */
7627 struct mddev *mddev = mddev_find(bdev->bd_dev);
7628 int err;
7629
7630 if (!mddev)
7631 return -ENODEV;
7632
7633 if (mddev->gendisk != bdev->bd_disk) {
7634 /* we are racing with mddev_put which is discarding this
7635 * bd_disk.
7636 */
7637 mddev_put(mddev);
7638 /* Wait until bdev->bd_disk is definitely gone */
7639		if (work_pending(&mddev->del_work))
7640 flush_workqueue(md_misc_wq);
7641 return -EBUSY;
7642	}
7643 BUG_ON(mddev != bdev->bd_disk->private_data);
7644
7645 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7646 goto out;
7647
7648 if (test_bit(MD_CLOSING, &mddev->flags)) {
7649 mutex_unlock(&mddev->open_mutex);
7650 err = -ENODEV;
7651 goto out;
7652 }
7653
7654 err = 0;
7655 atomic_inc(&mddev->openers);
7656 mutex_unlock(&mddev->open_mutex);
7657
7658 check_disk_change(bdev);
7659 out:
7660 if (err)
7661 mddev_put(mddev);
7662 return err;
7663}
7664
7665static void md_release(struct gendisk *disk, fmode_t mode)
7666{
7667 struct mddev *mddev = disk->private_data;
7668
7669 BUG_ON(!mddev);
7670 atomic_dec(&mddev->openers);
7671 mddev_put(mddev);
7672}
7673
7674static int md_media_changed(struct gendisk *disk)
7675{
7676 struct mddev *mddev = disk->private_data;
7677
7678 return mddev->changed;
7679}
7680
7681static int md_revalidate(struct gendisk *disk)
7682{
7683 struct mddev *mddev = disk->private_data;
7684
7685 mddev->changed = 0;
7686 return 0;
7687}
7688static const struct block_device_operations md_fops =
7689{
7690 .owner = THIS_MODULE,
7691 .open = md_open,
7692 .release = md_release,
7693 .ioctl = md_ioctl,
7694#ifdef CONFIG_COMPAT
7695 .compat_ioctl = md_compat_ioctl,
7696#endif
7697 .getgeo = md_getgeo,
7698 .media_changed = md_media_changed,
7699 .revalidate_disk= md_revalidate,
7700};
7701
7702static int md_thread(void *arg)
7703{
7704 struct md_thread *thread = arg;
7705
7706 /*
7707	 * md_thread is a 'system-thread', its priority should be very
7708 * high. We avoid resource deadlocks individually in each
7709 * raid personality. (RAID5 does preallocation) We also use RR and
7710 * the very same RT priority as kswapd, thus we will never get
7711 * into a priority inversion deadlock.
7712 *
7713 * we definitely have to have equal or higher priority than
7714 * bdflush, otherwise bdflush will deadlock if there are too
7715 * many dirty RAID5 blocks.
7716 */
7717
7718 allow_signal(SIGKILL);
7719 while (!kthread_should_stop()) {
7720
7721 /* We need to wait INTERRUPTIBLE so that
7722 * we don't add to the load-average.
7723 * That means we need to be sure no signals are
7724 * pending
7725 */
7726 if (signal_pending(current))
7727 flush_signals(current);
7728
7729 wait_event_interruptible_timeout
7730 (thread->wqueue,
7731 test_bit(THREAD_WAKEUP, &thread->flags)
7732 || kthread_should_stop() || kthread_should_park(),
7733 thread->timeout);
7734
7735 clear_bit(THREAD_WAKEUP, &thread->flags);
7736 if (kthread_should_park())
7737 kthread_parkme();
7738 if (!kthread_should_stop())
7739 thread->run(thread);
7740 }
7741
7742 return 0;
7743}
7744
7745void md_wakeup_thread(struct md_thread *thread)
7746{
7747 if (thread) {
7748 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7749 set_bit(THREAD_WAKEUP, &thread->flags);
7750 wake_up(&thread->wqueue);
7751 }
7752}
7753EXPORT_SYMBOL(md_wakeup_thread);
7754
7755struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7756 struct mddev *mddev, const char *name)
7757{
7758 struct md_thread *thread;
7759
7760 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7761 if (!thread)
7762 return NULL;
7763
7764 init_waitqueue_head(&thread->wqueue);
7765
7766 thread->run = run;
7767 thread->mddev = mddev;
7768 thread->timeout = MAX_SCHEDULE_TIMEOUT;
7769 thread->tsk = kthread_run(md_thread, thread,
7770 "%s_%s",
7771 mdname(thread->mddev),
7772 name);
7773 if (IS_ERR(thread->tsk)) {
7774 kfree(thread);
7775 return NULL;
7776 }
7777 return thread;
7778}
7779EXPORT_SYMBOL(md_register_thread);
7780
7781void md_unregister_thread(struct md_thread **threadp)
7782{
7783 struct md_thread *thread = *threadp;
7784 if (!thread)
7785 return;
7786 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7787 /* Locking ensures that mddev_unlock does not wake_up a
7788 * non-existent thread
7789 */
7790 spin_lock(&pers_lock);
7791 *threadp = NULL;
7792 spin_unlock(&pers_lock);
7793
7794 kthread_stop(thread->tsk);
7795 kfree(thread);
7796}
7797EXPORT_SYMBOL(md_unregister_thread);
7798
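/*
 * md_error() - report a device failure on @rdev.  The personality's
 * error_handler decides how to react (typically by marking the device
 * Faulty); afterwards recovery is flagged as needed, the per-array thread
 * is woken and event listeners are notified.
 */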
7799void md_error(struct mddev *mddev, struct md_rdev *rdev)
7800{
7801 if (!rdev || test_bit(Faulty, &rdev->flags))
7802 return;
7803
7804 if (!mddev->pers || !mddev->pers->error_handler)
7805 return;
7806 mddev->pers->error_handler(mddev,rdev);
7807 if (mddev->degraded)
7808 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7809 sysfs_notify_dirent_safe(rdev->sysfs_state);
7810 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7811 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7812 md_wakeup_thread(mddev->thread);
7813 if (mddev->event_work.func)
7814 queue_work(md_misc_wq, &mddev->event_work);
7815 md_new_event(mddev);
7816}
7817EXPORT_SYMBOL(md_error);
7818
7819/* seq_file implementation /proc/mdstat */
7820
7821static void status_unused(struct seq_file *seq)
7822{
7823 int i = 0;
7824 struct md_rdev *rdev;
7825
7826 seq_printf(seq, "unused devices: ");
7827
7828 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7829 char b[BDEVNAME_SIZE];
7830 i++;
7831 seq_printf(seq, "%s ",
7832 bdevname(rdev->bdev,b));
7833 }
7834 if (!i)
7835 seq_printf(seq, "<none>");
7836
7837 seq_printf(seq, "\n");
7838}
7839
7840static int status_resync(struct seq_file *seq, struct mddev *mddev)
7841{
7842 sector_t max_sectors, resync, res;
7843	unsigned long dt, db = 0;
7844 sector_t rt, curr_mark_cnt, resync_mark_cnt;
7845 int scale, recovery_active;
7846	unsigned int per_milli;
7847
7848 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7849 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7850 max_sectors = mddev->resync_max_sectors;
7851 else
7852 max_sectors = mddev->dev_sectors;
7853
7854 resync = mddev->curr_resync;
7855 if (resync <= 3) {
7856 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7857 /* Still cleaning up */
7858 resync = max_sectors;
7859 } else if (resync > max_sectors)
7860 resync = max_sectors;
7861 else
7862 resync -= atomic_read(&mddev->recovery_active);
7863
7864 if (resync == 0) {
7865 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
7866 struct md_rdev *rdev;
7867
7868 rdev_for_each(rdev, mddev)
7869 if (rdev->raid_disk >= 0 &&
7870 !test_bit(Faulty, &rdev->flags) &&
7871 rdev->recovery_offset != MaxSector &&
7872 rdev->recovery_offset) {
7873 seq_printf(seq, "\trecover=REMOTE");
7874 return 1;
7875 }
7876 if (mddev->reshape_position != MaxSector)
7877 seq_printf(seq, "\treshape=REMOTE");
7878 else
7879 seq_printf(seq, "\tresync=REMOTE");
7880 return 1;
7881 }
7882 if (mddev->recovery_cp < MaxSector) {
7883 seq_printf(seq, "\tresync=PENDING");
7884 return 1;
7885 }
7886 return 0;
7887 }
7888 if (resync < 3) {
7889 seq_printf(seq, "\tresync=DELAYED");
7890 return 1;
7891 }
7892
7893 WARN_ON(max_sectors == 0);
7894 /* Pick 'scale' such that (resync>>scale)*1000 will fit
7895 * in a sector_t, and (max_sectors>>scale) will fit in a
7896 * u32, as those are the requirements for sector_div.
7897 * Thus 'scale' must be at least 10
7898 */
7899 scale = 10;
7900 if (sizeof(sector_t) > sizeof(unsigned long)) {
7901 while ( max_sectors/2 > (1ULL<<(scale+32)))
7902 scale++;
7903 }
7904 res = (resync>>scale)*1000;
7905 sector_div(res, (u32)((max_sectors>>scale)+1));
7906
7907 per_milli = res;
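	/* Render the 20-slot progress bar, e.g. "[=========>...........]" */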
7908 {
7909 int i, x = per_milli/50, y = 20-x;
7910 seq_printf(seq, "[");
7911 for (i = 0; i < x; i++)
7912 seq_printf(seq, "=");
7913 seq_printf(seq, ">");
7914 for (i = 0; i < y; i++)
7915 seq_printf(seq, ".");
7916 seq_printf(seq, "] ");
7917 }
7918 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7919 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7920 "reshape" :
7921 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7922 "check" :
7923 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7924 "resync" : "recovery"))),
7925 per_milli/10, per_milli % 10,
7926 (unsigned long long) resync/2,
7927 (unsigned long long) max_sectors/2);
7928
7929 /*
7930 * dt: time from mark until now
7931 * db: blocks written from mark until now
7932 * rt: remaining time
7933 *
7934	 * rt is a sector_t, which is always 64bit now. We are keeping
7935 * the original algorithm, but it is not really necessary.
7936 *
7937 * Original algorithm:
7938 * So we divide before multiply in case it is 32bit and close
7939 * to the limit.
7940 * We scale the divisor (db) by 32 to avoid losing precision
7941 * near the end of resync when the number of remaining sectors
7942 * is close to 'db'.
7943 * We then divide rt by 32 after multiplying by db to compensate.
7944 * The '+1' avoids division by zero if db is very small.
7945	 */
7946 dt = ((jiffies - mddev->resync_mark) / HZ);
7947 if (!dt) dt++;
7948
7949 curr_mark_cnt = mddev->curr_mark_cnt;
7950 recovery_active = atomic_read(&mddev->recovery_active);
7951 resync_mark_cnt = mddev->resync_mark_cnt;
7952
7953 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
7954 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
7955
7956 rt = max_sectors - resync; /* number of remaining sectors */
7957	rt = div64_u64(rt, db/32+1);
7958	rt *= dt;
7959 rt >>= 5;
7960
7961 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7962 ((unsigned long)rt % 60)/6);
7963
7964 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7965 return 1;
7966}
7967
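/*
 * seq_file iteration over /proc/mdstat: position 0 is the header line
 * (returned as the token (void *)1), the following positions walk
 * all_mddevs, and the sentinel position 0x10000 emits the trailing
 * "unused devices" line (token (void *)2).
 */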
7968static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7969{
7970 struct list_head *tmp;
7971 loff_t l = *pos;
7972 struct mddev *mddev;
7973
7974	if (l == 0x10000) {
7975 ++*pos;
7976 return (void *)2;
7977 }
7978 if (l > 0x10000)
7979		return NULL;
7980 if (!l--)
7981 /* header */
7982 return (void*)1;
7983
7984 spin_lock(&all_mddevs_lock);
7985 list_for_each(tmp,&all_mddevs)
7986 if (!l--) {
7987 mddev = list_entry(tmp, struct mddev, all_mddevs);
7988 mddev_get(mddev);
7989 spin_unlock(&all_mddevs_lock);
7990 return mddev;
7991 }
7992 spin_unlock(&all_mddevs_lock);
7993 if (!l--)
7994 return (void*)2;/* tail */
7995 return NULL;
7996}
7997
7998static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7999{
8000 struct list_head *tmp;
8001 struct mddev *next_mddev, *mddev = v;
8002
8003 ++*pos;
8004 if (v == (void*)2)
8005 return NULL;
8006
8007 spin_lock(&all_mddevs_lock);
8008 if (v == (void*)1)
8009 tmp = all_mddevs.next;
8010 else
8011 tmp = mddev->all_mddevs.next;
8012 if (tmp != &all_mddevs)
8013 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
8014 else {
8015 next_mddev = (void*)2;
8016 *pos = 0x10000;
8017 }
8018 spin_unlock(&all_mddevs_lock);
8019
8020 if (v != (void*)1)
8021 mddev_put(mddev);
8022 return next_mddev;
8023
8024}
8025
8026static void md_seq_stop(struct seq_file *seq, void *v)
8027{
8028 struct mddev *mddev = v;
8029
8030 if (mddev && v != (void*)1 && v != (void*)2)
8031 mddev_put(mddev);
8032}
8033
8034static int md_seq_show(struct seq_file *seq, void *v)
8035{
8036 struct mddev *mddev = v;
8037 sector_t sectors;
8038 struct md_rdev *rdev;
8039
8040 if (v == (void*)1) {
8041 struct md_personality *pers;
8042 seq_printf(seq, "Personalities : ");
8043 spin_lock(&pers_lock);
8044 list_for_each_entry(pers, &pers_list, list)
8045 seq_printf(seq, "[%s] ", pers->name);
8046
8047 spin_unlock(&pers_lock);
8048 seq_printf(seq, "\n");
8049 seq->poll_event = atomic_read(&md_event_count);
8050 return 0;
8051 }
8052 if (v == (void*)2) {
8053 status_unused(seq);
8054 return 0;
8055 }
8056
8057 spin_lock(&mddev->lock);
8058 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8059 seq_printf(seq, "%s : %sactive", mdname(mddev),
8060 mddev->pers ? "" : "in");
8061 if (mddev->pers) {
8062 if (mddev->ro==1)
8063 seq_printf(seq, " (read-only)");
8064 if (mddev->ro==2)
8065 seq_printf(seq, " (auto-read-only)");
8066 seq_printf(seq, " %s", mddev->pers->name);
8067 }
8068
8069 sectors = 0;
8070 rcu_read_lock();
8071 rdev_for_each_rcu(rdev, mddev) {
8072 char b[BDEVNAME_SIZE];
8073 seq_printf(seq, " %s[%d]",
8074 bdevname(rdev->bdev,b), rdev->desc_nr);
8075 if (test_bit(WriteMostly, &rdev->flags))
8076 seq_printf(seq, "(W)");
8077 if (test_bit(Journal, &rdev->flags))
8078 seq_printf(seq, "(J)");
8079 if (test_bit(Faulty, &rdev->flags)) {
8080 seq_printf(seq, "(F)");
8081 continue;
8082 }
8083 if (rdev->raid_disk < 0)
8084 seq_printf(seq, "(S)"); /* spare */
8085 if (test_bit(Replacement, &rdev->flags))
8086 seq_printf(seq, "(R)");
8087 sectors += rdev->sectors;
8088 }
8089 rcu_read_unlock();
8090
8091 if (!list_empty(&mddev->disks)) {
8092 if (mddev->pers)
8093 seq_printf(seq, "\n %llu blocks",
8094 (unsigned long long)
8095 mddev->array_sectors / 2);
8096 else
8097 seq_printf(seq, "\n %llu blocks",
8098 (unsigned long long)sectors / 2);
8099 }
8100 if (mddev->persistent) {
8101 if (mddev->major_version != 0 ||
8102 mddev->minor_version != 90) {
8103 seq_printf(seq," super %d.%d",
8104 mddev->major_version,
8105 mddev->minor_version);
8106 }
8107 } else if (mddev->external)
8108 seq_printf(seq, " super external:%s",
8109 mddev->metadata_type);
8110 else
8111 seq_printf(seq, " super non-persistent");
8112
8113 if (mddev->pers) {
8114 mddev->pers->status(seq, mddev);
8115 seq_printf(seq, "\n ");
8116 if (mddev->pers->sync_request) {
8117 if (status_resync(seq, mddev))
8118 seq_printf(seq, "\n ");
8119 }
8120 } else
8121 seq_printf(seq, "\n ");
8122
8123 md_bitmap_status(seq, mddev->bitmap);
8124
8125 seq_printf(seq, "\n");
8126 }
8127 spin_unlock(&mddev->lock);
8128
8129 return 0;
8130}
8131
8132static const struct seq_operations md_seq_ops = {
8133 .start = md_seq_start,
8134 .next = md_seq_next,
8135 .stop = md_seq_stop,
8136 .show = md_seq_show,
8137};
8138
8139static int md_seq_open(struct inode *inode, struct file *file)
8140{
8141 struct seq_file *seq;
8142 int error;
8143
8144 error = seq_open(file, &md_seq_ops);
8145 if (error)
8146 return error;
8147
8148 seq = file->private_data;
8149 seq->poll_event = atomic_read(&md_event_count);
8150 return error;
8151}
8152
8153static int md_unloading;
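/*
 * Poll support for /proc/mdstat: report EPOLLPRI|EPOLLERR once
 * md_event_count has moved on from the value sampled when the file was
 * opened (or unconditionally while md is unloading), so listeners wake up
 * on array state changes.
 */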
8154static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8155{
8156 struct seq_file *seq = filp->private_data;
8157 __poll_t mask;
8158
8159 if (md_unloading)
8160 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8161 poll_wait(filp, &md_event_waiters, wait);
8162
8163 /* always allow read */
8164 mask = EPOLLIN | EPOLLRDNORM;
8165
8166 if (seq->poll_event != atomic_read(&md_event_count))
8167 mask |= EPOLLERR | EPOLLPRI;
8168 return mask;
8169}
8170
8171static const struct file_operations md_seq_fops = {
8172 .owner = THIS_MODULE,
8173 .open = md_seq_open,
8174 .read = seq_read,
8175 .llseek = seq_lseek,
8176 .release = seq_release,
8177 .poll = mdstat_poll,
8178};
8179
8180int register_md_personality(struct md_personality *p)
8181{
8182 pr_debug("md: %s personality registered for level %d\n",
8183 p->name, p->level);
8184 spin_lock(&pers_lock);
8185 list_add_tail(&p->list, &pers_list);
8186 spin_unlock(&pers_lock);
8187 return 0;
8188}
8189EXPORT_SYMBOL(register_md_personality);
8190
8191int unregister_md_personality(struct md_personality *p)
8192{
8193 pr_debug("md: %s personality unregistered\n", p->name);
8194 spin_lock(&pers_lock);
8195 list_del_init(&p->list);
8196 spin_unlock(&pers_lock);
8197 return 0;
8198}
8199EXPORT_SYMBOL(unregister_md_personality);
8200
8201int register_md_cluster_operations(struct md_cluster_operations *ops,
8202 struct module *module)
8203{
8204 int ret = 0;
8205 spin_lock(&pers_lock);
8206 if (md_cluster_ops != NULL)
8207 ret = -EALREADY;
8208 else {
8209 md_cluster_ops = ops;
8210 md_cluster_mod = module;
8211 }
8212 spin_unlock(&pers_lock);
8213 return ret;
8214}
8215EXPORT_SYMBOL(register_md_cluster_operations);
8216
8217int unregister_md_cluster_operations(void)
8218{
8219 spin_lock(&pers_lock);
8220 md_cluster_ops = NULL;
8221 spin_unlock(&pers_lock);
8222 return 0;
8223}
8224EXPORT_SYMBOL(unregister_md_cluster_operations);
8225
8226int md_setup_cluster(struct mddev *mddev, int nodes)
8227{
8228 if (!md_cluster_ops)
8229 request_module("md-cluster");
8230 spin_lock(&pers_lock);
8231 /* ensure module won't be unloaded */
8232 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8233		pr_warn("can't find md-cluster module or get its reference.\n");
8234 spin_unlock(&pers_lock);
8235 return -ENOENT;
8236 }
8237 spin_unlock(&pers_lock);
8238
8239 return md_cluster_ops->join(mddev, nodes);
8240}
8241
8242void md_cluster_stop(struct mddev *mddev)
8243{
8244 if (!md_cluster_ops)
8245 return;
8246 md_cluster_ops->leave(mddev);
8247 module_put(md_cluster_mod);
8248}
8249
8250static int is_mddev_idle(struct mddev *mddev, int init)
8251{
8252 struct md_rdev *rdev;
8253 int idle;
8254 int curr_events;
8255
8256 idle = 1;
8257 rcu_read_lock();
8258 rdev_for_each_rcu(rdev, mddev) {
8259 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
8260 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
8261 atomic_read(&disk->sync_io);
8262 /* sync IO will cause sync_io to increase before the disk_stats
8263 * as sync_io is counted when a request starts, and
8264 * disk_stats is counted when it completes.
8265 * So resync activity will cause curr_events to be smaller than
8266 * when there was no such activity.
8267 * non-sync IO will cause disk_stat to increase without
8268 * increasing sync_io so curr_events will (eventually)
8269 * be larger than it was before. Once it becomes
8270 * substantially larger, the test below will cause
8271 * the array to appear non-idle, and resync will slow
8272 * down.
8273 * If there is a lot of outstanding resync activity when
8274 * we set last_event to curr_events, then all that activity
8275 * completing might cause the array to appear non-idle
8276 * and resync will be slowed down even though there might
8277 * not have been non-resync activity. This will only
8278 * happen once though. 'last_events' will soon reflect
8279 * the state where there is little or no outstanding
8280 * resync requests, and further resync activity will
8281 * always make curr_events less than last_events.
8282 *
8283 */
8284 if (init || curr_events - rdev->last_events > 64) {
8285 rdev->last_events = curr_events;
8286 idle = 0;
8287 }
8288 }
8289 rcu_read_unlock();
8290 return idle;
8291}
8292
8293void md_done_sync(struct mddev *mddev, int blocks, int ok)
8294{
8295 /* another "blocks" (512byte) blocks have been synced */
8296 atomic_sub(blocks, &mddev->recovery_active);
8297 wake_up(&mddev->recovery_wait);
8298 if (!ok) {
8299 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8300 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
8301 md_wakeup_thread(mddev->thread);
8302 // stop recovery, signal do_sync ....
8303 }
8304}
8305EXPORT_SYMBOL(md_done_sync);
8306
8307/* md_write_start(mddev, bi)
8308 * If we need to update some array metadata (e.g. 'active' flag
8309 * in superblock) before writing, schedule a superblock update
8310 * and wait for it to complete.
8311 * A return value of 'false' means that the write wasn't recorded
8312 * and cannot proceed as the array is being suspended.
8313 */
8314bool md_write_start(struct mddev *mddev, struct bio *bi)
8315{
8316 int did_change = 0;
8317
8318 if (bio_data_dir(bi) != WRITE)
8319 return true;
8320
8321 BUG_ON(mddev->ro == 1);
8322 if (mddev->ro == 2) {
8323 /* need to switch to read/write */
8324 mddev->ro = 0;
8325 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8326 md_wakeup_thread(mddev->thread);
8327 md_wakeup_thread(mddev->sync_thread);
8328 did_change = 1;
8329 }
8330 rcu_read_lock();
8331 percpu_ref_get(&mddev->writes_pending);
8332 smp_mb(); /* Match smp_mb in set_in_sync() */
8333 if (mddev->safemode == 1)
8334 mddev->safemode = 0;
8335 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
8336 if (mddev->in_sync || mddev->sync_checkers) {
8337 spin_lock(&mddev->lock);
8338 if (mddev->in_sync) {
8339 mddev->in_sync = 0;
8340 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8341 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8342 md_wakeup_thread(mddev->thread);
8343 did_change = 1;
8344 }
8345 spin_unlock(&mddev->lock);
8346 }
8347 rcu_read_unlock();
8348 if (did_change)
8349 sysfs_notify_dirent_safe(mddev->sysfs_state);
8350 if (!mddev->has_superblocks)
8351 return true;
8352 wait_event(mddev->sb_wait,
8353 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8354 mddev->suspended);
8355 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8356 percpu_ref_put(&mddev->writes_pending);
8357 return false;
8358 }
8359 return true;
8360}
8361EXPORT_SYMBOL(md_write_start);
8362
8363/* md_write_inc can only be called when md_write_start() has
8364 * already been called at least once for the current request.
8365 * It increments the counter and is useful when a single request
8366 * is split into several parts. Each part causes an increment and
8367 * so needs a matching md_write_end().
8368 * Unlike md_write_start(), it is safe to call md_write_inc() inside
8369 * a spinlocked region.
8370 */
8371void md_write_inc(struct mddev *mddev, struct bio *bi)
8372{
8373 if (bio_data_dir(bi) != WRITE)
8374 return;
8375 WARN_ON_ONCE(mddev->in_sync || mddev->ro);
8376 percpu_ref_get(&mddev->writes_pending);
8377}
8378EXPORT_SYMBOL(md_write_inc);
8379
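/* md_write_end() - drop the writes_pending reference taken by
 * md_write_start()/md_write_inc(), and kick the safemode machinery so the
 * array can be marked clean again once writes have stopped.
 */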
8380void md_write_end(struct mddev *mddev)
8381{
8382 percpu_ref_put(&mddev->writes_pending);
8383
8384 if (mddev->safemode == 2)
8385 md_wakeup_thread(mddev->thread);
8386 else if (mddev->safemode_delay)
8387 /* The roundup() ensures this only performs locking once
8388 * every ->safemode_delay jiffies
8389 */
8390 mod_timer(&mddev->safemode_timer,
8391 roundup(jiffies, mddev->safemode_delay) +
8392 mddev->safemode_delay);
8393}
8394
8395EXPORT_SYMBOL(md_write_end);
8396
8397/* md_allow_write(mddev)
8398 * Calling this ensures that the array is marked 'active' so that writes
8399 * may proceed without blocking. It is important to call this before
8400 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8401 * Must be called with mddev_lock held.
8402 */
8403void md_allow_write(struct mddev *mddev)
8404{
8405 if (!mddev->pers)
8406 return;
8407 if (mddev->ro)
8408 return;
8409 if (!mddev->pers->sync_request)
8410 return;
8411
8412 spin_lock(&mddev->lock);
8413 if (mddev->in_sync) {
8414 mddev->in_sync = 0;
8415 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8416 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8417 if (mddev->safemode_delay &&
8418 mddev->safemode == 0)
8419 mddev->safemode = 1;
8420 spin_unlock(&mddev->lock);
8421 md_update_sb(mddev, 0);
8422 sysfs_notify_dirent_safe(mddev->sysfs_state);
8423 /* wait for the dirty state to be recorded in the metadata */
8424 wait_event(mddev->sb_wait,
8425 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8426 } else
8427 spin_unlock(&mddev->lock);
8428}
8429EXPORT_SYMBOL_GPL(md_allow_write);
8430
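/*
 * Resync bookkeeping: keep SYNC_MARKS timestamped marks, SYNC_MARK_STEP
 * apart, for the speed/ETA estimate shown in /proc/mdstat, and push
 * curr_resync_completed out at least every UPDATE_FREQUENCY.
 */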
8431#define SYNC_MARKS 10
8432#define SYNC_MARK_STEP (3*HZ)
8433#define UPDATE_FREQUENCY (5*60*HZ)
8434void md_do_sync(struct md_thread *thread)
8435{
8436 struct mddev *mddev = thread->mddev;
8437 struct mddev *mddev2;
8438	unsigned int currspeed = 0, window;
8439	sector_t max_sectors,j, io_sectors, recovery_done;
8440 unsigned long mark[SYNC_MARKS];
8441 unsigned long update_time;
8442 sector_t mark_cnt[SYNC_MARKS];
8443 int last_mark,m;
8444 struct list_head *tmp;
8445 sector_t last_check;
8446 int skipped = 0;
8447 struct md_rdev *rdev;
8448 char *desc, *action = NULL;
8449 struct blk_plug plug;
8450 int ret;
8451
8452	/* just in case thread restarts... */
8453 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8454 test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
8455 return;
8456 if (mddev->ro) {/* never try to sync a read-only array */
8457 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8458 return;
8459 }
8460
8461 if (mddev_is_clustered(mddev)) {
8462 ret = md_cluster_ops->resync_start(mddev);
8463 if (ret)
8464 goto skip;
8465
8466 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8467 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8468 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8469 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8470 && ((unsigned long long)mddev->curr_resync_completed
8471 < (unsigned long long)mddev->resync_max_sectors))
8472 goto skip;
8473 }
8474
8475 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8476 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8477 desc = "data-check";
8478 action = "check";
8479 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8480 desc = "requested-resync";
8481 action = "repair";
8482 } else
8483 desc = "resync";
8484 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8485 desc = "reshape";
8486 else
8487 desc = "recovery";
8488
8489 mddev->last_sync_action = action ?: desc;
8490
8491 /* we overload curr_resync somewhat here.
8492 * 0 == not engaged in resync at all
8493 * 2 == checking that there is no conflict with another sync
8494 * 1 == like 2, but have yielded to allow conflicting resync to
8495	 * commence
8496	 * other == active in resync - this many blocks
8497 *
8498 * Before starting a resync we must have set curr_resync to
8499 * 2, and then checked that every "conflicting" array has curr_resync
8500 * less than ours. When we find one that is the same or higher
8501 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
8502 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
8503 * This will mean we have to start checking from the beginning again.
8504 *
8505 */
8506
8507 do {
8508 int mddev2_minor = -1;
8509 mddev->curr_resync = 2;
8510
8511 try_again:
8512 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8513 goto skip;
8514 for_each_mddev(mddev2, tmp) {
8515 if (mddev2 == mddev)
8516 continue;
8517 if (!mddev->parallel_resync
8518 && mddev2->curr_resync
8519 && match_mddev_units(mddev, mddev2)) {
8520 DEFINE_WAIT(wq);
8521 if (mddev < mddev2 && mddev->curr_resync == 2) {
8522 /* arbitrarily yield */
8523 mddev->curr_resync = 1;
8524 wake_up(&resync_wait);
8525 }
8526 if (mddev > mddev2 && mddev->curr_resync == 1)
8527 /* no need to wait here, we can wait the next
8528 * time 'round when curr_resync == 2
8529 */
8530 continue;
8531 /* We need to wait 'interruptible' so as not to
8532 * contribute to the load average, and not to
8533 * be caught by 'softlockup'
8534 */
8535 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8536 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8537 mddev2->curr_resync >= mddev->curr_resync) {
8538 if (mddev2_minor != mddev2->md_minor) {
8539 mddev2_minor = mddev2->md_minor;
8540 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8541 desc, mdname(mddev),
8542 mdname(mddev2));
8543 }
8544 mddev_put(mddev2);
8545 if (signal_pending(current))
8546 flush_signals(current);
8547 schedule();
8548 finish_wait(&resync_wait, &wq);
8549 goto try_again;
8550 }
8551 finish_wait(&resync_wait, &wq);
8552 }
8553 }
8554 } while (mddev->curr_resync < 2);
8555
8556 j = 0;
8557 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8558 /* resync follows the size requested by the personality,
8559 * which defaults to physical size, but can be virtual size
8560 */
8561 max_sectors = mddev->resync_max_sectors;
8562 atomic64_set(&mddev->resync_mismatches, 0);
8563 /* we don't use the checkpoint if there's a bitmap */
8564 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8565 j = mddev->resync_min;
8566 else if (!mddev->bitmap)
8567 j = mddev->recovery_cp;
8568
8569	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008570 max_sectors = mddev->resync_max_sectors;
8571		/*
8572 * If the original node aborts reshaping then we continue the
8573		 * reshaping, so set j again to avoid restarting the reshape from
8574		 * the very beginning
8575 */
8576 if (mddev_is_clustered(mddev) &&
8577 mddev->reshape_position != MaxSector)
8578 j = mddev->reshape_position;
8579 } else {
8580		/* recovery follows the physical size of devices */
8581 max_sectors = mddev->dev_sectors;
8582 j = MaxSector;
8583 rcu_read_lock();
8584 rdev_for_each_rcu(rdev, mddev)
8585 if (rdev->raid_disk >= 0 &&
8586 !test_bit(Journal, &rdev->flags) &&
8587 !test_bit(Faulty, &rdev->flags) &&
8588 !test_bit(In_sync, &rdev->flags) &&
8589 rdev->recovery_offset < j)
8590 j = rdev->recovery_offset;
8591 rcu_read_unlock();
8592
8593 /* If there is a bitmap, we need to make sure all
8594 * writes that started before we added a spare
8595 * complete before we start doing a recovery.
8596 * Otherwise the write might complete and (via
8597 * bitmap_endwrite) set a bit in the bitmap after the
8598 * recovery has checked that bit and skipped that
8599 * region.
8600 */
8601 if (mddev->bitmap) {
8602 mddev->pers->quiesce(mddev, 1);
8603 mddev->pers->quiesce(mddev, 0);
8604 }
8605 }
8606
8607 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8608 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
8609 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8610 speed_max(mddev), desc);
8611
8612 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8613
8614 io_sectors = 0;
8615 for (m = 0; m < SYNC_MARKS; m++) {
8616 mark[m] = jiffies;
8617 mark_cnt[m] = io_sectors;
8618 }
8619 last_mark = 0;
8620 mddev->resync_mark = mark[last_mark];
8621 mddev->resync_mark_cnt = mark_cnt[last_mark];
8622
8623 /*
8624 * Tune reconstruction:
8625 */
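	/* One window is 32 pages worth of sectors; the speed and idle checks
	 * below run at most once per completed window of I/O.
	 */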
8626	window = 32 * (PAGE_SIZE / 512);
8627	pr_debug("md: using %dk window, over a total of %lluk.\n",
8628 window/2, (unsigned long long)max_sectors/2);
8629
8630 atomic_set(&mddev->recovery_active, 0);
8631 last_check = 0;
8632
8633 if (j>2) {
8634 pr_debug("md: resuming %s of %s from checkpoint.\n",
8635 desc, mdname(mddev));
8636 mddev->curr_resync = j;
8637 } else
8638 mddev->curr_resync = 3; /* no longer delayed */
8639 mddev->curr_resync_completed = j;
8640 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8641 md_new_event(mddev);
8642 update_time = jiffies;
8643
8644 blk_start_plug(&plug);
8645 while (j < max_sectors) {
8646 sector_t sectors;
8647
8648 skipped = 0;
8649
8650 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8651 ((mddev->curr_resync > mddev->curr_resync_completed &&
8652 (mddev->curr_resync - mddev->curr_resync_completed)
8653 > (max_sectors >> 4)) ||
8654 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8655 (j - mddev->curr_resync_completed)*2
8656 >= mddev->resync_max - mddev->curr_resync_completed ||
8657 mddev->curr_resync_completed > mddev->resync_max
8658 )) {
8659 /* time to update curr_resync_completed */
8660 wait_event(mddev->recovery_wait,
8661 atomic_read(&mddev->recovery_active) == 0);
8662 mddev->curr_resync_completed = j;
8663 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8664 j > mddev->recovery_cp)
8665 mddev->recovery_cp = j;
8666 update_time = jiffies;
8667 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8668 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8669 }
8670
8671 while (j >= mddev->resync_max &&
8672 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8673 /* As this condition is controlled by user-space,
8674 * we can block indefinitely, so use '_interruptible'
8675 * to avoid triggering warnings.
8676 */
8677 flush_signals(current); /* just in case */
8678 wait_event_interruptible(mddev->recovery_wait,
8679 mddev->resync_max > j
8680 || test_bit(MD_RECOVERY_INTR,
8681 &mddev->recovery));
8682 }
8683
8684 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8685 break;
8686
8687 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8688 if (sectors == 0) {
8689 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8690 break;
8691 }
8692
8693 if (!skipped) { /* actual IO requested */
8694 io_sectors += sectors;
8695 atomic_add(sectors, &mddev->recovery_active);
8696 }
8697
8698 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8699 break;
8700
8701 j += sectors;
8702 if (j > max_sectors)
8703 /* when skipping, extra large numbers can be returned. */
8704 j = max_sectors;
8705 if (j > 2)
8706 mddev->curr_resync = j;
8707 mddev->curr_mark_cnt = io_sectors;
8708 if (last_check == 0)
8709 /* this is the earliest that rebuild will be
8710 * visible in /proc/mdstat
8711 */
8712 md_new_event(mddev);
8713
8714 if (last_check + window > io_sectors || j == max_sectors)
8715 continue;
8716
8717 last_check = io_sectors;
8718 repeat:
8719 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8720 /* step marks */
8721 int next = (last_mark+1) % SYNC_MARKS;
8722
8723 mddev->resync_mark = mark[next];
8724 mddev->resync_mark_cnt = mark_cnt[next];
8725 mark[next] = jiffies;
8726 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8727 last_mark = next;
8728 }
8729
8730 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8731 break;
8732
8733 /*
8734		 * this loop exits only when we are slower than
8735 * the 'hard' speed limit, or the system was IO-idle for
8736 * a jiffy.
8737 * the system might be non-idle CPU-wise, but we only care
8738 * about not overloading the IO subsystem. (things like an
8739 * e2fsck being done on the RAID array should execute fast)
8740 */
8741 cond_resched();
8742
8743 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8744 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8745 /((jiffies-mddev->resync_mark)/HZ +1) +1;
8746
8747 if (currspeed > speed_min(mddev)) {
8748 if (currspeed > speed_max(mddev)) {
8749 msleep(500);
8750 goto repeat;
8751 }
8752 if (!is_mddev_idle(mddev, 0)) {
8753 /*
8754 * Give other IO more of a chance.
8755 * The faster the devices, the less we wait.
8756 */
8757 wait_event(mddev->recovery_wait,
8758 !atomic_read(&mddev->recovery_active));
8759 }
8760 }
8761 }
8762 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8763 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8764 ? "interrupted" : "done");
8765 /*
8766 * this also signals 'finished resyncing' to md_stop
8767 */
8768 blk_finish_plug(&plug);
8769 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8770
8771 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8772 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8773 mddev->curr_resync > 3) {
8774 mddev->curr_resync_completed = mddev->curr_resync;
8775 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8776 }
8777 mddev->pers->sync_request(mddev, max_sectors, &skipped);
8778
8779 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8780 mddev->curr_resync > 3) {
8781 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8782 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8783 if (mddev->curr_resync >= mddev->recovery_cp) {
8784 pr_debug("md: checkpointing %s of %s.\n",
8785 desc, mdname(mddev));
8786 if (test_bit(MD_RECOVERY_ERROR,
8787 &mddev->recovery))
8788 mddev->recovery_cp =
8789 mddev->curr_resync_completed;
8790 else
8791 mddev->recovery_cp =
8792 mddev->curr_resync;
8793 }
8794 } else
8795 mddev->recovery_cp = MaxSector;
8796 } else {
8797 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8798 mddev->curr_resync = MaxSector;
8799 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8800 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
8801 rcu_read_lock();
8802 rdev_for_each_rcu(rdev, mddev)
8803 if (rdev->raid_disk >= 0 &&
8804 mddev->delta_disks >= 0 &&
8805 !test_bit(Journal, &rdev->flags) &&
8806 !test_bit(Faulty, &rdev->flags) &&
8807 !test_bit(In_sync, &rdev->flags) &&
8808 rdev->recovery_offset < mddev->curr_resync)
8809 rdev->recovery_offset = mddev->curr_resync;
8810 rcu_read_unlock();
8811 }
8812 }
8813 }
8814 skip:
8815 /* set CHANGE_PENDING here since maybe another update is needed,
8816 * so other nodes are informed. It should be harmless for normal
8817 * raid */
8818 set_mask_bits(&mddev->sb_flags, 0,
8819 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
8820
8821 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8822 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8823 mddev->delta_disks > 0 &&
8824 mddev->pers->finish_reshape &&
8825 mddev->pers->size &&
8826 mddev->queue) {
8827 mddev_lock_nointr(mddev);
8828 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
8829 mddev_unlock(mddev);
8830		if (!mddev_is_clustered(mddev)) {
8831 set_capacity(mddev->gendisk, mddev->array_sectors);
8832 revalidate_disk(mddev->gendisk);
8833 }
8834	}
8835
8836 spin_lock(&mddev->lock);
8837 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8838 /* We completed so min/max setting can be forgotten if used. */
8839 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8840 mddev->resync_min = 0;
8841 mddev->resync_max = MaxSector;
8842 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8843 mddev->resync_min = mddev->curr_resync_completed;
8844 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8845 mddev->curr_resync = 0;
8846 spin_unlock(&mddev->lock);
8847
8848 wake_up(&resync_wait);
8849 md_wakeup_thread(mddev->thread);
8850 return;
8851}
8852EXPORT_SYMBOL_GPL(md_do_sync);
8853
8854static int remove_and_add_spares(struct mddev *mddev,
8855 struct md_rdev *this)
8856{
8857 struct md_rdev *rdev;
8858 int spares = 0;
8859 int removed = 0;
8860 bool remove_some = false;
8861
8862 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
8863 /* Mustn't remove devices when resync thread is running */
8864 return 0;
8865
8866 rdev_for_each(rdev, mddev) {
8867 if ((this == NULL || rdev == this) &&
8868 rdev->raid_disk >= 0 &&
8869 !test_bit(Blocked, &rdev->flags) &&
8870 test_bit(Faulty, &rdev->flags) &&
8871 atomic_read(&rdev->nr_pending)==0) {
8872 /* Faulty non-Blocked devices with nr_pending == 0
8873 * never get nr_pending incremented,
8874 * never get Faulty cleared, and never get Blocked set.
8875 * So we can synchronize_rcu now rather than once per device
8876 */
8877 remove_some = true;
8878 set_bit(RemoveSynchronized, &rdev->flags);
8879 }
8880 }
8881
8882 if (remove_some)
8883 synchronize_rcu();
8884 rdev_for_each(rdev, mddev) {
8885 if ((this == NULL || rdev == this) &&
8886 rdev->raid_disk >= 0 &&
8887 !test_bit(Blocked, &rdev->flags) &&
8888 ((test_bit(RemoveSynchronized, &rdev->flags) ||
8889 (!test_bit(In_sync, &rdev->flags) &&
8890 !test_bit(Journal, &rdev->flags))) &&
8891 atomic_read(&rdev->nr_pending)==0)) {
8892 if (mddev->pers->hot_remove_disk(
8893 mddev, rdev) == 0) {
8894 sysfs_unlink_rdev(mddev, rdev);
8895 rdev->saved_raid_disk = rdev->raid_disk;
8896 rdev->raid_disk = -1;
8897 removed++;
8898 }
8899 }
8900 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8901 clear_bit(RemoveSynchronized, &rdev->flags);
8902 }
8903
8904 if (removed && mddev->kobj.sd)
8905 sysfs_notify(&mddev->kobj, NULL, "degraded");
8906
8907 if (this && removed)
8908 goto no_add;
8909
8910 rdev_for_each(rdev, mddev) {
8911 if (this && this != rdev)
8912 continue;
8913 if (test_bit(Candidate, &rdev->flags))
8914 continue;
8915 if (rdev->raid_disk >= 0 &&
8916 !test_bit(In_sync, &rdev->flags) &&
8917 !test_bit(Journal, &rdev->flags) &&
8918 !test_bit(Faulty, &rdev->flags))
8919 spares++;
8920 if (rdev->raid_disk >= 0)
8921 continue;
8922 if (test_bit(Faulty, &rdev->flags))
8923 continue;
8924 if (!test_bit(Journal, &rdev->flags)) {
8925 if (mddev->ro &&
8926 ! (rdev->saved_raid_disk >= 0 &&
8927 !test_bit(Bitmap_sync, &rdev->flags)))
8928 continue;
8929
8930 rdev->recovery_offset = 0;
8931 }
8932 if (mddev->pers->
8933 hot_add_disk(mddev, rdev) == 0) {
8934 if (sysfs_link_rdev(mddev, rdev))
8935 /* failure here is OK */;
8936 if (!test_bit(Journal, &rdev->flags))
8937 spares++;
8938 md_new_event(mddev);
8939 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8940 }
8941 }
8942no_add:
8943 if (removed)
8944 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8945 return spares;
8946}
8947
8948static void md_start_sync(struct work_struct *ws)
8949{
8950 struct mddev *mddev = container_of(ws, struct mddev, del_work);
8951
8952 mddev->sync_thread = md_register_thread(md_do_sync,
8953 mddev,
8954 "resync");
8955 if (!mddev->sync_thread) {
8956 pr_warn("%s: could not start resync thread...\n",
8957 mdname(mddev));
8958 /* leave the spares where they are, it shouldn't hurt */
8959 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8960 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8961 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8962 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8963 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8964 wake_up(&resync_wait);
8965 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8966 &mddev->recovery))
8967 if (mddev->sysfs_action)
8968 sysfs_notify_dirent_safe(mddev->sysfs_action);
8969 } else
8970 md_wakeup_thread(mddev->sync_thread);
8971 sysfs_notify_dirent_safe(mddev->sysfs_action);
8972 md_new_event(mddev);
8973}
8974
8975/*
8976 * This routine is regularly called by all per-raid-array threads to
8977 * deal with generic issues like resync and super-block update.
8978 * Raid personalities that don't have a thread (linear/raid0) do not
8979 * need this as they never do any recovery or update the superblock.
8980 *
8981 * It does not do any resync itself, but rather "forks" off other threads
8982 * to do that as needed.
8983 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8984 * "->recovery" and create a thread at ->sync_thread.
8985 * When the thread finishes it sets MD_RECOVERY_DONE
8986 * and wakes up this thread which will reap the thread and finish up.
8987 * This thread also removes any faulty devices (with nr_pending == 0).
8988 *
8989 * The overall approach is:
8990 * 1/ if the superblock needs updating, update it.
8991 * 2/ If a recovery thread is running, don't do anything else.
8992 * 3/ If recovery has finished, clean up, possibly marking spares active.
8993 * 4/ If there are any faulty devices, remove them.
8994 * 5/ If array is degraded, try to add spare devices
8995 * 6/ If array has spares or is not in-sync, start a resync thread.
8996 */
8997void md_check_recovery(struct mddev *mddev)
8998{
8999	if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9000 /* Write superblock - thread that called mddev_suspend()
9001 * holds reconfig_mutex for us.
9002 */
9003 set_bit(MD_UPDATING_SB, &mddev->flags);
9004 smp_mb__after_atomic();
9005 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9006 md_update_sb(mddev, 0);
9007 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9008 wake_up(&mddev->sb_wait);
9009 }
9010
9011	if (mddev->suspended)
9012 return;
9013
9014 if (mddev->bitmap)
9015 md_bitmap_daemon_work(mddev);
9016
9017 if (signal_pending(current)) {
9018 if (mddev->pers->sync_request && !mddev->external) {
9019 pr_debug("md: %s in immediate safe mode\n",
9020 mdname(mddev));
9021 mddev->safemode = 2;
9022 }
9023 flush_signals(current);
9024 }
9025
9026 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9027 return;
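	/*
	 * Fast path out unless a superblock write is pending (beyond
	 * CHANGE_PENDING alone), recovery has been flagged as needed or has
	 * just finished, or a safemode transition needs to be recorded.
	 */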
9028 if ( ! (
9029 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
9030 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9031 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
9032 (mddev->external == 0 && mddev->safemode == 1) ||
9033 (mddev->safemode == 2
9034 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
9035 ))
9036 return;
9037
9038 if (mddev_trylock(mddev)) {
9039 int spares = 0;
9040		bool try_set_sync = mddev->safemode != 0;
9041
9042 if (!mddev->external && mddev->safemode == 1)
9043 mddev->safemode = 0;
9044
9045 if (mddev->ro) {
9046 struct md_rdev *rdev;
9047 if (!mddev->external && mddev->in_sync)
9048 /* 'Blocked' flag not needed as failed devices
9049 * will be recorded if array switched to read/write.
9050 * Leaving it set will prevent the device
9051 * from being removed.
9052 */
9053 rdev_for_each(rdev, mddev)
9054 clear_bit(Blocked, &rdev->flags);
9055 /* On a read-only array we can:
9056 * - remove failed devices
9057 * - add already-in_sync devices if the array itself
9058 * is in-sync.
9059 * As we only add devices that are already in-sync,
9060 * we can activate the spares immediately.
9061 */
9062 remove_and_add_spares(mddev, NULL);
9063 /* There is no thread, but we need to call
9064 * ->spare_active and clear saved_raid_disk
9065 */
9066 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9067 md_reap_sync_thread(mddev);
9068 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9069 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9070 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9071 goto unlock;
9072 }
9073
9074 if (mddev_is_clustered(mddev)) {
9075			struct md_rdev *rdev, *tmp;
9076			/* kick the device if another node issued a
9077 * remove disk.
9078 */
9079			rdev_for_each_safe(rdev, tmp, mddev) {
9080				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9081 rdev->raid_disk < 0)
9082 md_kick_rdev_from_array(rdev);
9083 }
9084 }
9085
9086		if (try_set_sync && !mddev->external && !mddev->in_sync) {
9087			spin_lock(&mddev->lock);
9088 set_in_sync(mddev);
9089 spin_unlock(&mddev->lock);
9090 }
9091
9092 if (mddev->sb_flags)
9093 md_update_sb(mddev, 0);
9094
9095 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9096 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9097 /* resync/recovery still happening */
9098 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9099 goto unlock;
9100 }
9101 if (mddev->sync_thread) {
9102 md_reap_sync_thread(mddev);
9103 goto unlock;
9104 }
9105 /* Set RUNNING before clearing NEEDED to avoid
9106 * any transients in the value of "sync_action".
9107 */
9108 mddev->curr_resync_completed = 0;
9109 spin_lock(&mddev->lock);
9110 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9111 spin_unlock(&mddev->lock);
9112 /* Clear some bits that don't mean anything, but
9113 * might be left set
9114 */
9115 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9116 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9117
9118 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9119 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
9120 goto not_running;
9121 /* no recovery is running.
9122 * remove any failed drives, then
9123 * add spares if possible.
9124 * Spares are also removed and re-added, to allow
9125 * the personality to fail the re-add.
9126 */
9127
9128 if (mddev->reshape_position != MaxSector) {
9129 if (mddev->pers->check_reshape == NULL ||
9130 mddev->pers->check_reshape(mddev) != 0)
9131 /* Cannot proceed */
9132 goto not_running;
9133 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9134 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9135 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
9136 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9137 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9138 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9139 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9140 } else if (mddev->recovery_cp < MaxSector) {
9141 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9142 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9143 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9144 /* nothing to be done ... */
9145 goto not_running;
9146
9147 if (mddev->pers->sync_request) {
9148 if (spares) {
9149 /* We are adding a device or devices to an array
9150 * which has the bitmap stored on all devices.
9151 * So make sure all bitmap pages get written
9152 */
9153 md_bitmap_write_all(mddev->bitmap);
9154 }
9155 INIT_WORK(&mddev->del_work, md_start_sync);
9156 queue_work(md_misc_wq, &mddev->del_work);
9157 goto unlock;
9158 }
9159 not_running:
9160 if (!mddev->sync_thread) {
9161 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9162 wake_up(&resync_wait);
9163 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9164 &mddev->recovery))
9165 if (mddev->sysfs_action)
9166 sysfs_notify_dirent_safe(mddev->sysfs_action);
9167 }
9168 unlock:
9169 wake_up(&mddev->sb_wait);
9170 mddev_unlock(mddev);
9171	}
9172}
9173EXPORT_SYMBOL(md_check_recovery);
9174
9175void md_reap_sync_thread(struct mddev *mddev)
9176{
9177 struct md_rdev *rdev;
9178	sector_t old_dev_sectors = mddev->dev_sectors;
9179 bool is_reshaped = false;
9180
9181 /* resync has finished, collect result */
9182 md_unregister_thread(&mddev->sync_thread);
9183 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9184	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9185 mddev->degraded != mddev->raid_disks) {
9186		/* success...*/
9187 /* activate any spares */
9188 if (mddev->pers->spare_active(mddev)) {
9189 sysfs_notify(&mddev->kobj, NULL,
9190 "degraded");
9191 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9192 }
9193 }
9194 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9195	    mddev->pers->finish_reshape) {
9196		mddev->pers->finish_reshape(mddev);
9197		if (mddev_is_clustered(mddev))
9198 is_reshaped = true;
9199 }
9200
9201 /* If array is no-longer degraded, then any saved_raid_disk
9202 * information must be scrapped.
9203 */
9204 if (!mddev->degraded)
9205 rdev_for_each(rdev, mddev)
9206 rdev->saved_raid_disk = -1;
9207
9208 md_update_sb(mddev, 1);
9209 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
9210 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9211 * clustered raid */
9212 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9213 md_cluster_ops->resync_finish(mddev);
9214 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9215 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9216 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9217 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9218 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9219 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9220	/*
9221 * We call md_cluster_ops->update_size here because sync_size could
9222 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9223 * so it is time to update size across cluster.
9224 */
9225 if (mddev_is_clustered(mddev) && is_reshaped
9226 && !test_bit(MD_CLOSING, &mddev->flags))
9227 md_cluster_ops->update_size(mddev, old_dev_sectors);
9228	wake_up(&resync_wait);
9229 /* flag recovery needed just to double check */
9230 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9231 sysfs_notify_dirent_safe(mddev->sysfs_action);
9232 md_new_event(mddev);
9233 if (mddev->event_work.func)
9234 queue_work(md_misc_wq, &mddev->event_work);
9235}
9236EXPORT_SYMBOL(md_reap_sync_thread);
9237
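/*
 * Wait (up to five seconds) for a blocked rdev to become unblocked, i.e.
 * for both Blocked and BlockedBadBlocks to clear, then drop the pending
 * reference the caller holds on the rdev.
 */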
9238void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9239{
9240 sysfs_notify_dirent_safe(rdev->sysfs_state);
9241 wait_event_timeout(rdev->blocked_wait,
9242 !test_bit(Blocked, &rdev->flags) &&
9243 !test_bit(BlockedBadBlocks, &rdev->flags),
9244 msecs_to_jiffies(5000));
9245 rdev_dec_pending(rdev, mddev);
9246}
9247EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9248
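/*
 * Fold new_data_offset into data_offset on every rdev and adjust
 * rdev->sectors to match.
 */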
9249void md_finish_reshape(struct mddev *mddev)
9250{
9251 /* Called by the personality module when a reshape completes. */
9252 struct md_rdev *rdev;
9253
9254 rdev_for_each(rdev, mddev) {
9255 if (rdev->data_offset > rdev->new_data_offset)
9256 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9257 else
9258 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9259 rdev->data_offset = rdev->new_data_offset;
9260 }
9261}
9262EXPORT_SYMBOL(md_finish_reshape);
9263
9264/* Bad block management */
9265
9266 /* Add a range of bad blocks to an rdev's list. Returns 1 on success, 0 on failure */
9267int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9268 int is_new)
9269{
9270 struct mddev *mddev = rdev->mddev;
9271 int rv;
9272 if (is_new)
9273 s += rdev->new_data_offset;
9274 else
9275 s += rdev->data_offset;
9276 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9277 if (rv == 0) {
9278 /* Make sure they get written out promptly */
9279 if (test_bit(ExternalBbl, &rdev->flags))
9280 sysfs_notify(&rdev->kobj, NULL,
9281 "unacknowledged_bad_blocks");
9282 sysfs_notify_dirent_safe(rdev->sysfs_state);
9283 set_mask_bits(&mddev->sb_flags, 0,
9284 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
9285 md_wakeup_thread(rdev->mddev->thread);
9286 return 1;
9287 } else
9288 return 0;
9289}
9290EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9291
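/*
 * Remove a range from the rdev's bad-block list and, when the list is
 * managed externally (ExternalBbl), notify the "bad_blocks" sysfs
 * attribute.  Propagates the return value of badblocks_clear().
 */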
9292int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9293 int is_new)
9294{
9295 int rv;
9296 if (is_new)
9297 s += rdev->new_data_offset;
9298 else
9299 s += rdev->data_offset;
9300 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9301 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9302 sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
9303 return rv;
9304}
9305EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9306
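/*
 * Reboot notifier: stop writes on every array we can lock and put
 * persistent arrays into safemode so their superblocks are marked clean
 * before the machine goes down.
 */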
9307static int md_notify_reboot(struct notifier_block *this,
9308 unsigned long code, void *x)
9309{
9310 struct list_head *tmp;
9311 struct mddev *mddev;
9312 int need_delay = 0;
9313
9314 for_each_mddev(mddev, tmp) {
9315 if (mddev_trylock(mddev)) {
9316 if (mddev->pers)
9317 __md_stop_writes(mddev);
9318 if (mddev->persistent)
9319 mddev->safemode = 2;
9320 mddev_unlock(mddev);
9321 }
9322 need_delay = 1;
9323 }
9324 /*
9325 * certain more exotic SCSI devices are known to be
9326 * volatile with respect to overly early system reboots. While the
9327 * right place to handle this issue is the individual
9328 * driver, we do want to have a safe RAID driver ...
9329 */
9330 if (need_delay)
9331 mdelay(1000*1);
9332
9333 return NOTIFY_DONE;
9334}
9335
9336static struct notifier_block md_notifier = {
9337 .notifier_call = md_notify_reboot,
9338 .next = NULL,
9339 .priority = INT_MAX, /* before any real devices */
9340};
9341
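/* Log the superblock size and create /proc/mdstat */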
9342static void md_geninit(void)
9343{
9344 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
9345
9346 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
9347}
9348
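/*
 * Module init: create the md workqueues, register the "md" and "mdp"
 * block majors and their probe regions, hook up the reboot notifier and
 * the raid sysctl table, then create /proc/mdstat.
 */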
9349static int __init md_init(void)
9350{
9351 int ret = -ENOMEM;
9352
9353 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
9354 if (!md_wq)
9355 goto err_wq;
9356
9357 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9358 if (!md_misc_wq)
9359 goto err_misc_wq;
9360
9361 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
9362 goto err_md;
9363
9364 if ((ret = register_blkdev(0, "mdp")) < 0)
9365 goto err_mdp;
9366 mdp_major = ret;
9367
9368 blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
9369 md_probe, NULL, NULL);
9370 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
9371 md_probe, NULL, NULL);
9372
9373 register_reboot_notifier(&md_notifier);
9374 raid_table_header = register_sysctl_table(raid_root_table);
9375
9376 md_geninit();
9377 return 0;
9378
9379err_mdp:
9380 unregister_blkdev(MD_MAJOR, "md");
9381err_md:
9382 destroy_workqueue(md_misc_wq);
9383err_misc_wq:
9384 destroy_workqueue(md_wq);
9385err_wq:
9386 return ret;
9387}
9388
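/*
 * Apply superblock changes written by another cluster node: resize if the
 * size changed, handle role changes (newly activated spares or newly
 * faulty devices), pick up raid_disks and remote reshape updates, and
 * finally bring the event count up to date.
 */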
9389static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9390{
9391 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9392 struct md_rdev *rdev2, *tmp;
9393 int role, ret;
9394 char b[BDEVNAME_SIZE];
9395
9396 /*
9397 * If the size was changed on another node then we need to
9398 * resize here as well.
9399 */
9400 if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9401 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9402 if (ret)
9403 pr_info("md-cluster: resize failed\n");
9404 else
9405 md_bitmap_update_sb(mddev->bitmap);
9406 }
9407
9408 /* Check for change of roles in the active devices */
9409 rdev_for_each_safe(rdev2, tmp, mddev) {
9410 if (test_bit(Faulty, &rdev2->flags))
9411 continue;
9412
9413 /* Check if the roles changed */
9414 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
9415
9416 if (test_bit(Candidate, &rdev2->flags)) {
9417 if (role == 0xfffe) {
9418 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9419 md_kick_rdev_from_array(rdev2);
9420 continue;
9421 }
9422 else
9423 clear_bit(Candidate, &rdev2->flags);
9424 }
9425
9426 if (role != rdev2->raid_disk) {
9427 /*
9428 * Device got activated on another node; skip while a reshape is active.
9429 */
9430 if (rdev2->raid_disk == -1 && role != 0xffff &&
9431 !(le32_to_cpu(sb->feature_map) &
9432 MD_FEATURE_RESHAPE_ACTIVE)) {
9433 rdev2->saved_raid_disk = role;
9434 ret = remove_and_add_spares(mddev, rdev2);
9435 pr_info("Activated spare: %s\n",
9436 bdevname(rdev2->bdev,b));
9437 /* wake up mddev->thread here, so the array can
9438 * perform a resync with the newly activated disk */
9439 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9440 md_wakeup_thread(mddev->thread);
9441 }
9442 /* device faulty
9443 * We just want to do the minimum to mark the disk
9444 * as faulty. The recovery is performed by the
9445 * node that initiated the error.
9446 */
9447 if ((role == 0xfffe) || (role == 0xfffd)) {
9448 md_error(mddev, rdev2);
9449 clear_bit(Blocked, &rdev2->flags);
9450 }
9451 }
9452 }
9453
9454 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9455 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9456 if (ret)
9457 pr_warn("md: updating array disks failed. %d\n", ret);
9458 }
9459
9460 /*
9461 * mddev->delta_disks has already been updated in update_raid_disks(),
9462 * so it is time to check for a reshape.
9463 */
9464 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9465 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9466 /*
9467 * reshape is happening in the remote node, we need to
9468 * update reshape_position and call start_reshape.
9469 */
9470 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
9471 if (mddev->pers->update_reshape_pos)
9472 mddev->pers->update_reshape_pos(mddev);
9473 if (mddev->pers->start_reshape)
9474 mddev->pers->start_reshape(mddev);
9475 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9476 mddev->reshape_position != MaxSector &&
9477 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9478 /* reshape is just done in another node. */
9479 mddev->reshape_position = MaxSector;
9480 if (mddev->pers->update_reshape_pos)
9481 mddev->pers->update_reshape_pos(mddev);
9482 }
9483
9484 /* Finally set the event to be up to date */
9485 mddev->events = le64_to_cpu(sb->events);
9486}
9487
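/*
 * Re-read the superblock of a single rdev into a freshly allocated page,
 * restoring the old page if the read fails.  Used to pick up metadata
 * written by other cluster nodes.
 */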
9488static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9489{
9490 int err;
9491 struct page *swapout = rdev->sb_page;
9492 struct mdp_superblock_1 *sb;
9493
9494 /* Store the sb page of the rdev in the swapout temporary
9495 * variable in case we fail later
9496 */
9497 rdev->sb_page = NULL;
9498 err = alloc_disk_sb(rdev);
9499 if (err == 0) {
9500 ClearPageUptodate(rdev->sb_page);
9501 rdev->sb_loaded = 0;
9502 err = super_types[mddev->major_version].
9503 load_super(rdev, NULL, mddev->minor_version);
9504 }
9505 if (err < 0) {
9506 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9507 __func__, __LINE__, rdev->desc_nr, err);
9508 if (rdev->sb_page)
9509 put_page(rdev->sb_page);
9510 rdev->sb_page = swapout;
9511 rdev->sb_loaded = 1;
9512 return err;
9513 }
9514
9515 sb = page_address(rdev->sb_page);
9516 /* Only pick up recovery_offset if MD_FEATURE_RECOVERY_OFFSET
9517 * is set
9518 */
9519
9520 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9521 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9522
9523 /* The other node finished recovery, call spare_active to set
9524 * the device In_sync and update mddev->degraded
9525 */
9526 if (rdev->recovery_offset == MaxSector &&
9527 !test_bit(In_sync, &rdev->flags) &&
9528 mddev->pers->spare_active(mddev))
9529 sysfs_notify(&mddev->kobj, NULL, "degraded");
9530
9531 put_page(swapout);
9532 return 0;
9533}
9534
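/*
 * Reload the superblock of the rdev with descriptor number "nr", apply any
 * changes made by another cluster node, then refresh recovery_offset on
 * all remaining rdevs.
 */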
9535void md_reload_sb(struct mddev *mddev, int nr)
9536{
9537 struct md_rdev *rdev;
9538 int err;
9539
9540 /* Find the rdev */
9541 rdev_for_each_rcu(rdev, mddev) {
9542 if (rdev->desc_nr == nr)
9543 break;
9544 }
9545
9546 if (!rdev || rdev->desc_nr != nr) {
9547 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9548 return;
9549 }
9550
9551 err = read_rdev(mddev, rdev);
9552 if (err < 0)
9553 return;
9554
9555 check_sb_changes(mddev, rdev);
9556
9557 /* Read all rdev's to update recovery_offset */
9558 rdev_for_each_rcu(rdev, mddev) {
9559 if (!test_bit(Faulty, &rdev->flags))
9560 read_rdev(mddev, rdev);
9561 }
9562}
9563EXPORT_SYMBOL(md_reload_sb);
9564
9565#ifndef MODULE
9566
9567/*
9568 * Searches all registered partitions for autorun RAID arrays
9569 * at boot time.
9570 */
9571
9572static DEFINE_MUTEX(detected_devices_mutex);
9573static LIST_HEAD(all_detected_devices);
9574struct detected_devices_node {
9575 struct list_head list;
9576 dev_t dev;
9577};
9578
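/*
 * Remember a device detected at boot (typically a partition marked for
 * RAID autodetect) so autostart_arrays() can try to assemble it later.
 */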
9579void md_autodetect_dev(dev_t dev)
9580{
9581 struct detected_devices_node *node_detected_dev;
9582
9583 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9584 if (node_detected_dev) {
9585 node_detected_dev->dev = dev;
9586 mutex_lock(&detected_devices_mutex);
9587 list_add_tail(&node_detected_dev->list, &all_detected_devices);
9588 mutex_unlock(&detected_devices_mutex);
9589 }
9590}
9591
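/*
 * Import every device remembered by md_autodetect_dev() and hand the
 * resulting list to autorun_devices() to assemble any arrays found.
 */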
9592static void autostart_arrays(int part)
9593{
9594 struct md_rdev *rdev;
9595 struct detected_devices_node *node_detected_dev;
9596 dev_t dev;
9597 int i_scanned, i_passed;
9598
9599 i_scanned = 0;
9600 i_passed = 0;
9601
9602 pr_info("md: Autodetecting RAID arrays.\n");
9603
9604 mutex_lock(&detected_devices_mutex);
9605 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9606 i_scanned++;
9607 node_detected_dev = list_entry(all_detected_devices.next,
9608 struct detected_devices_node, list);
9609 list_del(&node_detected_dev->list);
9610 dev = node_detected_dev->dev;
9611 kfree(node_detected_dev);
9612 mutex_unlock(&detected_devices_mutex);
9613 rdev = md_import_device(dev,0, 90);
9614 mutex_lock(&detected_devices_mutex);
9615 if (IS_ERR(rdev))
9616 continue;
9617
9618 if (test_bit(Faulty, &rdev->flags))
9619 continue;
9620
9621 set_bit(AutoDetected, &rdev->flags);
9622 list_add(&rdev->same_set, &pending_raid_disks);
9623 i_passed++;
9624 }
9625 mutex_unlock(&detected_devices_mutex);
9626
9627 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
9628
9629 autorun_devices(part);
9630}
9631
9632#endif /* !MODULE */
9633
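/*
 * Module exit: undo everything md_init() registered, wake up any pollers
 * of /proc/mdstat so they drop their references, then tear down every
 * remaining mddev and the workqueues.
 */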
9634static __exit void md_exit(void)
9635{
9636 struct mddev *mddev;
9637 struct list_head *tmp;
9638 int delay = 1;
9639
9640 blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
9641 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
9642
9643 unregister_blkdev(MD_MAJOR,"md");
9644 unregister_blkdev(mdp_major, "mdp");
9645 unregister_reboot_notifier(&md_notifier);
9646 unregister_sysctl_table(raid_table_header);
9647
9648 /* We cannot unload the modules while some process is
9649 * waiting for us in select() or poll() - wake them up
9650 */
9651 md_unloading = 1;
9652 while (waitqueue_active(&md_event_waiters)) {
9653 /* not safe to leave yet */
9654 wake_up(&md_event_waiters);
9655 msleep(delay);
9656 delay += delay;
9657 }
9658 remove_proc_entry("mdstat", NULL);
9659
9660 for_each_mddev(mddev, tmp) {
9661 export_array(mddev);
9662 mddev->ctime = 0;
9663 mddev->hold_active = 0;
9664 /*
9665 * for_each_mddev() will call mddev_put() at the end of each
9666 * iteration. As the mddev is now fully clear, this will
9667 * schedule the mddev for destruction by a workqueue, and the
9668 * destroy_workqueue() below will wait for that to complete.
9669 */
9670 }
9671 destroy_workqueue(md_misc_wq);
9672 destroy_workqueue(md_wq);
9673}
9674
9675subsys_initcall(md_init);
9676module_exit(md_exit)
9677
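/*
 * "start_ro" module parameter: when set, newly assembled arrays come up
 * auto-read-only until the first write arrives.  A minimal usage sketch
 * (the sysfs path assumes the module is built as md_mod, as in mainline):
 *
 *   echo 1 > /sys/module/md_mod/parameters/start_ro
 */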
9678static int get_ro(char *buffer, const struct kernel_param *kp)
9679{
9680 return sprintf(buffer, "%d", start_readonly);
9681}
9682static int set_ro(const char *val, const struct kernel_param *kp)
9683{
9684 return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9685}
9686
9687module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9688module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9689module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9690module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
9691
9692MODULE_LICENSE("GPL");
9693MODULE_DESCRIPTION("MD RAID framework");
9694MODULE_ALIAS("md");
9695MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);