// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

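/*
 * Freeze/unfreeze helpers for the per-subsystem multipath (ns_head) queues.
 * Each walks every namespace head in the subsystem and acts on the head's
 * request queue; all three expect the caller to hold subsys->lock, which is
 * checked via lockdep_assert_held().
 */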
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	}
}

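/*
 * Decide whether a failed request can be retried on another path.  For ANA
 * and transport/path errors the bios are stolen onto the head's requeue
 * list, the request is completed, and the requeue work is kicked so the I/O
 * is resubmitted through path selection; returning false tells the caller
 * to handle the error through the normal (non-multipath) completion path.
 */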
bool nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace. The spec suggests
		 * we should update our general state here, but due to the fact
		 * that the admin and I/O queues are not serialized that is
		 * fundamentally racy. So instead just clear the current path,
		 * mark the path as pending and kick off a re-read of the ANA
		 * log page ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	case NVME_SC_HOST_PATH_ERROR:
	case NVME_SC_HOST_ABORTED_CMD:
		/*
		 * Temporary transport disruption in talking to the controller.
		 * Try to send on a new path.
		 */
		nvme_mpath_clear_current_path(ns);
		break;
	default:
		/* This was a non-ANA error so follow the normal error path. */
		return false;
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	kblockd_schedule_work(&ns->head->requeue_work);
	return true;
}

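/*
 * Schedule the requeue work for every multipath head on this controller so
 * that bios parked on the requeue lists get another shot at path selection.
 */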
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

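/*
 * Drop @ns from the cached current_path of its head on every NUMA node.
 * Returns true if the namespace was the cached path for at least one node,
 * so the caller knows whether queued I/O needs to be kicked.
 */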
bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

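/*
 * Invalidate all cached paths that point at namespaces of @ctrl and requeue
 * any affected I/O so it is resubmitted through a different path.
 */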
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->scan_lock);
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (nvme_mpath_clear_current_path(ns))
			kblockd_schedule_work(&ns->head->requeue_work);
	up_read(&ctrl->namespaces_rwsem);
	mutex_unlock(&ctrl->scan_lock);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	return ns->ctrl->state != NVME_CTRL_LIVE ||
		test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
		test_bit(NVME_NS_REMOVING, &ns->flags);
}

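/*
 * Re-select the per-node path: pick the usable path with the smallest NUMA
 * distance to @node (all paths count as LOCAL_DISTANCE unless the "numa"
 * I/O policy is active), prefer ANA-optimized paths over non-optimized
 * ones, and cache the winner in head->current_path[node].
 */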
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

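/*
 * Round-robin selection: starting from the path used last time (@old), walk
 * the sibling list and take the next usable ANA-optimized path, remembering
 * the first non-optimized one as a fallback.  The result is cached in
 * head->current_path[node] so the next submission advances from it.
 */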
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found, *fallback = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			fallback = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 *  - no other optimized path found and current is optimized,
	 *  - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!fallback && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!fallback)
		return NULL;
	found = fallback;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

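/*
 * Fast-path lookup used on every bio submission: return the cached path for
 * the local NUMA node if it is still optimized, otherwise fall back to
 * round-robin or a full re-selection depending on the subsystem I/O policy.
 * The caller must already hold head->srcu (srcu_read_lock()).
 */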
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

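/*
 * Returns true if at least one controller for this head is live or still
 * reconnecting, i.e. a path may become usable again; in that case queued
 * I/O should be requeued instead of failed.
 */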
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/* fallthru */
			return true;
		default:
			break;
		}
	}
	return false;
}

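/*
 * make_request handler for the multipath (head) block device: pick a path
 * under SRCU protection and resubmit the bio on that namespace's queue.  If
 * no path is currently usable the bio is parked on the requeue list as long
 * as some path may come back, and failed with BLK_STS_IOERR otherwise.
 */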
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might
	 * be moved to a different queue via blk_steal_bios(),
	 * so we need to use the bio_split pool from the original
	 * queue to allocate the bvecs from.
	 */
	blk_queue_split(q, &bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = generic_make_request(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

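/*
 * Worker that drains the head's requeue list: each parked bio is pointed
 * back at the multipath node and resubmitted so that path selection runs
 * again.
 */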
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}

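/*
 * Set up the shared ns_head state and, when the subsystem can have multiple
 * controllers, allocate the bio-based request queue and gendisk that back
 * the /dev/nvmeXnY multipath node.
 */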
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

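/*
 * Make @ns available through the multipath node: register the head's
 * gendisk on first use (guarded by NVME_NSHEAD_DISK_LIVE), pre-populate the
 * per-node path cache when this path is optimized, and kick the requeue
 * work so any parked I/O is resubmitted.
 */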
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (!head->disk)
		return;

	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

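/*
 * Walk the ANA log page in ctrl->ana_log_buf, sanity-checking each group
 * descriptor (group id, state, and that it fits in the buffer), and invoke
 * @cb on every descriptor.  A non-zero return from the callback stops the
 * walk and is passed back to the caller.
 */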
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state))
		nvme_mpath_set_live(ns);
}

static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid = le32_to_cpu(desc->nsids[n]);

		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

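/*
 * Fetch the ANA log page from the controller and apply it via
 * nvme_update_ana_state().  If any group is still in the "change" state the
 * ANATT timer is (re)armed so a stuck transition eventually resets the
 * controller; otherwise the timer is stopped.
 */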
static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might
	 * enter the change state at different times.  But that is a lot of
	 * overhead just to protect against a target that keeps entering new
	 * change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sprintf(buf, "%s\n",
			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

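/*
 * Hook a freshly scanned namespace into multipath handling.  With ANA
 * enabled, look up the namespace's group descriptor in the cached ANA log
 * and apply its state (or schedule a log re-read if it is not there yet);
 * without ANA every path is simply treated as optimized.
 */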
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
		struct gendisk *disk = ns->head->disk;

		if (disk)
			disk->queue->backing_dev_info->capabilities |=
					BDI_CAP_STABLE_WRITES;
	}
}

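/*
 * Tear down the multipath gendisk and its queue when the last path goes
 * away, flushing the requeue work so no parked bios are left behind.
 */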
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		/*
		 * if device_add_disk wasn't called, prevent the disk release
		 * from putting a bogus reference on the request queue
		 */
		head->disk->queue = NULL;
	}
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

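/*
 * Called after Identify Controller: cache the ANA parameters and size the
 * ANA log buffer from NANAGRPID and the maximum namespace count, disabling
 * ANA support if the log would exceed the controller's MDTS.
 */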
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		kfree(ctrl->ana_log_buf);
		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}