// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

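/*
 * The freeze helpers below operate on every namespace head in a subsystem
 * so its multipath request queues can be frozen, drained and unfrozen as a
 * unit.  All of them require subsys->lock to be held.
 */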
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
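/*
 * Naming example (illustrative): with native multipath enabled, a shared
 * namespace is exposed as a single node named after the subsystem, e.g.
 * "nvme0n1", while each per-controller path appears as a hidden node such
 * as "nvme0c1n1" (subsystem 0, controller 1, namespace head 1).
 */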
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
			ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	}
}

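/*
 * Fail over a request to another path: forget the current path, kick off a
 * re-read of the ANA log for ANA errors, move the bios over to the head's
 * requeue list and complete the original request.
 */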
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);
	kblockd_schedule_work(&ns->head->requeue_work);
}

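/*
 * Reschedule the requeue work for every namespace head of a controller so
 * queued bios get another shot at path selection, e.g. once the controller
 * becomes live again.
 */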
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

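/*
 * Drop the cached current path on every NUMA node if it points at @ns.
 * Returns true if any per-node cache entry was cleared.
 */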
bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    test_bit(NVME_NS_REMOVING, &ns->flags))
		return true;
	return false;
}

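/*
 * Select the best path for @node: prefer the ANA-optimized path with the
 * smallest NUMA distance, fall back to the closest non-optimized path, and
 * cache the winner in head->current_path[node].
 */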
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

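/*
 * Circular successor of @ns on the sibling list: wraps around to the first
 * entry once the end of the list is reached.
 */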
static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

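/*
 * Round-robin policy: starting after @old, pick the next enabled path,
 * preferring an optimized one, and cache it for @node.
 */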
211static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
212 int node, struct nvme_ns *old)
213{
Olivier Deprez157378f2022-04-04 15:47:50 +0200214 struct nvme_ns *ns, *found = NULL;
David Brazdil0f672f62019-12-10 10:32:29 +0000215
216 if (list_is_singular(&head->list)) {
217 if (nvme_path_is_disabled(old))
218 return NULL;
219 return old;
220 }
221
222 for (ns = nvme_next_ns(head, old);
Olivier Deprez0e641232021-09-23 10:07:05 +0200223 ns && ns != old;
David Brazdil0f672f62019-12-10 10:32:29 +0000224 ns = nvme_next_ns(head, ns)) {
225 if (nvme_path_is_disabled(ns))
226 continue;
227
228 if (ns->ana_state == NVME_ANA_OPTIMIZED) {
229 found = ns;
230 goto out;
231 }
232 if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
Olivier Deprez157378f2022-04-04 15:47:50 +0200233 found = ns;
David Brazdil0f672f62019-12-10 10:32:29 +0000234 }
235
Olivier Deprez0e641232021-09-23 10:07:05 +0200236 /*
237 * The loop above skips the current path for round-robin semantics.
238 * Fall back to the current path if either:
239 * - no other optimized path found and current is optimized,
240 * - no other usable path found and current is usable.
241 */
242 if (!nvme_path_is_disabled(old) &&
243 (old->ana_state == NVME_ANA_OPTIMIZED ||
Olivier Deprez157378f2022-04-04 15:47:50 +0200244 (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
Olivier Deprez0e641232021-09-23 10:07:05 +0200245 return old;
246
Olivier Deprez157378f2022-04-04 15:47:50 +0200247 if (!found)
David Brazdil0f672f62019-12-10 10:32:29 +0000248 return NULL;
David Brazdil0f672f62019-12-10 10:32:29 +0000249out:
250 rcu_assign_pointer(head->current_path[node], found);
251 return found;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000252}
253
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

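/*
 * Fast-path lookup: reuse the cached path for this NUMA node while it is
 * still optimized, otherwise re-run path selection.  The caller must hold
 * head->srcu via srcu_read_lock().
 */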
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

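/*
 * Returns true if at least one path's controller is in a state that could
 * still deliver I/O, in which case bios are requeued instead of failed.
 */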
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/* fallthru */
			return true;
		default:
			break;
		}
	}
	return false;
}

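/*
 * submit_bio entry point of the multipath node: select a path under SRCU
 * and resubmit the bio to that path's queue, requeue the bio if no path is
 * usable right now but one may come back, or fail it outright.
 */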
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	blk_queue_split(&bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		submit_bio_noacct(bio);
	}
}

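/*
 * Set up the multipath gendisk and stacked request queue for a namespace
 * head.  Returns 0 without allocating a disk when the subsystem does not
 * report multiple-controller support or the multipath parameter is off.
 */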
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
		return 0;

	q = blk_alloc_queue(ctrl->numa_node);
	if (!q)
		goto out;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC setting */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

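/*
 * Publish the multipath gendisk the first time a live path shows up, and
 * pre-populate the per-node path cache if this path is optimized.
 */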
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (!head->disk)
		return;

	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

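/*
 * Walk the ANA log in ctrl->ana_log_buf and invoke @cb on each group
 * descriptor.  The log consists of a header followed by one descriptor per
 * group, each trailed by its (possibly empty) array of __le32 NSIDs; all
 * offsets are bounds-checked against ana_log_size before being used.
 */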
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			  void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state))
		nvme_mpath_set_live(ns);
}

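/*
 * nvme_parse_ana_log() callback that applies a group descriptor's state to
 * all matching namespaces.  Both ctrl->namespaces and desc->nsids are
 * ordered by NSID, so a single merge-style pass suffices; the "again"
 * label re-checks the current namespace against the next NSID.
 */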
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

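/*
 * Fetch the ANA log page from the controller and apply it.  While any
 * group reports the "change" state, keep the ANATT timer armed so a
 * controller stuck in transition eventually gets reset.
 */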
static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  But we'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store) \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sprintf(buf, "%s\n",
			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
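/*
 * iopolicy is exposed as a sysfs attribute of the subsystem device, e.g.:
 *   echo round-robin > /sys/class/nvme-subsystem/nvme-subsys0/iopolicy
 * switches that subsystem from the default "numa" policy to round-robin.
 */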

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

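/*
 * Called when a namespace is added: look up its ANA group state from the
 * cached log (or default to "optimized" when ANA is not in use) and
 * propagate stable-writes and zone information to the multipath node.
 */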
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
#endif
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		/*
		 * if device_add_disk wasn't called, prevent the disk release
		 * path from putting a bogus reference on the request queue
		 */
		head->disk->queue = NULL;
	}
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

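/*
 * Validate the ANA capabilities reported by Identify Controller and
 * (re)allocate the ANA log buffer.  The worst-case log size is computed
 * from nanagrpid and max_namespaces; if it exceeds what a single transfer
 * allows, ANA support is disabled instead of failing initialization.
 */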
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		kfree(ctrl->ana_log_buf);
		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}