xref: /OK3568_Linux_fs/kernel/drivers/nvme/host/multipath.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

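/*
 * Queue freeze helpers for the subsystem-wide multipath nodes: start an
 * asynchronous freeze, wait for it to complete, or unfreeze again for every
 * namespace head that has a multipath disk.  All three expect subsys->lock
 * to be held by the caller.
 */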
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}

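/*
 * Fail over a request that completed with a path-related error: clear the
 * cached current path, kick an ANA log re-read if the error was an ANA state
 * error, move the bios onto the head's requeue list and complete the original
 * request so another path can pick them up.
 */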
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);
	kblockd_schedule_work(&ns->head->requeue_work);
}

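/*
 * Schedule the requeue work for every namespace of @ctrl whose head has a
 * multipath disk so that parked bios are resubmitted.
 */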
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

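/*
 * Drop @ns as the cached current path for every NUMA node of its head.
 * Returns true if it was the current path for at least one node.
 */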
bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

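/*
 * Clear the cached current path for every namespace on @ctrl and kick the
 * requeue workers so I/O gets redirected to the remaining paths.
 */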
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    test_bit(NVME_NS_REMOVING, &ns->flags))
		return true;
	return false;
}

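/*
 * Slow path selection: walk all sibling paths and pick the ANA-optimized one
 * closest to @node by NUMA distance (all paths count as local for non-NUMA
 * policies), falling back to the closest non-optimized path.  The chosen path
 * is cached in head->current_path[node].
 */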
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

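/*
 * Return the sibling path after @ns, wrapping around to the first entry of
 * the head's path list.
 */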
static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

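/*
 * Round-robin path selection: starting after the current path @old, find the
 * next ANA-optimized path, falling back to a non-optimized one or to @old
 * itself (see the comment inside), and cache the result for @node.
 */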
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			found = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 *  - no other optimized path found and current is optimized,
	 *  - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!found)
		return NULL;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

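/*
 * Fast path lookup, called from the bio submission path under head->srcu:
 * return the cached per-node path if it is still optimized, otherwise redo
 * path selection according to the subsystem's I/O policy.
 */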
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

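/*
 * Returns true if at least one controller backing this head is live or still
 * recovering, i.e. it is worth requeueing I/O instead of failing it.
 */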
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/* fallthru */
			return true;
		default:
			break;
		}
	}
	return false;
}

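/*
 * Bio submission entry point for the multipath node: pick a usable path under
 * SRCU and resubmit the bio on it, park the bio on the requeue list if only
 * recovering paths remain, or fail it if no path is available at all.
 */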
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	blk_queue_split(&bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		submit_bio_noacct(bio);
	}
}

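/*
 * Allocate the request queue and gendisk that act as the multipath node for
 * @head.  This is a no-op (and not an error) when native multipath is
 * disabled or the subsystem only reports a single controller.
 */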
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing
	 * data could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
		return 0;

	q = blk_alloc_queue(ctrl->numa_node);
	if (!q)
		goto out;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

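/*
 * Mark @ns as a live path: register the multipath gendisk the first time any
 * path becomes live, pre-populate the per-node current paths if this path is
 * optimized, and requeue any parked bios.
 */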
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (!head->disk)
		return;

	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

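/*
 * Walk the ANA log page in ctrl->ana_log_buf, sanity checking each group
 * descriptor and calling @cb for it with @data.  The caller must hold
 * ctrl->ana_lock.  Stops early and returns the callback's error, if any.
 */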
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
	/*
	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
	 * and in turn to this path device.  However we cannot accept this I/O
	 * if the controller is not live.  This may deadlock if called from
	 * nvme_mpath_init_identify() and the ctrl will never complete
	 * initialization, preventing I/O from completing.  For this case we
	 * will reprocess the ANA log page in nvme_mpath_update() once the
	 * controller is ready.
	 */
	if (nvme_state_is_live(ns->ana_state) &&
	    ns->ctrl->state == NVME_CTRL_LIVE)
		nvme_mpath_set_live(ns);
}

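/*
 * nvme_parse_ana_log() callback: apply one ANA group descriptor by walking
 * the controller's namespace list (kept sorted by NSID) in lockstep with the
 * descriptor's NSID list, and count groups that are in the change state.
 */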
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

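/*
 * Fetch the ANA log page from the controller, update the per-namespace ANA
 * state from it, and (re)arm the ANATT timer if any group is still in the
 * change state.
 */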
static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  We'll still eventually
	 * time out once all groups are in the change state, so this isn't a
	 * big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;

	if (!ctrl->ana_log_buf)
		return;

	mutex_lock(&ctrl->ana_lock);
	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
	mutex_unlock(&ctrl->ana_lock);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n",
			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

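/*
 * Hook a new path into its multipath node: look up the namespace's ANA group
 * descriptor in the current log (or schedule a re-read if it is not there
 * yet) and apply the resulting state, or mark the path optimized directly
 * when ANA is not in use.  Also propagate stable-writes and zoned-device
 * settings to the multipath queue.
 */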
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
#endif
}

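/*
 * Tear down the multipath node: remove the gendisk if it was registered,
 * flush any remaining requeue work so parked bios are dealt with, then
 * release the queue and the disk.
 */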
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		/*
		 * If device_add_disk() wasn't called, prevent the disk
		 * release from putting a bogus reference on the request
		 * queue.
		 */
		head->disk->queue = NULL;
	}
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

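/*
 * Second stage of multipath setup, run after Identify Controller: read the
 * ANA capabilities, size and (re)allocate the ANA log buffer, and do the
 * initial ANA log read.  ANA support is disabled if the log page would not
 * fit within MDTS.
 */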
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		kfree(ctrl->ana_log_buf);
		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}