xref: /OK3568_Linux_fs/kernel/drivers/nvme/host/tcp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
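/*
 * Illustrative usage (not part of this file): a non-zero value, e.g.
 * "modprobe nvme-tcp so_priority=1", is presumably applied to each queue's
 * socket priority when the queue is created; meaningful values depend on
 * the NIC and traffic-class configuration.
 */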

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
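/* One key pair per address family: index 0 is AF_INET, index 1 is AF_INET6. */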
static struct lock_class_key nvme_tcp_sk_key[2];
static struct lock_class_key nvme_tcp_slock_key[2];

static void nvme_tcp_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
					      &nvme_tcp_slock_key[0],
					      "sk_lock-AF_INET-NVME",
					      &nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
					      &nvme_tcp_slock_key[1],
					      "sk_lock-AF_INET6-NVME",
					      &nvme_tcp_sk_key[1]);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

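/*
 * Note: data_len/data_sent below track the whole request payload, while
 * pdu_len/pdu_sent track only the data PDU currently being transferred
 * (e.g. one H2C data PDU out of several solicited by R2T).
 */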
struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

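/*
 * Receive-side state machine: each queue cycles through the PDU header,
 * then (for C2H data) the payload, then (when data digest is enabled) the
 * DDGST trailer, before re-arming for the next PDU; see
 * nvme_tcp_recv_state().
 */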
enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

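/*
 * Inline (in-capsule) data: a write payload small enough to fit in the
 * command capsule is sent together with the command PDU instead of
 * waiting for the controller to solicit it with R2T.
 */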
static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
{
	return req->iter.iov_offset;
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nsegs;
	size_t offset;

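	/*
	 * Requests with a special payload (e.g. discard) carry a single
	 * driver-built bvec rather than a chain of bios.
	 */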
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nsegs = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nsegs = bio_segments(bio);
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
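	/*
	 * If the iterator for the current bio is drained but the request
	 * still has payload left, move to the next bio in the chain and
	 * rebuild the iterator over it.
	 */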
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list);
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list, try to send directly;
	 * otherwise queue io_work.  Only do so if we are on the same
	 * cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		nvme_tcp_send_all(queue);
		mutex_unlock(&queue->send_mutex);
	}

	if (last && nvme_tcp_queue_more(queue))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

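	/*
	 * llist_del_all() hands back the lock-free list newest-first;
	 * list_add() prepends each entry, so send_list comes out
	 * oldest-first and requests are sent in FIFO order.
	 */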
	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

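	/*
	 * nvme_tcp_hdgst() writes the computed digest back into the buffer
	 * right after the header, overwriting the received digest, so save
	 * the wire value first and compare the two afterwards.
	 */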
	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

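	/* Payload length = total PDU length minus header and optional header digest. */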
	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
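	/*
	 * All PDU types the host receives (rsp, c2h_data, r2t) share the
	 * same on-wire size, so one fixed receive length suffices here.
	 */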
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(!req->pdu_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, req->pdu_len);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = pdu->r2t_offset;
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}

	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

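	/*
	 * In the NVMe completion status field, bit 0 is the phase tag and
	 * bits 15:1 carry the status code, hence the shift below.
	 */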
	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
			      unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

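/*
 * ->read_sock() callback: dispatch each chunk of the skb to the receive
 * state machine above and return the number of bytes consumed; a negative
 * return stops delivery and kicks off error recovery.
 */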
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed: %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
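	/*
	 * When NVME_TCP_Q_POLLING is set the polling path reaps
	 * completions itself, so don't schedule io_work from here.
	 */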
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	if (nvme_tcp_async_req(req)) {
		union nvme_result res = {};

		nvme_complete_async_event(&req->queue->ctrl->ctrl,
				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
	} else {
		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
				NVME_SC_HOST_PATH_ERROR);
	}
}

static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int req_data_len = req->data_len;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int req_data_sent = req->data_sent;
		int ret, flags = MSG_DONTWAIT;

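		/*
		 * Close the record with MSG_EOR only when this is the last
		 * send of the PDU and nothing else is queued; otherwise
		 * hint the stack that more data follows so it can coalesce.
		 */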
		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/*
		 * update the request iterator except for the last payload send
		 * in the request where we don't want to modify it as we may
		 * compete with the RX path completing the request.
		 */
		if (req_data_sent + ret < req_data_len)
			nvme_tcp_advance_req(req, ret);

		/* fully successful last send in current PDU */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len, flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
			nvme_tcp_init_iter(req, WRITE);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		if (!req->data_sent)
			nvme_tcp_init_iter(req, WRITE);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	size_t offset = req->offset;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

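/*
 * Send-side state machine: the states are intentionally walked in order
 * (CMD_PDU -> H2C_PDU -> DATA -> DDGST) with fall-through "if"s, so a
 * request can make progress through several states in a single call.
 * Returns 1 if progress was made, 0 if there is nothing to send or the
 * socket is full, and fails the request on a hard send error.
 */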
nvme_tcp_try_send(struct nvme_tcp_queue * queue)1109*4882a593Smuzhiyun static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1110*4882a593Smuzhiyun {
1111*4882a593Smuzhiyun 	struct nvme_tcp_request *req;
1112*4882a593Smuzhiyun 	int ret = 1;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	if (!queue->request) {
1115*4882a593Smuzhiyun 		queue->request = nvme_tcp_fetch_request(queue);
1116*4882a593Smuzhiyun 		if (!queue->request)
1117*4882a593Smuzhiyun 			return 0;
1118*4882a593Smuzhiyun 	}
1119*4882a593Smuzhiyun 	req = queue->request;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
1122*4882a593Smuzhiyun 		ret = nvme_tcp_try_send_cmd_pdu(req);
1123*4882a593Smuzhiyun 		if (ret <= 0)
1124*4882a593Smuzhiyun 			goto done;
1125*4882a593Smuzhiyun 		if (!nvme_tcp_has_inline_data(req))
1126*4882a593Smuzhiyun 			return ret;
1127*4882a593Smuzhiyun 	}
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
1130*4882a593Smuzhiyun 		ret = nvme_tcp_try_send_data_pdu(req);
1131*4882a593Smuzhiyun 		if (ret <= 0)
1132*4882a593Smuzhiyun 			goto done;
1133*4882a593Smuzhiyun 	}
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	if (req->state == NVME_TCP_SEND_DATA) {
1136*4882a593Smuzhiyun 		ret = nvme_tcp_try_send_data(req);
1137*4882a593Smuzhiyun 		if (ret <= 0)
1138*4882a593Smuzhiyun 			goto done;
1139*4882a593Smuzhiyun 	}
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	if (req->state == NVME_TCP_SEND_DDGST)
1142*4882a593Smuzhiyun 		ret = nvme_tcp_try_send_ddgst(req);
1143*4882a593Smuzhiyun done:
1144*4882a593Smuzhiyun 	if (ret == -EAGAIN) {
1145*4882a593Smuzhiyun 		ret = 0;
1146*4882a593Smuzhiyun 	} else if (ret < 0) {
1147*4882a593Smuzhiyun 		dev_err(queue->ctrl->ctrl.device,
1148*4882a593Smuzhiyun 			"failed to send request %d\n", ret);
1149*4882a593Smuzhiyun 		nvme_tcp_fail_request(queue->request);
1150*4882a593Smuzhiyun 		nvme_tcp_done_send_req(queue);
1151*4882a593Smuzhiyun 	}
1152*4882a593Smuzhiyun 	return ret;
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1156*4882a593Smuzhiyun {
1157*4882a593Smuzhiyun 	struct socket *sock = queue->sock;
1158*4882a593Smuzhiyun 	struct sock *sk = sock->sk;
1159*4882a593Smuzhiyun 	read_descriptor_t rd_desc;
1160*4882a593Smuzhiyun 	int consumed;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	rd_desc.arg.data = queue;
1163*4882a593Smuzhiyun 	rd_desc.count = 1;
1164*4882a593Smuzhiyun 	lock_sock(sk);
1165*4882a593Smuzhiyun 	queue->nr_cqe = 0;
1166*4882a593Smuzhiyun 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1167*4882a593Smuzhiyun 	release_sock(sk);
1168*4882a593Smuzhiyun 	return consumed;
1169*4882a593Smuzhiyun }
1170*4882a593Smuzhiyun 
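/*
 * Queue I/O context: alternate between sending (when the send_mutex is
 * uncontended) and receiving for up to ~1ms, returning early when both
 * directions are idle, and otherwise requeueing itself on queue->io_cpu
 * once the time quota is exhausted.
 */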
1171*4882a593Smuzhiyun static void nvme_tcp_io_work(struct work_struct *w)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun 	struct nvme_tcp_queue *queue =
1174*4882a593Smuzhiyun 		container_of(w, struct nvme_tcp_queue, io_work);
1175*4882a593Smuzhiyun 	unsigned long deadline = jiffies + msecs_to_jiffies(1);
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	do {
1178*4882a593Smuzhiyun 		bool pending = false;
1179*4882a593Smuzhiyun 		int result;
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 		if (mutex_trylock(&queue->send_mutex)) {
1182*4882a593Smuzhiyun 			result = nvme_tcp_try_send(queue);
1183*4882a593Smuzhiyun 			mutex_unlock(&queue->send_mutex);
1184*4882a593Smuzhiyun 			if (result > 0)
1185*4882a593Smuzhiyun 				pending = true;
1186*4882a593Smuzhiyun 			else if (unlikely(result < 0))
1187*4882a593Smuzhiyun 				break;
1188*4882a593Smuzhiyun 		}
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 		result = nvme_tcp_try_recv(queue);
1191*4882a593Smuzhiyun 		if (result > 0)
1192*4882a593Smuzhiyun 			pending = true;
1193*4882a593Smuzhiyun 		else if (unlikely(result < 0))
1194*4882a593Smuzhiyun 			return;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 		if (!pending || !queue->rd_enabled)
1197*4882a593Smuzhiyun 			return;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	ahash_request_free(queue->rcv_hash);
1209*4882a593Smuzhiyun 	ahash_request_free(queue->snd_hash);
1210*4882a593Smuzhiyun 	crypto_free_ahash(tfm);
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun 
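/*
 * Header and data digests both use CRC32C.  A single "crc32c" transform is
 * shared by the queue, with separate ahash requests for the send and
 * receive directions.
 */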
1213*4882a593Smuzhiyun static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1214*4882a593Smuzhiyun {
1215*4882a593Smuzhiyun 	struct crypto_ahash *tfm;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1218*4882a593Smuzhiyun 	if (IS_ERR(tfm))
1219*4882a593Smuzhiyun 		return PTR_ERR(tfm);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1222*4882a593Smuzhiyun 	if (!queue->snd_hash)
1223*4882a593Smuzhiyun 		goto free_tfm;
1224*4882a593Smuzhiyun 	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1227*4882a593Smuzhiyun 	if (!queue->rcv_hash)
1228*4882a593Smuzhiyun 		goto free_snd_hash;
1229*4882a593Smuzhiyun 	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	return 0;
1232*4882a593Smuzhiyun free_snd_hash:
1233*4882a593Smuzhiyun 	ahash_request_free(queue->snd_hash);
1234*4882a593Smuzhiyun free_tfm:
1235*4882a593Smuzhiyun 	crypto_free_ahash(tfm);
1236*4882a593Smuzhiyun 	return -ENOMEM;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1240*4882a593Smuzhiyun {
1241*4882a593Smuzhiyun 	struct nvme_tcp_request *async = &ctrl->async_req;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	page_frag_free(async->pdu);
1244*4882a593Smuzhiyun }
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1247*4882a593Smuzhiyun {
1248*4882a593Smuzhiyun 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
1249*4882a593Smuzhiyun 	struct nvme_tcp_request *async = &ctrl->async_req;
1250*4882a593Smuzhiyun 	u8 hdgst = nvme_tcp_hdgst_len(queue);
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	async->pdu = page_frag_alloc(&queue->pf_cache,
1253*4882a593Smuzhiyun 		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1254*4882a593Smuzhiyun 		GFP_KERNEL | __GFP_ZERO);
1255*4882a593Smuzhiyun 	if (!async->pdu)
1256*4882a593Smuzhiyun 		return -ENOMEM;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	async->queue = &ctrl->queues[0];
1259*4882a593Smuzhiyun 	return 0;
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1265*4882a593Smuzhiyun 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1268*4882a593Smuzhiyun 		return;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	if (queue->hdr_digest || queue->data_digest)
1271*4882a593Smuzhiyun 		nvme_tcp_free_crypto(queue);
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	sock_release(queue->sock);
1274*4882a593Smuzhiyun 	kfree(queue->pdu);
1275*4882a593Smuzhiyun 	mutex_destroy(&queue->queue_lock);
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun 
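/*
 * Initial connection handshake: send an ICReq PDU advertising PFV 1.0, no
 * PDU alignment (hpda = 0), a single outstanding R2T, and the digest
 * settings we want; then validate that the controller's ICResp echoes a
 * compatible configuration.  Any mismatch fails queue setup with -EINVAL.
 */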
1278*4882a593Smuzhiyun static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1279*4882a593Smuzhiyun {
1280*4882a593Smuzhiyun 	struct nvme_tcp_icreq_pdu *icreq;
1281*4882a593Smuzhiyun 	struct nvme_tcp_icresp_pdu *icresp;
1282*4882a593Smuzhiyun 	struct msghdr msg = {};
1283*4882a593Smuzhiyun 	struct kvec iov;
1284*4882a593Smuzhiyun 	bool ctrl_hdgst, ctrl_ddgst;
1285*4882a593Smuzhiyun 	int ret;
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1288*4882a593Smuzhiyun 	if (!icreq)
1289*4882a593Smuzhiyun 		return -ENOMEM;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1292*4882a593Smuzhiyun 	if (!icresp) {
1293*4882a593Smuzhiyun 		ret = -ENOMEM;
1294*4882a593Smuzhiyun 		goto free_icreq;
1295*4882a593Smuzhiyun 	}
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	icreq->hdr.type = nvme_tcp_icreq;
1298*4882a593Smuzhiyun 	icreq->hdr.hlen = sizeof(*icreq);
1299*4882a593Smuzhiyun 	icreq->hdr.pdo = 0;
1300*4882a593Smuzhiyun 	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1301*4882a593Smuzhiyun 	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1302*4882a593Smuzhiyun 	icreq->maxr2t = 0; /* single inflight r2t supported */
1303*4882a593Smuzhiyun 	icreq->hpda = 0; /* no alignment constraint */
1304*4882a593Smuzhiyun 	if (queue->hdr_digest)
1305*4882a593Smuzhiyun 		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1306*4882a593Smuzhiyun 	if (queue->data_digest)
1307*4882a593Smuzhiyun 		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	iov.iov_base = icreq;
1310*4882a593Smuzhiyun 	iov.iov_len = sizeof(*icreq);
1311*4882a593Smuzhiyun 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1312*4882a593Smuzhiyun 	if (ret < 0)
1313*4882a593Smuzhiyun 		goto free_icresp;
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	memset(&msg, 0, sizeof(msg));
1316*4882a593Smuzhiyun 	iov.iov_base = icresp;
1317*4882a593Smuzhiyun 	iov.iov_len = sizeof(*icresp);
1318*4882a593Smuzhiyun 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1319*4882a593Smuzhiyun 			iov.iov_len, msg.msg_flags);
1320*4882a593Smuzhiyun 	if (ret < 0)
1321*4882a593Smuzhiyun 		goto free_icresp;
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	ret = -EINVAL;
1324*4882a593Smuzhiyun 	if (icresp->hdr.type != nvme_tcp_icresp) {
1325*4882a593Smuzhiyun 		pr_err("queue %d: bad type returned %d\n",
1326*4882a593Smuzhiyun 			nvme_tcp_queue_id(queue), icresp->hdr.type);
1327*4882a593Smuzhiyun 		goto free_icresp;
1328*4882a593Smuzhiyun 	}
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1331*4882a593Smuzhiyun 		pr_err("queue %d: bad pdu length returned %d\n",
1332*4882a593Smuzhiyun 			nvme_tcp_queue_id(queue), icresp->hdr.plen);
1333*4882a593Smuzhiyun 		goto free_icresp;
1334*4882a593Smuzhiyun 	}
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	if (icresp->pfv != NVME_TCP_PFV_1_0) {
1337*4882a593Smuzhiyun 		pr_err("queue %d: bad pfv returned %d\n",
1338*4882a593Smuzhiyun 			nvme_tcp_queue_id(queue), icresp->pfv);
1339*4882a593Smuzhiyun 		goto free_icresp;
1340*4882a593Smuzhiyun 	}
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1343*4882a593Smuzhiyun 	if ((queue->data_digest && !ctrl_ddgst) ||
1344*4882a593Smuzhiyun 	    (!queue->data_digest && ctrl_ddgst)) {
1345*4882a593Smuzhiyun 		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1346*4882a593Smuzhiyun 			nvme_tcp_queue_id(queue),
1347*4882a593Smuzhiyun 			queue->data_digest ? "enabled" : "disabled",
1348*4882a593Smuzhiyun 			ctrl_ddgst ? "enabled" : "disabled");
1349*4882a593Smuzhiyun 		goto free_icresp;
1350*4882a593Smuzhiyun 	}
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1353*4882a593Smuzhiyun 	if ((queue->hdr_digest && !ctrl_hdgst) ||
1354*4882a593Smuzhiyun 	    (!queue->hdr_digest && ctrl_hdgst)) {
1355*4882a593Smuzhiyun 		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1356*4882a593Smuzhiyun 			nvme_tcp_queue_id(queue),
1357*4882a593Smuzhiyun 			queue->hdr_digest ? "enabled" : "disabled",
1358*4882a593Smuzhiyun 			ctrl_hdgst ? "enabled" : "disabled");
1359*4882a593Smuzhiyun 		goto free_icresp;
1360*4882a593Smuzhiyun 	}
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	if (icresp->cpda != 0) {
1363*4882a593Smuzhiyun 		pr_err("queue %d: unsupported cpda returned %d\n",
1364*4882a593Smuzhiyun 			nvme_tcp_queue_id(queue), icresp->cpda);
1365*4882a593Smuzhiyun 		goto free_icresp;
1366*4882a593Smuzhiyun 	}
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	ret = 0;
1369*4882a593Smuzhiyun free_icresp:
1370*4882a593Smuzhiyun 	kfree(icresp);
1371*4882a593Smuzhiyun free_icreq:
1372*4882a593Smuzhiyun 	kfree(icreq);
1373*4882a593Smuzhiyun 	return ret;
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun 	return nvme_tcp_queue_id(queue) == 0;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1382*4882a593Smuzhiyun {
1383*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1384*4882a593Smuzhiyun 	int qid = nvme_tcp_queue_id(queue);
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	return !nvme_tcp_admin_queue(queue) &&
1387*4882a593Smuzhiyun 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1388*4882a593Smuzhiyun }
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1391*4882a593Smuzhiyun {
1392*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1393*4882a593Smuzhiyun 	int qid = nvme_tcp_queue_id(queue);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	return !nvme_tcp_admin_queue(queue) &&
1396*4882a593Smuzhiyun 		!nvme_tcp_default_queue(queue) &&
1397*4882a593Smuzhiyun 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1398*4882a593Smuzhiyun 			  ctrl->io_queues[HCTX_TYPE_READ];
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1404*4882a593Smuzhiyun 	int qid = nvme_tcp_queue_id(queue);
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	return !nvme_tcp_admin_queue(queue) &&
1407*4882a593Smuzhiyun 		!nvme_tcp_default_queue(queue) &&
1408*4882a593Smuzhiyun 		!nvme_tcp_read_queue(queue) &&
1409*4882a593Smuzhiyun 		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1410*4882a593Smuzhiyun 			  ctrl->io_queues[HCTX_TYPE_READ] +
1411*4882a593Smuzhiyun 			  ctrl->io_queues[HCTX_TYPE_POLL];
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun 
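/*
 * Map a queue to a CPU for its io_work.  Queues are numbered relative to
 * the start of their type range: e.g. with 4 default and 2 read queues,
 * qid 5 is the first read queue and gets relative index n = 5 - 4 - 1 = 0,
 * so every queue type starts its CPU assignment from the beginning of
 * cpu_online_mask independently.
 */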
1414*4882a593Smuzhiyun static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1415*4882a593Smuzhiyun {
1416*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1417*4882a593Smuzhiyun 	int qid = nvme_tcp_queue_id(queue);
1418*4882a593Smuzhiyun 	int n = 0;
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	if (nvme_tcp_default_queue(queue))
1421*4882a593Smuzhiyun 		n = qid - 1;
1422*4882a593Smuzhiyun 	else if (nvme_tcp_read_queue(queue))
1423*4882a593Smuzhiyun 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1424*4882a593Smuzhiyun 	else if (nvme_tcp_poll_queue(queue))
1425*4882a593Smuzhiyun 		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1426*4882a593Smuzhiyun 				ctrl->io_queues[HCTX_TYPE_READ] - 1;
1427*4882a593Smuzhiyun 	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1431*4882a593Smuzhiyun 		int qid, size_t queue_size)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1434*4882a593Smuzhiyun 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1435*4882a593Smuzhiyun 	int ret, rcv_pdu_size;
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun 	mutex_init(&queue->queue_lock);
1438*4882a593Smuzhiyun 	queue->ctrl = ctrl;
1439*4882a593Smuzhiyun 	init_llist_head(&queue->req_list);
1440*4882a593Smuzhiyun 	INIT_LIST_HEAD(&queue->send_list);
1441*4882a593Smuzhiyun 	mutex_init(&queue->send_mutex);
1442*4882a593Smuzhiyun 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1443*4882a593Smuzhiyun 	queue->queue_size = queue_size;
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 	if (qid > 0)
1446*4882a593Smuzhiyun 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1447*4882a593Smuzhiyun 	else
1448*4882a593Smuzhiyun 		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1449*4882a593Smuzhiyun 						NVME_TCP_ADMIN_CCSZ;
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1452*4882a593Smuzhiyun 			IPPROTO_TCP, &queue->sock);
1453*4882a593Smuzhiyun 	if (ret) {
1454*4882a593Smuzhiyun 		dev_err(nctrl->device,
1455*4882a593Smuzhiyun 			"failed to create socket: %d\n", ret);
1456*4882a593Smuzhiyun 		goto err_destroy_mutex;
1457*4882a593Smuzhiyun 	}
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	nvme_tcp_reclassify_socket(queue->sock);
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	/* Single syn retry */
1462*4882a593Smuzhiyun 	tcp_sock_set_syncnt(queue->sock->sk, 1);
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	/* Set TCP no delay */
1465*4882a593Smuzhiyun 	tcp_sock_set_nodelay(queue->sock->sk);
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	/*
1468*4882a593Smuzhiyun 	 * Cleanup whatever is sitting in the TCP transmit queue on socket
1469*4882a593Smuzhiyun 	 * close. This is done to prevent stale data from being sent should
1470*4882a593Smuzhiyun 	 * the network connection be restored before TCP times out.
1471*4882a593Smuzhiyun 	 */
1472*4882a593Smuzhiyun 	sock_no_linger(queue->sock->sk);
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	if (so_priority > 0)
1475*4882a593Smuzhiyun 		sock_set_priority(queue->sock->sk, so_priority);
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	/* Set socket type of service */
1478*4882a593Smuzhiyun 	if (nctrl->opts->tos >= 0)
1479*4882a593Smuzhiyun 		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	/* Set 10 seconds timeout for icresp recvmsg */
1482*4882a593Smuzhiyun 	queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
1485*4882a593Smuzhiyun 	nvme_tcp_set_queue_io_cpu(queue);
1486*4882a593Smuzhiyun 	queue->request = NULL;
1487*4882a593Smuzhiyun 	queue->data_remaining = 0;
1488*4882a593Smuzhiyun 	queue->ddgst_remaining = 0;
1489*4882a593Smuzhiyun 	queue->pdu_remaining = 0;
1490*4882a593Smuzhiyun 	queue->pdu_offset = 0;
1491*4882a593Smuzhiyun 	sk_set_memalloc(queue->sock->sk);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1494*4882a593Smuzhiyun 		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1495*4882a593Smuzhiyun 			sizeof(ctrl->src_addr));
1496*4882a593Smuzhiyun 		if (ret) {
1497*4882a593Smuzhiyun 			dev_err(nctrl->device,
1498*4882a593Smuzhiyun 				"failed to bind queue %d socket %d\n",
1499*4882a593Smuzhiyun 				qid, ret);
1500*4882a593Smuzhiyun 			goto err_sock;
1501*4882a593Smuzhiyun 		}
1502*4882a593Smuzhiyun 	}
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	queue->hdr_digest = nctrl->opts->hdr_digest;
1505*4882a593Smuzhiyun 	queue->data_digest = nctrl->opts->data_digest;
1506*4882a593Smuzhiyun 	if (queue->hdr_digest || queue->data_digest) {
1507*4882a593Smuzhiyun 		ret = nvme_tcp_alloc_crypto(queue);
1508*4882a593Smuzhiyun 		if (ret) {
1509*4882a593Smuzhiyun 			dev_err(nctrl->device,
1510*4882a593Smuzhiyun 				"failed to allocate queue %d crypto\n", qid);
1511*4882a593Smuzhiyun 			goto err_sock;
1512*4882a593Smuzhiyun 		}
1513*4882a593Smuzhiyun 	}
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1516*4882a593Smuzhiyun 			nvme_tcp_hdgst_len(queue);
1517*4882a593Smuzhiyun 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1518*4882a593Smuzhiyun 	if (!queue->pdu) {
1519*4882a593Smuzhiyun 		ret = -ENOMEM;
1520*4882a593Smuzhiyun 		goto err_crypto;
1521*4882a593Smuzhiyun 	}
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	dev_dbg(nctrl->device, "connecting queue %d\n",
1524*4882a593Smuzhiyun 			nvme_tcp_queue_id(queue));
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1527*4882a593Smuzhiyun 		sizeof(ctrl->addr), 0);
1528*4882a593Smuzhiyun 	if (ret) {
1529*4882a593Smuzhiyun 		dev_err(nctrl->device,
1530*4882a593Smuzhiyun 			"failed to connect socket: %d\n", ret);
1531*4882a593Smuzhiyun 		goto err_rcv_pdu;
1532*4882a593Smuzhiyun 	}
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 	ret = nvme_tcp_init_connection(queue);
1535*4882a593Smuzhiyun 	if (ret)
1536*4882a593Smuzhiyun 		goto err_init_connect;
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	queue->rd_enabled = true;
1539*4882a593Smuzhiyun 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1540*4882a593Smuzhiyun 	nvme_tcp_init_recv_ctx(queue);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
1543*4882a593Smuzhiyun 	queue->sock->sk->sk_user_data = queue;
1544*4882a593Smuzhiyun 	queue->state_change = queue->sock->sk->sk_state_change;
1545*4882a593Smuzhiyun 	queue->data_ready = queue->sock->sk->sk_data_ready;
1546*4882a593Smuzhiyun 	queue->write_space = queue->sock->sk->sk_write_space;
1547*4882a593Smuzhiyun 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1548*4882a593Smuzhiyun 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1549*4882a593Smuzhiyun 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1550*4882a593Smuzhiyun #ifdef CONFIG_NET_RX_BUSY_POLL
1551*4882a593Smuzhiyun 	queue->sock->sk->sk_ll_usec = 1;
1552*4882a593Smuzhiyun #endif
1553*4882a593Smuzhiyun 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	return 0;
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun err_init_connect:
1558*4882a593Smuzhiyun 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1559*4882a593Smuzhiyun err_rcv_pdu:
1560*4882a593Smuzhiyun 	kfree(queue->pdu);
1561*4882a593Smuzhiyun err_crypto:
1562*4882a593Smuzhiyun 	if (queue->hdr_digest || queue->data_digest)
1563*4882a593Smuzhiyun 		nvme_tcp_free_crypto(queue);
1564*4882a593Smuzhiyun err_sock:
1565*4882a593Smuzhiyun 	sock_release(queue->sock);
1566*4882a593Smuzhiyun 	queue->sock = NULL;
1567*4882a593Smuzhiyun err_destroy_mutex:
1568*4882a593Smuzhiyun 	mutex_destroy(&queue->queue_lock);
1569*4882a593Smuzhiyun 	return ret;
1570*4882a593Smuzhiyun }
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun 	struct socket *sock = queue->sock;
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	write_lock_bh(&sock->sk->sk_callback_lock);
1577*4882a593Smuzhiyun 	sock->sk->sk_user_data  = NULL;
1578*4882a593Smuzhiyun 	sock->sk->sk_data_ready = queue->data_ready;
1579*4882a593Smuzhiyun 	sock->sk->sk_state_change = queue->state_change;
1580*4882a593Smuzhiyun 	sock->sk->sk_write_space  = queue->write_space;
1581*4882a593Smuzhiyun 	write_unlock_bh(&sock->sk->sk_callback_lock);
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1587*4882a593Smuzhiyun 	nvme_tcp_restore_sock_calls(queue);
1588*4882a593Smuzhiyun 	cancel_work_sync(&queue->io_work);
1589*4882a593Smuzhiyun }
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1592*4882a593Smuzhiyun {
1593*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1594*4882a593Smuzhiyun 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	mutex_lock(&queue->queue_lock);
1597*4882a593Smuzhiyun 	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1598*4882a593Smuzhiyun 		__nvme_tcp_stop_queue(queue);
1599*4882a593Smuzhiyun 	mutex_unlock(&queue->queue_lock);
1600*4882a593Smuzhiyun }
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1603*4882a593Smuzhiyun {
1604*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1605*4882a593Smuzhiyun 	int ret;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	if (idx)
1608*4882a593Smuzhiyun 		ret = nvmf_connect_io_queue(nctrl, idx, false);
1609*4882a593Smuzhiyun 	else
1610*4882a593Smuzhiyun 		ret = nvmf_connect_admin_queue(nctrl);
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	if (!ret) {
1613*4882a593Smuzhiyun 		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1614*4882a593Smuzhiyun 	} else {
1615*4882a593Smuzhiyun 		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1616*4882a593Smuzhiyun 			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
1617*4882a593Smuzhiyun 		dev_err(nctrl->device,
1618*4882a593Smuzhiyun 			"failed to connect queue: %d ret=%d\n", idx, ret);
1619*4882a593Smuzhiyun 	}
1620*4882a593Smuzhiyun 	return ret;
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun 
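/*
 * Two tag sets exist per controller: a small admin set (queue depth
 * NVME_AQ_MQ_TAG_DEPTH, with tags reserved for the fabrics connect and
 * keep-alive commands) and the I/O set sized from the negotiated sqsize.
 * Both are BLK_MQ_F_BLOCKING since request submission can block on the
 * queue's send_mutex.
 */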
1623*4882a593Smuzhiyun static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1624*4882a593Smuzhiyun 		bool admin)
1625*4882a593Smuzhiyun {
1626*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1627*4882a593Smuzhiyun 	struct blk_mq_tag_set *set;
1628*4882a593Smuzhiyun 	int ret;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	if (admin) {
1631*4882a593Smuzhiyun 		set = &ctrl->admin_tag_set;
1632*4882a593Smuzhiyun 		memset(set, 0, sizeof(*set));
1633*4882a593Smuzhiyun 		set->ops = &nvme_tcp_admin_mq_ops;
1634*4882a593Smuzhiyun 		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1635*4882a593Smuzhiyun 		set->reserved_tags = 2; /* connect + keep-alive */
1636*4882a593Smuzhiyun 		set->numa_node = nctrl->numa_node;
1637*4882a593Smuzhiyun 		set->flags = BLK_MQ_F_BLOCKING;
1638*4882a593Smuzhiyun 		set->cmd_size = sizeof(struct nvme_tcp_request);
1639*4882a593Smuzhiyun 		set->driver_data = ctrl;
1640*4882a593Smuzhiyun 		set->nr_hw_queues = 1;
1641*4882a593Smuzhiyun 		set->timeout = ADMIN_TIMEOUT;
1642*4882a593Smuzhiyun 	} else {
1643*4882a593Smuzhiyun 		set = &ctrl->tag_set;
1644*4882a593Smuzhiyun 		memset(set, 0, sizeof(*set));
1645*4882a593Smuzhiyun 		set->ops = &nvme_tcp_mq_ops;
1646*4882a593Smuzhiyun 		set->queue_depth = nctrl->sqsize + 1;
1647*4882a593Smuzhiyun 		set->reserved_tags = 1; /* fabric connect */
1648*4882a593Smuzhiyun 		set->numa_node = nctrl->numa_node;
1649*4882a593Smuzhiyun 		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
1650*4882a593Smuzhiyun 		set->cmd_size = sizeof(struct nvme_tcp_request);
1651*4882a593Smuzhiyun 		set->driver_data = ctrl;
1652*4882a593Smuzhiyun 		set->nr_hw_queues = nctrl->queue_count - 1;
1653*4882a593Smuzhiyun 		set->timeout = NVME_IO_TIMEOUT;
1654*4882a593Smuzhiyun 		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1655*4882a593Smuzhiyun 	}
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	ret = blk_mq_alloc_tag_set(set);
1658*4882a593Smuzhiyun 	if (ret)
1659*4882a593Smuzhiyun 		return ERR_PTR(ret);
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	return set;
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1665*4882a593Smuzhiyun {
1666*4882a593Smuzhiyun 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1667*4882a593Smuzhiyun 		cancel_work_sync(&ctrl->async_event_work);
1668*4882a593Smuzhiyun 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1669*4882a593Smuzhiyun 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1670*4882a593Smuzhiyun 	}
1671*4882a593Smuzhiyun 
1672*4882a593Smuzhiyun 	nvme_tcp_free_queue(ctrl, 0);
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1676*4882a593Smuzhiyun {
1677*4882a593Smuzhiyun 	int i;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	for (i = 1; i < ctrl->queue_count; i++)
1680*4882a593Smuzhiyun 		nvme_tcp_free_queue(ctrl, i);
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1684*4882a593Smuzhiyun {
1685*4882a593Smuzhiyun 	int i;
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	for (i = 1; i < ctrl->queue_count; i++)
1688*4882a593Smuzhiyun 		nvme_tcp_stop_queue(ctrl, i);
1689*4882a593Smuzhiyun }
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1692*4882a593Smuzhiyun {
1693*4882a593Smuzhiyun 	int i, ret = 0;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	for (i = 1; i < ctrl->queue_count; i++) {
1696*4882a593Smuzhiyun 		ret = nvme_tcp_start_queue(ctrl, i);
1697*4882a593Smuzhiyun 		if (ret)
1698*4882a593Smuzhiyun 			goto out_stop_queues;
1699*4882a593Smuzhiyun 	}
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	return 0;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun out_stop_queues:
1704*4882a593Smuzhiyun 	for (i--; i >= 1; i--)
1705*4882a593Smuzhiyun 		nvme_tcp_stop_queue(ctrl, i);
1706*4882a593Smuzhiyun 	return ret;
1707*4882a593Smuzhiyun }
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1710*4882a593Smuzhiyun {
1711*4882a593Smuzhiyun 	int ret;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1714*4882a593Smuzhiyun 	if (ret)
1715*4882a593Smuzhiyun 		return ret;
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1718*4882a593Smuzhiyun 	if (ret)
1719*4882a593Smuzhiyun 		goto out_free_queue;
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	return 0;
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun out_free_queue:
1724*4882a593Smuzhiyun 	nvme_tcp_free_queue(ctrl, 0);
1725*4882a593Smuzhiyun 	return ret;
1726*4882a593Smuzhiyun }
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1729*4882a593Smuzhiyun {
1730*4882a593Smuzhiyun 	int i, ret;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	for (i = 1; i < ctrl->queue_count; i++) {
1733*4882a593Smuzhiyun 		ret = nvme_tcp_alloc_queue(ctrl, i,
1734*4882a593Smuzhiyun 				ctrl->sqsize + 1);
1735*4882a593Smuzhiyun 		if (ret)
1736*4882a593Smuzhiyun 			goto out_free_queues;
1737*4882a593Smuzhiyun 	}
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	return 0;
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun out_free_queues:
1742*4882a593Smuzhiyun 	for (i--; i >= 1; i--)
1743*4882a593Smuzhiyun 		nvme_tcp_free_queue(ctrl, i);
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	return ret;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1749*4882a593Smuzhiyun {
1750*4882a593Smuzhiyun 	unsigned int nr_io_queues;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1753*4882a593Smuzhiyun 	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1754*4882a593Smuzhiyun 	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	return nr_io_queues;
1757*4882a593Smuzhiyun }
1758*4882a593Smuzhiyun 
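/*
 * Distribute the granted queue count over the HCTX types.  For example,
 * assuming opts of nr_io_queues=4, nr_write_queues=2, nr_poll_queues=2
 * and 8 granted queues: READ gets 4, DEFAULT gets min(2, 4) = 2, and POLL
 * gets min(2, 2) = 2 from what is left.
 */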
1759*4882a593Smuzhiyun static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1760*4882a593Smuzhiyun 		unsigned int nr_io_queues)
1761*4882a593Smuzhiyun {
1762*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1763*4882a593Smuzhiyun 	struct nvmf_ctrl_options *opts = nctrl->opts;
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1766*4882a593Smuzhiyun 		/*
1767*4882a593Smuzhiyun 		 * separate read/write queues
1768*4882a593Smuzhiyun 		 * hand out dedicated default queues only after we have
1769*4882a593Smuzhiyun 		 * sufficient read queues.
1770*4882a593Smuzhiyun 		 */
1771*4882a593Smuzhiyun 		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1772*4882a593Smuzhiyun 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1773*4882a593Smuzhiyun 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1774*4882a593Smuzhiyun 			min(opts->nr_write_queues, nr_io_queues);
1775*4882a593Smuzhiyun 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1776*4882a593Smuzhiyun 	} else {
1777*4882a593Smuzhiyun 		/*
1778*4882a593Smuzhiyun 		 * shared read/write queues
1779*4882a593Smuzhiyun 		 * either no write queues were requested, or we don't have
1780*4882a593Smuzhiyun 		 * sufficient queue count to have dedicated default queues.
1781*4882a593Smuzhiyun 		 */
1782*4882a593Smuzhiyun 		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1783*4882a593Smuzhiyun 			min(opts->nr_io_queues, nr_io_queues);
1784*4882a593Smuzhiyun 		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1785*4882a593Smuzhiyun 	}
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	if (opts->nr_poll_queues && nr_io_queues) {
1788*4882a593Smuzhiyun 		/* map dedicated poll queues only if we have queues left */
1789*4882a593Smuzhiyun 		ctrl->io_queues[HCTX_TYPE_POLL] =
1790*4882a593Smuzhiyun 			min(opts->nr_poll_queues, nr_io_queues);
1791*4882a593Smuzhiyun 	}
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1795*4882a593Smuzhiyun {
1796*4882a593Smuzhiyun 	unsigned int nr_io_queues;
1797*4882a593Smuzhiyun 	int ret;
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1800*4882a593Smuzhiyun 	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1801*4882a593Smuzhiyun 	if (ret)
1802*4882a593Smuzhiyun 		return ret;
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 	if (nr_io_queues == 0) {
1805*4882a593Smuzhiyun 		dev_err(ctrl->device,
1806*4882a593Smuzhiyun 			"unable to set any I/O queues\n");
1807*4882a593Smuzhiyun 		return -ENOMEM;
1808*4882a593Smuzhiyun 	}
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	ctrl->queue_count = nr_io_queues + 1;
1811*4882a593Smuzhiyun 	dev_info(ctrl->device,
1812*4882a593Smuzhiyun 		"creating %d I/O queues.\n", nr_io_queues);
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	return __nvme_tcp_alloc_io_queues(ctrl);
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1820*4882a593Smuzhiyun {
1821*4882a593Smuzhiyun 	nvme_tcp_stop_io_queues(ctrl);
1822*4882a593Smuzhiyun 	if (remove) {
1823*4882a593Smuzhiyun 		blk_cleanup_queue(ctrl->connect_q);
1824*4882a593Smuzhiyun 		blk_mq_free_tag_set(ctrl->tagset);
1825*4882a593Smuzhiyun 	}
1826*4882a593Smuzhiyun 	nvme_tcp_free_io_queues(ctrl);
1827*4882a593Smuzhiyun }
1828*4882a593Smuzhiyun 
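/*
 * I/O queue bring-up.  On a fresh controller (new == true) this also
 * allocates the tag set and connect_q; on reset/reconnect it reuses them,
 * unfreezes the namespace queues, and propagates a possibly changed queue
 * count via blk_mq_update_nr_hw_queues().
 */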
1829*4882a593Smuzhiyun static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1830*4882a593Smuzhiyun {
1831*4882a593Smuzhiyun 	int ret;
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	ret = nvme_tcp_alloc_io_queues(ctrl);
1834*4882a593Smuzhiyun 	if (ret)
1835*4882a593Smuzhiyun 		return ret;
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun 	if (new) {
1838*4882a593Smuzhiyun 		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1839*4882a593Smuzhiyun 		if (IS_ERR(ctrl->tagset)) {
1840*4882a593Smuzhiyun 			ret = PTR_ERR(ctrl->tagset);
1841*4882a593Smuzhiyun 			goto out_free_io_queues;
1842*4882a593Smuzhiyun 		}
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1845*4882a593Smuzhiyun 		if (IS_ERR(ctrl->connect_q)) {
1846*4882a593Smuzhiyun 			ret = PTR_ERR(ctrl->connect_q);
1847*4882a593Smuzhiyun 			goto out_free_tag_set;
1848*4882a593Smuzhiyun 		}
1849*4882a593Smuzhiyun 	}
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	ret = nvme_tcp_start_io_queues(ctrl);
1852*4882a593Smuzhiyun 	if (ret)
1853*4882a593Smuzhiyun 		goto out_cleanup_connect_q;
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	if (!new) {
1856*4882a593Smuzhiyun 		nvme_start_queues(ctrl);
1857*4882a593Smuzhiyun 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1858*4882a593Smuzhiyun 			/*
1859*4882a593Smuzhiyun 			 * If we timed out waiting for freeze we are likely to
1860*4882a593Smuzhiyun 			 * be stuck.  Fail the controller initialization just
1861*4882a593Smuzhiyun 			 * to be safe.
1862*4882a593Smuzhiyun 			 */
1863*4882a593Smuzhiyun 			ret = -ENODEV;
1864*4882a593Smuzhiyun 			goto out_wait_freeze_timed_out;
1865*4882a593Smuzhiyun 		}
1866*4882a593Smuzhiyun 		blk_mq_update_nr_hw_queues(ctrl->tagset,
1867*4882a593Smuzhiyun 			ctrl->queue_count - 1);
1868*4882a593Smuzhiyun 		nvme_unfreeze(ctrl);
1869*4882a593Smuzhiyun 	}
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	return 0;
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun out_wait_freeze_timed_out:
1874*4882a593Smuzhiyun 	nvme_stop_queues(ctrl);
1875*4882a593Smuzhiyun 	nvme_sync_io_queues(ctrl);
1876*4882a593Smuzhiyun 	nvme_tcp_stop_io_queues(ctrl);
1877*4882a593Smuzhiyun out_cleanup_connect_q:
1878*4882a593Smuzhiyun 	nvme_cancel_tagset(ctrl);
1879*4882a593Smuzhiyun 	if (new)
1880*4882a593Smuzhiyun 		blk_cleanup_queue(ctrl->connect_q);
1881*4882a593Smuzhiyun out_free_tag_set:
1882*4882a593Smuzhiyun 	if (new)
1883*4882a593Smuzhiyun 		blk_mq_free_tag_set(ctrl->tagset);
1884*4882a593Smuzhiyun out_free_io_queues:
1885*4882a593Smuzhiyun 	nvme_tcp_free_io_queues(ctrl);
1886*4882a593Smuzhiyun 	return ret;
1887*4882a593Smuzhiyun }
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1890*4882a593Smuzhiyun {
1891*4882a593Smuzhiyun 	nvme_tcp_stop_queue(ctrl, 0);
1892*4882a593Smuzhiyun 	if (remove) {
1893*4882a593Smuzhiyun 		blk_cleanup_queue(ctrl->admin_q);
1894*4882a593Smuzhiyun 		blk_cleanup_queue(ctrl->fabrics_q);
1895*4882a593Smuzhiyun 		blk_mq_free_tag_set(ctrl->admin_tagset);
1896*4882a593Smuzhiyun 	}
1897*4882a593Smuzhiyun 	nvme_tcp_free_admin_queue(ctrl);
1898*4882a593Smuzhiyun }
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1901*4882a593Smuzhiyun {
1902*4882a593Smuzhiyun 	int error;
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	error = nvme_tcp_alloc_admin_queue(ctrl);
1905*4882a593Smuzhiyun 	if (error)
1906*4882a593Smuzhiyun 		return error;
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	if (new) {
1909*4882a593Smuzhiyun 		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1910*4882a593Smuzhiyun 		if (IS_ERR(ctrl->admin_tagset)) {
1911*4882a593Smuzhiyun 			error = PTR_ERR(ctrl->admin_tagset);
1912*4882a593Smuzhiyun 			goto out_free_queue;
1913*4882a593Smuzhiyun 		}
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1916*4882a593Smuzhiyun 		if (IS_ERR(ctrl->fabrics_q)) {
1917*4882a593Smuzhiyun 			error = PTR_ERR(ctrl->fabrics_q);
1918*4882a593Smuzhiyun 			goto out_free_tagset;
1919*4882a593Smuzhiyun 		}
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1922*4882a593Smuzhiyun 		if (IS_ERR(ctrl->admin_q)) {
1923*4882a593Smuzhiyun 			error = PTR_ERR(ctrl->admin_q);
1924*4882a593Smuzhiyun 			goto out_cleanup_fabrics_q;
1925*4882a593Smuzhiyun 		}
1926*4882a593Smuzhiyun 	}
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	error = nvme_tcp_start_queue(ctrl, 0);
1929*4882a593Smuzhiyun 	if (error)
1930*4882a593Smuzhiyun 		goto out_cleanup_queue;
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 	error = nvme_enable_ctrl(ctrl);
1933*4882a593Smuzhiyun 	if (error)
1934*4882a593Smuzhiyun 		goto out_stop_queue;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	blk_mq_unquiesce_queue(ctrl->admin_q);
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	error = nvme_init_identify(ctrl);
1939*4882a593Smuzhiyun 	if (error)
1940*4882a593Smuzhiyun 		goto out_quiesce_queue;
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	return 0;
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun out_quiesce_queue:
1945*4882a593Smuzhiyun 	blk_mq_quiesce_queue(ctrl->admin_q);
1946*4882a593Smuzhiyun 	blk_sync_queue(ctrl->admin_q);
1947*4882a593Smuzhiyun out_stop_queue:
1948*4882a593Smuzhiyun 	nvme_tcp_stop_queue(ctrl, 0);
1949*4882a593Smuzhiyun 	nvme_cancel_admin_tagset(ctrl);
1950*4882a593Smuzhiyun out_cleanup_queue:
1951*4882a593Smuzhiyun 	if (new)
1952*4882a593Smuzhiyun 		blk_cleanup_queue(ctrl->admin_q);
1953*4882a593Smuzhiyun out_cleanup_fabrics_q:
1954*4882a593Smuzhiyun 	if (new)
1955*4882a593Smuzhiyun 		blk_cleanup_queue(ctrl->fabrics_q);
1956*4882a593Smuzhiyun out_free_tagset:
1957*4882a593Smuzhiyun 	if (new)
1958*4882a593Smuzhiyun 		blk_mq_free_tag_set(ctrl->admin_tagset);
1959*4882a593Smuzhiyun out_free_queue:
1960*4882a593Smuzhiyun 	nvme_tcp_free_admin_queue(ctrl);
1961*4882a593Smuzhiyun 	return error;
1962*4882a593Smuzhiyun }
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1965*4882a593Smuzhiyun 		bool remove)
1966*4882a593Smuzhiyun {
1967*4882a593Smuzhiyun 	blk_mq_quiesce_queue(ctrl->admin_q);
1968*4882a593Smuzhiyun 	blk_sync_queue(ctrl->admin_q);
1969*4882a593Smuzhiyun 	nvme_tcp_stop_queue(ctrl, 0);
1970*4882a593Smuzhiyun 	if (ctrl->admin_tagset) {
1971*4882a593Smuzhiyun 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1972*4882a593Smuzhiyun 			nvme_cancel_request, ctrl);
1973*4882a593Smuzhiyun 		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1974*4882a593Smuzhiyun 	}
1975*4882a593Smuzhiyun 	if (remove)
1976*4882a593Smuzhiyun 		blk_mq_unquiesce_queue(ctrl->admin_q);
1977*4882a593Smuzhiyun 	nvme_tcp_destroy_admin_queue(ctrl, remove);
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1981*4882a593Smuzhiyun 		bool remove)
1982*4882a593Smuzhiyun {
1983*4882a593Smuzhiyun 	if (ctrl->queue_count <= 1)
1984*4882a593Smuzhiyun 		return;
1985*4882a593Smuzhiyun 	blk_mq_quiesce_queue(ctrl->admin_q);
1986*4882a593Smuzhiyun 	nvme_start_freeze(ctrl);
1987*4882a593Smuzhiyun 	nvme_stop_queues(ctrl);
1988*4882a593Smuzhiyun 	nvme_sync_io_queues(ctrl);
1989*4882a593Smuzhiyun 	nvme_tcp_stop_io_queues(ctrl);
1990*4882a593Smuzhiyun 	if (ctrl->tagset) {
1991*4882a593Smuzhiyun 		blk_mq_tagset_busy_iter(ctrl->tagset,
1992*4882a593Smuzhiyun 			nvme_cancel_request, ctrl);
1993*4882a593Smuzhiyun 		blk_mq_tagset_wait_completed_request(ctrl->tagset);
1994*4882a593Smuzhiyun 	}
1995*4882a593Smuzhiyun 	if (remove)
1996*4882a593Smuzhiyun 		nvme_start_queues(ctrl);
1997*4882a593Smuzhiyun 	nvme_tcp_destroy_io_queues(ctrl, remove);
1998*4882a593Smuzhiyun }
1999*4882a593Smuzhiyun 
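/*
 * Called when a (re)connect attempt failed or the connection dropped:
 * either schedule another attempt after reconnect_delay or, once
 * nvmf_should_reconnect() gives up, delete the controller.
 */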
2000*4882a593Smuzhiyun static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2001*4882a593Smuzhiyun {
2002*4882a593Smuzhiyun 	/* If we are resetting/deleting then do nothing */
2003*4882a593Smuzhiyun 	if (ctrl->state != NVME_CTRL_CONNECTING) {
2004*4882a593Smuzhiyun 		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2005*4882a593Smuzhiyun 			ctrl->state == NVME_CTRL_LIVE);
2006*4882a593Smuzhiyun 		return;
2007*4882a593Smuzhiyun 	}
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	if (nvmf_should_reconnect(ctrl)) {
2010*4882a593Smuzhiyun 		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2011*4882a593Smuzhiyun 			ctrl->opts->reconnect_delay);
2012*4882a593Smuzhiyun 		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2013*4882a593Smuzhiyun 				ctrl->opts->reconnect_delay * HZ);
2014*4882a593Smuzhiyun 	} else {
2015*4882a593Smuzhiyun 		dev_info(ctrl->device, "Removing controller...\n");
2016*4882a593Smuzhiyun 		nvme_delete_ctrl(ctrl);
2017*4882a593Smuzhiyun 	}
2018*4882a593Smuzhiyun }
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2021*4882a593Smuzhiyun {
2022*4882a593Smuzhiyun 	struct nvmf_ctrl_options *opts = ctrl->opts;
2023*4882a593Smuzhiyun 	int ret;
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
2026*4882a593Smuzhiyun 	if (ret)
2027*4882a593Smuzhiyun 		return ret;
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	if (ctrl->icdoff) {
2030*4882a593Smuzhiyun 		dev_err(ctrl->device, "icdoff is not supported!\n");
		ret = -EOPNOTSUPP; /* don't return success after tearing down */
2031*4882a593Smuzhiyun 		goto destroy_admin;
2032*4882a593Smuzhiyun 	}
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	if (opts->queue_size > ctrl->sqsize + 1)
2035*4882a593Smuzhiyun 		dev_warn(ctrl->device,
2036*4882a593Smuzhiyun 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
2037*4882a593Smuzhiyun 			opts->queue_size, ctrl->sqsize + 1);
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2040*4882a593Smuzhiyun 		dev_warn(ctrl->device,
2041*4882a593Smuzhiyun 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
2042*4882a593Smuzhiyun 			ctrl->sqsize + 1, ctrl->maxcmd);
2043*4882a593Smuzhiyun 		ctrl->sqsize = ctrl->maxcmd - 1;
2044*4882a593Smuzhiyun 	}
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	if (ctrl->queue_count > 1) {
2047*4882a593Smuzhiyun 		ret = nvme_tcp_configure_io_queues(ctrl, new);
2048*4882a593Smuzhiyun 		if (ret)
2049*4882a593Smuzhiyun 			goto destroy_admin;
2050*4882a593Smuzhiyun 	}
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2053*4882a593Smuzhiyun 		/*
2054*4882a593Smuzhiyun 		 * state change failure is ok if we started ctrl delete,
2055*4882a593Smuzhiyun 		 * unless we're during creation of a new controller to
2056*4882a593Smuzhiyun 		 * avoid races with teardown flow.
2057*4882a593Smuzhiyun 		 */
2058*4882a593Smuzhiyun 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2059*4882a593Smuzhiyun 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
2060*4882a593Smuzhiyun 		WARN_ON_ONCE(new);
2061*4882a593Smuzhiyun 		ret = -EINVAL;
2062*4882a593Smuzhiyun 		goto destroy_io;
2063*4882a593Smuzhiyun 	}
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	nvme_start_ctrl(ctrl);
2066*4882a593Smuzhiyun 	return 0;
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun destroy_io:
2069*4882a593Smuzhiyun 	if (ctrl->queue_count > 1) {
2070*4882a593Smuzhiyun 		nvme_stop_queues(ctrl);
2071*4882a593Smuzhiyun 		nvme_sync_io_queues(ctrl);
2072*4882a593Smuzhiyun 		nvme_tcp_stop_io_queues(ctrl);
2073*4882a593Smuzhiyun 		nvme_cancel_tagset(ctrl);
2074*4882a593Smuzhiyun 		nvme_tcp_destroy_io_queues(ctrl, new);
2075*4882a593Smuzhiyun 	}
2076*4882a593Smuzhiyun destroy_admin:
2077*4882a593Smuzhiyun 	blk_mq_quiesce_queue(ctrl->admin_q);
2078*4882a593Smuzhiyun 	blk_sync_queue(ctrl->admin_q);
2079*4882a593Smuzhiyun 	nvme_tcp_stop_queue(ctrl, 0);
2080*4882a593Smuzhiyun 	nvme_cancel_admin_tagset(ctrl);
2081*4882a593Smuzhiyun 	nvme_tcp_destroy_admin_queue(ctrl, new);
2082*4882a593Smuzhiyun 	return ret;
2083*4882a593Smuzhiyun }
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2086*4882a593Smuzhiyun {
2087*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2088*4882a593Smuzhiyun 			struct nvme_tcp_ctrl, connect_work);
2089*4882a593Smuzhiyun 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	++ctrl->nr_reconnects;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	if (nvme_tcp_setup_ctrl(ctrl, false))
2094*4882a593Smuzhiyun 		goto requeue;
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2097*4882a593Smuzhiyun 			ctrl->nr_reconnects);
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun 	ctrl->nr_reconnects = 0;
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	return;
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun requeue:
2104*4882a593Smuzhiyun 	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2105*4882a593Smuzhiyun 			ctrl->nr_reconnects);
2106*4882a593Smuzhiyun 	nvme_tcp_reconnect_or_remove(ctrl);
2107*4882a593Smuzhiyun }
2108*4882a593Smuzhiyun 
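/*
 * Error recovery: quiesce and tear down all queues, fast-fail anything
 * that was in flight, then transition to CONNECTING so the reconnect
 * machinery can rebuild the association from scratch.
 */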
2109*4882a593Smuzhiyun static void nvme_tcp_error_recovery_work(struct work_struct *work)
2110*4882a593Smuzhiyun {
2111*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2112*4882a593Smuzhiyun 				struct nvme_tcp_ctrl, err_work);
2113*4882a593Smuzhiyun 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	nvme_stop_keep_alive(ctrl);
2116*4882a593Smuzhiyun 	flush_work(&ctrl->async_event_work);
2117*4882a593Smuzhiyun 	nvme_tcp_teardown_io_queues(ctrl, false);
2118*4882a593Smuzhiyun 	/* unquiesce to fail fast pending requests */
2119*4882a593Smuzhiyun 	nvme_start_queues(ctrl);
2120*4882a593Smuzhiyun 	nvme_tcp_teardown_admin_queue(ctrl, false);
2121*4882a593Smuzhiyun 	blk_mq_unquiesce_queue(ctrl->admin_q);
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2124*4882a593Smuzhiyun 		/* state change failure is ok if we started ctrl delete */
2125*4882a593Smuzhiyun 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2126*4882a593Smuzhiyun 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
2127*4882a593Smuzhiyun 		return;
2128*4882a593Smuzhiyun 	}
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	nvme_tcp_reconnect_or_remove(ctrl);
2131*4882a593Smuzhiyun }
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2134*4882a593Smuzhiyun {
2135*4882a593Smuzhiyun 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
2136*4882a593Smuzhiyun 	blk_mq_quiesce_queue(ctrl->admin_q);
2137*4882a593Smuzhiyun 	if (shutdown)
2138*4882a593Smuzhiyun 		nvme_shutdown_ctrl(ctrl);
2139*4882a593Smuzhiyun 	else
2140*4882a593Smuzhiyun 		nvme_disable_ctrl(ctrl);
2141*4882a593Smuzhiyun 	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2142*4882a593Smuzhiyun }
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2145*4882a593Smuzhiyun {
2146*4882a593Smuzhiyun 	nvme_tcp_teardown_ctrl(ctrl, true);
2147*4882a593Smuzhiyun }
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun static void nvme_reset_ctrl_work(struct work_struct *work)
2150*4882a593Smuzhiyun {
2151*4882a593Smuzhiyun 	struct nvme_ctrl *ctrl =
2152*4882a593Smuzhiyun 		container_of(work, struct nvme_ctrl, reset_work);
2153*4882a593Smuzhiyun 
2154*4882a593Smuzhiyun 	nvme_stop_ctrl(ctrl);
2155*4882a593Smuzhiyun 	nvme_tcp_teardown_ctrl(ctrl, false);
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2158*4882a593Smuzhiyun 		/* state change failure is ok if we started ctrl delete */
2159*4882a593Smuzhiyun 		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2160*4882a593Smuzhiyun 			     ctrl->state != NVME_CTRL_DELETING_NOIO);
2161*4882a593Smuzhiyun 		return;
2162*4882a593Smuzhiyun 	}
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	if (nvme_tcp_setup_ctrl(ctrl, false))
2165*4882a593Smuzhiyun 		goto out_fail;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	return;
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun out_fail:
2170*4882a593Smuzhiyun 	++ctrl->nr_reconnects;
2171*4882a593Smuzhiyun 	nvme_tcp_reconnect_or_remove(ctrl);
2172*4882a593Smuzhiyun }
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2175*4882a593Smuzhiyun {
2176*4882a593Smuzhiyun 	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2177*4882a593Smuzhiyun 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2181*4882a593Smuzhiyun {
2182*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 	if (list_empty(&ctrl->list))
2185*4882a593Smuzhiyun 		goto free_ctrl;
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	mutex_lock(&nvme_tcp_ctrl_mutex);
2188*4882a593Smuzhiyun 	list_del(&ctrl->list);
2189*4882a593Smuzhiyun 	mutex_unlock(&nvme_tcp_ctrl_mutex);
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	nvmf_free_options(nctrl->opts);
2192*4882a593Smuzhiyun free_ctrl:
2193*4882a593Smuzhiyun 	kfree(ctrl->queues);
2194*4882a593Smuzhiyun 	kfree(ctrl);
2195*4882a593Smuzhiyun }
2196*4882a593Smuzhiyun 
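/*
 * SGL helpers for the three NVMe/TCP data layouts: no data (null
 * descriptor), in-capsule data placed after the command at the
 * controller's icdoff, and data transferred separately (host data block),
 * each encoded in the command's dptr as a transport SGL descriptor.
 */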
2197*4882a593Smuzhiyun static void nvme_tcp_set_sg_null(struct nvme_command *c)
2198*4882a593Smuzhiyun {
2199*4882a593Smuzhiyun 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 	sg->addr = 0;
2202*4882a593Smuzhiyun 	sg->length = 0;
2203*4882a593Smuzhiyun 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2204*4882a593Smuzhiyun 			NVME_SGL_FMT_TRANSPORT_A;
2205*4882a593Smuzhiyun }
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2208*4882a593Smuzhiyun 		struct nvme_command *c, u32 data_len)
2209*4882a593Smuzhiyun {
2210*4882a593Smuzhiyun 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2213*4882a593Smuzhiyun 	sg->length = cpu_to_le32(data_len);
2214*4882a593Smuzhiyun 	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2215*4882a593Smuzhiyun }
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2218*4882a593Smuzhiyun 		u32 data_len)
2219*4882a593Smuzhiyun {
2220*4882a593Smuzhiyun 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	sg->addr = 0;
2223*4882a593Smuzhiyun 	sg->length = cpu_to_le32(data_len);
2224*4882a593Smuzhiyun 	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2225*4882a593Smuzhiyun 			NVME_SGL_FMT_TRANSPORT_A;
2226*4882a593Smuzhiyun }
2227*4882a593Smuzhiyun 
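/*
 * Async event (AER) commands don't come from the block layer, so they use
 * the preallocated async_req and the out-of-band command id
 * NVME_AQ_BLK_MQ_DEPTH instead of a blk-mq tag.
 */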
2228*4882a593Smuzhiyun static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2229*4882a593Smuzhiyun {
2230*4882a593Smuzhiyun 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2231*4882a593Smuzhiyun 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
2232*4882a593Smuzhiyun 	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2233*4882a593Smuzhiyun 	struct nvme_command *cmd = &pdu->cmd;
2234*4882a593Smuzhiyun 	u8 hdgst = nvme_tcp_hdgst_len(queue);
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	memset(pdu, 0, sizeof(*pdu));
2237*4882a593Smuzhiyun 	pdu->hdr.type = nvme_tcp_cmd;
2238*4882a593Smuzhiyun 	if (queue->hdr_digest)
2239*4882a593Smuzhiyun 		pdu->hdr.flags |= NVME_TCP_F_HDGST;
2240*4882a593Smuzhiyun 	pdu->hdr.hlen = sizeof(*pdu);
2241*4882a593Smuzhiyun 	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	cmd->common.opcode = nvme_admin_async_event;
2244*4882a593Smuzhiyun 	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2245*4882a593Smuzhiyun 	cmd->common.flags |= NVME_CMD_SGL_METABUF;
2246*4882a593Smuzhiyun 	nvme_tcp_set_sg_null(cmd);
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2249*4882a593Smuzhiyun 	ctrl->async_req.offset = 0;
2250*4882a593Smuzhiyun 	ctrl->async_req.curr_bio = NULL;
2251*4882a593Smuzhiyun 	ctrl->async_req.data_len = 0;
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	nvme_tcp_queue_request(&ctrl->async_req, true, true);
2254*4882a593Smuzhiyun }
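
/*
 * Worked PDU-size example, assuming the usual layout (8-byte common
 * header plus a 64-byte SQE, i.e. sizeof(*pdu) == 72) and a 4-byte
 * CRC32C header digest: the async event command carries no data, so
 * plen = hlen + hdgst = 72 + 4 = 76 bytes on the wire.
 */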

static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
}

static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	dev_warn(ctrl->device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete the request immediately, because it may block
		 * the controller teardown or setup sequence:
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine to fail them here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}
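
/*
 * Note on the WRITE-only inline branch above: in-capsule data flows
 * host-to-controller inside the command PDU, so only writes can use
 * it. READ payloads always arrive later in C2HData PDUs, which is why
 * reads take the transport SGL ("host data") form regardless of size.
 */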

static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;
	else if (req->curr_bio)
		nvme_tcp_init_iter(req, READ);

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}
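
/*
 * Worked header arithmetic for an inline 4KB WRITE with both digests
 * enabled, assuming sizeof(*pdu) == 72 and 4-byte CRC32C digests:
 *   hlen = 72
 *   pdo  = 72 + 4 = 76        (data starts after the header + HDGST)
 *   plen = 72 + 4 + 4096 + 4 = 4176 bytes total on the wire.
 * A READ carries no inline data, so pdo = 0 and plen = hlen + hdgst.
 */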

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}
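
/*
 * Batching sketch: blk-mq may dispatch several requests back to back,
 * setting bd->last only on the final one. nvme_tcp_queue_request()
 * uses that flag to defer waking queue->io_work until the batch is
 * complete, and nvme_tcp_commit_rqs() above is the fallback kick for
 * batches that end without a bd->last dispatch (e.g. after a busy
 * requeue), so queued PDUs are never left stranded.
 */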

static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}
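
/*
 * Worked mapping example (hypothetical option values): with
 * nr_io_queues=4, nr_write_queues=4 and nr_poll_queues=2 the
 * controller ends up with 10 I/O queues laid out as
 *   DEFAULT (writes): nr_queues=4, queue_offset=0
 *   READ:             nr_queues=4, queue_offset=4
 *   POLL:             nr_queues=2, queue_offset=8
 * and the dev_info above prints "mapped 4/4/2 default/read/poll
 * queues." Without nr_write_queues, DEFAULT and READ share the same
 * queues at offset 0.
 */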

static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}
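
/*
 * Usage note: this callback only runs for queues mapped as
 * HCTX_TYPE_POLL above, i.e. when the controller was connected with
 * nr_poll_queues > 0 and the application issues polled I/O (for
 * example preadv2() with RWF_HIPRI on the block device). Instead of
 * waiting for the socket's data_ready callback, it busy-loops on the
 * socket and reaps completions directly via nvme_tcp_try_recv().
 */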

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
	.stop_ctrl		= nvme_tcp_stop_ctrl,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}
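
/*
 * Queue accounting example (hypothetical option values): connecting
 * with nr_io_queues=8, nr_write_queues=0 and nr_poll_queues=2 gives
 * queue_count = 8 + 0 + 2 + 1 = 11, where queues[0] is the admin
 * queue and queues[1..10] are I/O queues. sqsize is queue_size - 1
 * because NVMe submission queue sizes are 0's based values.
 */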

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS,
	.create_ctrl	= nvme_tcp_create_ctrl,
};
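
/*
 * Illustrative nvme-cli invocation exercising these options (the
 * address, port, NQN and queue counts below are placeholders, and
 * flag spellings may vary between nvme-cli versions):
 *
 *   nvme connect -t tcp -a 192.168.0.10 -s 4420 \
 *        -n nqn.2014-08.org.nvmexpress:uuid:example \
 *        --nr-write-queues=4 --hdr-digest --data-digest
 */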

static int __init nvme_tcp_init_module(void)
{
	int ret;

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	/* unwind the workqueue allocation if transport registration fails */
	ret = nvmf_register_transport(&nvme_tcp_transport);
	if (ret) {
		destroy_workqueue(nvme_tcp_wq);
		return ret;
	}

	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");