1 /*
2  * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver.
3  *
4  * Copyright (c) 2006 - 2013 Broadcom Corporation
5  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
6  * Copyright (c) 2007, 2008 Mike Christie
7  * Copyright (c) 2014, QLogic Corporation
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation.
12  *
13  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
14  * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
15  * Maintained by: QLogic-Storage-Upstream@qlogic.com
16  */
17 
18 #include <linux/slab.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/libiscsi.h>
21 #include "bnx2i.h"
22 
23 struct scsi_transport_template *bnx2i_scsi_xport_template;
24 struct iscsi_transport bnx2i_iscsi_transport;
25 static struct scsi_host_template bnx2i_host_template;
26 
27 /*
28  * Global endpoint resource info
29  */
30 static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
31 
32 DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
33 
34 static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
35 {
36 	int retval = 0;
37 
38 	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
39 	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
40 	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
41 		retval = -EPERM;
42 	return retval;
43 }
44 
45 /**
46  * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
47  * @cmd:		iscsi cmd struct pointer
48  * @buf_off:		absolute buffer offset
49  * @start_bd_off:	u32 pointer to return the offset within the BD
50  *			indicated by 'start_bd_idx' on which 'buf_off' falls
51  * @start_bd_idx:	index of the BD on which 'buf_off' falls
52  *
53  * identifies & marks various bd info for scsi command's imm data,
54  * unsolicited data and the first solicited data seq.
55  */
56 static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
57 				       u32 *start_bd_off, u32 *start_bd_idx)
58 {
59 	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
60 	u32 cur_offset = 0;
61 	u32 cur_bd_idx = 0;
62 
63 	if (buf_off) {
64 		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
65 			cur_offset += bd_tbl->buffer_length;
66 			cur_bd_idx++;
67 			bd_tbl++;
68 		}
69 	}
70 
71 	*start_bd_off = buf_off - cur_offset;
72 	*start_bd_idx = cur_bd_idx;
73 }
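
/*
 * Illustrative example (not from the original source): with a BD table of
 * three buffers of length 4096, 4096 and 2048 and buf_off = 5120, the walk
 * in bnx2i_get_write_cmd_bd_idx() stops at the second BD, so
 * *start_bd_idx = 1 and *start_bd_off = 5120 - 4096 = 1024.
 */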
74 
75 /**
76  * bnx2i_setup_write_cmd_bd_info - sets up various BD information
77  * @task:	transport layer's cmd struct pointer
78  *
79  * identifies & marks various bd info for scsi command's immediate data,
80  * unsolicited data and first solicited data seq which includes BD start
81  * index & BD buf off. This function takes into account iscsi parameters such
82  * as whether immediate data and unsolicited data are supported on this connection.
83  */
84 static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
85 {
86 	struct bnx2i_cmd *cmd = task->dd_data;
87 	u32 start_bd_offset;
88 	u32 start_bd_idx;
89 	u32 buffer_offset = 0;
90 	u32 cmd_len = cmd->req.total_data_transfer_length;
91 
92 	/* if ImmediateData is turned off & InitialR2T is turned on,
93 	 * there will be no immediate or unsolicited data, just return.
94 	 */
95 	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
96 		return;
97 
98 	/* Immediate data */
99 	buffer_offset += task->imm_count;
100 	if (task->imm_count == cmd_len)
101 		return;
102 
103 	if (iscsi_task_has_unsol_data(task)) {
104 		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
105 					   &start_bd_offset, &start_bd_idx);
106 		cmd->req.ud_buffer_offset = start_bd_offset;
107 		cmd->req.ud_start_bd_index = start_bd_idx;
108 		buffer_offset += task->unsol_r2t.data_length;
109 	}
110 
111 	if (buffer_offset != cmd_len) {
112 		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
113 					   &start_bd_offset, &start_bd_idx);
114 		if ((start_bd_offset > task->conn->session->first_burst) ||
115 		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
116 			int i = 0;
117 
118 			iscsi_conn_printk(KERN_ALERT, task->conn,
119 					  "bnx2i- error, buf offset 0x%x "
120 					  "bd_valid %d use_sg %d\n",
121 					  buffer_offset, cmd->io_tbl.bd_valid,
122 					  scsi_sg_count(cmd->scsi_cmd));
123 			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
124 				iscsi_conn_printk(KERN_ALERT, task->conn,
125 						  "bnx2i err, bd[%d]: len %x\n",
126 						  i, cmd->io_tbl.bd_tbl[i].\
127 						  buffer_length);
128 		}
129 		cmd->req.sd_buffer_offset = start_bd_offset;
130 		cmd->req.sd_start_bd_index = start_bd_idx;
131 	}
132 }
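
/*
 * Note on the offset accounting above: a WRITE stream is laid out as
 * [immediate data][unsolicited data-out][first solicited burst], so the
 * unsolicited bookmark (ud_*) starts at imm_count and the solicited
 * bookmark (sd_*) at imm_count + unsol_r2t.data_length, each translated
 * into a BD index/offset pair by bnx2i_get_write_cmd_bd_idx().
 */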
133 
134 
135 
136 /**
137  * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
138  * @hba:	adapter instance
139  * @cmd:	iscsi cmd struct pointer
140  *
141  * map SG list
142  */
143 static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
144 {
145 	struct scsi_cmnd *sc = cmd->scsi_cmd;
146 	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
147 	struct scatterlist *sg;
148 	int byte_count = 0;
149 	int bd_count = 0;
150 	int sg_count;
151 	int sg_len;
152 	u64 addr;
153 	int i;
154 
155 	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
156 
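	/*
	 * scsi_dma_map() returns the number of DMA-mapped SG elements (which
	 * may be fewer than scsi_sg_count() if segments were merged), 0 for
	 * a command that carries no data, or a negative errno on failure.
	 */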
157 	sg_count = scsi_dma_map(sc);
158 
159 	scsi_for_each_sg(sc, sg, sg_count, i) {
160 		sg_len = sg_dma_len(sg);
161 		addr = (u64) sg_dma_address(sg);
162 		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
163 		bd[bd_count].buffer_addr_hi = addr >> 32;
164 		bd[bd_count].buffer_length = sg_len;
165 		bd[bd_count].flags = 0;
166 		if (bd_count == 0)
167 			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
168 
169 		byte_count += sg_len;
170 		bd_count++;
171 	}
172 
173 	if (bd_count)
174 		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
175 
176 	BUG_ON(byte_count != scsi_bufflen(sc));
177 	return bd_count;
178 }
179 
180 /**
181  * bnx2i_iscsi_map_sg_list - maps SG list
182  * @cmd:	iscsi cmd struct pointer
183  *
184  * creates BD list table for the command
185  */
186 static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
187 {
188 	int bd_count;
189 
190 	bd_count  = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
191 	if (!bd_count) {
192 		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
193 
194 		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
195 		bd[0].buffer_length = bd[0].flags = 0;
196 	}
197 	cmd->io_tbl.bd_valid = bd_count;
198 }
199 
200 
201 /**
202  * bnx2i_iscsi_unmap_sg_list - unmaps SG list
203  * @cmd:	iscsi cmd struct pointer
204  *
205  * unmap IO buffers and invalidate the BD table
206  */
207 void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
208 {
209 	struct scsi_cmnd *sc = cmd->scsi_cmd;
210 
211 	if (cmd->io_tbl.bd_valid && sc) {
212 		scsi_dma_unmap(sc);
213 		cmd->io_tbl.bd_valid = 0;
214 	}
215 }
216 
217 static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
218 {
219 	memset(&cmd->req, 0x00, sizeof(cmd->req));
220 	cmd->req.op_code = 0xFF;
221 	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
222 	cmd->req.bd_list_addr_hi =
223 		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
224 
225 }
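
/*
 * Note: the 0xFF op_code written above is only a placeholder; callers such
 * as bnx2i_task_xmit() overwrite it with the real opcode (e.g.
 * ISCSI_OP_SCSI_CMD) before the request is posted.
 */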
226 
227 
228 /**
229  * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
230  * @hba:	pointer to adapter instance
231  * @bnx2i_conn:	pointer to iscsi connection
232  * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
233  *
234  * update iscsi cid table entry with connection pointer. This enables
235  *	driver to quickly get hold of connection structure pointer in
236  *	completion/interrupt thread using iscsi context ID
237  */
238 static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
239 					struct bnx2i_conn *bnx2i_conn,
240 					u32 iscsi_cid)
241 {
242 	if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
243 		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
244 				 "conn bind - entry #%d not free\n", iscsi_cid);
245 		return -EBUSY;
246 	}
247 
248 	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
249 	return 0;
250 }
251 
252 
253 /**
254  * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
255  * @hba:	pointer to adapter instance
256  * @iscsi_cid:	iscsi context ID, range 0 - (MAX_CONN - 1)
257  */
258 struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
259 					  u16 iscsi_cid)
260 {
261 	if (!hba->cid_que.conn_cid_tbl) {
262 		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
263 		return NULL;
264 
265 	} else if (iscsi_cid >= hba->max_active_conns) {
266 		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
267 		return NULL;
268 	}
269 	return hba->cid_que.conn_cid_tbl[iscsi_cid];
270 }
271 
272 
273 /**
274  * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
275  * @hba:	pointer to adapter instance
276  */
277 static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
278 {
279 	int idx;
280 
281 	if (!hba->cid_que.cid_free_cnt)
282 		return -1;
283 
284 	idx = hba->cid_que.cid_q_cons_idx;
285 	hba->cid_que.cid_q_cons_idx++;
286 	if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
287 		hba->cid_que.cid_q_cons_idx = 0;
288 
289 	hba->cid_que.cid_free_cnt--;
290 	return hba->cid_que.cid_que[idx];
291 }
292 
293 
294 /**
295  * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free list
296  * @hba: 		pointer to adapter instance
297  * @iscsi_cid:		iscsi context ID to free
298  */
299 static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
300 {
301 	int idx;
302 
303 	if (iscsi_cid == (u16) -1)
304 		return;
305 
306 	hba->cid_que.cid_free_cnt++;
307 
308 	idx = hba->cid_que.cid_q_prod_idx;
309 	hba->cid_que.cid_que[idx] = iscsi_cid;
310 	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
311 	hba->cid_que.cid_q_prod_idx++;
312 	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
313 		hba->cid_que.cid_q_prod_idx = 0;
314 }
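
/*
 * Together, bnx2i_alloc_iscsi_cid() and bnx2i_free_iscsi_cid() treat
 * cid_que[] as a circular free list: the consumer index hands out free
 * iscsi_cids, the producer index returns them, and cid_free_cnt tracks how
 * many are still available.
 */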
315 
316 
317 /**
318  * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
319  * @hba:	pointer to adapter instance
320  *
321  * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
322  * 	and initializes table attributes
323  */
324 static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
325 {
326 	int mem_size;
327 	int i;
328 
329 	mem_size = hba->max_active_conns * sizeof(u32);
330 	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
331 
332 	hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
333 	if (!hba->cid_que.cid_que_base)
334 		return -ENOMEM;
335 
336 	mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
337 	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
338 	hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
339 	if (!hba->cid_que.conn_cid_tbl) {
340 		kfree(hba->cid_que.cid_que_base);
341 		hba->cid_que.cid_que_base = NULL;
342 		return -ENOMEM;
343 	}
344 
345 	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
346 	hba->cid_que.cid_q_prod_idx = 0;
347 	hba->cid_que.cid_q_cons_idx = 0;
348 	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
349 	hba->cid_que.cid_free_cnt = hba->max_active_conns;
350 
351 	for (i = 0; i < hba->max_active_conns; i++) {
352 		hba->cid_que.cid_que[i] = i;
353 		hba->cid_que.conn_cid_tbl[i] = NULL;
354 	}
355 	return 0;
356 }
357 
358 
359 /**
360  * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
361  * @hba:	pointer to adapter instance
362  */
363 static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
364 {
365 	kfree(hba->cid_que.cid_que_base);
366 	hba->cid_que.cid_que_base = NULL;
367 
368 	kfree(hba->cid_que.conn_cid_tbl);
369 	hba->cid_que.conn_cid_tbl = NULL;
370 }
371 
372 
373 /**
374  * bnx2i_alloc_ep - allocates ep structure from global pool
375  * @hba:	pointer to adapter instance
376  *
377  * routine allocates a free endpoint structure from global pool and
378  *	a tcp port to be used for this connection.  Global resource lock,
379  *	'bnx2i_resc_lock' is held while accessing shared global data structures
380  */
381 static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
382 {
383 	struct iscsi_endpoint *ep;
384 	struct bnx2i_endpoint *bnx2i_ep;
385 	u32 ec_div;
386 
387 	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
388 	if (!ep) {
389 		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
390 		return NULL;
391 	}
392 
393 	bnx2i_ep = ep->dd_data;
394 	bnx2i_ep->cls_ep = ep;
395 	INIT_LIST_HEAD(&bnx2i_ep->link);
396 	bnx2i_ep->state = EP_STATE_IDLE;
397 	bnx2i_ep->ep_iscsi_cid = (u16) -1;
398 	bnx2i_ep->hba = hba;
399 	bnx2i_ep->hba_age = hba->age;
400 
401 	ec_div = event_coal_div;
402 	while (ec_div >>= 1)
403 		bnx2i_ep->ec_shift += 1;
404 
405 	hba->ofld_conns_active++;
406 	init_waitqueue_head(&bnx2i_ep->ofld_wait);
407 	return ep;
408 }
409 
410 
411 /**
412  * bnx2i_free_ep - free endpoint
413  * @ep:		pointer to iscsi endpoint structure
414  */
415 static void bnx2i_free_ep(struct iscsi_endpoint *ep)
416 {
417 	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
418 	unsigned long flags;
419 
420 	spin_lock_irqsave(&bnx2i_resc_lock, flags);
421 	bnx2i_ep->state = EP_STATE_IDLE;
422 	bnx2i_ep->hba->ofld_conns_active--;
423 
424 	if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
425 		bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
426 
427 	if (bnx2i_ep->conn) {
428 		bnx2i_ep->conn->ep = NULL;
429 		bnx2i_ep->conn = NULL;
430 	}
431 
432 	bnx2i_ep->hba = NULL;
433 	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
434 	iscsi_destroy_endpoint(ep);
435 }
436 
437 
438 /**
439  * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
440  * @hba:	adapter instance pointer
441  * @session:	iscsi session pointer
442  * @cmd:	iscsi command structure
443  */
444 static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
445 			   struct bnx2i_cmd *cmd)
446 {
447 	struct io_bdt *io = &cmd->io_tbl;
448 	struct iscsi_bd *bd;
449 
450 	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
451 					ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
452 					&io->bd_tbl_dma, GFP_KERNEL);
453 	if (!io->bd_tbl) {
454 		iscsi_session_printk(KERN_ERR, session, "Could not "
455 				     "allocate bdt.\n");
456 		return -ENOMEM;
457 	}
458 	io->bd_valid = 0;
459 	return 0;
460 }
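
/*
 * The BD table is a single coherent DMA allocation sized for the worst case
 * (ISCSI_MAX_BDS_PER_CMD entries), so per-I/O mapping in bnx2i_map_scsi_sg()
 * never has to grow it.
 */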
461 
462 /**
463  * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
464  * @hba:	adapter instance pointer
465  * @session:	iscsi session pointer
466  */
467 static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
468 				   struct iscsi_session *session)
469 {
470 	int i;
471 
472 	for (i = 0; i < session->cmds_max; i++) {
473 		struct iscsi_task *task = session->cmds[i];
474 		struct bnx2i_cmd *cmd = task->dd_data;
475 
476 		if (cmd->io_tbl.bd_tbl)
477 			dma_free_coherent(&hba->pcidev->dev,
478 					  ISCSI_MAX_BDS_PER_CMD *
479 					  sizeof(struct iscsi_bd),
480 					  cmd->io_tbl.bd_tbl,
481 					  cmd->io_tbl.bd_tbl_dma);
482 	}
483 
484 }
485 
486 
487 /**
488  * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
489  * @hba:	adapter instance pointer
490  * @session:	iscsi session pointer
491  */
492 static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
493 				struct iscsi_session *session)
494 {
495 	int i;
496 
497 	for (i = 0; i < session->cmds_max; i++) {
498 		struct iscsi_task *task = session->cmds[i];
499 		struct bnx2i_cmd *cmd = task->dd_data;
500 
501 		task->hdr = &cmd->hdr;
502 		task->hdr_max = sizeof(struct iscsi_hdr);
503 
504 		if (bnx2i_alloc_bdt(hba, session, cmd))
505 			goto free_bdts;
506 	}
507 
508 	return 0;
509 
510 free_bdts:
511 	bnx2i_destroy_cmd_pool(hba, session);
512 	return -ENOMEM;
513 }
514 
515 
516 /**
517  * bnx2i_setup_mp_bdt - allocate BD table resources
518  * @hba:	pointer to adapter structure
519  *
520  * Allocate memory for dummy buffer and associated BD
521  * table to be used by middle path (MP) requests
522  */
523 static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
524 {
525 	int rc = 0;
526 	struct iscsi_bd *mp_bdt;
527 	u64 addr;
528 
529 	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
530 					    &hba->mp_bd_dma, GFP_KERNEL);
531 	if (!hba->mp_bd_tbl) {
532 		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
533 		rc = -1;
534 		goto out;
535 	}
536 
537 	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
538 					       CNIC_PAGE_SIZE,
539 					       &hba->dummy_buf_dma, GFP_KERNEL);
540 	if (!hba->dummy_buffer) {
541 		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
542 		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
543 				  hba->mp_bd_tbl, hba->mp_bd_dma);
544 		hba->mp_bd_tbl = NULL;
545 		rc = -1;
546 		goto out;
547 	}
548 
549 	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
550 	addr = (unsigned long) hba->dummy_buf_dma;
551 	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
552 	mp_bdt->buffer_addr_hi = addr >> 32;
553 	mp_bdt->buffer_length = CNIC_PAGE_SIZE;
554 	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
555 			ISCSI_BD_FIRST_IN_BD_CHAIN;
556 out:
557 	return rc;
558 }
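
/*
 * The middle path (MP) BD table set up above points at a single dummy page.
 * It is used for requests that carry no SG data of their own, e.g.
 * bnx2i_task_xmit() points zero-BD commands at hba->mp_bd_dma.
 */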
559 
560 
561 /**
562  * bnx2i_free_mp_bdt - frees the MP dummy buffer and BD table
563  * @hba:	pointer to adapter instance
564  *
565  * free MP dummy buffer and associated BD table
566  */
567 static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
568 {
569 	if (hba->mp_bd_tbl) {
570 		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
571 				  hba->mp_bd_tbl, hba->mp_bd_dma);
572 		hba->mp_bd_tbl = NULL;
573 	}
574 	if (hba->dummy_buffer) {
575 		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
576 				  hba->dummy_buffer, hba->dummy_buf_dma);
577 		hba->dummy_buffer = NULL;
578 	}
579 	return;
580 }
581 
582 /**
583  * bnx2i_drop_session - notifies iscsid of connection error.
584  * @cls_session:	iscsi cls session pointer
585  *
586  * This notifies iscsid that there is an error, so it can initiate
587  * recovery.
588  *
589  * This relies on caller using the iscsi class iterator so the object
590  * is refcounted and does not disappear from under us.
591  */
592 void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
593 {
594 	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
595 }
596 
597 /**
598  * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
599  * @hba:	pointer to adapter instance
600  * @ep:		pointer to endpoint (transport identifier) structure
601  *
602  * EP destroy queue manager
603  */
604 static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
605 				     struct bnx2i_endpoint *ep)
606 {
607 	write_lock_bh(&hba->ep_rdwr_lock);
608 	list_add_tail(&ep->link, &hba->ep_destroy_list);
609 	write_unlock_bh(&hba->ep_rdwr_lock);
610 	return 0;
611 }
612 
613 /**
614  * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
615  *
616  * @hba: 		pointer to adapter instance
617  * @ep: 		pointer to endpoint (transport identifier) structure
618  *
619  * EP destroy queue manager
620  */
621 static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
622 				     struct bnx2i_endpoint *ep)
623 {
624 	write_lock_bh(&hba->ep_rdwr_lock);
625 	list_del_init(&ep->link);
626 	write_unlock_bh(&hba->ep_rdwr_lock);
627 
628 	return 0;
629 }
630 
631 /**
632  * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
633  * @hba:	pointer to adapter instance
634  * @ep:		pointer to endpoint (transport identifier) structure
635  *
636  * pending conn offload completion queue manager
637  */
638 static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
639 				  struct bnx2i_endpoint *ep)
640 {
641 	write_lock_bh(&hba->ep_rdwr_lock);
642 	list_add_tail(&ep->link, &hba->ep_ofld_list);
643 	write_unlock_bh(&hba->ep_rdwr_lock);
644 	return 0;
645 }
646 
647 /**
648  * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
649  * @hba: 		pointer to adapter instance
650  * @ep: 		pointer to endpoint (transport identifier) structure
651  *
652  * pending conn offload completion queue manager
653  */
654 static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
655 				  struct bnx2i_endpoint *ep)
656 {
657 	write_lock_bh(&hba->ep_rdwr_lock);
658 	list_del_init(&ep->link);
659 	write_unlock_bh(&hba->ep_rdwr_lock);
660 	return 0;
661 }
662 
663 
664 /**
665  * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
666  *
667  * @hba: 		pointer to adapter instance
668  * @iscsi_cid:		iscsi context ID to find
669  *
670  */
671 struct bnx2i_endpoint *
672 bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
673 {
674 	struct list_head *list;
675 	struct list_head *tmp;
676 	struct bnx2i_endpoint *ep = NULL;
677 
678 	read_lock_bh(&hba->ep_rdwr_lock);
679 	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
680 		ep = (struct bnx2i_endpoint *)list;
681 
682 		if (ep->ep_iscsi_cid == iscsi_cid)
683 			break;
684 		ep = NULL;
685 	}
686 	read_unlock_bh(&hba->ep_rdwr_lock);
687 
688 	if (!ep)
689 		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
690 	return ep;
691 }
692 
693 /**
694  * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
695  * @hba: 		pointer to adapter instance
696  * @iscsi_cid:		iscsi context ID to find
697  *
698  */
699 struct bnx2i_endpoint *
700 bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
701 {
702 	struct list_head *list;
703 	struct list_head *tmp;
704 	struct bnx2i_endpoint *ep = NULL;
705 
706 	read_lock_bh(&hba->ep_rdwr_lock);
707 	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
708 		ep = (struct bnx2i_endpoint *)list;
709 
710 		if (ep->ep_iscsi_cid == iscsi_cid)
711 			break;
712 		ep = NULL;
713 	}
714 	read_unlock_bh(&hba->ep_rdwr_lock);
715 
716 	if (!ep)
717 		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
718 
719 	return ep;
720 }
721 
722 /**
723  * bnx2i_ep_active_list_add - add an entry to ep active list
724  * @hba:	pointer to adapter instance
725  * @ep:		pointer to endpoint (transport identifier) structure
726  *
727  * current active conn queue manager
728  */
729 static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
730 				     struct bnx2i_endpoint *ep)
731 {
732 	write_lock_bh(&hba->ep_rdwr_lock);
733 	list_add_tail(&ep->link, &hba->ep_active_list);
734 	write_unlock_bh(&hba->ep_rdwr_lock);
735 }
736 
737 
738 /**
739  * bnx2i_ep_active_list_del - deletes an entry from the ep active list
740  * @hba:	pointer to adapter instance
741  * @ep:		pointer to endpoint (transport identifier) structure
742  *
743  * current active conn queue manager
744  */
745 static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
746 				     struct bnx2i_endpoint *ep)
747 {
748 	write_lock_bh(&hba->ep_rdwr_lock);
749 	list_del_init(&ep->link);
750 	write_unlock_bh(&hba->ep_rdwr_lock);
751 }
752 
753 
754 /**
755  * bnx2i_setup_host_queue_size - assigns shost->can_queue param
756  * @hba:	pointer to adapter instance
757  * @shost:	scsi host pointer
758  *
759  * Initializes 'can_queue' parameter based on how many outstanding commands
760  * 	the device can handle. Each device 5708/5709/57710 has different
761  *	capabilities
762  */
763 static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
764 					struct Scsi_Host *shost)
765 {
766 	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
767 		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
768 	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
769 		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
770 	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
771 		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
772 	else
773 		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
774 }
775 
776 
777 /**
778  * bnx2i_alloc_hba - allocate and init adapter instance
779  * @cnic:	cnic device pointer
780  *
781  * allocate & initialize adapter structure and call other
782  *	support routines to do per adapter initialization
783  */
784 struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
785 {
786 	struct Scsi_Host *shost;
787 	struct bnx2i_hba *hba;
788 
789 	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
790 	if (!shost)
791 		return NULL;
792 	shost->dma_boundary = cnic->pcidev->dma_mask;
793 	shost->transportt = bnx2i_scsi_xport_template;
794 	shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1;
795 	shost->max_channel = 0;
796 	shost->max_lun = 512;
797 	shost->max_cmd_len = 16;
798 
799 	hba = iscsi_host_priv(shost);
800 	hba->shost = shost;
801 	hba->netdev = cnic->netdev;
802 	/* Get PCI related information and update hba struct members */
803 	hba->pcidev = cnic->pcidev;
804 	pci_dev_get(hba->pcidev);
805 	hba->pci_did = hba->pcidev->device;
806 	hba->pci_vid = hba->pcidev->vendor;
807 	hba->pci_sdid = hba->pcidev->subsystem_device;
808 	hba->pci_svid = hba->pcidev->subsystem_vendor;
809 	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
810 	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
811 
812 	bnx2i_identify_device(hba, cnic);
813 	bnx2i_setup_host_queue_size(hba, shost);
814 
815 	hba->reg_base = pci_resource_start(hba->pcidev, 0);
816 	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
817 		hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
818 		if (!hba->regview)
819 			goto ioreg_map_err;
820 	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
821 		hba->regview = pci_iomap(hba->pcidev, 0, 4096);
822 		if (!hba->regview)
823 			goto ioreg_map_err;
824 	}
825 
826 	if (bnx2i_setup_mp_bdt(hba))
827 		goto mp_bdt_mem_err;
828 
829 	INIT_LIST_HEAD(&hba->ep_ofld_list);
830 	INIT_LIST_HEAD(&hba->ep_active_list);
831 	INIT_LIST_HEAD(&hba->ep_destroy_list);
832 	rwlock_init(&hba->ep_rdwr_lock);
833 
834 	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
835 
836 	/* different values for 5708/5709/57710 */
837 	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
838 
839 	if (bnx2i_setup_free_cid_que(hba))
840 		goto cid_que_err;
841 
842 	/* SQ/RQ/CQ size can be changed via sysfs interface */
843 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
844 		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
845 			hba->max_sqes = sq_size;
846 		else
847 			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
848 	} else {	/* 5706/5708/5709 */
849 		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
850 			hba->max_sqes = sq_size;
851 		else
852 			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
853 	}
854 
855 	hba->max_rqes = rq_size;
856 	hba->max_cqes = hba->max_sqes + rq_size;
857 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
858 		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
859 			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
860 	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
861 		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
862 
863 	hba->num_ccell = hba->max_sqes / 2;
864 
865 	spin_lock_init(&hba->lock);
866 	mutex_init(&hba->net_dev_lock);
867 	init_waitqueue_head(&hba->eh_wait);
868 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
869 		hba->hba_shutdown_tmo = 30 * HZ;
870 		hba->conn_teardown_tmo = 20 * HZ;
871 		hba->conn_ctx_destroy_tmo = 6 * HZ;
872 	} else {	/* 5706/5708/5709 */
873 		hba->hba_shutdown_tmo = 20 * HZ;
874 		hba->conn_teardown_tmo = 10 * HZ;
875 		hba->conn_ctx_destroy_tmo = 2 * HZ;
876 	}
877 
878 #ifdef CONFIG_32BIT
879 	spin_lock_init(&hba->stat_lock);
880 #endif
881 	memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
882 
883 	if (iscsi_host_add(shost, &hba->pcidev->dev))
884 		goto free_dump_mem;
885 	return hba;
886 
887 free_dump_mem:
888 	bnx2i_release_free_cid_que(hba);
889 cid_que_err:
890 	bnx2i_free_mp_bdt(hba);
891 mp_bdt_mem_err:
892 	if (hba->regview) {
893 		pci_iounmap(hba->pcidev, hba->regview);
894 		hba->regview = NULL;
895 	}
896 ioreg_map_err:
897 	pci_dev_put(hba->pcidev);
898 	scsi_host_put(shost);
899 	return NULL;
900 }
901 
902 /**
903  * bnx2i_free_hba - releases hba structure and resources held by the adapter
904  * @hba:	pointer to adapter instance
905  *
906  * free adapter structure and call various cleanup routines.
907  */
908 void bnx2i_free_hba(struct bnx2i_hba *hba)
909 {
910 	struct Scsi_Host *shost = hba->shost;
911 
912 	iscsi_host_remove(shost);
913 	INIT_LIST_HEAD(&hba->ep_ofld_list);
914 	INIT_LIST_HEAD(&hba->ep_active_list);
915 	INIT_LIST_HEAD(&hba->ep_destroy_list);
916 
917 	if (hba->regview) {
918 		pci_iounmap(hba->pcidev, hba->regview);
919 		hba->regview = NULL;
920 	}
921 	pci_dev_put(hba->pcidev);
922 	bnx2i_free_mp_bdt(hba);
923 	bnx2i_release_free_cid_que(hba);
924 	iscsi_host_free(shost);
925 }
926 
927 /**
928  * bnx2i_conn_free_login_resources - free DMA resources used for login process
929  * @hba:		pointer to adapter instance
930  * @bnx2i_conn:		iscsi connection pointer
931  *
932  * Login related resources, mostly BDT & payload DMA memory, are freed
933  */
934 static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
935 					    struct bnx2i_conn *bnx2i_conn)
936 {
937 	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
938 		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
939 				  bnx2i_conn->gen_pdu.resp_bd_tbl,
940 				  bnx2i_conn->gen_pdu.resp_bd_dma);
941 		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
942 	}
943 
944 	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
945 		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
946 				  bnx2i_conn->gen_pdu.req_bd_tbl,
947 				  bnx2i_conn->gen_pdu.req_bd_dma);
948 		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
949 	}
950 
951 	if (bnx2i_conn->gen_pdu.resp_buf) {
952 		dma_free_coherent(&hba->pcidev->dev,
953 				  ISCSI_DEF_MAX_RECV_SEG_LEN,
954 				  bnx2i_conn->gen_pdu.resp_buf,
955 				  bnx2i_conn->gen_pdu.resp_dma_addr);
956 		bnx2i_conn->gen_pdu.resp_buf = NULL;
957 	}
958 
959 	if (bnx2i_conn->gen_pdu.req_buf) {
960 		dma_free_coherent(&hba->pcidev->dev,
961 				  ISCSI_DEF_MAX_RECV_SEG_LEN,
962 				  bnx2i_conn->gen_pdu.req_buf,
963 				  bnx2i_conn->gen_pdu.req_dma_addr);
964 		bnx2i_conn->gen_pdu.req_buf = NULL;
965 	}
966 }
967 
968 /**
969  * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
970  * @hba:		pointer to adapter instance
971  * @bnx2i_conn:		iscsi connection pointer
972  *
973  * Mgmt task DMA resources are allocated in this routine.
974  */
975 static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
976 					    struct bnx2i_conn *bnx2i_conn)
977 {
978 	/* Allocate memory for login request/response buffers */
979 	bnx2i_conn->gen_pdu.req_buf =
980 		dma_alloc_coherent(&hba->pcidev->dev,
981 				   ISCSI_DEF_MAX_RECV_SEG_LEN,
982 				   &bnx2i_conn->gen_pdu.req_dma_addr,
983 				   GFP_KERNEL);
984 	if (bnx2i_conn->gen_pdu.req_buf == NULL)
985 		goto login_req_buf_failure;
986 
987 	bnx2i_conn->gen_pdu.req_buf_size = 0;
988 	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
989 
990 	bnx2i_conn->gen_pdu.resp_buf =
991 		dma_alloc_coherent(&hba->pcidev->dev,
992 				   ISCSI_DEF_MAX_RECV_SEG_LEN,
993 				   &bnx2i_conn->gen_pdu.resp_dma_addr,
994 				   GFP_KERNEL);
995 	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
996 		goto login_resp_buf_failure;
997 
998 	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
999 	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
1000 
1001 	bnx2i_conn->gen_pdu.req_bd_tbl =
1002 		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1003 				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
1004 	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
1005 		goto login_req_bd_tbl_failure;
1006 
1007 	bnx2i_conn->gen_pdu.resp_bd_tbl =
1008 		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1009 				   &bnx2i_conn->gen_pdu.resp_bd_dma,
1010 				   GFP_KERNEL);
1011 	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
1012 		goto login_resp_bd_tbl_failure;
1013 
1014 	return 0;
1015 
1016 login_resp_bd_tbl_failure:
1017 	dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1018 			  bnx2i_conn->gen_pdu.req_bd_tbl,
1019 			  bnx2i_conn->gen_pdu.req_bd_dma);
1020 	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
1021 
1022 login_req_bd_tbl_failure:
1023 	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1024 			  bnx2i_conn->gen_pdu.resp_buf,
1025 			  bnx2i_conn->gen_pdu.resp_dma_addr);
1026 	bnx2i_conn->gen_pdu.resp_buf = NULL;
1027 login_resp_buf_failure:
1028 	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1029 			  bnx2i_conn->gen_pdu.req_buf,
1030 			  bnx2i_conn->gen_pdu.req_dma_addr);
1031 	bnx2i_conn->gen_pdu.req_buf = NULL;
1032 login_req_buf_failure:
1033 	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
1034 			  "login resource alloc failed!!\n");
1035 	return -ENOMEM;
1036 
1037 }
1038 
1039 
1040 /**
1041  * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
1042  * @bnx2i_conn:		iscsi connection pointer
1043  *
1044  * Prepares buffer descriptor (BD) tables before shipping requests to cnic
1045  *	for PDUs prepared by 'iscsid' daemon
1046  */
1047 static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
1048 {
1049 	struct iscsi_bd *bd_tbl;
1050 
1051 	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
1052 
1053 	bd_tbl->buffer_addr_hi =
1054 		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
1055 	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
1056 	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
1057 				bnx2i_conn->gen_pdu.req_buf;
1058 	bd_tbl->reserved0 = 0;
1059 	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1060 			ISCSI_BD_FIRST_IN_BD_CHAIN;
1061 
1062 	bd_tbl = (struct iscsi_bd  *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1063 	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1064 	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1065 	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1066 	bd_tbl->reserved0 = 0;
1067 	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1068 			ISCSI_BD_FIRST_IN_BD_CHAIN;
1069 }
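
/*
 * Note: the request BD length above is however far req_wr_ptr has advanced
 * (i.e. the PDU payload staged by bnx2i_mtask_xmit()), while the response BD
 * always exposes the full ISCSI_DEF_MAX_RECV_SEG_LEN receive buffer.
 */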
1070 
1071 
1072 /**
1073  * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
1074  * @task:	transport layer task pointer
1075  *
1076  * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
1077  *	Nop-out and Logout requests flow through this path.
1078  */
1079 static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1080 {
1081 	struct bnx2i_cmd *cmd = task->dd_data;
1082 	struct bnx2i_conn *bnx2i_conn = cmd->conn;
1083 	int rc = 0;
1084 	char *buf;
1085 	int data_len;
1086 
1087 	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1088 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1089 	case ISCSI_OP_LOGIN:
1090 		bnx2i_send_iscsi_login(bnx2i_conn, task);
1091 		break;
1092 	case ISCSI_OP_NOOP_OUT:
1093 		data_len = bnx2i_conn->gen_pdu.req_buf_size;
1094 		buf = bnx2i_conn->gen_pdu.req_buf;
1095 		if (data_len)
1096 			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1097 						     buf, data_len, 1);
1098 		else
1099 			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1100 						     NULL, 0, 1);
1101 		break;
1102 	case ISCSI_OP_LOGOUT:
1103 		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1104 		break;
1105 	case ISCSI_OP_SCSI_TMFUNC:
1106 		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1107 		break;
1108 	case ISCSI_OP_TEXT:
1109 		rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
1110 		break;
1111 	default:
1112 		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1113 				  "send_gen: unsupported op 0x%x\n",
1114 				  task->hdr->opcode);
1115 	}
1116 	return rc;
1117 }
1118 
1119 
1120 /**********************************************************************
1121  *		SCSI-ML Interface
1122  **********************************************************************/
1123 
1124 /**
1125  * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
1126  * @sc:		SCSI-ML command pointer
1127  * @cmd:	iscsi cmd pointer
1128  */
1129 static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1130 {
1131 	u32 dword;
1132 	int lpcnt;
1133 	u8 *srcp;
1134 	u32 *dstp;
1135 	u32 scsi_lun[2];
1136 
1137 	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1138 	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1139 	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1140 
1141 	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1142 	srcp = (u8 *) sc->cmnd;
1143 	dstp = (u32 *) cmd->req.cdb;
1144 	while (lpcnt--) {
1145 		memcpy(&dword, (const void *) srcp, 4);
1146 		*dstp = cpu_to_be32(dword);
1147 		srcp += 4;
1148 		dstp++;
1149 	}
1150 	if (sc->cmd_len & 0x3) {
1151 		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1152 		*dstp = cpu_to_be32(dword);
1153 	}
1154 }
1155 
1156 static void bnx2i_cleanup_task(struct iscsi_task *task)
1157 {
1158 	struct iscsi_conn *conn = task->conn;
1159 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1160 	struct bnx2i_hba *hba = bnx2i_conn->hba;
1161 
1162 	/*
1163 	 * mgmt task or cmd was never sent to us to transmit.
1164 	 */
1165 	if (!task->sc || task->state == ISCSI_TASK_PENDING)
1166 		return;
1167 	/*
1168 	 * need to clean-up task context to claim dma buffers
1169 	 */
1170 	if (task->state == ISCSI_TASK_ABRT_TMF) {
1171 		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
1172 
1173 		spin_unlock_bh(&conn->session->back_lock);
1174 		spin_unlock_bh(&conn->session->frwd_lock);
1175 		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1176 				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1177 		spin_lock_bh(&conn->session->frwd_lock);
1178 		spin_lock_bh(&conn->session->back_lock);
1179 	}
1180 	bnx2i_iscsi_unmap_sg_list(task->dd_data);
1181 }
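
/*
 * Note: for a task aborted via TMF, bnx2i_cleanup_task() drops the session
 * back_lock/frwd_lock while it waits for the firmware CMD_CLEANUP completion
 * and then re-acquires them in the original nesting order (frwd_lock outer,
 * back_lock inner).
 */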
1182 
1183 /**
1184  * bnx2i_mtask_xmit - transmit mtask to chip for further processing
1185  * @conn:	transport layer conn structure pointer
1186  * @task:	transport layer command structure pointer
1187  */
1188 static int
1189 bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1190 {
1191 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1192 	struct bnx2i_hba *hba = bnx2i_conn->hba;
1193 	struct bnx2i_cmd *cmd = task->dd_data;
1194 
1195 	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1196 
1197 	bnx2i_setup_cmd_wqe_template(cmd);
1198 	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1199 
1200 	/* Tx PDU/data length count */
1201 	ADD_STATS_64(hba, tx_pdus, 1);
1202 	ADD_STATS_64(hba, tx_bytes, task->data_count);
1203 
1204 	if (task->data_count) {
1205 		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1206 		       task->data_count);
1207 		bnx2i_conn->gen_pdu.req_wr_ptr =
1208 			bnx2i_conn->gen_pdu.req_buf + task->data_count;
1209 	}
1210 	cmd->conn = conn->dd_data;
1211 	cmd->scsi_cmd = NULL;
1212 	return bnx2i_iscsi_send_generic_request(task);
1213 }
1214 
1215 /**
1216  * bnx2i_task_xmit - transmit iscsi command to chip for further processing
1217  * @task:	transport layer command structure pointer
1218  *
1219  * maps SG buffers and send request to chip/firmware in the form of SQ WQE
1220  */
1221 static int bnx2i_task_xmit(struct iscsi_task *task)
1222 {
1223 	struct iscsi_conn *conn = task->conn;
1224 	struct iscsi_session *session = conn->session;
1225 	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1226 	struct bnx2i_hba *hba = iscsi_host_priv(shost);
1227 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1228 	struct scsi_cmnd *sc = task->sc;
1229 	struct bnx2i_cmd *cmd = task->dd_data;
1230 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
1231 
1232 	if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1  >
1233 	    hba->max_sqes)
1234 		return -ENOMEM;
1235 
1236 	/*
1237 	 * If there is no scsi_cmnd this must be a mgmt task
1238 	 */
1239 	if (!sc)
1240 		return bnx2i_mtask_xmit(conn, task);
1241 
1242 	bnx2i_setup_cmd_wqe_template(cmd);
1243 	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1244 	cmd->conn = bnx2i_conn;
1245 	cmd->scsi_cmd = sc;
1246 	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1247 	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1248 
1249 	bnx2i_iscsi_map_sg_list(cmd);
1250 	bnx2i_cpy_scsi_cdb(sc, cmd);
1251 
1252 	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1253 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
1254 		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1255 		cmd->req.itt = task->itt |
1256 			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1257 		bnx2i_setup_write_cmd_bd_info(task);
1258 	} else {
1259 		if (scsi_bufflen(sc))
1260 			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1261 		cmd->req.itt = task->itt |
1262 			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1263 	}
1264 
1265 	cmd->req.num_bds = cmd->io_tbl.bd_valid;
1266 	if (!cmd->io_tbl.bd_valid) {
1267 		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1268 		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1269 		cmd->req.num_bds = 1;
1270 	}
1271 
1272 	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1273 	return 0;
1274 }
1275 
1276 /**
1277  * bnx2i_session_create - create a new iscsi session
1278  * @ep:		pointer to iscsi endpoint
1279  * @cmds_max:		user specified maximum commands
1280  * @qdepth:		scsi queue depth to support
1281  * @initial_cmdsn:	initial iscsi CMDSN to be used for this session
1282  *
1283  * Creates a new iSCSI session instance on given device.
1284  */
1285 static struct iscsi_cls_session *
1286 bnx2i_session_create(struct iscsi_endpoint *ep,
1287 		     uint16_t cmds_max, uint16_t qdepth,
1288 		     uint32_t initial_cmdsn)
1289 {
1290 	struct Scsi_Host *shost;
1291 	struct iscsi_cls_session *cls_session;
1292 	struct bnx2i_hba *hba;
1293 	struct bnx2i_endpoint *bnx2i_ep;
1294 
1295 	if (!ep) {
1296 		printk(KERN_ERR "bnx2i: missing ep.\n");
1297 		return NULL;
1298 	}
1299 
1300 	bnx2i_ep = ep->dd_data;
1301 	shost = bnx2i_ep->hba->shost;
1302 	hba = iscsi_host_priv(shost);
1303 	if (bnx2i_adapter_ready(hba))
1304 		return NULL;
1305 
1306 	/*
1307 	 * user can override hw limit as long as it is within
1308 	 * the min/max.
1309 	 */
1310 	if (cmds_max > hba->max_sqes)
1311 		cmds_max = hba->max_sqes;
1312 	else if (cmds_max < BNX2I_SQ_WQES_MIN)
1313 		cmds_max = BNX2I_SQ_WQES_MIN;
1314 
1315 	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1316 					  cmds_max, 0, sizeof(struct bnx2i_cmd),
1317 					  initial_cmdsn, ISCSI_MAX_TARGET);
1318 	if (!cls_session)
1319 		return NULL;
1320 
1321 	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1322 		goto session_teardown;
1323 	return cls_session;
1324 
1325 session_teardown:
1326 	iscsi_session_teardown(cls_session);
1327 	return NULL;
1328 }
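
/*
 * Note: cmds_max is clamped above to [BNX2I_SQ_WQES_MIN, hba->max_sqes] so a
 * session can never queue more commands than the adapter's send queue holds.
 */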
1329 
1330 
1331 /**
1332  * bnx2i_session_destroy - destroys iscsi session
1333  * @cls_session:	pointer to iscsi cls session
1334  *
1335  * Destroys previously created iSCSI session instance and releases
1336  *	all resources held by it
1337  */
1338 static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1339 {
1340 	struct iscsi_session *session = cls_session->dd_data;
1341 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1342 	struct bnx2i_hba *hba = iscsi_host_priv(shost);
1343 
1344 	bnx2i_destroy_cmd_pool(hba, session);
1345 	iscsi_session_teardown(cls_session);
1346 }
1347 
1348 
1349 /**
1350  * bnx2i_conn_create - create iscsi connection instance
1351  * @cls_session:	pointer to iscsi cls session
1352  * @cid:		iscsi cid as per rfc (not NX2's CID terminology)
1353  *
1354  * Creates a new iSCSI connection instance for a given session
1355  */
1356 static struct iscsi_cls_conn *
1357 bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1358 {
1359 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1360 	struct bnx2i_hba *hba = iscsi_host_priv(shost);
1361 	struct bnx2i_conn *bnx2i_conn;
1362 	struct iscsi_cls_conn *cls_conn;
1363 	struct iscsi_conn *conn;
1364 
1365 	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1366 				    cid);
1367 	if (!cls_conn)
1368 		return NULL;
1369 	conn = cls_conn->dd_data;
1370 
1371 	bnx2i_conn = conn->dd_data;
1372 	bnx2i_conn->cls_conn = cls_conn;
1373 	bnx2i_conn->hba = hba;
1374 
1375 	atomic_set(&bnx2i_conn->work_cnt, 0);
1376 
1377 	/* 'ep' ptr will be assigned in bind() call */
1378 	bnx2i_conn->ep = NULL;
1379 	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1380 
1381 	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1382 		iscsi_conn_printk(KERN_ALERT, conn,
1383 				  "conn_new: login resc alloc failed!!\n");
1384 		goto free_conn;
1385 	}
1386 
1387 	return cls_conn;
1388 
1389 free_conn:
1390 	iscsi_conn_teardown(cls_conn);
1391 	return NULL;
1392 }
1393 
1394 /**
1395  * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1396  * @cls_session:	pointer to iscsi cls session
1397  * @cls_conn:		pointer to iscsi cls conn
1398  * @transport_fd:	64-bit EP handle
1399  * @is_leading:		leading connection on this session?
1400  *
1401  * Binds together iSCSI session instance, iSCSI connection instance
1402  *	and the TCP connection. This routine returns an error code if the
1403  *	TCP connection does not belong to the device the iSCSI sess/conn
1404  *	is bound to
1405  */
1406 static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1407 			   struct iscsi_cls_conn *cls_conn,
1408 			   uint64_t transport_fd, int is_leading)
1409 {
1410 	struct iscsi_conn *conn = cls_conn->dd_data;
1411 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1412 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1413 	struct bnx2i_hba *hba = iscsi_host_priv(shost);
1414 	struct bnx2i_endpoint *bnx2i_ep;
1415 	struct iscsi_endpoint *ep;
1416 	int ret_code;
1417 
1418 	ep = iscsi_lookup_endpoint(transport_fd);
1419 	if (!ep)
1420 		return -EINVAL;
1421 	/*
1422 	 * Forcefully terminate all in progress connection recovery at the
1423 	 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
1424 	 */
1425 	if (bnx2i_adapter_ready(hba)) {
1426 		ret_code = -EIO;
1427 		goto put_ep;
1428 	}
1429 
1430 	bnx2i_ep = ep->dd_data;
1431 	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1432 	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
1433 		/* Peer disconnect via FIN or RST */
1434 		ret_code = -EINVAL;
1435 		goto put_ep;
1436 	}
1437 
1438 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
1439 		ret_code = -EINVAL;
1440 		goto put_ep;
1441 	}
1442 
1443 	if (bnx2i_ep->hba != hba) {
1444 		/* Error - TCP connection does not belong to this device
1445 		 */
1446 		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1447 				  "conn bind, ep=0x%p (%s) does not",
1448 				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
1449 		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1450 				  "belong to hba (%s)\n",
1451 				  hba->netdev->name);
1452 		ret_code = -EEXIST;
1453 		goto put_ep;
1454 	}
1455 	bnx2i_ep->conn = bnx2i_conn;
1456 	bnx2i_conn->ep = bnx2i_ep;
1457 	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1458 	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1459 
1460 	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1461 						bnx2i_ep->ep_iscsi_cid);
1462 
1463 	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1464 	 * the driver needs to explicitly replenish the RQ index during setup.
1465 	 */
1466 	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1467 		bnx2i_put_rq_buf(bnx2i_conn, 0);
1468 
1469 	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1470 put_ep:
1471 	iscsi_put_endpoint(ep);
1472 	return ret_code;
1473 }
1474 
1475 
1476 /**
1477  * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
1478  * @cls_conn:	pointer to iscsi cls conn
1479  *
1480  * Destroy an iSCSI connection instance and release memory resources held by
1481  *	this connection
1482  */
1483 static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1484 {
1485 	struct iscsi_conn *conn = cls_conn->dd_data;
1486 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1487 	struct Scsi_Host *shost;
1488 	struct bnx2i_hba *hba;
1489 	struct bnx2i_work *work, *tmp;
1490 	unsigned cpu = 0;
1491 	struct bnx2i_percpu_s *p;
1492 
1493 	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1494 	hba = iscsi_host_priv(shost);
1495 
1496 	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1497 
1498 	if (atomic_read(&bnx2i_conn->work_cnt)) {
1499 		for_each_online_cpu(cpu) {
1500 			p = &per_cpu(bnx2i_percpu, cpu);
1501 			spin_lock_bh(&p->p_work_lock);
1502 			list_for_each_entry_safe(work, tmp,
1503 						 &p->work_list, list) {
1504 				if (work->session == conn->session &&
1505 				    work->bnx2i_conn == bnx2i_conn) {
1506 					list_del_init(&work->list);
1507 					kfree(work);
1508 					if (!atomic_dec_and_test(
1509 							&bnx2i_conn->work_cnt))
1510 						break;
1511 				}
1512 			}
1513 			spin_unlock_bh(&p->p_work_lock);
1514 		}
1515 	}
1516 
1517 	iscsi_conn_teardown(cls_conn);
1518 }
1519 
1520 
1521 /**
1522  * bnx2i_ep_get_param - return iscsi ep parameter to caller
1523  * @ep:		pointer to iscsi endpoint
1524  * @param:	parameter type identifier
1525  * @buf: 	buffer pointer
1526  *
1527  * returns iSCSI ep parameters
1528  */
1529 static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
1530 			      enum iscsi_param param, char *buf)
1531 {
1532 	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
1533 	struct bnx2i_hba *hba = bnx2i_ep->hba;
1534 	int len = -ENOTCONN;
1535 
1536 	if (!hba)
1537 		return -ENOTCONN;
1538 
1539 	switch (param) {
1540 	case ISCSI_PARAM_CONN_PORT:
1541 		mutex_lock(&hba->net_dev_lock);
1542 		if (bnx2i_ep->cm_sk)
1543 			len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
1544 		mutex_unlock(&hba->net_dev_lock);
1545 		break;
1546 	case ISCSI_PARAM_CONN_ADDRESS:
1547 		mutex_lock(&hba->net_dev_lock);
1548 		if (bnx2i_ep->cm_sk)
1549 			len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
1550 		mutex_unlock(&hba->net_dev_lock);
1551 		break;
1552 	default:
1553 		return -ENOSYS;
1554 	}
1555 
1556 	return len;
1557 }
1558 
1559 /**
1560  * bnx2i_host_get_param - returns host (adapter) related parameters
1561  * @shost:	scsi host pointer
1562  * @param:	parameter type identifier
1563  * @buf:	buffer pointer
1564  */
1565 static int bnx2i_host_get_param(struct Scsi_Host *shost,
1566 				enum iscsi_host_param param, char *buf)
1567 {
1568 	struct bnx2i_hba *hba = iscsi_host_priv(shost);
1569 	int len = 0;
1570 
1571 	switch (param) {
1572 	case ISCSI_HOST_PARAM_HWADDRESS:
1573 		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1574 		break;
1575 	case ISCSI_HOST_PARAM_NETDEV_NAME:
1576 		len = sprintf(buf, "%s\n", hba->netdev->name);
1577 		break;
1578 	case ISCSI_HOST_PARAM_IPADDRESS: {
1579 		struct list_head *active_list = &hba->ep_active_list;
1580 
1581 		read_lock_bh(&hba->ep_rdwr_lock);
1582 		if (!list_empty(&hba->ep_active_list)) {
1583 			struct bnx2i_endpoint *bnx2i_ep;
1584 			struct cnic_sock *csk;
1585 
1586 			bnx2i_ep = list_first_entry(active_list,
1587 						    struct bnx2i_endpoint,
1588 						    link);
1589 			csk = bnx2i_ep->cm_sk;
1590 			if (test_bit(SK_F_IPV6, &csk->flags))
1591 				len = sprintf(buf, "%pI6\n", csk->src_ip);
1592 			else
1593 				len = sprintf(buf, "%pI4\n", csk->src_ip);
1594 		}
1595 		read_unlock_bh(&hba->ep_rdwr_lock);
1596 		break;
1597 	}
1598 	default:
1599 		return iscsi_host_get_param(shost, param, buf);
1600 	}
1601 	return len;
1602 }
1603 
1604 /**
1605  * bnx2i_conn_start - completes iscsi connection migration to FFP
1606  * @cls_conn:	pointer to iscsi cls conn
1607  *
1608  * last call in FFP migration to handover iscsi conn to the driver
1609  */
1610 static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1611 {
1612 	struct iscsi_conn *conn = cls_conn->dd_data;
1613 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1614 
1615 	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1616 	bnx2i_update_iscsi_conn(conn);
1617 
1618 	/*
1619 	 * this should normally not sleep for a long time so it should
1620 	 * not disrupt the caller.
1621 	 */
1622 	timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1623 	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1624 	add_timer(&bnx2i_conn->ep->ofld_timer);
1625 	/* update iSCSI context for this conn, wait for CNIC to complete */
1626 	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1627 			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1628 
1629 	if (signal_pending(current))
1630 		flush_signals(current);
1631 	del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1632 
1633 	iscsi_conn_start(cls_conn);
1634 	return 0;
1635 }
1636 
1637 
1638 /**
1639  * bnx2i_conn_get_stats - returns iSCSI stats
1640  * @cls_conn:	pointer to iscsi cls conn
1641  * @stats:	pointer to iscsi statistic struct
1642  */
1643 static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1644 				 struct iscsi_stats *stats)
1645 {
1646 	struct iscsi_conn *conn = cls_conn->dd_data;
1647 
1648 	stats->txdata_octets = conn->txdata_octets;
1649 	stats->rxdata_octets = conn->rxdata_octets;
1650 	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1651 	stats->dataout_pdus = conn->dataout_pdus_cnt;
1652 	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1653 	stats->datain_pdus = conn->datain_pdus_cnt;
1654 	stats->r2t_pdus = conn->r2t_pdus_cnt;
1655 	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1656 	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1657 	stats->digest_err = 0;
1658 	stats->timeout_err = 0;
1659 	strcpy(stats->custom[0].desc, "eh_abort_cnt");
1660 	stats->custom[0].value = conn->eh_abort_cnt;
1661 	stats->custom_length = 1;
1662 }
1663 
1664 
1665 /**
1666  * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
1667  * @dst_addr:	target IP address
1668  *
1669  * check if route resolves to BNX2 device
1670  */
1671 static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1672 {
1673 	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1674 	struct bnx2i_hba *hba;
1675 	struct cnic_dev *cnic = NULL;
1676 
1677 	hba = get_adapter_list_head();
1678 	if (hba && hba->cnic)
1679 		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1680 	if (!cnic) {
1681 		printk(KERN_ALERT "bnx2i: no route, "
1682 		       "can't connect using cnic\n");
1683 		goto no_nx2_route;
1684 	}
1685 	hba = bnx2i_find_hba_for_cnic(cnic);
1686 	if (!hba)
1687 		goto no_nx2_route;
1688 
1689 	if (bnx2i_adapter_ready(hba)) {
1690 		printk(KERN_ALERT "bnx2i: check route, hba not ready\n");
1691 		goto no_nx2_route;
1692 	}
1693 	if (hba->netdev->mtu > hba->mtu_supported) {
1694 		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1695 				  hba->netdev->name, hba->netdev->mtu);
1696 		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1697 				  hba->mtu_supported);
1698 		goto no_nx2_route;
1699 	}
1700 	return hba;
1701 no_nx2_route:
1702 	return NULL;
1703 }
1704 
1705 
1706 /**
1707  * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
1708  * @hba:	pointer to adapter instance
1709  * @ep:		endpoint (transport identifier) structure
1710  *
1711  * destroys the cm_sock structure and the on-chip iscsi context
1712  */
1713 static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1714 				 struct bnx2i_endpoint *ep)
1715 {
1716 	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
1717 		hba->cnic->cm_destroy(ep->cm_sk);
1718 
1719 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1720 	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1721 		if (ep->conn && ep->conn->cls_conn &&
1722 		    ep->conn->cls_conn->dd_data) {
1723 			struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
1724 
1725 			/* Must suspend all rx queue activity for this ep */
1726 			set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1727 		}
1728 		/* CONN_DISCONNECT timeout may or may not be an issue depending
1729 		 * on what transpired in the TCP layer; different targets behave
1730 		 * differently
1731 		 */
1732 		printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
1733 				  "please submit GRC Dump, NW/PCIe trace, "
1734 				  "driver msgs to developers for analysis\n",
1735 				  hba->netdev->name);
1736 	}
1737 
1738 	ep->state = EP_STATE_CLEANUP_START;
1739 	timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1740 	ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
1741 	add_timer(&ep->ofld_timer);
1742 
1743 	bnx2i_ep_destroy_list_add(hba, ep);
1744 
1745 	/* destroy iSCSI context, wait for it to complete */
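	/* If the destroy request cannot even be sent, force CLEANUP_CMPL so
	 * the wait below falls through immediately.
	 */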
1746 	if (bnx2i_send_conn_destroy(hba, ep))
1747 		ep->state = EP_STATE_CLEANUP_CMPL;
1748 
1749 	wait_event_interruptible(ep->ofld_wait,
1750 				 (ep->state != EP_STATE_CLEANUP_START));
1751 
1752 	if (signal_pending(current))
1753 		flush_signals(current);
1754 	del_timer_sync(&ep->ofld_timer);
1755 
1756 	bnx2i_ep_destroy_list_del(hba, ep);
1757 
1758 	if (ep->state != EP_STATE_CLEANUP_CMPL)
1759 		/* should never happen */
1760 		printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1761 
1762 	return 0;
1763 }
1764 
1765 
1766 /**
1767  * bnx2i_ep_connect - establish TCP connection to target portal
1768  * @shost:		scsi host
1769  * @dst_addr:		target IP address
1770  * @non_blocking:	blocking or non-blocking call
1771  *
1772  * this routine initiates the TCP/IP connection by invoking the Option-2 i/f
1773  *	with l5_core and the CNIC. This is a multi-step process of resolving the
1774  *	route to the target, creating an iscsi connection context, handshaking
1775  *	with the CNIC module to create/initialize the socket struct, and finally
1776  *	sending down the option-2 request to complete the TCP 3-way handshake
1777  */
1778 static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1779 					       struct sockaddr *dst_addr,
1780 					       int non_blocking)
1781 {
1782 	u32 iscsi_cid = BNX2I_CID_RESERVED;
1783 	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1784 	struct sockaddr_in6 *desti6;
1785 	struct bnx2i_endpoint *bnx2i_ep;
1786 	struct bnx2i_hba *hba;
1787 	struct cnic_dev *cnic;
1788 	struct cnic_sockaddr saddr;
1789 	struct iscsi_endpoint *ep;
1790 	int rc = 0;
1791 
1792 	if (shost) {
1793 		/* driver is given scsi host to work with */
1794 		hba = iscsi_host_priv(shost);
1795 	} else
1796 		/*
1797 		 * check if the given destination can be reached through
1798 		 * an iscsi-capable NetXtreme2 device
1799 		 */
1800 		hba = bnx2i_check_route(dst_addr);
1801 
1802 	if (!hba) {
1803 		rc = -EINVAL;
1804 		goto nohba;
1805 	}
1806 	mutex_lock(&hba->net_dev_lock);
1807 
1808 	if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
1809 		rc = -EPERM;
1810 		goto check_busy;
1811 	}
1812 	cnic = hba->cnic;
1813 	ep = bnx2i_alloc_ep(hba);
1814 	if (!ep) {
1815 		rc = -ENOMEM;
1816 		goto check_busy;
1817 	}
1818 	bnx2i_ep = ep->dd_data;
1819 
1820 	atomic_set(&bnx2i_ep->num_active_cmds, 0);
1821 	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1822 	if (iscsi_cid == -1) {
1823 		printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
1824 			"iscsi cid\n", hba->netdev->name);
1825 		rc = -ENOMEM;
1826 		bnx2i_free_ep(ep);
1827 		goto check_busy;
1828 	}
1829 	bnx2i_ep->hba_age = hba->age;
1830 
1831 	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1832 	if (rc != 0) {
1833 		printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
1834 			"\n", hba->netdev->name);
1835 		rc = -ENOMEM;
1836 		goto qp_resc_err;
1837 	}
1838 
1839 	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1840 	bnx2i_ep->state = EP_STATE_OFLD_START;
1841 	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1842 
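	/* Arm a 2-second guard timer for the offload request; the timer
	 * handler is expected to fail the offload and wake up ofld_wait if
	 * no completion arrives from the CNIC.
	 */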
1843 	timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1844 	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1845 	add_timer(&bnx2i_ep->ofld_timer);
1846 
1847 	if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
1848 		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1849 			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1850 				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1851 			rc = -EBUSY;
1852 		} else
1853 			rc = -ENOSPC;
1854 		printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
1855 			"\n", hba->netdev->name);
1856 		bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1857 		goto conn_failed;
1858 	}
1859 
1860 	/* Wait for the CNIC hardware to set up the conn context and return 'cid' */
1861 	wait_event_interruptible(bnx2i_ep->ofld_wait,
1862 				 bnx2i_ep->state != EP_STATE_OFLD_START);
1863 
1864 	if (signal_pending(current))
1865 		flush_signals(current);
1866 	del_timer_sync(&bnx2i_ep->ofld_timer);
1867 
1868 	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1869 
1870 	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1871 		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1872 			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1873 				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1874 			rc = -EBUSY;
1875 		} else
1876 			rc = -ENOSPC;
1877 		goto conn_failed;
1878 	}
1879 
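	/* Create the CNIC-managed TCP socket (cm_sk) for this offloaded
	 * connection, tied to the cid returned by the offload completion.
	 */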
1880 	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1881 			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1882 	if (rc) {
1883 		rc = -EINVAL;
1884 		/* Need to terminate and cleanup the connection */
1885 		goto release_ep;
1886 	}
1887 
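	/* Offloaded socket tuning: 256KB send/receive buffers, TCP timestamp
	 * option disabled.
	 */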
1888 	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1889 	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1890 	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1891 
1892 	memset(&saddr, 0, sizeof(saddr));
1893 	if (dst_addr->sa_family == AF_INET) {
1894 		desti = (struct sockaddr_in *) dst_addr;
1895 		saddr.remote.v4 = *desti;
1896 		saddr.local.v4.sin_family = desti->sin_family;
1897 	} else if (dst_addr->sa_family == AF_INET6) {
1898 		desti6 = (struct sockaddr_in6 *) dst_addr;
1899 		saddr.remote.v6 = *desti6;
1900 		saddr.local.v6.sin6_family = desti6->sin6_family;
1901 	}
1902 
1903 	bnx2i_ep->timestamp = jiffies;
1904 	bnx2i_ep->state = EP_STATE_CONNECT_START;
1905 	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1906 		rc = -EINVAL;
1907 		goto conn_failed;
1908 	} else
1909 		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1910 	if (rc)
1911 		goto release_ep;
1912 
1913 	bnx2i_ep_active_list_add(hba, bnx2i_ep);
1914 
1915 	rc = bnx2i_map_ep_dbell_regs(bnx2i_ep);
1916 	if (rc)
1917 		goto del_active_ep;
1918 
1919 	mutex_unlock(&hba->net_dev_lock);
1920 	return ep;
1921 
1922 del_active_ep:
1923 	bnx2i_ep_active_list_del(hba, bnx2i_ep);
1924 release_ep:
1925 	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1926 		mutex_unlock(&hba->net_dev_lock);
1927 		return ERR_PTR(rc);
1928 	}
1929 conn_failed:
1930 	bnx2i_free_qp_resc(hba, bnx2i_ep);
1931 qp_resc_err:
1932 	bnx2i_free_ep(ep);
1933 check_busy:
1934 	mutex_unlock(&hba->net_dev_lock);
1935 nohba:
1936 	return ERR_PTR(rc);
1937 }
1938 
1939 
1940 /**
1941  * bnx2i_ep_poll - polls for TCP connection establishment
1942  * @ep:			TCP connection (endpoint) handle
1943  * @timeout_ms:		timeout value in milliseconds
1944  *
1945  * polls for TCP connect request to complete
1946  */
1947 static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1948 {
1949 	struct bnx2i_endpoint *bnx2i_ep;
1950 	int rc = 0;
1951 
1952 	bnx2i_ep = ep->dd_data;
1953 	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1954 	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1955 	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1956 		return -1;
1957 	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1958 		return 1;
1959 
1960 	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1961 					      ((bnx2i_ep->state ==
1962 						EP_STATE_OFLD_FAILED) ||
1963 					       (bnx2i_ep->state ==
1964 						EP_STATE_CONNECT_FAILED) ||
1965 					       (bnx2i_ep->state ==
1966 						EP_STATE_CONNECT_COMPL)),
1967 					      msecs_to_jiffies(timeout_ms));
1968 	if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
1969 		rc = -1;
1970 
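	/* rc > 0: the wait condition was met before the timeout (unless it
	 * was remapped to -1 above on offload failure), rc == 0: timed out,
	 * rc < 0: interrupted by a signal.
	 */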
1971 	if (rc > 0)
1972 		return 1;
1973 	else if (!rc)
1974 		return 0;	/* timeout */
1975 	else
1976 		return rc;
1977 }
1978 
1979 
1980 /**
1981  * bnx2i_ep_tcp_conn_active - check if underlying TCP connection is active
1982  * @bnx2i_ep:		endpoint pointer
1983  *
1984  * maps the current endpoint state to whether the TCP connection is active
1985  */
1986 static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1987 {
1988 	int ret;
1989 	int cnic_dev_10g = 0;
1990 
1991 	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1992 		cnic_dev_10g = 1;
1993 
1994 	switch (bnx2i_ep->state) {
1995 	case EP_STATE_CLEANUP_FAILED:
1996 	case EP_STATE_OFLD_FAILED:
1997 	case EP_STATE_DISCONN_TIMEDOUT:
1998 		ret = 0;
1999 		break;
2000 	case EP_STATE_CONNECT_START:
2001 	case EP_STATE_CONNECT_FAILED:
2002 	case EP_STATE_CONNECT_COMPL:
2003 	case EP_STATE_ULP_UPDATE_START:
2004 	case EP_STATE_ULP_UPDATE_COMPL:
2005 	case EP_STATE_TCP_FIN_RCVD:
2006 	case EP_STATE_LOGOUT_SENT:
2007 	case EP_STATE_LOGOUT_RESP_RCVD:
2008 	case EP_STATE_ULP_UPDATE_FAILED:
2009 		ret = 1;
2010 		break;
2011 	case EP_STATE_TCP_RST_RCVD:
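		/* A received RST is treated as "connection already gone" only
		 * on 57710 (10G) devices; 1G devices still go through the
		 * normal close/abort path.
		 */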
2012 		if (cnic_dev_10g)
2013 			ret = 0;
2014 		else
2015 			ret = 1;
2016 		break;
2017 	default:
2018 		ret = 0;
2019 	}
2020 
2021 	return ret;
2022 }
2023 
2024 
2025 /**
2026  * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
2027  * @bnx2i_ep:		TCP connection (bnx2i endpoint) handle
2028  *
2029  * executes the TCP connection teardown process
2030  */
2031 int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
2032 {
2033 	struct bnx2i_hba *hba = bnx2i_ep->hba;
2034 	struct cnic_dev *cnic;
2035 	struct iscsi_session *session = NULL;
2036 	struct iscsi_conn *conn = NULL;
2037 	int ret = 0;
2038 	int close = 0;
2039 	int close_ret = 0;
2040 
2041 	if (!hba)
2042 		return 0;
2043 
2044 	cnic = hba->cnic;
2045 	if (!cnic)
2046 		return 0;
2047 
2048 	if (bnx2i_ep->state == EP_STATE_IDLE ||
2049 	    bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2050 		return 0;
2051 
2052 	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
2053 		goto destroy_conn;
2054 
2055 	if (bnx2i_ep->conn) {
2056 		conn = bnx2i_ep->conn->cls_conn->dd_data;
2057 		session = conn->session;
2058 	}
2059 
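	/* Bound the disconnect by conn_teardown_tmo; the timer handler is
	 * expected to advance ep->state and wake up ofld_wait on expiry.
	 */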
2060 	timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
2061 	bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
2062 	add_timer(&bnx2i_ep->ofld_timer);
2063 
2064 	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
2065 		goto out;
2066 
2067 	if (session) {
2068 		spin_lock_bh(&session->frwd_lock);
2069 		if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
2070 			if (session->state == ISCSI_STATE_LOGGING_OUT) {
2071 				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
2072 					/* Logout sent, but no resp */
2073 					printk(KERN_ALERT "bnx2i (%s): WARNING"
2074 						" logout response was not "
2075 						"received!\n",
2076 						bnx2i_ep->hba->netdev->name);
2077 				} else if (bnx2i_ep->state ==
2078 					   EP_STATE_LOGOUT_RESP_RCVD)
2079 					close = 1;
2080 			}
2081 		} else
2082 			close = 1;
2083 
2084 		spin_unlock_bh(&session->frwd_lock);
2085 	}
2086 
2087 	bnx2i_ep->state = EP_STATE_DISCONN_START;
2088 
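	/* Graceful close only if the target already sent a FIN or a logout
	 * response was received; otherwise abort the offloaded connection.
	 */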
2089 	if (close)
2090 		close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
2091 	else
2092 		close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2093 
2094 	if (close_ret)
2095 		printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
2096 			bnx2i_ep->hba->netdev->name, close, close_ret);
2097 	else
2098 		/* wait for option-2 conn teardown */
2099 		wait_event_interruptible(bnx2i_ep->ofld_wait,
2100 				((bnx2i_ep->state != EP_STATE_DISCONN_START)
2101 				&& (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD)));
2102 
2103 	if (signal_pending(current))
2104 		flush_signals(current);
2105 	del_timer_sync(&bnx2i_ep->ofld_timer);
2106 
2107 destroy_conn:
2108 	bnx2i_ep_active_list_del(hba, bnx2i_ep);
2109 	if (bnx2i_tear_down_conn(hba, bnx2i_ep))
2110 		return -EINVAL;
2111 out:
2112 	bnx2i_ep->state = EP_STATE_IDLE;
2113 	return ret;
2114 }
2115 
2116 
2117 /**
2118  * bnx2i_ep_disconnect - executes TCP connection teardown process
2119  * @ep:		TCP connection (iscsi endpoint) handle
2120  *
2121  * executes the TCP connection teardown process
2122  */
2123 static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
2124 {
2125 	struct bnx2i_endpoint *bnx2i_ep;
2126 	struct bnx2i_conn *bnx2i_conn = NULL;
2127 	struct iscsi_conn *conn = NULL;
2128 	struct bnx2i_hba *hba;
2129 
2130 	bnx2i_ep = ep->dd_data;
2131 
2132 	/* The driver should not attempt connection cleanup until TCP_CONNECT
2133 	 * completes, either successfully or with a failure. That timeout is
2134 	 * 9 secs, so poll for up to 12 secs here before giving up.
2135 	 */
2136 	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
2137 		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
2138 		msleep(250);
2139 
2140 	if (bnx2i_ep->conn) {
2141 		bnx2i_conn = bnx2i_ep->conn;
2142 		conn = bnx2i_conn->cls_conn->dd_data;
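		/* Keep libiscsi from queueing further PDUs on this connection
		 * while the endpoint is being torn down.
		 */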
2143 		iscsi_suspend_queue(conn);
2144 	}
2145 	hba = bnx2i_ep->hba;
2146 
2147 	mutex_lock(&hba->net_dev_lock);
2148 
2149 	if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2150 		goto out;
2151 
2152 	if (bnx2i_ep->state == EP_STATE_IDLE)
2153 		goto free_resc;
2154 
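	/* If the adapter is down or has been reset since this ep was created
	 * (hba_age mismatch), the on-chip context is presumably gone already;
	 * skip the hardware teardown and just free the host-side resources.
	 */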
2155 	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
2156 	    (bnx2i_ep->hba_age != hba->age)) {
2157 		bnx2i_ep_active_list_del(hba, bnx2i_ep);
2158 		goto free_resc;
2159 	}
2160 
2161 	/* Do all chip cleanup here */
2162 	if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
2163 		mutex_unlock(&hba->net_dev_lock);
2164 		return;
2165 	}
2166 free_resc:
2167 	bnx2i_free_qp_resc(hba, bnx2i_ep);
2168 
2169 	if (bnx2i_conn)
2170 		bnx2i_conn->ep = NULL;
2171 
2172 	bnx2i_free_ep(ep);
2173 out:
2174 	mutex_unlock(&hba->net_dev_lock);
2175 
2176 	wake_up_interruptible(&hba->eh_wait);
2177 }
2178 
2179 
2180 /**
2181  * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
2182  * @shost:	scsi host pointer
2183  * @params:	pointer to buffer containing iscsi path message
2184  */
2185 static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
2186 {
2187 	struct bnx2i_hba *hba = iscsi_host_priv(shost);
2188 	char *buf = (char *) params;
2189 	u16 len = sizeof(*params);
2190 
2191 	/* handled by cnic driver */
2192 	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
2193 				     len);
2194 
2195 	return 0;
2196 }
2197 
2198 static umode_t bnx2i_attr_is_visible(int param_type, int param)
2199 {
2200 	switch (param_type) {
2201 	case ISCSI_HOST_PARAM:
2202 		switch (param) {
2203 		case ISCSI_HOST_PARAM_NETDEV_NAME:
2204 		case ISCSI_HOST_PARAM_HWADDRESS:
2205 		case ISCSI_HOST_PARAM_IPADDRESS:
2206 			return S_IRUGO;
2207 		default:
2208 			return 0;
2209 		}
2210 	case ISCSI_PARAM:
2211 		switch (param) {
2212 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
2213 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2214 		case ISCSI_PARAM_HDRDGST_EN:
2215 		case ISCSI_PARAM_DATADGST_EN:
2216 		case ISCSI_PARAM_CONN_ADDRESS:
2217 		case ISCSI_PARAM_CONN_PORT:
2218 		case ISCSI_PARAM_EXP_STATSN:
2219 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
2220 		case ISCSI_PARAM_PERSISTENT_PORT:
2221 		case ISCSI_PARAM_PING_TMO:
2222 		case ISCSI_PARAM_RECV_TMO:
2223 		case ISCSI_PARAM_INITIAL_R2T_EN:
2224 		case ISCSI_PARAM_MAX_R2T:
2225 		case ISCSI_PARAM_IMM_DATA_EN:
2226 		case ISCSI_PARAM_FIRST_BURST:
2227 		case ISCSI_PARAM_MAX_BURST:
2228 		case ISCSI_PARAM_PDU_INORDER_EN:
2229 		case ISCSI_PARAM_DATASEQ_INORDER_EN:
2230 		case ISCSI_PARAM_ERL:
2231 		case ISCSI_PARAM_TARGET_NAME:
2232 		case ISCSI_PARAM_TPGT:
2233 		case ISCSI_PARAM_USERNAME:
2234 		case ISCSI_PARAM_PASSWORD:
2235 		case ISCSI_PARAM_USERNAME_IN:
2236 		case ISCSI_PARAM_PASSWORD_IN:
2237 		case ISCSI_PARAM_FAST_ABORT:
2238 		case ISCSI_PARAM_ABORT_TMO:
2239 		case ISCSI_PARAM_LU_RESET_TMO:
2240 		case ISCSI_PARAM_TGT_RESET_TMO:
2241 		case ISCSI_PARAM_IFACE_NAME:
2242 		case ISCSI_PARAM_INITIATOR_NAME:
2243 		case ISCSI_PARAM_BOOT_ROOT:
2244 		case ISCSI_PARAM_BOOT_NIC:
2245 		case ISCSI_PARAM_BOOT_TARGET:
2246 			return S_IRUGO;
2247 		default:
2248 			return 0;
2249 		}
2250 	}
2251 
2252 	return 0;
2253 }
2254 
2255 /*
2256  * 'scsi_host_template' structure and 'iscsi_transport' structure template
2257  * used while registering with the scsi host and iSCSI transport module.
2258  */
2259 static struct scsi_host_template bnx2i_host_template = {
2260 	.module			= THIS_MODULE,
2261 	.name			= "QLogic Offload iSCSI Initiator",
2262 	.proc_name		= "bnx2i",
2263 	.queuecommand		= iscsi_queuecommand,
2264 	.eh_timed_out		= iscsi_eh_cmd_timed_out,
2265 	.eh_abort_handler	= iscsi_eh_abort,
2266 	.eh_device_reset_handler = iscsi_eh_device_reset,
2267 	.eh_target_reset_handler = iscsi_eh_recover_target,
2268 	.change_queue_depth	= scsi_change_queue_depth,
2269 	.target_alloc		= iscsi_target_alloc,
2270 	.can_queue		= 2048,
2271 	.max_sectors		= 127,
2272 	.cmd_per_lun		= 128,
2273 	.this_id		= -1,
2274 	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
2275 	.shost_attrs		= bnx2i_dev_attributes,
2276 	.track_queue_depth	= 1,
2277 };
2278 
2279 struct iscsi_transport bnx2i_iscsi_transport = {
2280 	.owner			= THIS_MODULE,
2281 	.name			= "bnx2i",
2282 	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
2283 				  CAP_MULTI_R2T | CAP_DATADGST |
2284 				  CAP_DATA_PATH_OFFLOAD |
2285 				  CAP_TEXT_NEGO,
2286 	.create_session		= bnx2i_session_create,
2287 	.destroy_session	= bnx2i_session_destroy,
2288 	.create_conn		= bnx2i_conn_create,
2289 	.bind_conn		= bnx2i_conn_bind,
2290 	.unbind_conn		= iscsi_conn_unbind,
2291 	.destroy_conn		= bnx2i_conn_destroy,
2292 	.attr_is_visible	= bnx2i_attr_is_visible,
2293 	.set_param		= iscsi_set_param,
2294 	.get_conn_param		= iscsi_conn_get_param,
2295 	.get_session_param	= iscsi_session_get_param,
2296 	.get_host_param		= bnx2i_host_get_param,
2297 	.start_conn		= bnx2i_conn_start,
2298 	.stop_conn		= iscsi_conn_stop,
2299 	.send_pdu		= iscsi_conn_send_pdu,
2300 	.xmit_task		= bnx2i_task_xmit,
2301 	.get_stats		= bnx2i_conn_get_stats,
2302 	/* TCP connect - disconnect - option-2 interface calls */
2303 	.get_ep_param		= bnx2i_ep_get_param,
2304 	.ep_connect		= bnx2i_ep_connect,
2305 	.ep_poll		= bnx2i_ep_poll,
2306 	.ep_disconnect		= bnx2i_ep_disconnect,
2307 	.set_path		= bnx2i_nl_set_path,
2308 	/* Error recovery timeout call */
2309 	.session_recovery_timedout = iscsi_session_recovery_timedout,
2310 	.cleanup_task		= bnx2i_cleanup_task,
2311 };
2312