/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");


/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);

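/* A software tag packs two fields: bits 0-14 hold the age, bits 16-30 the entry index. */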
static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age)
{
	if (age)
		*age = sw_tag & 0x7FFF;
	if (idx)
		*idx = (sw_tag >> 16) & 0x7FFF;
}

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = kvzalloc(array_size(max_conn,
					     sizeof(struct cxgbi_sock *)),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);

static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->cdev2ppm)
		cxgbi_ppm_release(cdev->cdev2ppm(cdev));
	if (cdev->pmap.max_connect)
		kvfree(cdev->pmap.port_csk);
	kfree(cdev);
}

struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
	spin_unlock(&cdev_rcu_lock);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);

void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");

	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_del_rcu(&cdev->rcu_node);
	spin_unlock(&cdev_rcu_lock);
	synchronize_rcu();

	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);

void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			mutex_unlock(&cdev_mutex);
			cxgbi_device_unregister(cdev);
			mutex_lock(&cdev_mutex);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);

struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);

struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
						 int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);

struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				rcu_read_unlock();
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	rcu_read_unlock();

	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);

static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
				    MAX_ADDR_LEN)) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match mac found.\n",
		  ndev, ndev->name);
	return NULL;
}

void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
		unsigned int max_conns, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_conns - 1;
		shost->max_channel = 0;
		shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);

/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified, make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme, just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */

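/* Return an active connection already bound to the given adapter port, if any. */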
static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
					    unsigned char port_id)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int i;
	unsigned int used;

	if (!pmap->max_connect || !pmap->used)
		return NULL;

	spin_lock_bh(&pmap->lock);
	used = pmap->used;
	for (i = 0; used && i < pmap->max_connect; i++) {
		struct cxgbi_sock *csk = pmap->port_csk[i];

		if (csk) {
			if (csk->port_id == port_id) {
				spin_unlock_bh(&pmap->lock);
				return csk;
			}
			used--;
		}
	}
	spin_unlock_bh(&pmap->lock);

	return NULL;
}

static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;
	__be16 *port;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
		       cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(*port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			*port = htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}

static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	__be16 *port;

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		int idx = ntohs(*port) - pmap->sport_base;

		*port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				ntohs(*port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}

/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	timer_setup(&csk->retry_timer, NULL, 0);
	init_completion(&csk->cmpl);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}

static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos,
				      int ifindex)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, ifindex);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}

static struct cxgbi_sock *
cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct neighbour *n;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
			     daddr->sin_port, 0, ifindex);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			be32_to_cpu(daddr->sin_addr.s_addr),
			be16_to_cpu(daddr->sin_port));
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
	if (!n) {
		err = -ENODEV;
		goto rel_rt;
	}
	ndev = n->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		if (!ndev) {
			err = -ENETUNREACH;
			goto rel_neigh;
		}
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			n->dev->name, ndev->name, mtu);
	}

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_neigh;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_neigh;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	csk->csk_family = AF_INET;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;
	neigh_release(n);

	return csk;

rel_neigh:
	neigh_release(n);

rel_rt:
	ip_rt_put(rt);
err_out:
	return ERR_PTR(err);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
					const struct in6_addr *daddr,
					int ifindex)
{
	struct flowi6 fl;

	memset(&fl, 0, sizeof(fl));
	fl.flowi6_oif = ifindex;
	if (saddr)
		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
}

static struct cxgbi_sock *
cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rt6_info *rt = NULL;
	struct neighbour *n;
	struct in6_addr pref_saddr;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex);

	if (!rt) {
		pr_info("no route to ipv6 %pI6 port %u\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto err_out;
	}

	dst = &rt->dst;

	n = dst_neigh_lookup(dst, &daddr6->sin6_addr);

	if (!n) {
		pr_info("%pI6, port %u, dst no neighbour.\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto rel_rt;
	}
	ndev = n->dev;

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_rt;
	}

	if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
		pr_info("multi-cast route %pI6 port %u, dev %s.\n",
			daddr6->sin6_addr.s6_addr,
			ntohs(daddr6->sin6_port), ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI6 %s, NOT cxgbi device.\n",
			daddr6->sin6_addr.s6_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		  "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		  daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
		  ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	rt6_get_prefsrc(rt, &pref_saddr);
	if (ipv6_addr_any(&pref_saddr)) {
		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);

		err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
					 &daddr6->sin6_addr, 0, &pref_saddr);
		if (err) {
			pr_info("failed to get source address to reach %pI6\n",
				&daddr6->sin6_addr);
			goto rel_rt;
		}
	}

	csk->csk_family = AF_INET6;
	csk->daddr6.sin6_addr = daddr6->sin6_addr;
	csk->daddr6.sin6_port = daddr6->sin6_port;
	csk->daddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_addr = pref_saddr;

	neigh_release(n);
	return csk;

rel_rt:
	if (n)
		neigh_release(n);

	ip6_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);

static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_TCP_CONN_CLOSE);
		read_unlock_bh(&csk->callback_lock);
	}
}

void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);

static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	if (csk->dst)
		dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
		    data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}

void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
			csk, csk->state, csk->flags,
			&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
			&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
			errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);

void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
	struct module *owner = csk->cdev->owner;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);

	module_put(owner);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);

void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
		if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
			pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
			       csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);

void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);

void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);

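/*
 * In the pending-WR queue walked below, skb->csum is (re)used to record the
 * number of firmware WR credits charged for that skb, not a checksum.
 */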
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);

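/* cxgbi_sock_select_mss() below subtracts 40 bytes: the minimum IPv4 + TCP header overhead. */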
static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}

unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);

void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);

void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);

static inline void
scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
		unsigned int *sgcnt, unsigned int *dlen,
		unsigned int prot)
{
	struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb;

	*sgl = sdb->table.sgl;
	*sgcnt = sdb->table.nents;
	*dlen = sdb->length;
	/* Caution: for protection sdb, sdb->length is invalid */
}

void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod,
			    struct cxgbi_task_tag_info *ttinfo,
			    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		sg = sg_next(sg);
		if (sg) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);

/*
 * APIs interacting with open-iscsi libraries
 */

int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
			struct cxgbi_tag_format *tformat,
			unsigned int iscsi_size, unsigned int llimit,
			unsigned int start, unsigned int rsvd_factor,
			unsigned int edram_start, unsigned int edram_size)
{
	int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
				cdev->lldev, tformat, iscsi_size, llimit, start,
				rsvd_factor, edram_start, edram_size);

	if (err >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);

		if (ppm->ppmax < 1024 ||
		    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		err = 0;
1230*4882a593Smuzhiyun 	} else {
1231*4882a593Smuzhiyun 		cdev->flags |= CXGBI_FLAG_DDP_OFF;
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	return err;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
1237*4882a593Smuzhiyun 
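/*
 * A scatterlist is usable for DDP only if each entry's offset is 4-byte
 * aligned, only the first entry starts at a non-zero offset, and every
 * entry except the last ends exactly on a page boundary.
 */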
cxgbi_ddp_sgl_check(struct scatterlist * sgl,int nents)1238*4882a593Smuzhiyun static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun 	int i;
1241*4882a593Smuzhiyun 	int last_sgidx = nents - 1;
1242*4882a593Smuzhiyun 	struct scatterlist *sg = sgl;
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
1245*4882a593Smuzhiyun 		unsigned int len = sg->length + sg->offset;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 		if ((sg->offset & 0x3) || (i && sg->offset) ||
1248*4882a593Smuzhiyun 		    ((i != last_sgidx) && len != PAGE_SIZE)) {
1249*4882a593Smuzhiyun 			log_debug(1 << CXGBI_DBG_DDP,
1250*4882a593Smuzhiyun 				  "sg %u/%u, %u,%u, not aligned.\n",
1251*4882a593Smuzhiyun 				  i, nents, sg->offset, sg->length);
1252*4882a593Smuzhiyun 			goto err_out;
1253*4882a593Smuzhiyun 		}
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 	return 0;
1256*4882a593Smuzhiyun err_out:
1257*4882a593Smuzhiyun 	return -EINVAL;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun 
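/*
 * Try to set up direct data placement (DDP) for a read: reserve enough
 * pagepods for the transfer, DMA-map the command's scatterlist, build
 * the pagepod header carrying the DDP tag, and either mark the pagepods
 * to be written from the xmit path (offload queue) or program them
 * immediately through the control queue.  On failure the caller falls
 * back to a regular (non-DDP) tag.
 */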
cxgbi_ddp_reserve(struct cxgbi_conn * cconn,struct cxgbi_task_data * tdata,u32 sw_tag,unsigned int xferlen)1260*4882a593Smuzhiyun static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
1261*4882a593Smuzhiyun 			     struct cxgbi_task_data *tdata, u32 sw_tag,
1262*4882a593Smuzhiyun 			     unsigned int xferlen)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun 	struct cxgbi_sock *csk = cconn->cep->csk;
1265*4882a593Smuzhiyun 	struct cxgbi_device *cdev = csk->cdev;
1266*4882a593Smuzhiyun 	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1267*4882a593Smuzhiyun 	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
1268*4882a593Smuzhiyun 	struct scatterlist *sgl = ttinfo->sgl;
1269*4882a593Smuzhiyun 	unsigned int sgcnt = ttinfo->nents;
1270*4882a593Smuzhiyun 	unsigned int sg_offset = sgl->offset;
1271*4882a593Smuzhiyun 	int err;
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	if (cdev->flags & CXGBI_FLAG_DDP_OFF) {
1274*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_DDP,
1275*4882a593Smuzhiyun 			  "cdev 0x%p DDP off.\n", cdev);
1276*4882a593Smuzhiyun 		return -EINVAL;
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
1280*4882a593Smuzhiyun 	    ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
1281*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_DDP,
1282*4882a593Smuzhiyun 			  "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
1283*4882a593Smuzhiyun 			  ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX,
1284*4882a593Smuzhiyun 			  xferlen, ttinfo->nents);
1285*4882a593Smuzhiyun 		return -EINVAL;
1286*4882a593Smuzhiyun 	}
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	/* make sure the buffer is suitable for ddp */
1289*4882a593Smuzhiyun 	if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0)
1290*4882a593Smuzhiyun 		return -EINVAL;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >>
1293*4882a593Smuzhiyun 			    PAGE_SHIFT;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	/*
1296*4882a593Smuzhiyun 	 * the ddp tag will be used for the itt in the outgoing pdu,
1297*4882a593Smuzhiyun 	 * the itt generated by libiscsi is saved in the ppm and can be
1298*4882a593Smuzhiyun 	 * retrieved via the ddp tag
1299*4882a593Smuzhiyun 	 */
1300*4882a593Smuzhiyun 	err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
1301*4882a593Smuzhiyun 				      &ttinfo->tag, (unsigned long)sw_tag);
1302*4882a593Smuzhiyun 	if (err < 0) {
1303*4882a593Smuzhiyun 		cconn->ddp_full++;
1304*4882a593Smuzhiyun 		return err;
1305*4882a593Smuzhiyun 	}
1306*4882a593Smuzhiyun 	ttinfo->npods = err;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	/* set up DMA from the scsi command's sgl */
1309*4882a593Smuzhiyun 	sgl->offset = 0;
1310*4882a593Smuzhiyun 	err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
1311*4882a593Smuzhiyun 	sgl->offset = sg_offset;
1312*4882a593Smuzhiyun 	if (err == 0) {
1313*4882a593Smuzhiyun 		pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
1314*4882a593Smuzhiyun 			__func__, sw_tag, xferlen, sgcnt);
1315*4882a593Smuzhiyun 		goto rel_ppods;
1316*4882a593Smuzhiyun 	}
1317*4882a593Smuzhiyun 	if (err != ttinfo->nr_pages) {
1318*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_DDP,
1319*4882a593Smuzhiyun 			  "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n",
1320*4882a593Smuzhiyun 			  __func__, sw_tag, xferlen, sgcnt, err);
1321*4882a593Smuzhiyun 	}
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
1324*4882a593Smuzhiyun 	ttinfo->cid = csk->port_id;
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
1327*4882a593Smuzhiyun 				xferlen, &ttinfo->hdr);
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) {
1330*4882a593Smuzhiyun 		/* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
1331*4882a593Smuzhiyun 		ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
1332*4882a593Smuzhiyun 	} else {
1333*4882a593Smuzhiyun 		/* write ppod from control queue now */
1334*4882a593Smuzhiyun 		err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
1335*4882a593Smuzhiyun 		if (err < 0)
1336*4882a593Smuzhiyun 			goto rel_ppods;
1337*4882a593Smuzhiyun 	}
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	return 0;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun rel_ppods:
1342*4882a593Smuzhiyun 	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
1345*4882a593Smuzhiyun 		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
1346*4882a593Smuzhiyun 		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
1347*4882a593Smuzhiyun 	}
1348*4882a593Smuzhiyun 	return -EINVAL;
1349*4882a593Smuzhiyun }
1350*4882a593Smuzhiyun 
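/*
 * Release the resources tied to a task's itt: if the tag was a DDP tag
 * of a read command, clear the pagepods (unless they are written via the
 * offload queue), release them and unmap the scatterlist.
 */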
task_release_itt(struct iscsi_task * task,itt_t hdr_itt)1351*4882a593Smuzhiyun static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
1352*4882a593Smuzhiyun {
1353*4882a593Smuzhiyun 	struct scsi_cmnd *sc = task->sc;
1354*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
1355*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
1356*4882a593Smuzhiyun 	struct cxgbi_device *cdev = cconn->chba->cdev;
1357*4882a593Smuzhiyun 	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1358*4882a593Smuzhiyun 	u32 tag = ntohl((__force u32)hdr_itt);
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_DDP,
1361*4882a593Smuzhiyun 		  "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
1362*4882a593Smuzhiyun 		  cdev, task, tag);
1363*4882a593Smuzhiyun 	if (sc && sc->sc_data_direction == DMA_FROM_DEVICE &&
1364*4882a593Smuzhiyun 	    cxgbi_ppm_is_ddp_tag(ppm, tag)) {
1365*4882a593Smuzhiyun 		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1366*4882a593Smuzhiyun 		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 		if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ))
1369*4882a593Smuzhiyun 			cdev->csk_ddp_clear_map(cdev, ppm, ttinfo);
1370*4882a593Smuzhiyun 		cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
1371*4882a593Smuzhiyun 		dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
1372*4882a593Smuzhiyun 			     DMA_FROM_DEVICE);
1373*4882a593Smuzhiyun 	}
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun 
cxgbi_build_sw_tag(u32 idx,u32 age)1376*4882a593Smuzhiyun static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age)
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun 	/* assume idx and age both are < 0x7FFF (32767) */
1379*4882a593Smuzhiyun 	return (idx << 16) | age;
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun 
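/*
 * Pick the itt for an outgoing pdu.  For read commands, first try to
 * reserve DDP resources and use the resulting DDP tag as the itt;
 * otherwise (or on failure) encode the sw tag built from task->itt and
 * the session age into a non-DDP tag.
 */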
task_reserve_itt(struct iscsi_task * task,itt_t * hdr_itt)1382*4882a593Smuzhiyun static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun 	struct scsi_cmnd *sc = task->sc;
1385*4882a593Smuzhiyun 	struct iscsi_conn *conn = task->conn;
1386*4882a593Smuzhiyun 	struct iscsi_session *sess = conn->session;
1387*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1388*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
1389*4882a593Smuzhiyun 	struct cxgbi_device *cdev = cconn->chba->cdev;
1390*4882a593Smuzhiyun 	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1391*4882a593Smuzhiyun 	u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
1392*4882a593Smuzhiyun 	u32 tag = 0;
1393*4882a593Smuzhiyun 	int err = -EINVAL;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
1396*4882a593Smuzhiyun 		struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1397*4882a593Smuzhiyun 		struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 		scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents,
1400*4882a593Smuzhiyun 				&tdata->dlen, 0);
1401*4882a593Smuzhiyun 		err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen);
1402*4882a593Smuzhiyun 		if (!err)
1403*4882a593Smuzhiyun 			tag = ttinfo->tag;
1404*4882a593Smuzhiyun 		else
1405*4882a593Smuzhiyun 			 log_debug(1 << CXGBI_DBG_DDP,
1406*4882a593Smuzhiyun 				   "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
1407*4882a593Smuzhiyun 				   cconn->cep->csk, task, tdata->dlen,
1408*4882a593Smuzhiyun 				   ttinfo->nents);
1409*4882a593Smuzhiyun 	}
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	if (err < 0) {
1412*4882a593Smuzhiyun 		err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
1413*4882a593Smuzhiyun 		if (err < 0)
1414*4882a593Smuzhiyun 			return err;
1415*4882a593Smuzhiyun 	}
1416*4882a593Smuzhiyun 	/* the itt needs to be sent in big-endian order */
1417*4882a593Smuzhiyun 	*hdr_itt = (__force itt_t)htonl(tag);
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_DDP,
1420*4882a593Smuzhiyun 		  "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
1421*4882a593Smuzhiyun 		  cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
1422*4882a593Smuzhiyun 	return 0;
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun 
cxgbi_parse_pdu_itt(struct iscsi_conn * conn,itt_t itt,int * idx,int * age)1425*4882a593Smuzhiyun void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
1426*4882a593Smuzhiyun {
1427*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1428*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
1429*4882a593Smuzhiyun 	struct cxgbi_device *cdev = cconn->chba->cdev;
1430*4882a593Smuzhiyun 	struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1431*4882a593Smuzhiyun 	u32 tag = ntohl((__force u32)itt);
1432*4882a593Smuzhiyun 	u32 sw_bits;
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	if (ppm) {
1435*4882a593Smuzhiyun 		if (cxgbi_ppm_is_ddp_tag(ppm, tag))
1436*4882a593Smuzhiyun 			sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
1437*4882a593Smuzhiyun 		else
1438*4882a593Smuzhiyun 			sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
1439*4882a593Smuzhiyun 	} else {
1440*4882a593Smuzhiyun 		sw_bits = tag;
1441*4882a593Smuzhiyun 	}
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	cxgbi_decode_sw_tag(sw_bits, idx, age);
1444*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_DDP,
1445*4882a593Smuzhiyun 		  "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
1446*4882a593Smuzhiyun 		  cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
1447*4882a593Smuzhiyun 		  age ? *age : 0xFF);
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
1450*4882a593Smuzhiyun 
cxgbi_conn_tx_open(struct cxgbi_sock * csk)1451*4882a593Smuzhiyun void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
1452*4882a593Smuzhiyun {
1453*4882a593Smuzhiyun 	struct iscsi_conn *conn = csk->user_data;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	if (conn) {
1456*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_SOCK,
1457*4882a593Smuzhiyun 			"csk 0x%p, cid %d.\n", csk, conn->id);
1458*4882a593Smuzhiyun 		iscsi_conn_queue_work(conn);
1459*4882a593Smuzhiyun 	}
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun /*
1464*4882a593Smuzhiyun  * pdu receive, interact with libiscsi_tcp
1465*4882a593Smuzhiyun  */
read_pdu_skb(struct iscsi_conn * conn,struct sk_buff * skb,unsigned int offset,int offloaded)1466*4882a593Smuzhiyun static inline int read_pdu_skb(struct iscsi_conn *conn,
1467*4882a593Smuzhiyun 			       struct sk_buff *skb,
1468*4882a593Smuzhiyun 			       unsigned int offset,
1469*4882a593Smuzhiyun 			       int offloaded)
1470*4882a593Smuzhiyun {
1471*4882a593Smuzhiyun 	int status = 0;
1472*4882a593Smuzhiyun 	int bytes_read;
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
1475*4882a593Smuzhiyun 	switch (status) {
1476*4882a593Smuzhiyun 	case ISCSI_TCP_CONN_ERR:
1477*4882a593Smuzhiyun 		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
1478*4882a593Smuzhiyun 			  skb, offset, offloaded);
1479*4882a593Smuzhiyun 		return -EIO;
1480*4882a593Smuzhiyun 	case ISCSI_TCP_SUSPENDED:
1481*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_RX,
1482*4882a593Smuzhiyun 			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
1483*4882a593Smuzhiyun 			skb, offset, offloaded, bytes_read);
1484*4882a593Smuzhiyun 		/* no transfer - just have caller flush queue */
1485*4882a593Smuzhiyun 		return bytes_read;
1486*4882a593Smuzhiyun 	case ISCSI_TCP_SKB_DONE:
1487*4882a593Smuzhiyun 		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
1488*4882a593Smuzhiyun 			skb, offset, offloaded);
1489*4882a593Smuzhiyun 		/*
1490*4882a593Smuzhiyun 		 * pdus should always fit in the skb and we should get
1491*4882a593Smuzhiyun 		 * segment done notification.
1492*4882a593Smuzhiyun 		 */
1493*4882a593Smuzhiyun 		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
1494*4882a593Smuzhiyun 		return -EFAULT;
1495*4882a593Smuzhiyun 	case ISCSI_TCP_SEGMENT_DONE:
1496*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_RX,
1497*4882a593Smuzhiyun 			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
1498*4882a593Smuzhiyun 			skb, offset, offloaded, bytes_read);
1499*4882a593Smuzhiyun 		return bytes_read;
1500*4882a593Smuzhiyun 	default:
1501*4882a593Smuzhiyun 		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
1502*4882a593Smuzhiyun 			skb, offset, offloaded, status);
1503*4882a593Smuzhiyun 		return -EINVAL;
1504*4882a593Smuzhiyun 	}
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun 
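/*
 * Process the BHS of a received pdu: make sure libiscsi_tcp is expecting
 * a header, check the header-digest error flag set by the hardware,
 * propagate the completion datasn for DDP'ed data (T6 generates a
 * completion only for the last pdu of a sequence), then hand the header
 * to libiscsi_tcp.  A logout response is flagged on the socket via
 * CTPF_LOGOUT_RSP_RCVD.
 */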
1507*4882a593Smuzhiyun static int
skb_read_pdu_bhs(struct cxgbi_sock * csk,struct iscsi_conn * conn,struct sk_buff * skb)1508*4882a593Smuzhiyun skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
1509*4882a593Smuzhiyun 		 struct sk_buff *skb)
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1512*4882a593Smuzhiyun 	int err;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_PDU_RX,
1515*4882a593Smuzhiyun 		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1516*4882a593Smuzhiyun 		conn, skb, skb->len, cxgbi_skcb_flags(skb));
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
1519*4882a593Smuzhiyun 		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
1520*4882a593Smuzhiyun 		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
1521*4882a593Smuzhiyun 		return -EIO;
1522*4882a593Smuzhiyun 	}
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	if (conn->hdrdgst_en &&
1525*4882a593Smuzhiyun 	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
1526*4882a593Smuzhiyun 		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
1527*4882a593Smuzhiyun 		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
1528*4882a593Smuzhiyun 		return -EIO;
1529*4882a593Smuzhiyun 	}
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
1532*4882a593Smuzhiyun 	    cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
1533*4882a593Smuzhiyun 		/* If the completion flag is set and the data was directly
1534*4882a593Smuzhiyun 		 * placed into host memory, then update
1535*4882a593Smuzhiyun 		 * task->exp_datasn to the datasn in the completion
1536*4882a593Smuzhiyun 		 * iSCSI hdr, as the T6 adapter generates a completion only
1537*4882a593Smuzhiyun 		 * for the last pdu of a sequence.
1538*4882a593Smuzhiyun 		 */
1539*4882a593Smuzhiyun 		itt_t itt = ((struct iscsi_data *)skb->data)->itt;
1540*4882a593Smuzhiyun 		struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
1541*4882a593Smuzhiyun 		u32 data_sn = be32_to_cpu(((struct iscsi_data *)
1542*4882a593Smuzhiyun 							skb->data)->datasn);
1543*4882a593Smuzhiyun 		if (task && task->sc) {
1544*4882a593Smuzhiyun 			struct iscsi_tcp_task *tcp_task = task->dd_data;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 			tcp_task->exp_datasn = data_sn;
1547*4882a593Smuzhiyun 		}
1548*4882a593Smuzhiyun 	}
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	err = read_pdu_skb(conn, skb, 0, 0);
1551*4882a593Smuzhiyun 	if (likely(err >= 0)) {
1552*4882a593Smuzhiyun 		struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
1553*4882a593Smuzhiyun 		u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 		if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
1556*4882a593Smuzhiyun 			cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
1557*4882a593Smuzhiyun 	}
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	return err;
1560*4882a593Smuzhiyun }
1561*4882a593Smuzhiyun 
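/*
 * Process the data segment of a received pdu: check the data-digest
 * error flag, account for the header digest when header and data were
 * coalesced into one skb, and tell libiscsi_tcp whether the payload has
 * already been placed by DDP so the copy can be skipped.
 */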
skb_read_pdu_data(struct iscsi_conn * conn,struct sk_buff * lskb,struct sk_buff * skb,unsigned int offset)1562*4882a593Smuzhiyun static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
1563*4882a593Smuzhiyun 			     struct sk_buff *skb, unsigned int offset)
1564*4882a593Smuzhiyun {
1565*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1566*4882a593Smuzhiyun 	bool offloaded = false;
1567*4882a593Smuzhiyun 	int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_PDU_RX,
1570*4882a593Smuzhiyun 		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1571*4882a593Smuzhiyun 		conn, skb, skb->len, cxgbi_skcb_flags(skb));
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	if (conn->datadgst_en &&
1574*4882a593Smuzhiyun 	    cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
1575*4882a593Smuzhiyun 		pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1576*4882a593Smuzhiyun 			conn, lskb, cxgbi_skcb_flags(lskb));
1577*4882a593Smuzhiyun 		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1578*4882a593Smuzhiyun 		return -EIO;
1579*4882a593Smuzhiyun 	}
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
1582*4882a593Smuzhiyun 		return 0;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	/* coalesced, add header digest length */
1585*4882a593Smuzhiyun 	if (lskb == skb && conn->hdrdgst_en)
1586*4882a593Smuzhiyun 		offset += ISCSI_DIGEST_SIZE;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
1589*4882a593Smuzhiyun 		offloaded = 1;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	if (opcode == ISCSI_OP_SCSI_DATA_IN)
1592*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_RX,
1593*4882a593Smuzhiyun 			"skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1594*4882a593Smuzhiyun 			skb, opcode, ntohl(tcp_conn->in.hdr->itt),
1595*4882a593Smuzhiyun 			tcp_conn->in.datalen, offloaded ? "is" : "not");
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	return read_pdu_skb(conn, skb, offset, offloaded);
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun 
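/*
 * Return RX credits to the hardware.  While the connection is
 * established, bytes consumed but not yet acknowledged to the adapter
 * are returned once they reach the device's credit threshold, or
 * unconditionally when less than 16KB of the receive window remains.
 */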
csk_return_rx_credits(struct cxgbi_sock * csk,int copied)1600*4882a593Smuzhiyun static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1601*4882a593Smuzhiyun {
1602*4882a593Smuzhiyun 	struct cxgbi_device *cdev = csk->cdev;
1603*4882a593Smuzhiyun 	int must_send;
1604*4882a593Smuzhiyun 	u32 credits;
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_PDU_RX,
1607*4882a593Smuzhiyun 		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
1608*4882a593Smuzhiyun 		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1609*4882a593Smuzhiyun 		csk->rcv_wup, cdev->rx_credit_thres,
1610*4882a593Smuzhiyun 		csk->rcv_win);
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	if (!cdev->rx_credit_thres)
1613*4882a593Smuzhiyun 		return;
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	if (csk->state != CTP_ESTABLISHED)
1616*4882a593Smuzhiyun 		return;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	credits = csk->copied_seq - csk->rcv_wup;
1619*4882a593Smuzhiyun 	if (unlikely(!credits))
1620*4882a593Smuzhiyun 		return;
1621*4882a593Smuzhiyun 	must_send = credits + 16384 >= csk->rcv_win;
1622*4882a593Smuzhiyun 	if (must_send || credits >= cdev->rx_credit_thres)
1623*4882a593Smuzhiyun 		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
1624*4882a593Smuzhiyun }
1625*4882a593Smuzhiyun 
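/*
 * RX entry point: drain the socket's receive queue, handling each skb
 * that carries a complete pdu status.  A coalesced skb holds both BHS
 * and data; otherwise the data (if any) follows in the next skb on the
 * queue.  Bytes consumed are credited back to the hardware and added to
 * the connection statistics; a processing error fails the connection.
 */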
cxgbi_conn_pdu_ready(struct cxgbi_sock * csk)1626*4882a593Smuzhiyun void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun 	struct cxgbi_device *cdev = csk->cdev;
1629*4882a593Smuzhiyun 	struct iscsi_conn *conn = csk->user_data;
1630*4882a593Smuzhiyun 	struct sk_buff *skb;
1631*4882a593Smuzhiyun 	unsigned int read = 0;
1632*4882a593Smuzhiyun 	int err = 0;
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_PDU_RX,
1635*4882a593Smuzhiyun 		"csk 0x%p, conn 0x%p.\n", csk, conn);
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	if (unlikely(!conn || conn->suspend_rx)) {
1638*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_RX,
1639*4882a593Smuzhiyun 			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
1640*4882a593Smuzhiyun 			csk, conn, conn ? conn->id : 0xFF,
1641*4882a593Smuzhiyun 			conn ? conn->suspend_rx : 0xFF);
1642*4882a593Smuzhiyun 		return;
1643*4882a593Smuzhiyun 	}
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	while (!err) {
1646*4882a593Smuzhiyun 		skb = skb_peek(&csk->receive_queue);
1647*4882a593Smuzhiyun 		if (!skb ||
1648*4882a593Smuzhiyun 		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
1649*4882a593Smuzhiyun 			if (skb)
1650*4882a593Smuzhiyun 				log_debug(1 << CXGBI_DBG_PDU_RX,
1651*4882a593Smuzhiyun 					"skb 0x%p, NOT ready 0x%lx.\n",
1652*4882a593Smuzhiyun 					skb, cxgbi_skcb_flags(skb));
1653*4882a593Smuzhiyun 			break;
1654*4882a593Smuzhiyun 		}
1655*4882a593Smuzhiyun 		__skb_unlink(skb, &csk->receive_queue);
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 		read += cxgbi_skcb_rx_pdulen(skb);
1658*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_RX,
1659*4882a593Smuzhiyun 			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
1660*4882a593Smuzhiyun 			csk, skb, skb->len, cxgbi_skcb_flags(skb),
1661*4882a593Smuzhiyun 			cxgbi_skcb_rx_pdulen(skb));
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
1664*4882a593Smuzhiyun 			err = skb_read_pdu_bhs(csk, conn, skb);
1665*4882a593Smuzhiyun 			if (err < 0) {
1666*4882a593Smuzhiyun 				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1667*4882a593Smuzhiyun 					"f 0x%lx, plen %u.\n",
1668*4882a593Smuzhiyun 					csk, skb, skb->len,
1669*4882a593Smuzhiyun 					cxgbi_skcb_flags(skb),
1670*4882a593Smuzhiyun 					cxgbi_skcb_rx_pdulen(skb));
1671*4882a593Smuzhiyun 				goto skb_done;
1672*4882a593Smuzhiyun 			}
1673*4882a593Smuzhiyun 			err = skb_read_pdu_data(conn, skb, skb,
1674*4882a593Smuzhiyun 						err + cdev->skb_rx_extra);
1675*4882a593Smuzhiyun 			if (err < 0)
1676*4882a593Smuzhiyun 				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
1677*4882a593Smuzhiyun 					"f 0x%lx, plen %u.\n",
1678*4882a593Smuzhiyun 					csk, skb, skb->len,
1679*4882a593Smuzhiyun 					cxgbi_skcb_flags(skb),
1680*4882a593Smuzhiyun 					cxgbi_skcb_rx_pdulen(skb));
1681*4882a593Smuzhiyun 		} else {
1682*4882a593Smuzhiyun 			err = skb_read_pdu_bhs(csk, conn, skb);
1683*4882a593Smuzhiyun 			if (err < 0) {
1684*4882a593Smuzhiyun 				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
1685*4882a593Smuzhiyun 					"f 0x%lx, plen %u.\n",
1686*4882a593Smuzhiyun 					csk, skb, skb->len,
1687*4882a593Smuzhiyun 					cxgbi_skcb_flags(skb),
1688*4882a593Smuzhiyun 					cxgbi_skcb_rx_pdulen(skb));
1689*4882a593Smuzhiyun 				goto skb_done;
1690*4882a593Smuzhiyun 			}
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1693*4882a593Smuzhiyun 				struct sk_buff *dskb;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 				dskb = skb_peek(&csk->receive_queue);
1696*4882a593Smuzhiyun 				if (!dskb) {
1697*4882a593Smuzhiyun 					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
1698*4882a593Smuzhiyun 						" plen %u, NO data.\n",
1699*4882a593Smuzhiyun 						csk, skb, skb->len,
1700*4882a593Smuzhiyun 						cxgbi_skcb_flags(skb),
1701*4882a593Smuzhiyun 						cxgbi_skcb_rx_pdulen(skb));
1702*4882a593Smuzhiyun 					err = -EIO;
1703*4882a593Smuzhiyun 					goto skb_done;
1704*4882a593Smuzhiyun 				}
1705*4882a593Smuzhiyun 				__skb_unlink(dskb, &csk->receive_queue);
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 				err = skb_read_pdu_data(conn, skb, dskb, 0);
1708*4882a593Smuzhiyun 				if (err < 0)
1709*4882a593Smuzhiyun 					pr_err("data, csk 0x%p, skb 0x%p,%u, "
1710*4882a593Smuzhiyun 						"f 0x%lx, plen %u, dskb 0x%p,"
1711*4882a593Smuzhiyun 						"%u.\n",
1712*4882a593Smuzhiyun 						csk, skb, skb->len,
1713*4882a593Smuzhiyun 						cxgbi_skcb_flags(skb),
1714*4882a593Smuzhiyun 						cxgbi_skcb_rx_pdulen(skb),
1715*4882a593Smuzhiyun 						dskb, dskb->len);
1716*4882a593Smuzhiyun 				__kfree_skb(dskb);
1717*4882a593Smuzhiyun 			} else
1718*4882a593Smuzhiyun 				err = skb_read_pdu_data(conn, skb, skb, 0);
1719*4882a593Smuzhiyun 		}
1720*4882a593Smuzhiyun skb_done:
1721*4882a593Smuzhiyun 		__kfree_skb(skb);
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun 		if (err < 0)
1724*4882a593Smuzhiyun 			break;
1725*4882a593Smuzhiyun 	}
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
1728*4882a593Smuzhiyun 	if (read) {
1729*4882a593Smuzhiyun 		csk->copied_seq += read;
1730*4882a593Smuzhiyun 		csk_return_rx_credits(csk, read);
1731*4882a593Smuzhiyun 		conn->rxdata_octets += read;
1732*4882a593Smuzhiyun 	}
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	if (err < 0) {
1735*4882a593Smuzhiyun 		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
1736*4882a593Smuzhiyun 			csk, conn, err, read);
1737*4882a593Smuzhiyun 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1738*4882a593Smuzhiyun 	}
1739*4882a593Smuzhiyun }
1740*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
1741*4882a593Smuzhiyun 
sgl_seek_offset(struct scatterlist * sgl,unsigned int sgcnt,unsigned int offset,unsigned int * off,struct scatterlist ** sgp)1742*4882a593Smuzhiyun static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
1743*4882a593Smuzhiyun 				unsigned int offset, unsigned int *off,
1744*4882a593Smuzhiyun 				struct scatterlist **sgp)
1745*4882a593Smuzhiyun {
1746*4882a593Smuzhiyun 	int i;
1747*4882a593Smuzhiyun 	struct scatterlist *sg;
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	for_each_sg(sgl, sg, sgcnt, i) {
1750*4882a593Smuzhiyun 		if (offset < sg->length) {
1751*4882a593Smuzhiyun 			*off = offset;
1752*4882a593Smuzhiyun 			*sgp = sg;
1753*4882a593Smuzhiyun 			return 0;
1754*4882a593Smuzhiyun 		}
1755*4882a593Smuzhiyun 		offset -= sg->length;
1756*4882a593Smuzhiyun 	}
1757*4882a593Smuzhiyun 	return -EFAULT;
1758*4882a593Smuzhiyun }
1759*4882a593Smuzhiyun 
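/*
 * Describe dlen bytes of the scatterlist, starting at sgoffset, as a set
 * of page_frag entries, merging ranges that are contiguous within the
 * same page.  Returns the number of frags used, or -EINVAL with *dlimit
 * set to the bytes that did fit when frag_max is exceeded.
 */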
1760*4882a593Smuzhiyun static int
sgl_read_to_frags(struct scatterlist * sg,unsigned int sgoffset,unsigned int dlen,struct page_frag * frags,int frag_max,u32 * dlimit)1761*4882a593Smuzhiyun sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
1762*4882a593Smuzhiyun 		  unsigned int dlen, struct page_frag *frags,
1763*4882a593Smuzhiyun 		  int frag_max, u32 *dlimit)
1764*4882a593Smuzhiyun {
1765*4882a593Smuzhiyun 	unsigned int datalen = dlen;
1766*4882a593Smuzhiyun 	unsigned int sglen = sg->length - sgoffset;
1767*4882a593Smuzhiyun 	struct page *page = sg_page(sg);
1768*4882a593Smuzhiyun 	int i;
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	i = 0;
1771*4882a593Smuzhiyun 	do {
1772*4882a593Smuzhiyun 		unsigned int copy;
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 		if (!sglen) {
1775*4882a593Smuzhiyun 			sg = sg_next(sg);
1776*4882a593Smuzhiyun 			if (!sg) {
1777*4882a593Smuzhiyun 				pr_warn("sg %d NULL, len %u/%u.\n",
1778*4882a593Smuzhiyun 					i, datalen, dlen);
1779*4882a593Smuzhiyun 				return -EINVAL;
1780*4882a593Smuzhiyun 			}
1781*4882a593Smuzhiyun 			sgoffset = 0;
1782*4882a593Smuzhiyun 			sglen = sg->length;
1783*4882a593Smuzhiyun 			page = sg_page(sg);
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 		}
1786*4882a593Smuzhiyun 		copy = min(datalen, sglen);
1787*4882a593Smuzhiyun 		if (i && page == frags[i - 1].page &&
1788*4882a593Smuzhiyun 		    sgoffset + sg->offset ==
1789*4882a593Smuzhiyun 			frags[i - 1].offset + frags[i - 1].size) {
1790*4882a593Smuzhiyun 			frags[i - 1].size += copy;
1791*4882a593Smuzhiyun 		} else {
1792*4882a593Smuzhiyun 			if (i >= frag_max) {
1793*4882a593Smuzhiyun 				pr_warn("too many pages %u, dlen %u.\n",
1794*4882a593Smuzhiyun 					frag_max, dlen);
1795*4882a593Smuzhiyun 				*dlimit = dlen - datalen;
1796*4882a593Smuzhiyun 				return -EINVAL;
1797*4882a593Smuzhiyun 			}
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 			frags[i].page = page;
1800*4882a593Smuzhiyun 			frags[i].offset = sg->offset + sgoffset;
1801*4882a593Smuzhiyun 			frags[i].size = copy;
1802*4882a593Smuzhiyun 			i++;
1803*4882a593Smuzhiyun 		}
1804*4882a593Smuzhiyun 		datalen -= copy;
1805*4882a593Smuzhiyun 		sgoffset += copy;
1806*4882a593Smuzhiyun 		sglen -= copy;
1807*4882a593Smuzhiyun 	} while (datalen);
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 	return i;
1810*4882a593Smuzhiyun }
1811*4882a593Smuzhiyun 
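/*
 * Inspect the command's scatterlist once per task: if there is no sgl,
 * or any of its pages cannot be referenced (page_count() < 1), flag the
 * task so its payload is copied into the skb instead of being attached
 * as page fragments.
 */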
cxgbi_task_data_sgl_check(struct iscsi_task * task)1812*4882a593Smuzhiyun static void cxgbi_task_data_sgl_check(struct iscsi_task *task)
1813*4882a593Smuzhiyun {
1814*4882a593Smuzhiyun 	struct scsi_cmnd *sc = task->sc;
1815*4882a593Smuzhiyun 	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1816*4882a593Smuzhiyun 	struct scatterlist *sg, *sgl = NULL;
1817*4882a593Smuzhiyun 	u32 sgcnt = 0;
1818*4882a593Smuzhiyun 	int i;
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	tdata->flags = CXGBI_TASK_SGL_CHECKED;
1821*4882a593Smuzhiyun 	if (!sc)
1822*4882a593Smuzhiyun 		return;
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	scmd_get_params(sc, &sgl, &sgcnt, &tdata->dlen, 0);
1825*4882a593Smuzhiyun 	if (!sgl || !sgcnt) {
1826*4882a593Smuzhiyun 		tdata->flags |= CXGBI_TASK_SGL_COPY;
1827*4882a593Smuzhiyun 		return;
1828*4882a593Smuzhiyun 	}
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	for_each_sg(sgl, sg, sgcnt, i) {
1831*4882a593Smuzhiyun 		if (page_count(sg_page(sg)) < 1) {
1832*4882a593Smuzhiyun 			tdata->flags |= CXGBI_TASK_SGL_COPY;
1833*4882a593Smuzhiyun 			return;
1834*4882a593Smuzhiyun 		}
1835*4882a593Smuzhiyun 	}
1836*4882a593Smuzhiyun }
1837*4882a593Smuzhiyun 
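/*
 * Map [offset, offset + count) of the command's scatterlist into
 * tdata->frags for the next transmit.  If the fragment limit is hit,
 * *dlimit reports how many bytes did fit so the caller can retry with a
 * smaller transfer.
 */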
1838*4882a593Smuzhiyun static int
cxgbi_task_data_sgl_read(struct iscsi_task * task,u32 offset,u32 count,u32 * dlimit)1839*4882a593Smuzhiyun cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count,
1840*4882a593Smuzhiyun 			 u32 *dlimit)
1841*4882a593Smuzhiyun {
1842*4882a593Smuzhiyun 	struct scsi_cmnd *sc = task->sc;
1843*4882a593Smuzhiyun 	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1844*4882a593Smuzhiyun 	struct scatterlist *sgl = NULL;
1845*4882a593Smuzhiyun 	struct scatterlist *sg;
1846*4882a593Smuzhiyun 	u32 dlen = 0;
1847*4882a593Smuzhiyun 	u32 sgcnt;
1848*4882a593Smuzhiyun 	int err;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	if (!sc)
1851*4882a593Smuzhiyun 		return 0;
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	scmd_get_params(sc, &sgl, &sgcnt, &dlen, 0);
1854*4882a593Smuzhiyun 	if (!sgl || !sgcnt)
1855*4882a593Smuzhiyun 		return 0;
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	err = sgl_seek_offset(sgl, sgcnt, offset, &tdata->sgoffset, &sg);
1858*4882a593Smuzhiyun 	if (err < 0) {
1859*4882a593Smuzhiyun 		pr_warn("tpdu max, sgl %u, bad offset %u/%u.\n",
1860*4882a593Smuzhiyun 			sgcnt, offset, tdata->dlen);
1861*4882a593Smuzhiyun 		return err;
1862*4882a593Smuzhiyun 	}
1863*4882a593Smuzhiyun 	err = sgl_read_to_frags(sg, tdata->sgoffset, count,
1864*4882a593Smuzhiyun 				tdata->frags, MAX_SKB_FRAGS, dlimit);
1865*4882a593Smuzhiyun 	if (err < 0) {
1866*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI,
1867*4882a593Smuzhiyun 			  "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n",
1868*4882a593Smuzhiyun 			  sgcnt, offset, count, tdata->dlen, *dlimit);
1869*4882a593Smuzhiyun 		return err;
1870*4882a593Smuzhiyun 	}
1871*4882a593Smuzhiyun 	tdata->offset = offset;
1872*4882a593Smuzhiyun 	tdata->count = count;
1873*4882a593Smuzhiyun 	tdata->nr_frags = err;
1874*4882a593Smuzhiyun 	tdata->total_count = count;
1875*4882a593Smuzhiyun 	tdata->total_offset = offset;
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
1878*4882a593Smuzhiyun 		  "%s: offset %u, count %u,\n"
1879*4882a593Smuzhiyun 		  "err %u, total_count %u, total_offset %u\n",
1880*4882a593Smuzhiyun 		  __func__, offset, count, err, tdata->total_count, tdata->total_offset);
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	return 0;
1883*4882a593Smuzhiyun }
1884*4882a593Smuzhiyun 
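/*
 * Allocate the skb for an outgoing pdu.  For write data, when the LLD
 * supports ISO and more than one pdu worth of data remains,
 * conn->max_xmit_dlength is temporarily raised so a single skb covers
 * num_pdu pdus and room is reserved for the ISO header; the original
 * value is kept in tdata->max_xmit_dlength and restored on error or in
 * cxgbi_conn_init_pdu().
 */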
cxgbi_conn_alloc_pdu(struct iscsi_task * task,u8 op)1885*4882a593Smuzhiyun int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op)
1886*4882a593Smuzhiyun {
1887*4882a593Smuzhiyun 	struct iscsi_conn *conn = task->conn;
1888*4882a593Smuzhiyun 	struct iscsi_session *session = task->conn->session;
1889*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1890*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
1891*4882a593Smuzhiyun 	struct cxgbi_device *cdev = cconn->chba->cdev;
1892*4882a593Smuzhiyun 	struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL;
1893*4882a593Smuzhiyun 	struct iscsi_tcp_task *tcp_task = task->dd_data;
1894*4882a593Smuzhiyun 	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1895*4882a593Smuzhiyun 	struct scsi_cmnd *sc = task->sc;
1896*4882a593Smuzhiyun 	u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
1897*4882a593Smuzhiyun 	u32 max_txdata_len = conn->max_xmit_dlength;
1898*4882a593Smuzhiyun 	u32 iso_tx_rsvd = 0, local_iso_info = 0;
1899*4882a593Smuzhiyun 	u32 last_tdata_offset, last_tdata_count;
1900*4882a593Smuzhiyun 	int err = 0;
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 	if (!tcp_task) {
1903*4882a593Smuzhiyun 		pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n",
1904*4882a593Smuzhiyun 		       task, tcp_task, tdata);
1905*4882a593Smuzhiyun 		return -ENOMEM;
1906*4882a593Smuzhiyun 	}
1907*4882a593Smuzhiyun 	if (!csk) {
1908*4882a593Smuzhiyun 		pr_err("task 0x%p, csk gone.\n", task);
1909*4882a593Smuzhiyun 		return -EPIPE;
1910*4882a593Smuzhiyun 	}
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	op &= ISCSI_OPCODE_MASK;
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	tcp_task->dd_data = tdata;
1915*4882a593Smuzhiyun 	task->hdr = NULL;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	last_tdata_count = tdata->count;
1918*4882a593Smuzhiyun 	last_tdata_offset = tdata->offset;
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 	if ((op == ISCSI_OP_SCSI_DATA_OUT) ||
1921*4882a593Smuzhiyun 	    ((op == ISCSI_OP_SCSI_CMD) &&
1922*4882a593Smuzhiyun 	     (sc->sc_data_direction == DMA_TO_DEVICE))) {
1923*4882a593Smuzhiyun 		u32 remaining_data_tosend, dlimit = 0;
1924*4882a593Smuzhiyun 		u32 max_pdu_size, max_num_pdu, num_pdu;
1925*4882a593Smuzhiyun 		u32 count;
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun 		/* Preserve conn->max_xmit_dlength because it can get updated to
1928*4882a593Smuzhiyun 		 * ISO data size.
1929*4882a593Smuzhiyun 		 */
1930*4882a593Smuzhiyun 		if (task->state == ISCSI_TASK_PENDING)
1931*4882a593Smuzhiyun 			tdata->max_xmit_dlength = conn->max_xmit_dlength;
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 		if (!tdata->offset)
1934*4882a593Smuzhiyun 			cxgbi_task_data_sgl_check(task);
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 		remaining_data_tosend =
1937*4882a593Smuzhiyun 			tdata->dlen - tdata->offset - tdata->count;
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun recalculate_sgl:
1940*4882a593Smuzhiyun 		max_txdata_len = tdata->max_xmit_dlength;
1941*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
1942*4882a593Smuzhiyun 			  "tdata->dlen %u, remaining to send %u "
1943*4882a593Smuzhiyun 			  "conn->max_xmit_dlength %u, "
1944*4882a593Smuzhiyun 			  "tdata->max_xmit_dlength %u\n",
1945*4882a593Smuzhiyun 			  tdata->dlen, remaining_data_tosend,
1946*4882a593Smuzhiyun 			  conn->max_xmit_dlength, tdata->max_xmit_dlength);
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 		if (cdev->skb_iso_txhdr && !csk->disable_iso &&
1949*4882a593Smuzhiyun 		    (remaining_data_tosend > tdata->max_xmit_dlength) &&
1950*4882a593Smuzhiyun 		    !(remaining_data_tosend % 4)) {
1951*4882a593Smuzhiyun 			u32 max_iso_data;
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 			if ((op == ISCSI_OP_SCSI_CMD) &&
1954*4882a593Smuzhiyun 			    session->initial_r2t_en)
1955*4882a593Smuzhiyun 				goto no_iso;
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 			max_pdu_size = tdata->max_xmit_dlength +
1958*4882a593Smuzhiyun 				       ISCSI_PDU_NONPAYLOAD_LEN;
1959*4882a593Smuzhiyun 			max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB,
1960*4882a593Smuzhiyun 						 csk->advmss);
1961*4882a593Smuzhiyun 			max_num_pdu = max_iso_data / max_pdu_size;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 			num_pdu = (remaining_data_tosend +
1964*4882a593Smuzhiyun 				   tdata->max_xmit_dlength - 1) /
1965*4882a593Smuzhiyun 				  tdata->max_xmit_dlength;
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 			if (num_pdu > max_num_pdu)
1968*4882a593Smuzhiyun 				num_pdu = max_num_pdu;
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun 			conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu;
1971*4882a593Smuzhiyun 			max_txdata_len = conn->max_xmit_dlength;
1972*4882a593Smuzhiyun 			iso_tx_rsvd = cdev->skb_iso_txhdr;
1973*4882a593Smuzhiyun 			local_iso_info = sizeof(struct cxgbi_iso_info);
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 			log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
1976*4882a593Smuzhiyun 				  "max_pdu_size %u, max_num_pdu %u, "
1977*4882a593Smuzhiyun 				  "max_txdata %u, num_pdu %u\n",
1978*4882a593Smuzhiyun 				  max_pdu_size, max_num_pdu,
1979*4882a593Smuzhiyun 				  max_txdata_len, num_pdu);
1980*4882a593Smuzhiyun 		}
1981*4882a593Smuzhiyun no_iso:
1982*4882a593Smuzhiyun 		count  = min_t(u32, max_txdata_len, remaining_data_tosend);
1983*4882a593Smuzhiyun 		err = cxgbi_task_data_sgl_read(task,
1984*4882a593Smuzhiyun 					       tdata->offset + tdata->count,
1985*4882a593Smuzhiyun 					       count, &dlimit);
1986*4882a593Smuzhiyun 		if (unlikely(err < 0)) {
1987*4882a593Smuzhiyun 			log_debug(1 << CXGBI_DBG_ISCSI,
1988*4882a593Smuzhiyun 				  "task 0x%p, tcp_task 0x%p, tdata 0x%p, "
1989*4882a593Smuzhiyun 				  "sgl err %d, count %u, dlimit %u\n",
1990*4882a593Smuzhiyun 				  task, tcp_task, tdata, err, count, dlimit);
1991*4882a593Smuzhiyun 			if (dlimit) {
1992*4882a593Smuzhiyun 				remaining_data_tosend =
1993*4882a593Smuzhiyun 					rounddown(dlimit,
1994*4882a593Smuzhiyun 						  tdata->max_xmit_dlength);
1995*4882a593Smuzhiyun 				if (!remaining_data_tosend)
1996*4882a593Smuzhiyun 					remaining_data_tosend = dlimit;
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 				dlimit = 0;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 				conn->max_xmit_dlength = remaining_data_tosend;
2001*4882a593Smuzhiyun 				goto recalculate_sgl;
2002*4882a593Smuzhiyun 			}
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 			pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, "
2005*4882a593Smuzhiyun 				"sgl err %d\n",
2006*4882a593Smuzhiyun 				task, tcp_task, tdata, err);
2007*4882a593Smuzhiyun 			goto ret_err;
2008*4882a593Smuzhiyun 		}
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun 		if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
2011*4882a593Smuzhiyun 		    (tdata->nr_frags > MAX_SKB_FRAGS))
2012*4882a593Smuzhiyun 			headroom += conn->max_xmit_dlength;
2013*4882a593Smuzhiyun 	}
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 	tdata->skb = alloc_skb(local_iso_info + cdev->skb_tx_rsvd +
2016*4882a593Smuzhiyun 			       iso_tx_rsvd + headroom, GFP_ATOMIC);
2017*4882a593Smuzhiyun 	if (!tdata->skb) {
2018*4882a593Smuzhiyun 		tdata->count = last_tdata_count;
2019*4882a593Smuzhiyun 		tdata->offset = last_tdata_offset;
2020*4882a593Smuzhiyun 		err = -ENOMEM;
2021*4882a593Smuzhiyun 		goto ret_err;
2022*4882a593Smuzhiyun 	}
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 	skb_reserve(tdata->skb, local_iso_info + cdev->skb_tx_rsvd +
2025*4882a593Smuzhiyun 		    iso_tx_rsvd);
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	if (task->sc) {
2028*4882a593Smuzhiyun 		task->hdr = (struct iscsi_hdr *)tdata->skb->data;
2029*4882a593Smuzhiyun 	} else {
2030*4882a593Smuzhiyun 		task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
2031*4882a593Smuzhiyun 		if (!task->hdr) {
2032*4882a593Smuzhiyun 			__kfree_skb(tdata->skb);
2033*4882a593Smuzhiyun 			tdata->skb = NULL;
2034*4882a593Smuzhiyun 			return -ENOMEM;
2035*4882a593Smuzhiyun 		}
2036*4882a593Smuzhiyun 	}
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX;
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	if (iso_tx_rsvd)
2041*4882a593Smuzhiyun 		cxgbi_skcb_set_flag(tdata->skb, SKCBF_TX_ISO);
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	/* data_out uses scsi_cmd's itt */
2044*4882a593Smuzhiyun 	if (op != ISCSI_OP_SCSI_DATA_OUT)
2045*4882a593Smuzhiyun 		task_reserve_itt(task, &task->hdr->itt);
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2048*4882a593Smuzhiyun 		  "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
2049*4882a593Smuzhiyun 		  task, op, tdata->skb, cdev->skb_tx_rsvd, headroom,
2050*4882a593Smuzhiyun 		  conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt));
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	return 0;
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun ret_err:
2055*4882a593Smuzhiyun 	conn->max_xmit_dlength = tdata->max_xmit_dlength;
2056*4882a593Smuzhiyun 	return err;
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
2059*4882a593Smuzhiyun 
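/*
 * Fill the cxgbi_iso_info block at the head of the skb so the payload
 * can be segmented into num_pdu pdus: per-pdu data length, burst size,
 * the data offset within the (solicited or unsolicited) R2T, and the
 * FSLICE/LSLICE flags marking the first and last slice of the sequence.
 * The pdu header's dlength is clamped to a single pdu.
 */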
2060*4882a593Smuzhiyun static int
cxgbi_prep_iso_info(struct iscsi_task * task,struct sk_buff * skb,u32 count)2061*4882a593Smuzhiyun cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb,
2062*4882a593Smuzhiyun 		    u32 count)
2063*4882a593Smuzhiyun {
2064*4882a593Smuzhiyun 	struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head;
2065*4882a593Smuzhiyun 	struct iscsi_r2t_info *r2t;
2066*4882a593Smuzhiyun 	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2067*4882a593Smuzhiyun 	struct iscsi_conn *conn = task->conn;
2068*4882a593Smuzhiyun 	struct iscsi_session *session = conn->session;
2069*4882a593Smuzhiyun 	struct iscsi_tcp_task *tcp_task = task->dd_data;
2070*4882a593Smuzhiyun 	u32 burst_size = 0, r2t_dlength = 0, dlength;
2071*4882a593Smuzhiyun 	u32 max_pdu_len = tdata->max_xmit_dlength;
2072*4882a593Smuzhiyun 	u32 segment_offset = 0;
2073*4882a593Smuzhiyun 	u32 num_pdu;
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun 	if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
2076*4882a593Smuzhiyun 		return 0;
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun 	memset(iso_info, 0, sizeof(struct cxgbi_iso_info));
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 	if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) {
2081*4882a593Smuzhiyun 		iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE;
2082*4882a593Smuzhiyun 		burst_size = count;
2083*4882a593Smuzhiyun 	}
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun 	dlength = ntoh24(task->hdr->dlength);
2086*4882a593Smuzhiyun 	dlength = min(dlength, max_pdu_len);
2087*4882a593Smuzhiyun 	hton24(task->hdr->dlength, dlength);
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	num_pdu = (count + max_pdu_len - 1) / max_pdu_len;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	if (iscsi_task_has_unsol_data(task))
2092*4882a593Smuzhiyun 		r2t = &task->unsol_r2t;
2093*4882a593Smuzhiyun 	else
2094*4882a593Smuzhiyun 		r2t = tcp_task->r2t;
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	if (r2t) {
2097*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2098*4882a593Smuzhiyun 			  "count %u, tdata->count %u, num_pdu %u, "
2099*4882a593Smuzhiyun 			  "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n",
2100*4882a593Smuzhiyun 			  count, tdata->count, num_pdu, task->hdr_len,
2101*4882a593Smuzhiyun 			  r2t->data_length, r2t->sent);
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 		r2t_dlength = r2t->data_length - r2t->sent;
2104*4882a593Smuzhiyun 		segment_offset = r2t->sent;
2105*4882a593Smuzhiyun 		r2t->datasn += num_pdu - 1;
2106*4882a593Smuzhiyun 	}
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun 	if (!r2t || !r2t->sent)
2109*4882a593Smuzhiyun 		iso_info->flags |= CXGBI_ISO_INFO_FSLICE;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL)
2112*4882a593Smuzhiyun 		iso_info->flags |= CXGBI_ISO_INFO_LSLICE;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	iso_info->op = task->hdr->opcode;
2117*4882a593Smuzhiyun 	iso_info->ahs = task->hdr->hlength;
2118*4882a593Smuzhiyun 	iso_info->num_pdu = num_pdu;
2119*4882a593Smuzhiyun 	iso_info->mpdu = max_pdu_len;
2120*4882a593Smuzhiyun 	iso_info->burst_size = (burst_size + r2t_dlength) >> 2;
2121*4882a593Smuzhiyun 	iso_info->len = count + task->hdr_len;
2122*4882a593Smuzhiyun 	iso_info->segment_offset = segment_offset;
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len;
2125*4882a593Smuzhiyun 	return 0;
2126*4882a593Smuzhiyun }
2127*4882a593Smuzhiyun 
tx_skb_setmode(struct sk_buff * skb,int hcrc,int dcrc)2128*4882a593Smuzhiyun static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
2129*4882a593Smuzhiyun {
2130*4882a593Smuzhiyun 	if (hcrc || dcrc) {
2131*4882a593Smuzhiyun 		u8 submode = 0;
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 		if (hcrc)
2134*4882a593Smuzhiyun 			submode |= 1;
2135*4882a593Smuzhiyun 		if (dcrc)
2136*4882a593Smuzhiyun 			submode |= 2;
2137*4882a593Smuzhiyun 		cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
2138*4882a593Smuzhiyun 	} else
2139*4882a593Smuzhiyun 		cxgbi_skcb_tx_ulp_mode(skb) = 0;
2140*4882a593Smuzhiyun }
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun static struct page *rsvd_page;
2143*4882a593Smuzhiyun 
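/*
 * Attach the pdu payload to the skb built by cxgbi_conn_alloc_pdu().
 * The data is either copied into the skb headroom (when the sgl must be
 * copied or there are too many fragments) or attached as page fragments.
 * Padding to a 4-byte boundary comes from the shared rsvd_page, and ISO
 * metadata is prepared when the payload spans more than one pdu.
 */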
cxgbi_conn_init_pdu(struct iscsi_task * task,unsigned int offset,unsigned int count)2144*4882a593Smuzhiyun int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
2145*4882a593Smuzhiyun 			      unsigned int count)
2146*4882a593Smuzhiyun {
2147*4882a593Smuzhiyun 	struct iscsi_conn *conn = task->conn;
2148*4882a593Smuzhiyun 	struct iscsi_tcp_task *tcp_task = task->dd_data;
2149*4882a593Smuzhiyun 	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2150*4882a593Smuzhiyun 	struct sk_buff *skb;
2151*4882a593Smuzhiyun 	struct scsi_cmnd *sc = task->sc;
2152*4882a593Smuzhiyun 	u32 expected_count, expected_offset;
2153*4882a593Smuzhiyun 	u32 datalen = count, dlimit = 0;
2154*4882a593Smuzhiyun 	u32 i, padlen = iscsi_padding(count);
2155*4882a593Smuzhiyun 	struct page *pg;
2156*4882a593Smuzhiyun 	int err;
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	if (!tcp_task || (tcp_task->dd_data != tdata)) {
2159*4882a593Smuzhiyun 		pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
2160*4882a593Smuzhiyun 		       task, task->sc, tcp_task,
2161*4882a593Smuzhiyun 		       tcp_task ? tcp_task->dd_data : NULL, tdata);
2162*4882a593Smuzhiyun 		return -EINVAL;
2163*4882a593Smuzhiyun 	}
2164*4882a593Smuzhiyun 	skb = tdata->skb;
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2167*4882a593Smuzhiyun 		  "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
2168*4882a593Smuzhiyun 		  task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
2169*4882a593Smuzhiyun 		  be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count);
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 	skb_put(skb, task->hdr_len);
2172*4882a593Smuzhiyun 	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
2173*4882a593Smuzhiyun 	if (!count) {
2174*4882a593Smuzhiyun 		tdata->count = count;
2175*4882a593Smuzhiyun 		tdata->offset = offset;
2176*4882a593Smuzhiyun 		tdata->nr_frags = 0;
2177*4882a593Smuzhiyun 		tdata->total_offset = 0;
2178*4882a593Smuzhiyun 		tdata->total_count = 0;
2179*4882a593Smuzhiyun 		if (tdata->max_xmit_dlength)
2180*4882a593Smuzhiyun 			conn->max_xmit_dlength = tdata->max_xmit_dlength;
2181*4882a593Smuzhiyun 		cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
2182*4882a593Smuzhiyun 		return 0;
2183*4882a593Smuzhiyun 	}
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2186*4882a593Smuzhiyun 		  "data->total_count %u, tdata->total_offset %u\n",
2187*4882a593Smuzhiyun 		  tdata->total_count, tdata->total_offset);
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	expected_count = tdata->total_count;
2190*4882a593Smuzhiyun 	expected_offset = tdata->total_offset;
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	if ((count != expected_count) ||
2193*4882a593Smuzhiyun 	    (offset != expected_offset)) {
2194*4882a593Smuzhiyun 		err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit);
2195*4882a593Smuzhiyun 		if (err < 0) {
2196*4882a593Smuzhiyun 			pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p "
2197*4882a593Smuzhiyun 			       "dlimit %u, sgl err %d.\n", task, task->sc,
2198*4882a593Smuzhiyun 			       tcp_task, tcp_task ? tcp_task->dd_data : NULL,
2199*4882a593Smuzhiyun 			       tdata, dlimit, err);
2200*4882a593Smuzhiyun 			return err;
2201*4882a593Smuzhiyun 		}
2202*4882a593Smuzhiyun 	}
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun 	/* Restore original value of conn->max_xmit_dlength because
2205*4882a593Smuzhiyun 	 * it can get updated to ISO data size.
2206*4882a593Smuzhiyun 	 */
2207*4882a593Smuzhiyun 	conn->max_xmit_dlength = tdata->max_xmit_dlength;
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	if (sc) {
2210*4882a593Smuzhiyun 		struct page_frag *frag = tdata->frags;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 		if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
2213*4882a593Smuzhiyun 		    (tdata->nr_frags > MAX_SKB_FRAGS) ||
2214*4882a593Smuzhiyun 		    (padlen && (tdata->nr_frags ==
2215*4882a593Smuzhiyun 					MAX_SKB_FRAGS))) {
2216*4882a593Smuzhiyun 			char *dst = skb->data + task->hdr_len;
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 			/* data fits in the skb's headroom */
2219*4882a593Smuzhiyun 			for (i = 0; i < tdata->nr_frags; i++, frag++) {
2220*4882a593Smuzhiyun 				char *src = kmap_atomic(frag->page);
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 				memcpy(dst, src + frag->offset, frag->size);
2223*4882a593Smuzhiyun 				dst += frag->size;
2224*4882a593Smuzhiyun 				kunmap_atomic(src);
2225*4882a593Smuzhiyun 			}
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 			if (padlen) {
2228*4882a593Smuzhiyun 				memset(dst, 0, padlen);
2229*4882a593Smuzhiyun 				padlen = 0;
2230*4882a593Smuzhiyun 			}
2231*4882a593Smuzhiyun 			skb_put(skb, count + padlen);
2232*4882a593Smuzhiyun 		} else {
2233*4882a593Smuzhiyun 			for (i = 0; i < tdata->nr_frags; i++, frag++) {
2234*4882a593Smuzhiyun 				get_page(frag->page);
2235*4882a593Smuzhiyun 				skb_fill_page_desc(skb, i, frag->page,
2236*4882a593Smuzhiyun 						   frag->offset, frag->size);
2237*4882a593Smuzhiyun 			}
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 			skb->len += count;
2240*4882a593Smuzhiyun 			skb->data_len += count;
2241*4882a593Smuzhiyun 			skb->truesize += count;
2242*4882a593Smuzhiyun 		}
2243*4882a593Smuzhiyun 	} else {
2244*4882a593Smuzhiyun 		pg = virt_to_head_page(task->data);
2245*4882a593Smuzhiyun 		get_page(pg);
2246*4882a593Smuzhiyun 		skb_fill_page_desc(skb, 0, pg,
2247*4882a593Smuzhiyun 				   task->data - (char *)page_address(pg),
2248*4882a593Smuzhiyun 				   count);
2249*4882a593Smuzhiyun 		skb->len += count;
2250*4882a593Smuzhiyun 		skb->data_len += count;
2251*4882a593Smuzhiyun 		skb->truesize += count;
2252*4882a593Smuzhiyun 	}
2253*4882a593Smuzhiyun 
2254*4882a593Smuzhiyun 	if (padlen) {
2255*4882a593Smuzhiyun 		get_page(rsvd_page);
2256*4882a593Smuzhiyun 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
2257*4882a593Smuzhiyun 				   rsvd_page, 0, padlen);
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun 		skb->data_len += padlen;
2260*4882a593Smuzhiyun 		skb->truesize += padlen;
2261*4882a593Smuzhiyun 		skb->len += padlen;
2262*4882a593Smuzhiyun 	}
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	if (likely(count > tdata->max_xmit_dlength))
2265*4882a593Smuzhiyun 		cxgbi_prep_iso_info(task, skb, count);
2266*4882a593Smuzhiyun 	else
2267*4882a593Smuzhiyun 		cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun 	return 0;
2270*4882a593Smuzhiyun }
2271*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
2272*4882a593Smuzhiyun 
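/*
 * Queue a fully built iSCSI PDU skb on the offloaded connection.
 * Checks the connection state, the send window (on T3), the headroom
 * reserved for the tx (and optional ISO) header and the fragment count,
 * then entails the skb on the write queue and advances write_seq by the
 * payload length plus the ULP/ISO overhead.
 */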
2273*4882a593Smuzhiyun static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
2274*4882a593Smuzhiyun {
2275*4882a593Smuzhiyun 	struct cxgbi_device *cdev = csk->cdev;
2276*4882a593Smuzhiyun 	struct cxgbi_iso_info *iso_cpl;
2277*4882a593Smuzhiyun 	u32 frags = skb_shinfo(skb)->nr_frags;
2278*4882a593Smuzhiyun 	u32 extra_len, num_pdu, hdr_len;
2279*4882a593Smuzhiyun 	u32 iso_tx_rsvd = 0;
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun 	if (csk->state != CTP_ESTABLISHED) {
2282*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_TX,
2283*4882a593Smuzhiyun 			  "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
2284*4882a593Smuzhiyun 			  csk, csk->state, csk->flags, csk->tid);
2285*4882a593Smuzhiyun 		return -EPIPE;
2286*4882a593Smuzhiyun 	}
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun 	if (csk->err) {
2289*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_TX,
2290*4882a593Smuzhiyun 			  "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
2291*4882a593Smuzhiyun 			  csk, csk->state, csk->flags, csk->tid, csk->err);
2292*4882a593Smuzhiyun 		return -EPIPE;
2293*4882a593Smuzhiyun 	}
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun 	if ((cdev->flags & CXGBI_FLAG_DEV_T3) &&
2296*4882a593Smuzhiyun 	    before((csk->snd_win + csk->snd_una), csk->write_seq)) {
2297*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_TX,
2298*4882a593Smuzhiyun 			  "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
2299*4882a593Smuzhiyun 			  csk, csk->state, csk->flags, csk->tid, csk->write_seq,
2300*4882a593Smuzhiyun 			  csk->snd_una, csk->snd_win);
2301*4882a593Smuzhiyun 		return -ENOBUFS;
2302*4882a593Smuzhiyun 	}
2303*4882a593Smuzhiyun 
2304*4882a593Smuzhiyun 	if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
2305*4882a593Smuzhiyun 		iso_tx_rsvd = cdev->skb_iso_txhdr;
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun 	if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) {
2308*4882a593Smuzhiyun 		pr_err("csk 0x%p, skb head %u < %u.\n",
2309*4882a593Smuzhiyun 		       csk, skb_headroom(skb), cdev->skb_tx_rsvd + iso_tx_rsvd);
2310*4882a593Smuzhiyun 		return -EINVAL;
2311*4882a593Smuzhiyun 	}
2312*4882a593Smuzhiyun 
2313*4882a593Smuzhiyun 	if (skb->len != skb->data_len)
2314*4882a593Smuzhiyun 		frags++;
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	if (frags >= SKB_WR_LIST_SIZE) {
2317*4882a593Smuzhiyun 		pr_err("csk 0x%p, frags %u, %u,%u >%lu.\n",
2318*4882a593Smuzhiyun 		       csk, skb_shinfo(skb)->nr_frags, skb->len,
2319*4882a593Smuzhiyun 		       skb->data_len, SKB_WR_LIST_SIZE);
2320*4882a593Smuzhiyun 		return -EINVAL;
2321*4882a593Smuzhiyun 	}
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
2324*4882a593Smuzhiyun 	skb_reset_transport_header(skb);
2325*4882a593Smuzhiyun 	cxgbi_sock_skb_entail(csk, skb);
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 	extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun 	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) {
2330*4882a593Smuzhiyun 		iso_cpl = (struct cxgbi_iso_info *)skb->head;
2331*4882a593Smuzhiyun 		num_pdu = iso_cpl->num_pdu;
2332*4882a593Smuzhiyun 		hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
2333*4882a593Smuzhiyun 		extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) *
2334*4882a593Smuzhiyun 			     num_pdu) +	(hdr_len * (num_pdu - 1));
2335*4882a593Smuzhiyun 	}
2336*4882a593Smuzhiyun 
2337*4882a593Smuzhiyun 	csk->write_seq += (skb->len + extra_len);
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	return 0;
2340*4882a593Smuzhiyun }
2341*4882a593Smuzhiyun 
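/*
 * Queue one skb under csk->lock and kick the per-device tx push routine.
 * Returns the number of bytes queued (skb->len) or a negative errno.
 */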
2342*4882a593Smuzhiyun static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb)
2343*4882a593Smuzhiyun {
2344*4882a593Smuzhiyun 	struct cxgbi_device *cdev = csk->cdev;
2345*4882a593Smuzhiyun 	int len = skb->len;
2346*4882a593Smuzhiyun 	int err;
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	spin_lock_bh(&csk->lock);
2349*4882a593Smuzhiyun 	err = cxgbi_sock_tx_queue_up(csk, skb);
2350*4882a593Smuzhiyun 	if (err < 0) {
2351*4882a593Smuzhiyun 		spin_unlock_bh(&csk->lock);
2352*4882a593Smuzhiyun 		return err;
2353*4882a593Smuzhiyun 	}
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 	if (likely(skb_queue_len(&csk->write_queue)))
2356*4882a593Smuzhiyun 		cdev->csk_push_tx_frames(csk, 0);
2357*4882a593Smuzhiyun 	spin_unlock_bh(&csk->lock);
2358*4882a593Smuzhiyun 	return len;
2359*4882a593Smuzhiyun }
2360*4882a593Smuzhiyun 
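/*
 * libiscsi xmit_pdu callout: send the prepared PDU skb (tdata->skb) on the
 * offloaded socket.  Pending ppods are written first, the iSCSI header is
 * copied into the skb for PDUs without a scsi_cmnd, tx byte counters are
 * updated on success, and on -EAGAIN/-ENOBUFS the skb is kept for a retry
 * (temporarily disabling ISO if tx credits keep running out).
 */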
2361*4882a593Smuzhiyun int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2362*4882a593Smuzhiyun {
2363*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
2364*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
2365*4882a593Smuzhiyun 	struct iscsi_tcp_task *tcp_task = task->dd_data;
2366*4882a593Smuzhiyun 	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2367*4882a593Smuzhiyun 	struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
2368*4882a593Smuzhiyun 	struct sk_buff *skb;
2369*4882a593Smuzhiyun 	struct cxgbi_sock *csk = NULL;
2370*4882a593Smuzhiyun 	u32 pdulen = 0;
2371*4882a593Smuzhiyun 	u32 datalen;
2372*4882a593Smuzhiyun 	int err;
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 	if (!tcp_task || (tcp_task->dd_data != tdata)) {
2375*4882a593Smuzhiyun 		pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
2376*4882a593Smuzhiyun 		       task, task->sc, tcp_task,
2377*4882a593Smuzhiyun 		       tcp_task ? tcp_task->dd_data : NULL, tdata);
2378*4882a593Smuzhiyun 		return -EINVAL;
2379*4882a593Smuzhiyun 	}
2380*4882a593Smuzhiyun 
2381*4882a593Smuzhiyun 	skb = tdata->skb;
2382*4882a593Smuzhiyun 	if (!skb) {
2383*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2384*4882a593Smuzhiyun 			  "task 0x%p, skb NULL.\n", task);
2385*4882a593Smuzhiyun 		return 0;
2386*4882a593Smuzhiyun 	}
2387*4882a593Smuzhiyun 
2388*4882a593Smuzhiyun 	if (cconn && cconn->cep)
2389*4882a593Smuzhiyun 		csk = cconn->cep->csk;
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 	if (!csk) {
2392*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2393*4882a593Smuzhiyun 			  "task 0x%p, csk gone.\n", task);
2394*4882a593Smuzhiyun 		return -EPIPE;
2395*4882a593Smuzhiyun 	}
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 	tdata->skb = NULL;
2398*4882a593Smuzhiyun 	datalen = skb->data_len;
2399*4882a593Smuzhiyun 
2400*4882a593Smuzhiyun 	/* write ppod first if using ofldq to write ppod */
2401*4882a593Smuzhiyun 	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
2402*4882a593Smuzhiyun 		struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun 		ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID;
2405*4882a593Smuzhiyun 		if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
2406*4882a593Smuzhiyun 			pr_err("task 0x%p, ppod writing using ofldq failed.\n",
2407*4882a593Smuzhiyun 			       task);
2408*4882a593Smuzhiyun 			/* continue. Let fl get the data */
2409*4882a593Smuzhiyun 	}
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	if (!task->sc)
2412*4882a593Smuzhiyun 		memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	err = cxgbi_sock_send_skb(csk, skb);
2415*4882a593Smuzhiyun 	if (err > 0) {
2416*4882a593Smuzhiyun 		pdulen += err;
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n",
2419*4882a593Smuzhiyun 			  task, task->sc, err);
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 		if (task->conn->hdrdgst_en)
2422*4882a593Smuzhiyun 			pdulen += ISCSI_DIGEST_SIZE;
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 		if (datalen && task->conn->datadgst_en)
2425*4882a593Smuzhiyun 			pdulen += ISCSI_DIGEST_SIZE;
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun 		task->conn->txdata_octets += pdulen;
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 		if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) {
2430*4882a593Smuzhiyun 			if (time_after(jiffies, csk->prev_iso_ts + HZ)) {
2431*4882a593Smuzhiyun 				csk->disable_iso = false;
2432*4882a593Smuzhiyun 				csk->prev_iso_ts = 0;
2433*4882a593Smuzhiyun 				log_debug(1 << CXGBI_DBG_PDU_TX,
2434*4882a593Smuzhiyun 					  "enable iso: csk 0x%p\n", csk);
2435*4882a593Smuzhiyun 			}
2436*4882a593Smuzhiyun 		}
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun 		return 0;
2439*4882a593Smuzhiyun 	}
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	if (err == -EAGAIN || err == -ENOBUFS) {
2442*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_PDU_TX,
2443*4882a593Smuzhiyun 			  "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2444*4882a593Smuzhiyun 			  task, skb, skb->len, skb->data_len, err);
2445*4882a593Smuzhiyun 		/* reset skb to send when we are called again */
2446*4882a593Smuzhiyun 		tdata->skb = skb;
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 		if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
2449*4882a593Smuzhiyun 		    (csk->no_tx_credits++ >= 2)) {
2450*4882a593Smuzhiyun 			csk->disable_iso = true;
2451*4882a593Smuzhiyun 			csk->prev_iso_ts = jiffies;
2452*4882a593Smuzhiyun 			log_debug(1 << CXGBI_DBG_PDU_TX,
2453*4882a593Smuzhiyun 				  "disable iso:csk 0x%p, ts:%lu\n",
2454*4882a593Smuzhiyun 				  csk, csk->prev_iso_ts);
2455*4882a593Smuzhiyun 		}
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun 		return err;
2458*4882a593Smuzhiyun 	}
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2461*4882a593Smuzhiyun 		  "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2462*4882a593Smuzhiyun 		  task->itt, skb, skb->len, skb->data_len, err);
2463*4882a593Smuzhiyun 	__kfree_skb(skb);
2464*4882a593Smuzhiyun 	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
2465*4882a593Smuzhiyun 	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
2466*4882a593Smuzhiyun 	return err;
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
2469*4882a593Smuzhiyun 
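/*
 * Release per-task resources: the private header buffer of PDUs without a
 * scsi_cmnd, any skb that never made it to the xmit callout, and the task
 * tag (itt), then let iscsi_tcp finish the generic cleanup.
 */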
2470*4882a593Smuzhiyun void cxgbi_cleanup_task(struct iscsi_task *task)
2471*4882a593Smuzhiyun {
2472*4882a593Smuzhiyun 	struct iscsi_tcp_task *tcp_task = task->dd_data;
2473*4882a593Smuzhiyun 	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 	if (!tcp_task || (tcp_task->dd_data != tdata)) {
2476*4882a593Smuzhiyun 		pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
2477*4882a593Smuzhiyun 			task, task->sc, tcp_task,
2478*4882a593Smuzhiyun 			tcp_task ? tcp_task->dd_data : NULL, tdata);
2479*4882a593Smuzhiyun 		return;
2480*4882a593Smuzhiyun 	}
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2483*4882a593Smuzhiyun 		"task 0x%p, skb 0x%p, itt 0x%x.\n",
2484*4882a593Smuzhiyun 		task, tdata->skb, task->hdr_itt);
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun 	tcp_task->dd_data = NULL;
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 	if (!task->sc)
2489*4882a593Smuzhiyun 		kfree(task->hdr);
2490*4882a593Smuzhiyun 	task->hdr = NULL;
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun 	/*  never reached the xmit task callout */
2493*4882a593Smuzhiyun 	if (tdata->skb) {
2494*4882a593Smuzhiyun 		__kfree_skb(tdata->skb);
2495*4882a593Smuzhiyun 		tdata->skb = NULL;
2496*4882a593Smuzhiyun 	}
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 	task_release_itt(task, task->hdr_itt);
2499*4882a593Smuzhiyun 	memset(tdata, 0, sizeof(*tdata));
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	iscsi_tcp_cleanup_task(task);
2502*4882a593Smuzhiyun }
2503*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
2504*4882a593Smuzhiyun 
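/*
 * iscsi_transport get_stats callout: report the PDU/byte counters kept by
 * libiscsi plus eh_abort_cnt as a custom statistic.
 */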
2505*4882a593Smuzhiyun void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
2506*4882a593Smuzhiyun 				struct iscsi_stats *stats)
2507*4882a593Smuzhiyun {
2508*4882a593Smuzhiyun 	struct iscsi_conn *conn = cls_conn->dd_data;
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 	stats->txdata_octets = conn->txdata_octets;
2511*4882a593Smuzhiyun 	stats->rxdata_octets = conn->rxdata_octets;
2512*4882a593Smuzhiyun 	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
2513*4882a593Smuzhiyun 	stats->dataout_pdus = conn->dataout_pdus_cnt;
2514*4882a593Smuzhiyun 	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
2515*4882a593Smuzhiyun 	stats->datain_pdus = conn->datain_pdus_cnt;
2516*4882a593Smuzhiyun 	stats->r2t_pdus = conn->r2t_pdus_cnt;
2517*4882a593Smuzhiyun 	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
2518*4882a593Smuzhiyun 	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
2519*4882a593Smuzhiyun 	stats->digest_err = 0;
2520*4882a593Smuzhiyun 	stats->timeout_err = 0;
2521*4882a593Smuzhiyun 	stats->custom_length = 1;
2522*4882a593Smuzhiyun 	strcpy(stats->custom[0].desc, "eh_abort_cnt");
2523*4882a593Smuzhiyun 	stats->custom[0].value = conn->eh_abort_cnt;
2524*4882a593Smuzhiyun }
2525*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
2526*4882a593Smuzhiyun 
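/*
 * Cap MaxXmitDataSegmentLength to what one tx skb can carry: the larger of
 * the linear headroom and 512 * MAX_SKB_FRAGS, bounded by the adapter's
 * tx_max_size and aligned to the PDU size granularity.
 */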
2527*4882a593Smuzhiyun static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
2528*4882a593Smuzhiyun {
2529*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2530*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
2531*4882a593Smuzhiyun 	struct cxgbi_device *cdev = cconn->chba->cdev;
2532*4882a593Smuzhiyun 	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
2533*4882a593Smuzhiyun 	unsigned int max_def = 512 * MAX_SKB_FRAGS;
2534*4882a593Smuzhiyun 	unsigned int max = max(max_def, headroom);
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun 	max = min(cconn->chba->cdev->tx_max_size, max);
2537*4882a593Smuzhiyun 	if (conn->max_xmit_dlength)
2538*4882a593Smuzhiyun 		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
2539*4882a593Smuzhiyun 	else
2540*4882a593Smuzhiyun 		conn->max_xmit_dlength = max;
2541*4882a593Smuzhiyun 	cxgbi_align_pdu_size(conn->max_xmit_dlength);
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun 	return 0;
2544*4882a593Smuzhiyun }
2545*4882a593Smuzhiyun 
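/*
 * Validate and align MaxRecvDataSegmentLength against the adapter's
 * rx_max_size; a negotiated value larger than the hardware limit is
 * rejected.
 */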
2546*4882a593Smuzhiyun static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
2547*4882a593Smuzhiyun {
2548*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2549*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
2550*4882a593Smuzhiyun 	unsigned int max = cconn->chba->cdev->rx_max_size;
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun 	cxgbi_align_pdu_size(max);
2553*4882a593Smuzhiyun 
2554*4882a593Smuzhiyun 	if (conn->max_recv_dlength) {
2555*4882a593Smuzhiyun 		if (conn->max_recv_dlength > max) {
2556*4882a593Smuzhiyun 			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
2557*4882a593Smuzhiyun 				conn->max_recv_dlength, max);
2558*4882a593Smuzhiyun 			return -EINVAL;
2559*4882a593Smuzhiyun 		}
2560*4882a593Smuzhiyun 		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
2561*4882a593Smuzhiyun 		cxgbi_align_pdu_size(conn->max_recv_dlength);
2562*4882a593Smuzhiyun 	} else
2563*4882a593Smuzhiyun 		conn->max_recv_dlength = max;
2564*4882a593Smuzhiyun 
2565*4882a593Smuzhiyun 	return 0;
2566*4882a593Smuzhiyun }
2567*4882a593Smuzhiyun 
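/*
 * iscsi_transport set_param callout: apply the generic libiscsi setting
 * and, for parameters that affect the offloaded connection (digests, data
 * segment lengths), program the adapter as well.
 */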
2568*4882a593Smuzhiyun int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2569*4882a593Smuzhiyun 			enum iscsi_param param, char *buf, int buflen)
2570*4882a593Smuzhiyun {
2571*4882a593Smuzhiyun 	struct iscsi_conn *conn = cls_conn->dd_data;
2572*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2573*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
2574*4882a593Smuzhiyun 	struct cxgbi_sock *csk = cconn->cep->csk;
2575*4882a593Smuzhiyun 	int err;
2576*4882a593Smuzhiyun 
2577*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2578*4882a593Smuzhiyun 		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
2579*4882a593Smuzhiyun 		cls_conn, param, buflen, buf);
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	switch (param) {
2582*4882a593Smuzhiyun 	case ISCSI_PARAM_HDRDGST_EN:
2583*4882a593Smuzhiyun 		err = iscsi_set_param(cls_conn, param, buf, buflen);
2584*4882a593Smuzhiyun 		if (!err && conn->hdrdgst_en)
2585*4882a593Smuzhiyun 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2586*4882a593Smuzhiyun 							conn->hdrdgst_en,
2587*4882a593Smuzhiyun 							conn->datadgst_en);
2588*4882a593Smuzhiyun 		break;
2589*4882a593Smuzhiyun 	case ISCSI_PARAM_DATADGST_EN:
2590*4882a593Smuzhiyun 		err = iscsi_set_param(cls_conn, param, buf, buflen);
2591*4882a593Smuzhiyun 		if (!err && conn->datadgst_en)
2592*4882a593Smuzhiyun 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2593*4882a593Smuzhiyun 							conn->hdrdgst_en,
2594*4882a593Smuzhiyun 							conn->datadgst_en);
2595*4882a593Smuzhiyun 		break;
2596*4882a593Smuzhiyun 	case ISCSI_PARAM_MAX_R2T:
2597*4882a593Smuzhiyun 		return iscsi_tcp_set_max_r2t(conn, buf);
2598*4882a593Smuzhiyun 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
2599*4882a593Smuzhiyun 		err = iscsi_set_param(cls_conn, param, buf, buflen);
2600*4882a593Smuzhiyun 		if (!err)
2601*4882a593Smuzhiyun 			err = cxgbi_conn_max_recv_dlength(conn);
2602*4882a593Smuzhiyun 		break;
2603*4882a593Smuzhiyun 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2604*4882a593Smuzhiyun 		err = iscsi_set_param(cls_conn, param, buf, buflen);
2605*4882a593Smuzhiyun 		if (!err)
2606*4882a593Smuzhiyun 			err = cxgbi_conn_max_xmit_dlength(conn);
2607*4882a593Smuzhiyun 		break;
2608*4882a593Smuzhiyun 	default:
2609*4882a593Smuzhiyun 		return iscsi_set_param(cls_conn, param, buf, buflen);
2610*4882a593Smuzhiyun 	}
2611*4882a593Smuzhiyun 	return err;
2612*4882a593Smuzhiyun }
2613*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
2614*4882a593Smuzhiyun 
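/*
 * Report the peer address/port of the endpoint's offloaded socket;
 * all other endpoint parameters are unsupported here.
 */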
2615*4882a593Smuzhiyun int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
2616*4882a593Smuzhiyun 		       char *buf)
2617*4882a593Smuzhiyun {
2618*4882a593Smuzhiyun 	struct cxgbi_endpoint *cep = ep->dd_data;
2619*4882a593Smuzhiyun 	struct cxgbi_sock *csk;
2620*4882a593Smuzhiyun 
2621*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2622*4882a593Smuzhiyun 		"cls_conn 0x%p, param %d.\n", ep, param);
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun 	switch (param) {
2625*4882a593Smuzhiyun 	case ISCSI_PARAM_CONN_PORT:
2626*4882a593Smuzhiyun 	case ISCSI_PARAM_CONN_ADDRESS:
2627*4882a593Smuzhiyun 		if (!cep)
2628*4882a593Smuzhiyun 			return -ENOTCONN;
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 		csk = cep->csk;
2631*4882a593Smuzhiyun 		if (!csk)
2632*4882a593Smuzhiyun 			return -ENOTCONN;
2633*4882a593Smuzhiyun 
2634*4882a593Smuzhiyun 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2635*4882a593Smuzhiyun 						 &csk->daddr, param, buf);
2636*4882a593Smuzhiyun 	default:
2637*4882a593Smuzhiyun 		break;
2638*4882a593Smuzhiyun 	}
2639*4882a593Smuzhiyun 	return -ENOSYS;
2640*4882a593Smuzhiyun }
2641*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
2642*4882a593Smuzhiyun 
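/*
 * Allocate an iscsi_tcp connection with a cxgbi_conn private area and link
 * the private area back to the libiscsi connection.
 */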
2643*4882a593Smuzhiyun struct iscsi_cls_conn *
2644*4882a593Smuzhiyun cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
2645*4882a593Smuzhiyun {
2646*4882a593Smuzhiyun 	struct iscsi_cls_conn *cls_conn;
2647*4882a593Smuzhiyun 	struct iscsi_conn *conn;
2648*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn;
2649*4882a593Smuzhiyun 	struct cxgbi_conn *cconn;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
2652*4882a593Smuzhiyun 	if (!cls_conn)
2653*4882a593Smuzhiyun 		return NULL;
2654*4882a593Smuzhiyun 
2655*4882a593Smuzhiyun 	conn = cls_conn->dd_data;
2656*4882a593Smuzhiyun 	tcp_conn = conn->dd_data;
2657*4882a593Smuzhiyun 	cconn = tcp_conn->dd_data;
2658*4882a593Smuzhiyun 	cconn->iconn = conn;
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2661*4882a593Smuzhiyun 		"cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
2662*4882a593Smuzhiyun 		cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
2663*4882a593Smuzhiyun 
2664*4882a593Smuzhiyun 	return cls_conn;
2665*4882a593Smuzhiyun }
2666*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_create_conn);
2667*4882a593Smuzhiyun 
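/*
 * Bind a connection to an offloaded endpoint: program the DDP page size on
 * the TCP connection, do the generic libiscsi bind, size the tag index bits
 * from the session's cmds_max, wire up the csk <-> connection pointers and
 * set the data segment length limits.
 */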
2668*4882a593Smuzhiyun int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2669*4882a593Smuzhiyun 				struct iscsi_cls_conn *cls_conn,
2670*4882a593Smuzhiyun 				u64 transport_eph, int is_leading)
2671*4882a593Smuzhiyun {
2672*4882a593Smuzhiyun 	struct iscsi_conn *conn = cls_conn->dd_data;
2673*4882a593Smuzhiyun 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2674*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = tcp_conn->dd_data;
2675*4882a593Smuzhiyun 	struct cxgbi_ppm *ppm;
2676*4882a593Smuzhiyun 	struct iscsi_endpoint *ep;
2677*4882a593Smuzhiyun 	struct cxgbi_endpoint *cep;
2678*4882a593Smuzhiyun 	struct cxgbi_sock *csk;
2679*4882a593Smuzhiyun 	int err;
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 	ep = iscsi_lookup_endpoint(transport_eph);
2682*4882a593Smuzhiyun 	if (!ep)
2683*4882a593Smuzhiyun 		return -EINVAL;
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun 	/*  setup ddp pagesize */
2686*4882a593Smuzhiyun 	cep = ep->dd_data;
2687*4882a593Smuzhiyun 	csk = cep->csk;
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 	ppm = csk->cdev->cdev2ppm(csk->cdev);
2690*4882a593Smuzhiyun 	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2691*4882a593Smuzhiyun 					     ppm->tformat.pgsz_idx_dflt);
2692*4882a593Smuzhiyun 	if (err < 0)
2693*4882a593Smuzhiyun 		goto put_ep;
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun 	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
2696*4882a593Smuzhiyun 	if (err) {
2697*4882a593Smuzhiyun 		err = -EINVAL;
2698*4882a593Smuzhiyun 		goto put_ep;
2699*4882a593Smuzhiyun 	}
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun 	/*  calculate the tag idx bits needed for this conn based on cmds_max */
2702*4882a593Smuzhiyun 	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	write_lock_bh(&csk->callback_lock);
2705*4882a593Smuzhiyun 	csk->user_data = conn;
2706*4882a593Smuzhiyun 	cconn->chba = cep->chba;
2707*4882a593Smuzhiyun 	cconn->cep = cep;
2708*4882a593Smuzhiyun 	cep->cconn = cconn;
2709*4882a593Smuzhiyun 	write_unlock_bh(&csk->callback_lock);
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun 	cxgbi_conn_max_xmit_dlength(conn);
2712*4882a593Smuzhiyun 	cxgbi_conn_max_recv_dlength(conn);
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2715*4882a593Smuzhiyun 		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
2716*4882a593Smuzhiyun 		cls_session, cls_conn, ep, cconn, csk);
2717*4882a593Smuzhiyun 	/*  init recv engine */
2718*4882a593Smuzhiyun 	iscsi_tcp_hdr_recv_prep(tcp_conn);
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun put_ep:
2721*4882a593Smuzhiyun 	iscsi_put_endpoint(ep);
2722*4882a593Smuzhiyun 	return err;
2723*4882a593Smuzhiyun }
2724*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
2725*4882a593Smuzhiyun 
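/*
 * Create an iSCSI session on the host owning the connected endpoint and
 * allocate its R2T pool.  The per-task private area holds both the
 * iscsi_tcp_task and the cxgbi_task_data.
 */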
2726*4882a593Smuzhiyun struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
2727*4882a593Smuzhiyun 						u16 cmds_max, u16 qdepth,
2728*4882a593Smuzhiyun 						u32 initial_cmdsn)
2729*4882a593Smuzhiyun {
2730*4882a593Smuzhiyun 	struct cxgbi_endpoint *cep;
2731*4882a593Smuzhiyun 	struct cxgbi_hba *chba;
2732*4882a593Smuzhiyun 	struct Scsi_Host *shost;
2733*4882a593Smuzhiyun 	struct iscsi_cls_session *cls_session;
2734*4882a593Smuzhiyun 	struct iscsi_session *session;
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun 	if (!ep) {
2737*4882a593Smuzhiyun 		pr_err("missing endpoint.\n");
2738*4882a593Smuzhiyun 		return NULL;
2739*4882a593Smuzhiyun 	}
2740*4882a593Smuzhiyun 
2741*4882a593Smuzhiyun 	cep = ep->dd_data;
2742*4882a593Smuzhiyun 	chba = cep->chba;
2743*4882a593Smuzhiyun 	shost = chba->shost;
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun 	BUG_ON(chba != iscsi_host_priv(shost));
2746*4882a593Smuzhiyun 
2747*4882a593Smuzhiyun 	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
2748*4882a593Smuzhiyun 					cmds_max, 0,
2749*4882a593Smuzhiyun 					sizeof(struct iscsi_tcp_task) +
2750*4882a593Smuzhiyun 					sizeof(struct cxgbi_task_data),
2751*4882a593Smuzhiyun 					initial_cmdsn, ISCSI_MAX_TARGET);
2752*4882a593Smuzhiyun 	if (!cls_session)
2753*4882a593Smuzhiyun 		return NULL;
2754*4882a593Smuzhiyun 
2755*4882a593Smuzhiyun 	session = cls_session->dd_data;
2756*4882a593Smuzhiyun 	if (iscsi_tcp_r2tpool_alloc(session))
2757*4882a593Smuzhiyun 		goto remove_session;
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2760*4882a593Smuzhiyun 		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
2761*4882a593Smuzhiyun 	return cls_session;
2762*4882a593Smuzhiyun 
2763*4882a593Smuzhiyun remove_session:
2764*4882a593Smuzhiyun 	iscsi_session_teardown(cls_session);
2765*4882a593Smuzhiyun 	return NULL;
2766*4882a593Smuzhiyun }
2767*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_create_session);
2768*4882a593Smuzhiyun 
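/*
 * Free the session's R2T pool and tear the libiscsi session down.
 */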
2769*4882a593Smuzhiyun void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
2770*4882a593Smuzhiyun {
2771*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2772*4882a593Smuzhiyun 		"cls sess 0x%p.\n", cls_session);
2773*4882a593Smuzhiyun 
2774*4882a593Smuzhiyun 	iscsi_tcp_r2tpool_free(cls_session->dd_data);
2775*4882a593Smuzhiyun 	iscsi_session_teardown(cls_session);
2776*4882a593Smuzhiyun }
2777*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
2778*4882a593Smuzhiyun 
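/*
 * iscsi host set_param callout: record the requested IPv4 address on the
 * HBA; attempts to change the hardware address or netdev name are ignored,
 * everything else is handled by libiscsi.
 */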
2779*4882a593Smuzhiyun int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2780*4882a593Smuzhiyun 			char *buf, int buflen)
2781*4882a593Smuzhiyun {
2782*4882a593Smuzhiyun 	struct cxgbi_hba *chba = iscsi_host_priv(shost);
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun 	if (!chba->ndev) {
2785*4882a593Smuzhiyun 		shost_printk(KERN_ERR, shost, "Could not set host param. "
2786*4882a593Smuzhiyun 				"netdev for host not set.\n");
2787*4882a593Smuzhiyun 		return -ENODEV;
2788*4882a593Smuzhiyun 	}
2789*4882a593Smuzhiyun 
2790*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2791*4882a593Smuzhiyun 		"shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
2792*4882a593Smuzhiyun 		shost, chba, chba->ndev->name, param, buflen, buf);
2793*4882a593Smuzhiyun 
2794*4882a593Smuzhiyun 	switch (param) {
2795*4882a593Smuzhiyun 	case ISCSI_HOST_PARAM_IPADDRESS:
2796*4882a593Smuzhiyun 	{
2797*4882a593Smuzhiyun 		__be32 addr = in_aton(buf);
2798*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI,
2799*4882a593Smuzhiyun 			"hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
2800*4882a593Smuzhiyun 		cxgbi_set_iscsi_ipv4(chba, addr);
2801*4882a593Smuzhiyun 		return 0;
2802*4882a593Smuzhiyun 	}
2803*4882a593Smuzhiyun 	case ISCSI_HOST_PARAM_HWADDRESS:
2804*4882a593Smuzhiyun 	case ISCSI_HOST_PARAM_NETDEV_NAME:
2805*4882a593Smuzhiyun 		return 0;
2806*4882a593Smuzhiyun 	default:
2807*4882a593Smuzhiyun 		return iscsi_host_set_param(shost, param, buf, buflen);
2808*4882a593Smuzhiyun 	}
2809*4882a593Smuzhiyun }
2810*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
2811*4882a593Smuzhiyun 
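/*
 * iscsi host get_param callout: report the MAC address, the netdev name or
 * the source address of a connected socket on this port; other parameters
 * are handled by libiscsi.
 */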
2812*4882a593Smuzhiyun int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2813*4882a593Smuzhiyun 			char *buf)
2814*4882a593Smuzhiyun {
2815*4882a593Smuzhiyun 	struct cxgbi_hba *chba = iscsi_host_priv(shost);
2816*4882a593Smuzhiyun 	int len = 0;
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	if (!chba->ndev) {
2819*4882a593Smuzhiyun 		shost_printk(KERN_ERR, shost, "Could not get host param. "
2820*4882a593Smuzhiyun 				"netdev for host not set.\n");
2821*4882a593Smuzhiyun 		return -ENODEV;
2822*4882a593Smuzhiyun 	}
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
2825*4882a593Smuzhiyun 		"shost 0x%p, hba 0x%p,%s, param %d.\n",
2826*4882a593Smuzhiyun 		shost, chba, chba->ndev->name, param);
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 	switch (param) {
2829*4882a593Smuzhiyun 	case ISCSI_HOST_PARAM_HWADDRESS:
2830*4882a593Smuzhiyun 		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
2831*4882a593Smuzhiyun 		break;
2832*4882a593Smuzhiyun 	case ISCSI_HOST_PARAM_NETDEV_NAME:
2833*4882a593Smuzhiyun 		len = sprintf(buf, "%s\n", chba->ndev->name);
2834*4882a593Smuzhiyun 		break;
2835*4882a593Smuzhiyun 	case ISCSI_HOST_PARAM_IPADDRESS:
2836*4882a593Smuzhiyun 	{
2837*4882a593Smuzhiyun 		struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
2838*4882a593Smuzhiyun 							   chba->port_id);
2839*4882a593Smuzhiyun 		if (csk) {
2840*4882a593Smuzhiyun 			len = sprintf(buf, "%pIS",
2841*4882a593Smuzhiyun 				      (struct sockaddr *)&csk->saddr);
2842*4882a593Smuzhiyun 		}
2843*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI,
2844*4882a593Smuzhiyun 			  "hba %s, addr %s.\n", chba->ndev->name, buf);
2845*4882a593Smuzhiyun 		break;
2846*4882a593Smuzhiyun 	}
2847*4882a593Smuzhiyun 	default:
2848*4882a593Smuzhiyun 		return iscsi_host_get_param(shost, param, buf);
2849*4882a593Smuzhiyun 	}
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun 	return len;
2852*4882a593Smuzhiyun }
2853*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
2854*4882a593Smuzhiyun 
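/*
 * ep_connect callout: resolve a route to the target, create and connect an
 * offloaded socket (redoing the lookup bound to the requested host's
 * interface if the first route lands on a different port), then wrap the
 * socket in an iscsi_endpoint.
 */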
2855*4882a593Smuzhiyun struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
2856*4882a593Smuzhiyun 					struct sockaddr *dst_addr,
2857*4882a593Smuzhiyun 					int non_blocking)
2858*4882a593Smuzhiyun {
2859*4882a593Smuzhiyun 	struct iscsi_endpoint *ep;
2860*4882a593Smuzhiyun 	struct cxgbi_endpoint *cep;
2861*4882a593Smuzhiyun 	struct cxgbi_hba *hba = NULL;
2862*4882a593Smuzhiyun 	struct cxgbi_sock *csk;
2863*4882a593Smuzhiyun 	int ifindex = 0;
2864*4882a593Smuzhiyun 	int err = -EINVAL;
2865*4882a593Smuzhiyun 
2866*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2867*4882a593Smuzhiyun 		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
2868*4882a593Smuzhiyun 		shost, non_blocking, dst_addr);
2869*4882a593Smuzhiyun 
2870*4882a593Smuzhiyun 	if (shost) {
2871*4882a593Smuzhiyun 		hba = iscsi_host_priv(shost);
2872*4882a593Smuzhiyun 		if (!hba) {
2873*4882a593Smuzhiyun 			pr_info("shost 0x%p, priv NULL.\n", shost);
2874*4882a593Smuzhiyun 			goto err_out;
2875*4882a593Smuzhiyun 		}
2876*4882a593Smuzhiyun 	}
2877*4882a593Smuzhiyun 
2878*4882a593Smuzhiyun check_route:
2879*4882a593Smuzhiyun 	if (dst_addr->sa_family == AF_INET) {
2880*4882a593Smuzhiyun 		csk = cxgbi_check_route(dst_addr, ifindex);
2881*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
2882*4882a593Smuzhiyun 	} else if (dst_addr->sa_family == AF_INET6) {
2883*4882a593Smuzhiyun 		csk = cxgbi_check_route6(dst_addr, ifindex);
2884*4882a593Smuzhiyun #endif
2885*4882a593Smuzhiyun 	} else {
2886*4882a593Smuzhiyun 		pr_info("address family 0x%x NOT supported.\n",
2887*4882a593Smuzhiyun 			dst_addr->sa_family);
2888*4882a593Smuzhiyun 		err = -EAFNOSUPPORT;
2889*4882a593Smuzhiyun 		return (struct iscsi_endpoint *)ERR_PTR(err);
2890*4882a593Smuzhiyun 	}
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 	if (IS_ERR(csk))
2893*4882a593Smuzhiyun 		return (struct iscsi_endpoint *)csk;
2894*4882a593Smuzhiyun 	cxgbi_sock_get(csk);
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 	if (!hba)
2897*4882a593Smuzhiyun 		hba = csk->cdev->hbas[csk->port_id];
2898*4882a593Smuzhiyun 	else if (hba != csk->cdev->hbas[csk->port_id]) {
2899*4882a593Smuzhiyun 		if (ifindex != hba->ndev->ifindex) {
2900*4882a593Smuzhiyun 			cxgbi_sock_put(csk);
2901*4882a593Smuzhiyun 			cxgbi_sock_closed(csk);
2902*4882a593Smuzhiyun 			ifindex = hba->ndev->ifindex;
2903*4882a593Smuzhiyun 			goto check_route;
2904*4882a593Smuzhiyun 		}
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 		pr_info("Could not connect through requested host %u "
2907*4882a593Smuzhiyun 			"hba 0x%p != 0x%p (%u).\n",
2908*4882a593Smuzhiyun 			shost->host_no, hba,
2909*4882a593Smuzhiyun 			csk->cdev->hbas[csk->port_id], csk->port_id);
2910*4882a593Smuzhiyun 		err = -ENOSPC;
2911*4882a593Smuzhiyun 		goto release_conn;
2912*4882a593Smuzhiyun 	}
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 	err = sock_get_port(csk);
2915*4882a593Smuzhiyun 	if (err)
2916*4882a593Smuzhiyun 		goto release_conn;
2917*4882a593Smuzhiyun 
2918*4882a593Smuzhiyun 	cxgbi_sock_set_state(csk, CTP_CONNECTING);
2919*4882a593Smuzhiyun 	err = csk->cdev->csk_init_act_open(csk);
2920*4882a593Smuzhiyun 	if (err)
2921*4882a593Smuzhiyun 		goto release_conn;
2922*4882a593Smuzhiyun 
2923*4882a593Smuzhiyun 	if (cxgbi_sock_is_closing(csk)) {
2924*4882a593Smuzhiyun 		err = -ENOSPC;
2925*4882a593Smuzhiyun 		pr_info("csk 0x%p is closing.\n", csk);
2926*4882a593Smuzhiyun 		goto release_conn;
2927*4882a593Smuzhiyun 	}
2928*4882a593Smuzhiyun 
2929*4882a593Smuzhiyun 	ep = iscsi_create_endpoint(sizeof(*cep));
2930*4882a593Smuzhiyun 	if (!ep) {
2931*4882a593Smuzhiyun 		err = -ENOMEM;
2932*4882a593Smuzhiyun 		pr_info("iscsi alloc ep, OOM.\n");
2933*4882a593Smuzhiyun 		goto release_conn;
2934*4882a593Smuzhiyun 	}
2935*4882a593Smuzhiyun 
2936*4882a593Smuzhiyun 	cep = ep->dd_data;
2937*4882a593Smuzhiyun 	cep->csk = csk;
2938*4882a593Smuzhiyun 	cep->chba = hba;
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2941*4882a593Smuzhiyun 		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
2942*4882a593Smuzhiyun 		ep, cep, csk, hba, hba->ndev->name);
2943*4882a593Smuzhiyun 	return ep;
2944*4882a593Smuzhiyun 
2945*4882a593Smuzhiyun release_conn:
2946*4882a593Smuzhiyun 	cxgbi_sock_put(csk);
2947*4882a593Smuzhiyun 	cxgbi_sock_closed(csk);
2948*4882a593Smuzhiyun err_out:
2949*4882a593Smuzhiyun 	return ERR_PTR(err);
2950*4882a593Smuzhiyun }
2951*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
2952*4882a593Smuzhiyun 
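/*
 * ep_poll callout: non-zero once the offloaded connection is established.
 */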
2953*4882a593Smuzhiyun int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
2954*4882a593Smuzhiyun {
2955*4882a593Smuzhiyun 	struct cxgbi_endpoint *cep = ep->dd_data;
2956*4882a593Smuzhiyun 	struct cxgbi_sock *csk = cep->csk;
2957*4882a593Smuzhiyun 
2958*4882a593Smuzhiyun 	if (!cxgbi_sock_is_established(csk))
2959*4882a593Smuzhiyun 		return 0;
2960*4882a593Smuzhiyun 	return 1;
2961*4882a593Smuzhiyun }
2962*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
2963*4882a593Smuzhiyun 
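/*
 * Tear an endpoint down: detach it from any bound connection, destroy the
 * iscsi_endpoint, close the offloaded socket (actively if it reached the
 * established state) and drop the reference taken at connect time.
 */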
2964*4882a593Smuzhiyun void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
2965*4882a593Smuzhiyun {
2966*4882a593Smuzhiyun 	struct cxgbi_endpoint *cep = ep->dd_data;
2967*4882a593Smuzhiyun 	struct cxgbi_conn *cconn = cep->cconn;
2968*4882a593Smuzhiyun 	struct cxgbi_sock *csk = cep->csk;
2969*4882a593Smuzhiyun 
2970*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2971*4882a593Smuzhiyun 		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
2972*4882a593Smuzhiyun 		ep, cep, cconn, csk, csk->state, csk->flags);
2973*4882a593Smuzhiyun 
2974*4882a593Smuzhiyun 	if (cconn && cconn->iconn) {
2975*4882a593Smuzhiyun 		iscsi_suspend_tx(cconn->iconn);
2976*4882a593Smuzhiyun 		write_lock_bh(&csk->callback_lock);
2977*4882a593Smuzhiyun 		cep->csk->user_data = NULL;
2978*4882a593Smuzhiyun 		cconn->cep = NULL;
2979*4882a593Smuzhiyun 		write_unlock_bh(&csk->callback_lock);
2980*4882a593Smuzhiyun 	}
2981*4882a593Smuzhiyun 	iscsi_destroy_endpoint(ep);
2982*4882a593Smuzhiyun 
2983*4882a593Smuzhiyun 	if (likely(csk->state >= CTP_ESTABLISHED))
2984*4882a593Smuzhiyun 		need_active_close(csk);
2985*4882a593Smuzhiyun 	else
2986*4882a593Smuzhiyun 		cxgbi_sock_closed(csk);
2987*4882a593Smuzhiyun 
2988*4882a593Smuzhiyun 	cxgbi_sock_put(csk);
2989*4882a593Smuzhiyun }
2990*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
2991*4882a593Smuzhiyun 
2992*4882a593Smuzhiyun int cxgbi_iscsi_init(struct iscsi_transport *itp,
2993*4882a593Smuzhiyun 			struct scsi_transport_template **stt)
2994*4882a593Smuzhiyun {
2995*4882a593Smuzhiyun 	*stt = iscsi_register_transport(itp);
2996*4882a593Smuzhiyun 	if (*stt == NULL) {
2997*4882a593Smuzhiyun 		pr_err("unable to register %s transport 0x%p.\n",
2998*4882a593Smuzhiyun 			itp->name, itp);
2999*4882a593Smuzhiyun 		return -ENODEV;
3000*4882a593Smuzhiyun 	}
3001*4882a593Smuzhiyun 	log_debug(1 << CXGBI_DBG_ISCSI,
3002*4882a593Smuzhiyun 		"%s, registered iscsi transport 0x%p.\n",
3003*4882a593Smuzhiyun 		itp->name, stt);
3004*4882a593Smuzhiyun 	return 0;
3005*4882a593Smuzhiyun }
3006*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
3007*4882a593Smuzhiyun 
3008*4882a593Smuzhiyun void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
3009*4882a593Smuzhiyun 			struct scsi_transport_template **stt)
3010*4882a593Smuzhiyun {
3011*4882a593Smuzhiyun 	if (*stt) {
3012*4882a593Smuzhiyun 		log_debug(1 << CXGBI_DBG_ISCSI,
3013*4882a593Smuzhiyun 			"de-register transport 0x%p, %s, stt 0x%p.\n",
3014*4882a593Smuzhiyun 			itp, itp->name, *stt);
3015*4882a593Smuzhiyun 		*stt = NULL;
3016*4882a593Smuzhiyun 		iscsi_unregister_transport(itp);
3017*4882a593Smuzhiyun 	}
3018*4882a593Smuzhiyun }
3019*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
3020*4882a593Smuzhiyun 
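/*
 * Sysfs visibility callout: the host and connection parameters listed below
 * are exposed read-only; everything else is hidden.
 */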
3021*4882a593Smuzhiyun umode_t cxgbi_attr_is_visible(int param_type, int param)
3022*4882a593Smuzhiyun {
3023*4882a593Smuzhiyun 	switch (param_type) {
3024*4882a593Smuzhiyun 	case ISCSI_HOST_PARAM:
3025*4882a593Smuzhiyun 		switch (param) {
3026*4882a593Smuzhiyun 		case ISCSI_HOST_PARAM_NETDEV_NAME:
3027*4882a593Smuzhiyun 		case ISCSI_HOST_PARAM_HWADDRESS:
3028*4882a593Smuzhiyun 		case ISCSI_HOST_PARAM_IPADDRESS:
3029*4882a593Smuzhiyun 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
3030*4882a593Smuzhiyun 			return S_IRUGO;
3031*4882a593Smuzhiyun 		default:
3032*4882a593Smuzhiyun 			return 0;
3033*4882a593Smuzhiyun 		}
3034*4882a593Smuzhiyun 	case ISCSI_PARAM:
3035*4882a593Smuzhiyun 		switch (param) {
3036*4882a593Smuzhiyun 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
3037*4882a593Smuzhiyun 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3038*4882a593Smuzhiyun 		case ISCSI_PARAM_HDRDGST_EN:
3039*4882a593Smuzhiyun 		case ISCSI_PARAM_DATADGST_EN:
3040*4882a593Smuzhiyun 		case ISCSI_PARAM_CONN_ADDRESS:
3041*4882a593Smuzhiyun 		case ISCSI_PARAM_CONN_PORT:
3042*4882a593Smuzhiyun 		case ISCSI_PARAM_EXP_STATSN:
3043*4882a593Smuzhiyun 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
3044*4882a593Smuzhiyun 		case ISCSI_PARAM_PERSISTENT_PORT:
3045*4882a593Smuzhiyun 		case ISCSI_PARAM_PING_TMO:
3046*4882a593Smuzhiyun 		case ISCSI_PARAM_RECV_TMO:
3047*4882a593Smuzhiyun 		case ISCSI_PARAM_INITIAL_R2T_EN:
3048*4882a593Smuzhiyun 		case ISCSI_PARAM_MAX_R2T:
3049*4882a593Smuzhiyun 		case ISCSI_PARAM_IMM_DATA_EN:
3050*4882a593Smuzhiyun 		case ISCSI_PARAM_FIRST_BURST:
3051*4882a593Smuzhiyun 		case ISCSI_PARAM_MAX_BURST:
3052*4882a593Smuzhiyun 		case ISCSI_PARAM_PDU_INORDER_EN:
3053*4882a593Smuzhiyun 		case ISCSI_PARAM_DATASEQ_INORDER_EN:
3054*4882a593Smuzhiyun 		case ISCSI_PARAM_ERL:
3055*4882a593Smuzhiyun 		case ISCSI_PARAM_TARGET_NAME:
3056*4882a593Smuzhiyun 		case ISCSI_PARAM_TPGT:
3057*4882a593Smuzhiyun 		case ISCSI_PARAM_USERNAME:
3058*4882a593Smuzhiyun 		case ISCSI_PARAM_PASSWORD:
3059*4882a593Smuzhiyun 		case ISCSI_PARAM_USERNAME_IN:
3060*4882a593Smuzhiyun 		case ISCSI_PARAM_PASSWORD_IN:
3061*4882a593Smuzhiyun 		case ISCSI_PARAM_FAST_ABORT:
3062*4882a593Smuzhiyun 		case ISCSI_PARAM_ABORT_TMO:
3063*4882a593Smuzhiyun 		case ISCSI_PARAM_LU_RESET_TMO:
3064*4882a593Smuzhiyun 		case ISCSI_PARAM_TGT_RESET_TMO:
3065*4882a593Smuzhiyun 		case ISCSI_PARAM_IFACE_NAME:
3066*4882a593Smuzhiyun 		case ISCSI_PARAM_INITIATOR_NAME:
3067*4882a593Smuzhiyun 			return S_IRUGO;
3068*4882a593Smuzhiyun 		default:
3069*4882a593Smuzhiyun 			return 0;
3070*4882a593Smuzhiyun 		}
3071*4882a593Smuzhiyun 	}
3072*4882a593Smuzhiyun 
3073*4882a593Smuzhiyun 	return 0;
3074*4882a593Smuzhiyun }
3075*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
3076*4882a593Smuzhiyun 
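/*
 * Module init: ensure the skb control block can hold a cxgbi_skb_cb and
 * allocate the zeroed page used for PDU padding.
 */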
3077*4882a593Smuzhiyun static int __init libcxgbi_init_module(void)
3078*4882a593Smuzhiyun {
3079*4882a593Smuzhiyun 	pr_info("%s", version);
3080*4882a593Smuzhiyun 
3081*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
3082*4882a593Smuzhiyun 		     sizeof(struct cxgbi_skb_cb));
3083*4882a593Smuzhiyun 	rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3084*4882a593Smuzhiyun 	if (!rsvd_page)
3085*4882a593Smuzhiyun 		return -ENOMEM;
3086*4882a593Smuzhiyun 
3087*4882a593Smuzhiyun 	return 0;
3088*4882a593Smuzhiyun }
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun static void __exit libcxgbi_exit_module(void)
3091*4882a593Smuzhiyun {
3092*4882a593Smuzhiyun 	cxgbi_device_unregister_all(0xFF);
3093*4882a593Smuzhiyun 	put_page(rsvd_page);
3095*4882a593Smuzhiyun }
3096*4882a593Smuzhiyun 
3097*4882a593Smuzhiyun module_init(libcxgbi_init_module);
3098*4882a593Smuzhiyun module_exit(libcxgbi_exit_module);
3099