1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * iSCSI over TCP/IP Data-Path lib
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2004 Dmitry Yusupov
6*4882a593Smuzhiyun * Copyright (C) 2004 Alex Aizman
7*4882a593Smuzhiyun * Copyright (C) 2005 - 2006 Mike Christie
8*4882a593Smuzhiyun * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
9*4882a593Smuzhiyun * maintained by open-iscsi@googlegroups.com
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * Credits:
12*4882a593Smuzhiyun * Christoph Hellwig
13*4882a593Smuzhiyun * FUJITA Tomonori
14*4882a593Smuzhiyun * Arne Redlich
15*4882a593Smuzhiyun * Zhenyu Wang
16*4882a593Smuzhiyun */
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include <crypto/hash.h>
19*4882a593Smuzhiyun #include <linux/types.h>
20*4882a593Smuzhiyun #include <linux/list.h>
21*4882a593Smuzhiyun #include <linux/inet.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/file.h>
24*4882a593Smuzhiyun #include <linux/blkdev.h>
25*4882a593Smuzhiyun #include <linux/delay.h>
26*4882a593Smuzhiyun #include <linux/kfifo.h>
27*4882a593Smuzhiyun #include <linux/scatterlist.h>
28*4882a593Smuzhiyun #include <linux/module.h>
29*4882a593Smuzhiyun #include <net/tcp.h>
30*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
31*4882a593Smuzhiyun #include <scsi/scsi_device.h>
32*4882a593Smuzhiyun #include <scsi/scsi_host.h>
33*4882a593Smuzhiyun #include <scsi/scsi.h>
34*4882a593Smuzhiyun #include <scsi/scsi_transport_iscsi.h>
35*4882a593Smuzhiyun #include <trace/events/iscsi.h>
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #include "iscsi_tcp.h"
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
40*4882a593Smuzhiyun "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
41*4882a593Smuzhiyun "Alex Aizman <itn780@yahoo.com>");
42*4882a593Smuzhiyun MODULE_DESCRIPTION("iSCSI/TCP data-path");
43*4882a593Smuzhiyun MODULE_LICENSE("GPL");
44*4882a593Smuzhiyun
/* Debug knob: non-zero enables verbose libiscsi_tcp logging via printk. */
static int iscsi_dbg_libtcp;
/* Readable by all, writable by root at runtime through
 * /sys/module/libiscsi_tcp/parameters/debug_libiscsi_tcp. */
module_param_named(debug_libiscsi_tcp, iscsi_dbg_libtcp, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp "
		 "module. Set to 1 to turn on, and zero to turn off. Default "
		 "is off.");
51*4882a593Smuzhiyun
/*
 * Connection-scoped debug logging. Prints to the kernel log only when the
 * debug_libiscsi_tcp module parameter is set, but always emits the iscsi
 * trace event so tracing works independently of the printk knob.
 *
 * No semicolon after "while (0)": the caller supplies it, which keeps the
 * macro safe inside unbraced if/else bodies (a trailing ';' here would
 * otherwise terminate the if-branch and break a following "else").
 */
#define ISCSI_DBG_TCP(_conn, dbg_fmt, arg...)			\
	do {							\
		if (iscsi_dbg_libtcp)				\
			iscsi_conn_printk(KERN_INFO, _conn,	\
					  "%s " dbg_fmt,	\
					  __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_tcp,		\
				&(_conn)->cls_conn->dev,	\
				"%s " dbg_fmt, __func__, ##arg);\
	} while (0)
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
64*4882a593Smuzhiyun struct iscsi_segment *segment);
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun /*
67*4882a593Smuzhiyun * Scatterlist handling: inside the iscsi_segment, we
68*4882a593Smuzhiyun * remember an index into the scatterlist, and set data/size
69*4882a593Smuzhiyun * to the current scatterlist entry. For highmem pages, we
70*4882a593Smuzhiyun * kmap as needed.
71*4882a593Smuzhiyun *
72*4882a593Smuzhiyun * Note that the page is unmapped when we return from
73*4882a593Smuzhiyun * TCP's data_ready handler, so we may end up mapping and
74*4882a593Smuzhiyun * unmapping the same page repeatedly. The whole reason
75*4882a593Smuzhiyun * for this is that we shouldn't keep the page mapped
76*4882a593Smuzhiyun * outside the softirq.
77*4882a593Smuzhiyun */
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /**
80*4882a593Smuzhiyun * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
81*4882a593Smuzhiyun * @segment: the buffer object
82*4882a593Smuzhiyun * @sg: scatterlist
83*4882a593Smuzhiyun * @offset: byte offset into that sg entry
84*4882a593Smuzhiyun *
85*4882a593Smuzhiyun * This function sets up the segment so that subsequent
86*4882a593Smuzhiyun * data is copied to the indicated sg entry, at the given
87*4882a593Smuzhiyun * offset.
88*4882a593Smuzhiyun */
89*4882a593Smuzhiyun static inline void
iscsi_tcp_segment_init_sg(struct iscsi_segment * segment,struct scatterlist * sg,unsigned int offset)90*4882a593Smuzhiyun iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
91*4882a593Smuzhiyun struct scatterlist *sg, unsigned int offset)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun segment->sg = sg;
94*4882a593Smuzhiyun segment->sg_offset = offset;
95*4882a593Smuzhiyun segment->size = min(sg->length - offset,
96*4882a593Smuzhiyun segment->total_size - segment->total_copied);
97*4882a593Smuzhiyun segment->data = NULL;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun /**
101*4882a593Smuzhiyun * iscsi_tcp_segment_map - map the current S/G page
102*4882a593Smuzhiyun * @segment: iscsi_segment
103*4882a593Smuzhiyun * @recv: 1 if called from recv path
104*4882a593Smuzhiyun *
105*4882a593Smuzhiyun * We only need to possibly kmap data if scatter lists are being used,
106*4882a593Smuzhiyun * because the iscsi passthrough and internal IO paths will never use high
107*4882a593Smuzhiyun * mem pages.
108*4882a593Smuzhiyun */
iscsi_tcp_segment_map(struct iscsi_segment * segment,int recv)109*4882a593Smuzhiyun static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun struct scatterlist *sg;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun if (segment->data != NULL || !segment->sg)
114*4882a593Smuzhiyun return;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun sg = segment->sg;
117*4882a593Smuzhiyun BUG_ON(segment->sg_mapped);
118*4882a593Smuzhiyun BUG_ON(sg->length == 0);
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun /*
121*4882a593Smuzhiyun * We always map for the recv path.
122*4882a593Smuzhiyun *
123*4882a593Smuzhiyun * If the page count is greater than one it is ok to send
124*4882a593Smuzhiyun * to the network layer's zero copy send path. If not we
125*4882a593Smuzhiyun * have to go the slow sendmsg path.
126*4882a593Smuzhiyun *
127*4882a593Smuzhiyun * Same goes for slab pages: skb_can_coalesce() allows
128*4882a593Smuzhiyun * coalescing neighboring slab objects into a single frag which
129*4882a593Smuzhiyun * triggers one of hardened usercopy checks.
130*4882a593Smuzhiyun */
131*4882a593Smuzhiyun if (!recv && sendpage_ok(sg_page(sg)))
132*4882a593Smuzhiyun return;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun if (recv) {
135*4882a593Smuzhiyun segment->atomic_mapped = true;
136*4882a593Smuzhiyun segment->sg_mapped = kmap_atomic(sg_page(sg));
137*4882a593Smuzhiyun } else {
138*4882a593Smuzhiyun segment->atomic_mapped = false;
139*4882a593Smuzhiyun /* the xmit path can sleep with the page mapped so use kmap */
140*4882a593Smuzhiyun segment->sg_mapped = kmap(sg_page(sg));
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun
iscsi_tcp_segment_unmap(struct iscsi_segment * segment)146*4882a593Smuzhiyun void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun if (segment->sg_mapped) {
149*4882a593Smuzhiyun if (segment->atomic_mapped)
150*4882a593Smuzhiyun kunmap_atomic(segment->sg_mapped);
151*4882a593Smuzhiyun else
152*4882a593Smuzhiyun kunmap(sg_page(segment->sg));
153*4882a593Smuzhiyun segment->sg_mapped = NULL;
154*4882a593Smuzhiyun segment->data = NULL;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap);
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /*
160*4882a593Smuzhiyun * Splice the digest buffer into the buffer
161*4882a593Smuzhiyun */
162*4882a593Smuzhiyun static inline void
iscsi_tcp_segment_splice_digest(struct iscsi_segment * segment,void * digest)163*4882a593Smuzhiyun iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun segment->data = digest;
166*4882a593Smuzhiyun segment->digest_len = ISCSI_DIGEST_SIZE;
167*4882a593Smuzhiyun segment->total_size += ISCSI_DIGEST_SIZE;
168*4882a593Smuzhiyun segment->size = ISCSI_DIGEST_SIZE;
169*4882a593Smuzhiyun segment->copied = 0;
170*4882a593Smuzhiyun segment->sg = NULL;
171*4882a593Smuzhiyun segment->hash = NULL;
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
/**
 * iscsi_tcp_segment_done - check whether the segment is complete
 * @tcp_conn: iscsi tcp connection
 * @segment: iscsi segment to check
 * @recv: set to one of this is called from the recv path
 * @copied: number of bytes copied
 *
 * Check if we're done receiving this segment. If the receive
 * buffer is full but we expect more data, move on to the
 * next entry in the scatterlist.
 *
 * If the amount of data we received isn't a multiple of 4,
 * we will transparently receive the pad bytes, too.
 *
 * This function must be re-entrant.
 *
 * Returns 1 when the whole segment (payload + pad + data digest) has
 * been transferred, 0 when more bytes are still expected.
 */
int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
			   struct iscsi_segment *segment, int recv,
			   unsigned copied)
{
	struct scatterlist sg;
	unsigned int pad;

	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
		      segment->copied, copied, segment->size,
		      recv ? "recv" : "xmit");
	/* Fold the bytes just transferred into the running data digest. */
	if (segment->hash && copied) {
		/*
		 * If a segment is kmapd we must unmap it before sending
		 * to the crypto layer since that will try to kmap it again.
		 */
		iscsi_tcp_segment_unmap(segment);

		if (!segment->data) {
			/* sg-backed: hash directly out of the sg page. */
			sg_init_table(&sg, 1);
			sg_set_page(&sg, sg_page(segment->sg), copied,
				    segment->copied + segment->sg_offset +
				    segment->sg->offset);
		} else
			/* linear buffer (or pad bytes). */
			sg_init_one(&sg, segment->data + segment->copied,
				    copied);
		ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
		crypto_ahash_update(segment->hash);
	}

	segment->copied += copied;
	if (segment->copied < segment->size) {
		/* Current sg entry / buffer not yet full: keep going. */
		iscsi_tcp_segment_map(segment, recv);
		return 0;
	}

	segment->total_copied += segment->copied;
	segment->copied = 0;
	segment->size = 0;

	/* Unmap the current scatterlist page, if there is one. */
	iscsi_tcp_segment_unmap(segment);

	/* Do we have more scatterlist entries? */
	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n",
		      segment->total_copied, segment->total_size);
	if (segment->total_copied < segment->total_size) {
		/* Proceed to the next entry in the scatterlist. */
		iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
					  0);
		iscsi_tcp_segment_map(segment, recv);
		BUG_ON(segment->size == 0);
		return 0;
	}

	/* Do we need to handle padding? (skipped if the HBA pads for us) */
	if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
		pad = iscsi_padding(segment->total_copied);
		if (pad != 0) {
			ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
				      "consume %d pad bytes\n", pad);
			/* Re-arm the segment to transfer the pad bytes. */
			segment->total_size += pad;
			segment->size = pad;
			segment->data = segment->padbuf;
			return 0;
		}
	}

	/*
	 * Set us up for transferring the data digest. hdr digest
	 * is completely handled in hdr done function.
	 */
	if (segment->hash) {
		ahash_request_set_crypt(segment->hash, NULL,
					segment->digest, 0);
		crypto_ahash_final(segment->hash);
		iscsi_tcp_segment_splice_digest(segment,
				recv ? segment->recv_digest : segment->digest);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun /**
275*4882a593Smuzhiyun * iscsi_tcp_segment_recv - copy data to segment
276*4882a593Smuzhiyun * @tcp_conn: the iSCSI TCP connection
277*4882a593Smuzhiyun * @segment: the buffer to copy to
278*4882a593Smuzhiyun * @ptr: data pointer
279*4882a593Smuzhiyun * @len: amount of data available
280*4882a593Smuzhiyun *
281*4882a593Smuzhiyun * This function copies up to @len bytes to the
282*4882a593Smuzhiyun * given buffer, and returns the number of bytes
283*4882a593Smuzhiyun * consumed, which can actually be less than @len.
284*4882a593Smuzhiyun *
285*4882a593Smuzhiyun * If hash digest is enabled, the function will update the
286*4882a593Smuzhiyun * hash while copying.
287*4882a593Smuzhiyun * Combining these two operations doesn't buy us a lot (yet),
288*4882a593Smuzhiyun * but in the future we could implement combined copy+crc,
289*4882a593Smuzhiyun * just way we do for network layer checksums.
290*4882a593Smuzhiyun */
291*4882a593Smuzhiyun static int
iscsi_tcp_segment_recv(struct iscsi_tcp_conn * tcp_conn,struct iscsi_segment * segment,const void * ptr,unsigned int len)292*4882a593Smuzhiyun iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
293*4882a593Smuzhiyun struct iscsi_segment *segment, const void *ptr,
294*4882a593Smuzhiyun unsigned int len)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun unsigned int copy = 0, copied = 0;
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
299*4882a593Smuzhiyun if (copied == len) {
300*4882a593Smuzhiyun ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
301*4882a593Smuzhiyun "copied %d bytes\n", len);
302*4882a593Smuzhiyun break;
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun copy = min(len - copied, segment->size - segment->copied);
306*4882a593Smuzhiyun ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copying %d\n", copy);
307*4882a593Smuzhiyun memcpy(segment->data + segment->copied, ptr + copied, copy);
308*4882a593Smuzhiyun copied += copy;
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun return copied;
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun inline void
iscsi_tcp_dgst_header(struct ahash_request * hash,const void * hdr,size_t hdrlen,unsigned char digest[ISCSI_DIGEST_SIZE])314*4882a593Smuzhiyun iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
315*4882a593Smuzhiyun size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun struct scatterlist sg;
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun sg_init_one(&sg, hdr, hdrlen);
320*4882a593Smuzhiyun ahash_request_set_crypt(hash, &sg, digest, hdrlen);
321*4882a593Smuzhiyun crypto_ahash_digest(hash);
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun static inline int
iscsi_tcp_dgst_verify(struct iscsi_tcp_conn * tcp_conn,struct iscsi_segment * segment)326*4882a593Smuzhiyun iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
327*4882a593Smuzhiyun struct iscsi_segment *segment)
328*4882a593Smuzhiyun {
329*4882a593Smuzhiyun if (!segment->digest_len)
330*4882a593Smuzhiyun return 1;
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun if (memcmp(segment->recv_digest, segment->digest,
333*4882a593Smuzhiyun segment->digest_len)) {
334*4882a593Smuzhiyun ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "digest mismatch\n");
335*4882a593Smuzhiyun return 0;
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun return 1;
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun /*
342*4882a593Smuzhiyun * Helper function to set up segment buffer
343*4882a593Smuzhiyun */
344*4882a593Smuzhiyun static inline void
__iscsi_segment_init(struct iscsi_segment * segment,size_t size,iscsi_segment_done_fn_t * done,struct ahash_request * hash)345*4882a593Smuzhiyun __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
346*4882a593Smuzhiyun iscsi_segment_done_fn_t *done, struct ahash_request *hash)
347*4882a593Smuzhiyun {
348*4882a593Smuzhiyun memset(segment, 0, sizeof(*segment));
349*4882a593Smuzhiyun segment->total_size = size;
350*4882a593Smuzhiyun segment->done = done;
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun if (hash) {
353*4882a593Smuzhiyun segment->hash = hash;
354*4882a593Smuzhiyun crypto_ahash_init(hash);
355*4882a593Smuzhiyun }
356*4882a593Smuzhiyun }
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun inline void
iscsi_segment_init_linear(struct iscsi_segment * segment,void * data,size_t size,iscsi_segment_done_fn_t * done,struct ahash_request * hash)359*4882a593Smuzhiyun iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
360*4882a593Smuzhiyun size_t size, iscsi_segment_done_fn_t *done,
361*4882a593Smuzhiyun struct ahash_request *hash)
362*4882a593Smuzhiyun {
363*4882a593Smuzhiyun __iscsi_segment_init(segment, size, done, hash);
364*4882a593Smuzhiyun segment->data = data;
365*4882a593Smuzhiyun segment->size = size;
366*4882a593Smuzhiyun }
367*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_segment_init_linear);
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun inline int
iscsi_segment_seek_sg(struct iscsi_segment * segment,struct scatterlist * sg_list,unsigned int sg_count,unsigned int offset,size_t size,iscsi_segment_done_fn_t * done,struct ahash_request * hash)370*4882a593Smuzhiyun iscsi_segment_seek_sg(struct iscsi_segment *segment,
371*4882a593Smuzhiyun struct scatterlist *sg_list, unsigned int sg_count,
372*4882a593Smuzhiyun unsigned int offset, size_t size,
373*4882a593Smuzhiyun iscsi_segment_done_fn_t *done,
374*4882a593Smuzhiyun struct ahash_request *hash)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun struct scatterlist *sg;
377*4882a593Smuzhiyun unsigned int i;
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun __iscsi_segment_init(segment, size, done, hash);
380*4882a593Smuzhiyun for_each_sg(sg_list, sg, sg_count, i) {
381*4882a593Smuzhiyun if (offset < sg->length) {
382*4882a593Smuzhiyun iscsi_tcp_segment_init_sg(segment, sg, offset);
383*4882a593Smuzhiyun return 0;
384*4882a593Smuzhiyun }
385*4882a593Smuzhiyun offset -= sg->length;
386*4882a593Smuzhiyun }
387*4882a593Smuzhiyun
388*4882a593Smuzhiyun return ISCSI_ERR_DATA_OFFSET;
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /**
393*4882a593Smuzhiyun * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
394*4882a593Smuzhiyun * @tcp_conn: iscsi connection to prep for
395*4882a593Smuzhiyun *
396*4882a593Smuzhiyun * This function always passes NULL for the hash argument, because when this
397*4882a593Smuzhiyun * function is called we do not yet know the final size of the header and want
398*4882a593Smuzhiyun * to delay the digest processing until we know that.
399*4882a593Smuzhiyun */
iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn * tcp_conn)400*4882a593Smuzhiyun void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
401*4882a593Smuzhiyun {
402*4882a593Smuzhiyun ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
403*4882a593Smuzhiyun "(%s)\n", tcp_conn->iscsi_conn->hdrdgst_en ?
404*4882a593Smuzhiyun "digest enabled" : "digest disabled");
405*4882a593Smuzhiyun iscsi_segment_init_linear(&tcp_conn->in.segment,
406*4882a593Smuzhiyun tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
407*4882a593Smuzhiyun iscsi_tcp_hdr_recv_done, NULL);
408*4882a593Smuzhiyun }
409*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun /*
412*4882a593Smuzhiyun * Handle incoming reply to any other type of command
413*4882a593Smuzhiyun */
414*4882a593Smuzhiyun static int
iscsi_tcp_data_recv_done(struct iscsi_tcp_conn * tcp_conn,struct iscsi_segment * segment)415*4882a593Smuzhiyun iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
416*4882a593Smuzhiyun struct iscsi_segment *segment)
417*4882a593Smuzhiyun {
418*4882a593Smuzhiyun struct iscsi_conn *conn = tcp_conn->iscsi_conn;
419*4882a593Smuzhiyun int rc = 0;
420*4882a593Smuzhiyun
421*4882a593Smuzhiyun if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
422*4882a593Smuzhiyun return ISCSI_ERR_DATA_DGST;
423*4882a593Smuzhiyun
424*4882a593Smuzhiyun rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
425*4882a593Smuzhiyun conn->data, tcp_conn->in.datalen);
426*4882a593Smuzhiyun if (rc)
427*4882a593Smuzhiyun return rc;
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun iscsi_tcp_hdr_recv_prep(tcp_conn);
430*4882a593Smuzhiyun return 0;
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun
433*4882a593Smuzhiyun static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn * tcp_conn)434*4882a593Smuzhiyun iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
435*4882a593Smuzhiyun {
436*4882a593Smuzhiyun struct iscsi_conn *conn = tcp_conn->iscsi_conn;
437*4882a593Smuzhiyun struct ahash_request *rx_hash = NULL;
438*4882a593Smuzhiyun
439*4882a593Smuzhiyun if (conn->datadgst_en &&
440*4882a593Smuzhiyun !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
441*4882a593Smuzhiyun rx_hash = tcp_conn->rx_hash;
442*4882a593Smuzhiyun
443*4882a593Smuzhiyun iscsi_segment_init_linear(&tcp_conn->in.segment,
444*4882a593Smuzhiyun conn->data, tcp_conn->in.datalen,
445*4882a593Smuzhiyun iscsi_tcp_data_recv_done, rx_hash);
446*4882a593Smuzhiyun }
447*4882a593Smuzhiyun
/**
 * iscsi_tcp_cleanup_task - free tcp_task resources
 * @task: iscsi task
 *
 * must be called with session back_lock
 *
 * Returns every R2T the task still holds (queued or in flight) to the
 * task's R2T pool so it can be reused by later commands.
 */
void iscsi_tcp_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_r2t_info *r2t;

	/* nothing to do for mgmt */
	if (!task->sc)
		return;

	spin_lock_bh(&tcp_task->queue2pool);
	/* flush task's r2t queues: move each pending R2T back to the pool */
	while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
			 sizeof(void*));
		ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
	}

	/* also return the R2T currently being serviced, if any */
	r2t = tcp_task->r2t;
	if (r2t != NULL) {
		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
			 sizeof(void*));
		tcp_task->r2t = NULL;
	}
	spin_unlock_bh(&tcp_task->queue2pool);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
480*4882a593Smuzhiyun
/**
 * iscsi_tcp_data_in - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @task: scsi command task
 *
 * Validates the Data-In header (DataSN sequencing and buffer bounds)
 * before the payload is received. Returns 0 on success or an
 * ISCSI_ERR_* code on protocol violation.
 */
static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
	int datasn = be32_to_cpu(rhdr->datasn);
	unsigned total_in_length = task->sc->sdb.length;

	/*
	 * lib iscsi will update this in the completion handling if there
	 * is status.
	 */
	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
		iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);

	/* Data-In with no payload needs no further validation. */
	if (tcp_conn->in.datalen == 0)
		return 0;

	/* Data PDUs for a command must arrive strictly in DataSN order. */
	if (tcp_task->exp_datasn != datasn) {
		ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->datasn(%d)"
			      "\n", tcp_task->exp_datasn, datasn);
		return ISCSI_ERR_DATASN;
	}

	tcp_task->exp_datasn++;

	/* Reject payloads that would overrun the command's data buffer. */
	tcp_task->data_offset = be32_to_cpu(rhdr->offset);
	if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
		ISCSI_DBG_TCP(conn, "data_offset(%d) + data_len(%d) > "
			      "total_length_in(%d)\n", tcp_task->data_offset,
			      tcp_conn->in.datalen, total_in_length);
		return ISCSI_ERR_DATA_OFFSET;
	}

	conn->datain_pdus_cnt++;
	return 0;
}
523*4882a593Smuzhiyun
/**
 * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
 * @task: scsi command task
 *
 * Validates an incoming R2T (no payload allowed, in-order R2TSN, sane
 * data length/offset), fills in an iscsi_r2t_info taken from the task's
 * R2T pool, queues it on the task's R2T queue and requeues the task for
 * transmission. Returns 0 on success or an ISCSI_ERR_* code.
 */
static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
	struct iscsi_r2t_info *r2t;
	int r2tsn = be32_to_cpu(rhdr->r2tsn);
	u32 data_length;
	u32 data_offset;
	int rc;

	/* An R2T PDU must never carry a data segment. */
	if (tcp_conn->in.datalen) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2t with datalen %d\n",
				  tcp_conn->in.datalen);
		return ISCSI_ERR_DATALEN;
	}

	/* R2TSN must advance in order (tracked in exp_datasn). */
	if (tcp_task->exp_datasn != r2tsn){
		ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
			      tcp_task->exp_datasn, r2tsn);
		return ISCSI_ERR_R2TSN;
	}

	/* fill-in new R2T associated with the task */
	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);

	/* Ignore R2Ts that arrive while the session is recovering. */
	if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
		iscsi_conn_printk(KERN_INFO, conn,
				  "dropping R2T itt %d in recovery.\n",
				  task->itt);
		return 0;
	}

	data_length = be32_to_cpu(rhdr->data_length);
	if (data_length == 0) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with zero data len\n");
		return ISCSI_ERR_DATALEN;
	}

	/* Over-sized requests are logged but still honored (best effort). */
	if (data_length > session->max_burst)
		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
			      "burst %u. Attempting to execute request.\n",
			      data_length, session->max_burst);

	/* The requested range must fit inside the command's data buffer. */
	data_offset = be32_to_cpu(rhdr->data_offset);
	if (data_offset + data_length > task->sc->sdb.length) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with data len %u at offset %u "
				  "and total length %d\n", data_length,
				  data_offset, task->sc->sdb.length);
		return ISCSI_ERR_DATALEN;
	}

	spin_lock(&tcp_task->pool2queue);
	rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
	if (!rc) {
		/* Pool exhausted: target exceeded MaxOutstandingR2T (or we
		 * leaked an entry). Treat as a protocol error. */
		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
				  "Target has sent more R2Ts than it "
				  "negotiated for or driver has leaked.\n");
		spin_unlock(&tcp_task->pool2queue);
		return ISCSI_ERR_PROTO;
	}

	r2t->exp_statsn = rhdr->statsn;
	r2t->data_length = data_length;
	r2t->data_offset = data_offset;

	r2t->ttt = rhdr->ttt; /* no flip */
	r2t->datasn = 0;
	r2t->sent = 0;

	tcp_task->exp_datasn = r2tsn + 1;
	kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
	conn->r2t_pdus_cnt++;
	spin_unlock(&tcp_task->pool2queue);

	/* kick the xmit path so the Data-Out PDUs actually get sent */
	iscsi_requeue_task(task);
	return 0;
}
611*4882a593Smuzhiyun
612*4882a593Smuzhiyun /*
613*4882a593Smuzhiyun * Handle incoming reply to DataIn command
614*4882a593Smuzhiyun */
615*4882a593Smuzhiyun static int
iscsi_tcp_process_data_in(struct iscsi_tcp_conn * tcp_conn,struct iscsi_segment * segment)616*4882a593Smuzhiyun iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
617*4882a593Smuzhiyun struct iscsi_segment *segment)
618*4882a593Smuzhiyun {
619*4882a593Smuzhiyun struct iscsi_conn *conn = tcp_conn->iscsi_conn;
620*4882a593Smuzhiyun struct iscsi_hdr *hdr = tcp_conn->in.hdr;
621*4882a593Smuzhiyun int rc;
622*4882a593Smuzhiyun
623*4882a593Smuzhiyun if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
624*4882a593Smuzhiyun return ISCSI_ERR_DATA_DGST;
625*4882a593Smuzhiyun
626*4882a593Smuzhiyun /* check for non-exceptional status */
627*4882a593Smuzhiyun if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
628*4882a593Smuzhiyun rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
629*4882a593Smuzhiyun if (rc)
630*4882a593Smuzhiyun return rc;
631*4882a593Smuzhiyun }
632*4882a593Smuzhiyun
633*4882a593Smuzhiyun iscsi_tcp_hdr_recv_prep(tcp_conn);
634*4882a593Smuzhiyun return 0;
635*4882a593Smuzhiyun }
636*4882a593Smuzhiyun
/**
 * iscsi_tcp_hdr_dissect - process PDU header
 * @conn: iSCSI connection
 * @hdr: PDU header
 *
 * This function analyzes the header of the PDU received,
 * and performs several sanity checks. If the PDU is accompanied
 * by data, the receive buffer is set up to copy the incoming data
 * to the correct location.
 *
 * Return: 0 when the header was accepted (any following data segment,
 * or the next header, has been prepared for receive), otherwise an
 * ISCSI_ERR_* code describing the protocol violation.
 */
static int
iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	int rc = 0, opcode, ahslen;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_task *task;

	/* verify PDU length */
	tcp_conn->in.datalen = ntoh24(hdr->dlength);
	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "iscsi_tcp: datalen %d > %d\n",
				  tcp_conn->in.datalen, conn->max_recv_dlength);
		return ISCSI_ERR_DATALEN;
	}

	/* Additional header segments. So far, we don't
	 * process additional headers.
	 */
	ahslen = hdr->hlength << 2;

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	/* verify itt (itt encoding: age+cid+itt) */
	rc = iscsi_verify_itt(conn, hdr->itt);
	if (rc)
		return rc;

	ISCSI_DBG_TCP(conn, "opcode 0x%x ahslen %d datalen %d\n",
		      opcode, ahslen, tcp_conn->in.datalen);

	switch(opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
		/* back_lock serializes the itt->task lookup and keeps the
		 * task valid while we set up the data receive below. */
		spin_lock(&conn->session->back_lock);
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		if (!task)
			rc = ISCSI_ERR_BAD_ITT;
		else
			rc = iscsi_tcp_data_in(conn, task);
		if (rc) {
			spin_unlock(&conn->session->back_lock);
			break;
		}

		if (tcp_conn->in.datalen) {
			struct iscsi_tcp_task *tcp_task = task->dd_data;
			struct ahash_request *rx_hash = NULL;
			struct scsi_data_buffer *sdb = &task->sc->sdb;

			/*
			 * Setup copy of Data-In into the struct scsi_cmnd
			 * Scatterlist case:
			 * We set up the iscsi_segment to point to the next
			 * scatterlist entry to copy to. As we go along,
			 * we move on to the next scatterlist entry and
			 * update the digest per-entry.
			 */
			/* Only compute the data digest in software when it
			 * is enabled and the transport cannot offload it. */
			if (conn->datadgst_en &&
			    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
				rx_hash = tcp_conn->rx_hash;

			ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
				     "offset=%d, datalen=%d)\n",
				      tcp_task->data_offset,
				      tcp_conn->in.datalen);
			task->last_xfer = jiffies;
			rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
						   sdb->table.sgl,
						   sdb->table.nents,
						   tcp_task->data_offset,
						   tcp_conn->in.datalen,
						   iscsi_tcp_process_data_in,
						   rx_hash);
			/* Data segment is armed; do NOT prep a header
			 * receive here - the data completion does that. */
			spin_unlock(&conn->session->back_lock);
			return rc;
		}
		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
		spin_unlock(&conn->session->back_lock);
		break;
	case ISCSI_OP_SCSI_CMD_RSP:
		if (tcp_conn->in.datalen) {
			/* Sense/response data follows - receive it into the
			 * connection's local buffer first. */
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	case ISCSI_OP_R2T:
		spin_lock(&conn->session->back_lock);
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		spin_unlock(&conn->session->back_lock);
		if (!task)
			rc = ISCSI_ERR_BAD_ITT;
		else if (ahslen)
			rc = ISCSI_ERR_AHSLEN;
		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
			task->last_xfer = jiffies;
			/* frwd_lock protects the R2T queueing done by
			 * iscsi_tcp_r2t_rsp() against the xmit path. */
			spin_lock(&conn->session->frwd_lock);
			rc = iscsi_tcp_r2t_rsp(conn, task);
			spin_unlock(&conn->session->frwd_lock);
		} else
			rc = ISCSI_ERR_PROTO;
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT:
	case ISCSI_OP_ASYNC_EVENT:
		/*
		 * It is possible that we could get a PDU with a buffer larger
		 * than 8K, but there are no targets that currently do this.
		 * For now we fail until we find a vendor that needs it
		 */
		if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "iscsi_tcp: received buffer of "
					  "len %u but conn buffer is only %u "
					  "(opcode %0x)\n",
					  tcp_conn->in.datalen,
					  ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
			rc = ISCSI_ERR_PROTO;
			break;
		}

		/* If there's data coming in with the response,
		 * receive it to the connection's buffer.
		 */
		if (tcp_conn->in.datalen) {
			iscsi_tcp_data_recv_prep(tcp_conn);
			return 0;
		}
		fallthrough;
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

	if (rc == 0) {
		/* Anything that comes with data should have
		 * been handled above. */
		if (tcp_conn->in.datalen)
			return ISCSI_ERR_PROTO;
		iscsi_tcp_hdr_recv_prep(tcp_conn);
	}

	return rc;
}
796*4882a593Smuzhiyun
/**
 * iscsi_tcp_hdr_recv_done - process PDU header
 * @tcp_conn: iSCSI TCP connection
 * @segment: the buffer segment being processed
 *
 * This is the callback invoked when the PDU header has
 * been received. If the header is followed by additional
 * header segments, we go back for more data.
 *
 * Return: 0 to keep receiving (AHS, digest, or next PDU), an
 * ISCSI_ERR_* code on a malformed header or digest mismatch, or the
 * result of dissecting the completed header.
 */
static int
iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
			struct iscsi_segment *segment)
{
	struct iscsi_conn *conn = tcp_conn->iscsi_conn;
	struct iscsi_hdr *hdr;

	/* Check if there are additional header segments
	 * *prior* to computing the digest, because we
	 * may need to go back to the caller for more.
	 */
	hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
	if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
		/* Bump the header length - the caller will
		 * just loop around and get the AHS for us, and
		 * call again. */
		unsigned int ahslen = hdr->hlength << 2;

		/* Make sure we don't overflow */
		if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
			return ISCSI_ERR_AHSLEN;

		/* Grow the segment so the AHS lands behind the BHS. */
		segment->total_size += ahslen;
		segment->size += ahslen;
		return 0;
	}

	/* We're done processing the header. See if we're doing
	 * header digests; if so, set up the recv_digest buffer
	 * and go back for more. */
	if (conn->hdrdgst_en &&
	    !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
		/* digest_len == 0 means the digest bytes have not been
		 * received yet; splice the digest into the segment and
		 * return to the caller to read them. */
		if (segment->digest_len == 0) {
			/*
			 * Even if we offload the digest processing we
			 * splice it in so we can increment the skb/segment
			 * counters in preparation for the data segment.
			 */
			iscsi_tcp_segment_splice_digest(segment,
							segment->recv_digest);
			return 0;
		}

		/* Digest received: recompute over header + AHS and compare. */
		iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
				      segment->total_copied - ISCSI_DIGEST_SIZE,
				      segment->digest);

		if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
			return ISCSI_ERR_HDR_DGST;
	}

	tcp_conn->in.hdr = hdr;
	return iscsi_tcp_hdr_dissect(conn, hdr);
}
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun /**
862*4882a593Smuzhiyun * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
863*4882a593Smuzhiyun * @tcp_conn: iscsi tcp conn
864*4882a593Smuzhiyun *
865*4882a593Smuzhiyun * returns non zero if we are currently processing or setup to process
866*4882a593Smuzhiyun * a header.
867*4882a593Smuzhiyun */
iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn * tcp_conn)868*4882a593Smuzhiyun inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
869*4882a593Smuzhiyun {
870*4882a593Smuzhiyun return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
871*4882a593Smuzhiyun }
872*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
873*4882a593Smuzhiyun
/**
 * iscsi_tcp_recv_skb - Process skb
 * @conn: iscsi connection
 * @skb: network buffer with header and/or data segment
 * @offset: offset in skb
 * @offloaded: bool indicating if transfer was offloaded
 * @status: iscsi TCP status result
 *
 * Will return status of transfer in @status. And will return
 * number of bytes copied.
 */
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
		       unsigned int offset, bool offloaded, int *status)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_segment *segment = &tcp_conn->in.segment;
	struct skb_seq_state seq;
	unsigned int consumed = 0;
	int rc = 0;

	ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
	/*
	 * Update for each skb instead of pdu, because over slow networks a
	 * data_in's data could take a while to read in. We also want to
	 * account for r2ts.
	 */
	conn->last_recv = jiffies;

	/* Receive path is suspended (e.g. during recovery): consume nothing. */
	if (unlikely(conn->suspend_rx)) {
		ISCSI_DBG_TCP(conn, "Rx suspended!\n");
		*status = ISCSI_TCP_SUSPENDED;
		return 0;
	}

	/* Offloaded transfer: the data already landed elsewhere, just mark
	 * the segment complete and run its done() callback. */
	if (offloaded) {
		segment->total_copied = segment->total_size;
		goto segment_done;
	}

	/* Walk the (possibly non-linear) skb and feed each contiguous
	 * chunk into the current receive segment. */
	skb_prepare_seq_read(skb, offset, skb->len, &seq);
	while (1) {
		unsigned int avail;
		const u8 *ptr;

		avail = skb_seq_read(consumed, &ptr, &seq);
		if (avail == 0) {
			/* skb exhausted before the segment filled up. */
			ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n",
				      consumed);
			*status = ISCSI_TCP_SKB_DONE;
			goto skb_done;
		}
		BUG_ON(segment->copied >= segment->size);

		ISCSI_DBG_TCP(conn, "skb %p ptr=%p avail=%u\n", skb, ptr,
			      avail);
		rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
		BUG_ON(rc == 0);
		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			skb_abort_seq_read(&seq);
			goto segment_done;
		}
	}

segment_done:
	*status = ISCSI_TCP_SEGMENT_DONE;
	ISCSI_DBG_TCP(conn, "segment done\n");
	/* done() dissects the header or finishes the data transfer. */
	rc = segment->done(tcp_conn, segment);
	if (rc != 0) {
		*status = ISCSI_TCP_CONN_ERR;
		ISCSI_DBG_TCP(conn, "Error receiving PDU, errno=%d\n", rc);
		iscsi_conn_failure(conn, rc);
		return 0;
	}
	/* The done() functions sets up the next segment. */

skb_done:
	conn->rxdata_octets += consumed;
	return consumed;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
956*4882a593Smuzhiyun
957*4882a593Smuzhiyun /**
958*4882a593Smuzhiyun * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
959*4882a593Smuzhiyun * @task: scsi command task
960*4882a593Smuzhiyun */
iscsi_tcp_task_init(struct iscsi_task * task)961*4882a593Smuzhiyun int iscsi_tcp_task_init(struct iscsi_task *task)
962*4882a593Smuzhiyun {
963*4882a593Smuzhiyun struct iscsi_tcp_task *tcp_task = task->dd_data;
964*4882a593Smuzhiyun struct iscsi_conn *conn = task->conn;
965*4882a593Smuzhiyun struct scsi_cmnd *sc = task->sc;
966*4882a593Smuzhiyun int err;
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun if (!sc) {
969*4882a593Smuzhiyun /*
970*4882a593Smuzhiyun * mgmt tasks do not have a scatterlist since they come
971*4882a593Smuzhiyun * in from the iscsi interface.
972*4882a593Smuzhiyun */
973*4882a593Smuzhiyun ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt);
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun return conn->session->tt->init_pdu(task, 0, task->data_count);
976*4882a593Smuzhiyun }
977*4882a593Smuzhiyun
978*4882a593Smuzhiyun BUG_ON(kfifo_len(&tcp_task->r2tqueue));
979*4882a593Smuzhiyun tcp_task->exp_datasn = 0;
980*4882a593Smuzhiyun
981*4882a593Smuzhiyun /* Prepare PDU, optionally w/ immediate data */
982*4882a593Smuzhiyun ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n",
983*4882a593Smuzhiyun task->itt, task->imm_count, task->unsol_r2t.data_length);
984*4882a593Smuzhiyun
985*4882a593Smuzhiyun err = conn->session->tt->init_pdu(task, 0, task->imm_count);
986*4882a593Smuzhiyun if (err)
987*4882a593Smuzhiyun return err;
988*4882a593Smuzhiyun task->imm_count = 0;
989*4882a593Smuzhiyun return 0;
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
992*4882a593Smuzhiyun
iscsi_tcp_get_curr_r2t(struct iscsi_task * task)993*4882a593Smuzhiyun static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
994*4882a593Smuzhiyun {
995*4882a593Smuzhiyun struct iscsi_tcp_task *tcp_task = task->dd_data;
996*4882a593Smuzhiyun struct iscsi_r2t_info *r2t = NULL;
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun if (iscsi_task_has_unsol_data(task))
999*4882a593Smuzhiyun r2t = &task->unsol_r2t;
1000*4882a593Smuzhiyun else {
1001*4882a593Smuzhiyun spin_lock_bh(&tcp_task->queue2pool);
1002*4882a593Smuzhiyun if (tcp_task->r2t) {
1003*4882a593Smuzhiyun r2t = tcp_task->r2t;
1004*4882a593Smuzhiyun /* Continue with this R2T? */
1005*4882a593Smuzhiyun if (r2t->data_length <= r2t->sent) {
1006*4882a593Smuzhiyun ISCSI_DBG_TCP(task->conn,
1007*4882a593Smuzhiyun " done with r2t %p\n", r2t);
1008*4882a593Smuzhiyun kfifo_in(&tcp_task->r2tpool.queue,
1009*4882a593Smuzhiyun (void *)&tcp_task->r2t,
1010*4882a593Smuzhiyun sizeof(void *));
1011*4882a593Smuzhiyun tcp_task->r2t = r2t = NULL;
1012*4882a593Smuzhiyun }
1013*4882a593Smuzhiyun }
1014*4882a593Smuzhiyun
1015*4882a593Smuzhiyun if (r2t == NULL) {
1016*4882a593Smuzhiyun if (kfifo_out(&tcp_task->r2tqueue,
1017*4882a593Smuzhiyun (void *)&tcp_task->r2t, sizeof(void *)) !=
1018*4882a593Smuzhiyun sizeof(void *))
1019*4882a593Smuzhiyun r2t = NULL;
1020*4882a593Smuzhiyun else
1021*4882a593Smuzhiyun r2t = tcp_task->r2t;
1022*4882a593Smuzhiyun }
1023*4882a593Smuzhiyun spin_unlock_bh(&tcp_task->queue2pool);
1024*4882a593Smuzhiyun }
1025*4882a593Smuzhiyun
1026*4882a593Smuzhiyun return r2t;
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun
/**
 * iscsi_tcp_task_xmit - xmit normal PDU task
 * @task: iscsi command task
 *
 * We're expected to return 0 when everything was transmitted successfully,
 * -EAGAIN if there's still data in the queue, or != 0 for any other kind
 * of error.
 */
int iscsi_tcp_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_r2t_info *r2t;
	int rc = 0;

flush:
	/* Flush any pending data first. */
	rc = session->tt->xmit_pdu(task);
	if (rc < 0)
		return rc;

	/* mgmt command */
	if (!task->sc) {
		/* Tasks with a reserved itt get no response; drop our
		 * reference now that the PDU is out. */
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
		return 0;
	}

	/* Are we done already? */
	if (task->sc->sc_data_direction != DMA_TO_DEVICE)
		return 0;

	r2t = iscsi_tcp_get_curr_r2t(task);
	if (r2t == NULL) {
		/* Waiting for more R2Ts to arrive. */
		ISCSI_DBG_TCP(conn, "no R2Ts yet\n");
		return 0;
	}

	/* Build the next solicited Data-Out for this R2T. */
	rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
	if (rc)
		return rc;
	iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);

	ISCSI_DBG_TCP(conn, "sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
		      r2t, r2t->datasn - 1, task->hdr->itt,
		      r2t->data_offset + r2t->sent, r2t->data_count);

	rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
					 r2t->data_count);
	if (rc) {
		iscsi_conn_failure(conn, ISCSI_ERR_XMIT_FAILED);
		return rc;
	}

	/* Account the chunk as sent and loop to flush it (and pick up the
	 * next R2T chunk, if any). */
	r2t->sent += r2t->data_count;
	goto flush;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit);
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun struct iscsi_cls_conn *
iscsi_tcp_conn_setup(struct iscsi_cls_session * cls_session,int dd_data_size,uint32_t conn_idx)1090*4882a593Smuzhiyun iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
1091*4882a593Smuzhiyun uint32_t conn_idx)
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun {
1094*4882a593Smuzhiyun struct iscsi_conn *conn;
1095*4882a593Smuzhiyun struct iscsi_cls_conn *cls_conn;
1096*4882a593Smuzhiyun struct iscsi_tcp_conn *tcp_conn;
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun cls_conn = iscsi_conn_setup(cls_session,
1099*4882a593Smuzhiyun sizeof(*tcp_conn) + dd_data_size, conn_idx);
1100*4882a593Smuzhiyun if (!cls_conn)
1101*4882a593Smuzhiyun return NULL;
1102*4882a593Smuzhiyun conn = cls_conn->dd_data;
1103*4882a593Smuzhiyun /*
1104*4882a593Smuzhiyun * due to strange issues with iser these are not set
1105*4882a593Smuzhiyun * in iscsi_conn_setup
1106*4882a593Smuzhiyun */
1107*4882a593Smuzhiyun conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun tcp_conn = conn->dd_data;
1110*4882a593Smuzhiyun tcp_conn->iscsi_conn = conn;
1111*4882a593Smuzhiyun tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn);
1112*4882a593Smuzhiyun return cls_conn;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
1115*4882a593Smuzhiyun
iscsi_tcp_conn_teardown(struct iscsi_cls_conn * cls_conn)1116*4882a593Smuzhiyun void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun iscsi_conn_teardown(cls_conn);
1119*4882a593Smuzhiyun }
1120*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
1121*4882a593Smuzhiyun
iscsi_tcp_r2tpool_alloc(struct iscsi_session * session)1122*4882a593Smuzhiyun int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
1123*4882a593Smuzhiyun {
1124*4882a593Smuzhiyun int i;
1125*4882a593Smuzhiyun int cmd_i;
1126*4882a593Smuzhiyun
1127*4882a593Smuzhiyun /*
1128*4882a593Smuzhiyun * initialize per-task: R2T pool and xmit queue
1129*4882a593Smuzhiyun */
1130*4882a593Smuzhiyun for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1131*4882a593Smuzhiyun struct iscsi_task *task = session->cmds[cmd_i];
1132*4882a593Smuzhiyun struct iscsi_tcp_task *tcp_task = task->dd_data;
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun /*
1135*4882a593Smuzhiyun * pre-allocated x2 as much r2ts to handle race when
1136*4882a593Smuzhiyun * target acks DataOut faster than we data_xmit() queues
1137*4882a593Smuzhiyun * could replenish r2tqueue.
1138*4882a593Smuzhiyun */
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun /* R2T pool */
1141*4882a593Smuzhiyun if (iscsi_pool_init(&tcp_task->r2tpool,
1142*4882a593Smuzhiyun session->max_r2t * 2, NULL,
1143*4882a593Smuzhiyun sizeof(struct iscsi_r2t_info))) {
1144*4882a593Smuzhiyun goto r2t_alloc_fail;
1145*4882a593Smuzhiyun }
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun /* R2T xmit queue */
1148*4882a593Smuzhiyun if (kfifo_alloc(&tcp_task->r2tqueue,
1149*4882a593Smuzhiyun session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) {
1150*4882a593Smuzhiyun iscsi_pool_free(&tcp_task->r2tpool);
1151*4882a593Smuzhiyun goto r2t_alloc_fail;
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun spin_lock_init(&tcp_task->pool2queue);
1154*4882a593Smuzhiyun spin_lock_init(&tcp_task->queue2pool);
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun
1157*4882a593Smuzhiyun return 0;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun r2t_alloc_fail:
1160*4882a593Smuzhiyun for (i = 0; i < cmd_i; i++) {
1161*4882a593Smuzhiyun struct iscsi_task *task = session->cmds[i];
1162*4882a593Smuzhiyun struct iscsi_tcp_task *tcp_task = task->dd_data;
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun kfifo_free(&tcp_task->r2tqueue);
1165*4882a593Smuzhiyun iscsi_pool_free(&tcp_task->r2tpool);
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun return -ENOMEM;
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc);
1170*4882a593Smuzhiyun
iscsi_tcp_r2tpool_free(struct iscsi_session * session)1171*4882a593Smuzhiyun void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun int i;
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun for (i = 0; i < session->cmds_max; i++) {
1176*4882a593Smuzhiyun struct iscsi_task *task = session->cmds[i];
1177*4882a593Smuzhiyun struct iscsi_tcp_task *tcp_task = task->dd_data;
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun kfifo_free(&tcp_task->r2tqueue);
1180*4882a593Smuzhiyun iscsi_pool_free(&tcp_task->r2tpool);
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
1184*4882a593Smuzhiyun
iscsi_tcp_set_max_r2t(struct iscsi_conn * conn,char * buf)1185*4882a593Smuzhiyun int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun struct iscsi_session *session = conn->session;
1188*4882a593Smuzhiyun unsigned short r2ts = 0;
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun sscanf(buf, "%hu", &r2ts);
1191*4882a593Smuzhiyun if (session->max_r2t == r2ts)
1192*4882a593Smuzhiyun return 0;
1193*4882a593Smuzhiyun
1194*4882a593Smuzhiyun if (!r2ts || !is_power_of_2(r2ts))
1195*4882a593Smuzhiyun return -EINVAL;
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun session->max_r2t = r2ts;
1198*4882a593Smuzhiyun iscsi_tcp_r2tpool_free(session);
1199*4882a593Smuzhiyun return iscsi_tcp_r2tpool_alloc(session);
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t);
1202*4882a593Smuzhiyun
iscsi_tcp_conn_get_stats(struct iscsi_cls_conn * cls_conn,struct iscsi_stats * stats)1203*4882a593Smuzhiyun void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1204*4882a593Smuzhiyun struct iscsi_stats *stats)
1205*4882a593Smuzhiyun {
1206*4882a593Smuzhiyun struct iscsi_conn *conn = cls_conn->dd_data;
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun stats->txdata_octets = conn->txdata_octets;
1209*4882a593Smuzhiyun stats->rxdata_octets = conn->rxdata_octets;
1210*4882a593Smuzhiyun stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1211*4882a593Smuzhiyun stats->dataout_pdus = conn->dataout_pdus_cnt;
1212*4882a593Smuzhiyun stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1213*4882a593Smuzhiyun stats->datain_pdus = conn->datain_pdus_cnt;
1214*4882a593Smuzhiyun stats->r2t_pdus = conn->r2t_pdus_cnt;
1215*4882a593Smuzhiyun stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1216*4882a593Smuzhiyun stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);
1219