// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <net/tcp.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

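/*
 * Maximum payload that can be carried inline within the transmit
 * packet header buffer: the room an RDMA Read Request packet occupies
 * beyond a plain Send header, rounded down to a multiple of 8 bytes
 * by the 0xF8 mask.
 */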
#define MAX_HDR_INLINE					\
	(((uint32_t)(sizeof(struct siw_rreq_pkt) -	\
		     sizeof(struct iwarp_send))) & 0xF8)

static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
{
	struct siw_pbl *pbl = mem->pbl;
	u64 offset = addr - mem->va;
	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);

	if (paddr)
		return virt_to_page((void *)paddr);

	return NULL;
}

/*
 * Copy short payload at provided destination payload address
 */
static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	struct siw_sge *sge = &wqe->sqe.sge[0];
	u32 bytes = sge->length;

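	/*
	 * Payloads larger than MAX_HDR_INLINE, or multi-SGE work requests,
	 * cannot be inlined; MAX_HDR_INLINE + 1 serves as the "does not
	 * fit" sentinel tested by the caller.
	 */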
	if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
		return MAX_HDR_INLINE + 1;

	if (!bytes)
		return 0;

	if (tx_flags(wqe) & SIW_WQE_INLINE) {
		memcpy(paddr, &wqe->sqe.sge[1], bytes);
	} else {
		struct siw_mem *mem = wqe->mem[0];

		if (!mem->mem_obj) {
			/* Kernel client using kva */
			memcpy(paddr,
			       (const void *)(uintptr_t)sge->laddr, bytes);
		} else if (c_tx->in_syscall) {
			if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
					   bytes))
				return -EFAULT;
		} else {
			unsigned int off = sge->laddr & ~PAGE_MASK;
			struct page *p;
			char *buffer;
			int pbl_idx = 0;

			if (!mem->is_pbl)
				p = siw_get_upage(mem->umem, sge->laddr);
			else
				p = siw_get_pblpage(mem, sge->laddr, &pbl_idx);

			if (unlikely(!p))
				return -EFAULT;

			buffer = kmap(p);

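			/*
			 * The payload may straddle a page boundary: copy
			 * what fits into the first page, then map the
			 * following page for the remainder.
			 */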
			if (likely(PAGE_SIZE - off >= bytes)) {
				memcpy(paddr, buffer + off, bytes);
			} else {
				unsigned long part = PAGE_SIZE - off;

				memcpy(paddr, buffer + off, part);
				kunmap(p);

				if (!mem->is_pbl)
					p = siw_get_upage(mem->umem,
							  sge->laddr + part);
				else
					p = siw_get_pblpage(mem,
							    sge->laddr + part,
							    &pbl_idx);
				if (unlikely(!p))
					return -EFAULT;

				buffer = kmap(p);
				memcpy(paddr + part, buffer, bytes - part);
			}
			kunmap(p);
		}
	}
	return (int)bytes;
}

#define PKT_FRAGMENTED 1
#define PKT_COMPLETE 0

/*
 * siw_qp_prepare_tx()
 *
 * Prepare tx state for sending out one fpdu. Builds complete pkt
 * if no user data or only immediate data are present.
 *
 * returns PKT_COMPLETE if complete pkt built, PKT_FRAGMENTED otherwise.
 */
static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	char *crc = NULL;
	int data = 0;

	switch (tx_type(wqe)) {
	case SIW_OP_READ:
	case SIW_OP_READ_LOCAL_INV:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rreq.rsvd = 0;
		c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
		c_tx->pkt.rreq.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]);
		c_tx->pkt.rreq.ddp_mo = 0;
		c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
		c_tx->pkt.rreq.sink_to =
			cpu_to_be64(wqe->sqe.sge[0].laddr);
		c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
		c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
		c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rreq);
		crc = (char *)&c_tx->pkt.rreq_pkt.crc;
		break;

	case SIW_OP_SEND:
		if (tx_flags(wqe) & SIW_WQE_SOLICITED)
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_SE].ctrl,
			       sizeof(struct iwarp_ctrl));
		else
			memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND].ctrl,
			       sizeof(struct iwarp_ctrl));

		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
		c_tx->pkt.send.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		c_tx->pkt.send.ddp_mo = 0;

		c_tx->pkt.send_inv.inval_stag = 0;

		c_tx->ctrl_len = sizeof(struct iwarp_send);

		crc = (char *)&c_tx->pkt.send_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	case SIW_OP_SEND_REMOTE_INV:
		if (tx_flags(wqe) & SIW_WQE_SOLICITED)
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_SE_INVAL].ctrl,
			       sizeof(struct iwarp_ctrl));
		else
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_INVAL].ctrl,
			       sizeof(struct iwarp_ctrl));

		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
		c_tx->pkt.send.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		c_tx->pkt.send.ddp_mo = 0;

		c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);

		c_tx->ctrl_len = sizeof(struct iwarp_send_inv);

		crc = (char *)&c_tx->pkt.send_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	case SIW_OP_WRITE:
		memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
		c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
		c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);

		crc = (char *)&c_tx->pkt.write_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	case SIW_OP_READ_RESPONSE:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_RESP].ctrl,
		       sizeof(struct iwarp_ctrl));

		/* NBO */
		c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
		c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);

		crc = (char *)&c_tx->pkt.write_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	default:
		siw_dbg_qp(tx_qp(c_tx), "stale wqe type %d\n", tx_type(wqe));
		return -EOPNOTSUPP;
	}
	if (unlikely(data < 0))
		return data;

	c_tx->ctrl_sent = 0;

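	/*
	 * If the payload fits behind the header, the complete FPDU
	 * (header, payload, pad and CRC) is assembled in the packet
	 * buffer and can go out with a single send.
	 */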
	if (data <= MAX_HDR_INLINE) {
		if (data) {
			wqe->processed = data;

			c_tx->pkt.ctrl.mpa_len =
				htons(c_tx->ctrl_len + data - MPA_HDR_SIZE);

			/* Pad to 4-byte boundary, if needed: -data & 3 */
			data += -(int)data & 0x3;
			/* advance CRC location after payload */
			crc += data;
			c_tx->ctrl_len += data;

			if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
				c_tx->pkt.c_untagged.ddp_mo = 0;
			else
				c_tx->pkt.c_tagged.ddp_to =
					cpu_to_be64(wqe->sqe.raddr);
		}

		*(u32 *)crc = 0;
		/*
		 * Do complete CRC if enabled and short packet
		 */
		if (c_tx->mpa_crc_hd) {
			crypto_shash_init(c_tx->mpa_crc_hd);
			if (crypto_shash_update(c_tx->mpa_crc_hd,
						(u8 *)&c_tx->pkt,
						c_tx->ctrl_len))
				return -EINVAL;
			crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)crc);
		}
		c_tx->ctrl_len += MPA_CRC_SIZE;

		return PKT_COMPLETE;
	}
	c_tx->ctrl_len += MPA_CRC_SIZE;
	c_tx->sge_idx = 0;
	c_tx->sge_off = 0;
	c_tx->pbl_idx = 0;

	/*
	 * Allow direct sending out of user buffer if WR is non-signalled
	 * and payload is over threshold.
	 * Per RDMA verbs, the application should not change the send buffer
	 * until the work is completed. In iWarp, work completion is only
	 * local delivery to TCP. TCP may reuse the buffer for
	 * retransmission. Changing unsent data also breaks the CRC,
	 * if applied.
	 */
	if (c_tx->zcopy_tx && wqe->bytes >= SENDPAGE_THRESH &&
	    !(tx_flags(wqe) & SIW_WQE_SIGNALLED))
		c_tx->use_sendpage = 1;
	else
		c_tx->use_sendpage = 0;

	return PKT_FRAGMENTED;
}

/*
 * Send out one complete control type FPDU, or header of FPDU carrying
 * data. Used for fixed sized packets like Read.Requests or zero length
 * SENDs, WRITEs, READ.Responses, or header only.
 *
 * Returns 0 if all pending control bytes got pushed, -EAGAIN on a
 * partial send, or a negative error code from the socket layer.
 */
static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
		       int flags)
{
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov = { .iov_base =
				    (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
			    .iov_len = c_tx->ctrl_len - c_tx->ctrl_sent };

	int rv = kernel_sendmsg(s, &msg, &iov, 1,
				c_tx->ctrl_len - c_tx->ctrl_sent);

	if (rv >= 0) {
		c_tx->ctrl_sent += rv;

		if (c_tx->ctrl_sent == c_tx->ctrl_len)
			rv = 0;
		else
			rv = -EAGAIN;
	}
	return rv;
}

/*
 * 0copy TCP transmit interface: Use do_tcp_sendpages.
 *
 * Using sendpage to push page by page appears to be less efficient
 * than using sendmsg, even if data are copied.
 *
 * A general performance limitation might be the extra four bytes
 * trailer checksum segment to be pushed after user data.
 */
static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
			     size_t size)
{
	struct sock *sk = s->sk;
	int i = 0, rv = 0, sent = 0,
	    flags = MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST;

	while (size) {
		size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);

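		/*
		 * Drop MSG_SENDPAGE_NOTLAST for the final chunk, so TCP
		 * does not keep waiting for further sendpage data.
		 */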
		if (size + offset <= PAGE_SIZE)
			flags = MSG_MORE | MSG_DONTWAIT;

		tcp_rate_check_app_limited(sk);
try_page_again:
		lock_sock(sk);
		rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags);
		release_sock(sk);

		if (rv > 0) {
			size -= rv;
			sent += rv;
			if (rv != bytes) {
				offset += rv;
				bytes -= rv;
				goto try_page_again;
			}
			offset = 0;
		} else {
			if (rv == -EAGAIN || rv == 0)
				break;
			return rv;
		}
		i++;
	}
	return sent;
}

/*
 * siw_0copy_tx()
 *
 * Pushes list of pages to TCP socket. If pages from multiple
 * SGE's, all referenced pages of each SGE are pushed in one
 * shot.
 */
static int siw_0copy_tx(struct socket *s, struct page **page,
			struct siw_sge *sge, unsigned int offset,
			unsigned int size)
{
	int i = 0, sent = 0, rv;
	int sge_bytes = min(sge->length - offset, size);

	offset = (sge->laddr + offset) & ~PAGE_MASK;

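	/*
	 * A short return from siw_tcp_sendpages() (sge_bytes > rv)
	 * indicates TCP backpressure: stop here and report the number
	 * of bytes accepted so far.
	 */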
	while (sent != size) {
		rv = siw_tcp_sendpages(s, &page[i], offset, sge_bytes);
		if (rv >= 0) {
			sent += rv;
			if (size == sent || sge_bytes > rv)
				break;

			i += PAGE_ALIGN(sge_bytes + offset) >> PAGE_SHIFT;
			sge++;
			sge_bytes = min(sge->length, size - sent);
			offset = sge->laddr & ~PAGE_MASK;
		} else {
			sent = rv;
			break;
		}
	}
	return sent;
}

#define MAX_TRAILER (MPA_CRC_SIZE + 4)

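/*
 * Undo the kmap() mappings taken while building the transmit iovec:
 * bit i set in kmap_mask means page_array[i] is currently mapped.
 */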
static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
{
	while (kmap_mask) {
		if (kmap_mask & BIT(0))
			kunmap(*pp);
		pp++;
		kmap_mask >>= 1;
	}
}

/*
 * siw_tx_hdt() tries to push a complete packet to TCP where all
 * packet fragments are referenced by the elements of one iovec.
 * For the data portion, each involved page must be referenced by
 * one extra element. All sge's data can be non-aligned to page
 * boundaries. Two more elements are referencing iWARP header
 * and trailer:
 * MAX_ARRAY = 64KB/PAGE_SIZE + 1 + 2 * (SIW_MAX_SGE - 1) + HDR + TRL
 */
#define MAX_ARRAY ((0xffff / PAGE_SIZE) + 1 + (2 * (SIW_MAX_SGE - 1) + 2))

/*
 * Write out iov referencing hdr, data and trailer of current FPDU.
 * Update transmit state dependent on write return status
 */
static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
	struct kvec iov[MAX_ARRAY];
	struct page *page_array[MAX_ARRAY];
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };

	int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv;
	unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
		     sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
		     pbl_idx = c_tx->pbl_idx;
	unsigned long kmap_mask = 0L;

	if (c_tx->state == SIW_SEND_HDR) {
		if (c_tx->use_sendpage) {
			rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT | MSG_MORE);
			if (rv)
				goto done;

			c_tx->state = SIW_SEND_DATA;
		} else {
			iov[0].iov_base =
				(char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent;
			iov[0].iov_len = hdr_len =
				c_tx->ctrl_len - c_tx->ctrl_sent;
			seg = 1;
		}
	}

	wqe->processed += data_len;

	while (data_len) { /* walk the list of SGE's */
		unsigned int sge_len = min(sge->length - sge_off, data_len);
		unsigned int fp_off = (sge->laddr + sge_off) & ~PAGE_MASK;
		struct siw_mem *mem;

		if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
			mem = wqe->mem[sge_idx];
			is_kva = mem->mem_obj == NULL ? 1 : 0;
		} else {
			is_kva = 1;
		}
		if (is_kva && !c_tx->use_sendpage) {
			/*
			 * tx from kernel virtual address: either inline data
			 * or memory region with assigned kernel buffer
			 */
			iov[seg].iov_base =
				(void *)(uintptr_t)(sge->laddr + sge_off);
			iov[seg].iov_len = sge_len;

			if (do_crc)
				crypto_shash_update(c_tx->mpa_crc_hd,
						    iov[seg].iov_base,
						    sge_len);
			sge_off += sge_len;
			data_len -= sge_len;
			seg++;
			goto sge_done;
		}

		while (sge_len) {
			size_t plen = min((int)PAGE_SIZE - fp_off, sge_len);

			if (!is_kva) {
				struct page *p;

				if (mem->is_pbl)
					p = siw_get_pblpage(
						mem, sge->laddr + sge_off,
						&pbl_idx);
				else
					p = siw_get_upage(mem->umem,
							  sge->laddr + sge_off);
				if (unlikely(!p)) {
					siw_unmap_pages(page_array, kmap_mask);
					wqe->processed -= c_tx->bytes_unsent;
					rv = -EFAULT;
					goto done_crc;
				}
				page_array[seg] = p;

				if (!c_tx->use_sendpage) {
					iov[seg].iov_base = kmap(p) + fp_off;
					iov[seg].iov_len = plen;

					/* Remember for later kunmap() */
					kmap_mask |= BIT(seg);

					if (do_crc)
						crypto_shash_update(
							c_tx->mpa_crc_hd,
							iov[seg].iov_base,
							plen);
				} else if (do_crc) {
					crypto_shash_update(c_tx->mpa_crc_hd,
							    kmap(p) + fp_off,
							    plen);
					kunmap(p);
				}
			} else {
				/*
				 * Cast to an uintptr_t to preserve all 64 bits
				 * in sge->laddr.
				 */
				uintptr_t va = (uintptr_t)(sge->laddr + sge_off);

				/*
				 * virt_to_page() takes a (void *) pointer
				 * so cast to a (void *) meaning it will be 64
				 * bits on a 64 bit platform and 32 bits on a
				 * 32 bit platform.
				 */
				page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
				if (do_crc)
					crypto_shash_update(
						c_tx->mpa_crc_hd,
						(void *)va,
						plen);
			}

			sge_len -= plen;
			sge_off += plen;
			data_len -= plen;
			fp_off = 0;

			if (++seg >= (int)MAX_ARRAY) {
				siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
				siw_unmap_pages(page_array, kmap_mask);
				wqe->processed -= c_tx->bytes_unsent;
				rv = -EMSGSIZE;
				goto done_crc;
			}
		}
sge_done:
		/* Update SGE variables at end of SGE */
		if (sge_off == sge->length &&
		    (data_len != 0 || wqe->processed < wqe->bytes)) {
			sge_idx++;
			sge++;
			sge_off = 0;
		}
	}
	/* trailer: c_tx->pad pad bytes followed by the MPA CRC */
	if (likely(c_tx->state != SIW_SEND_TRAILER)) {
		iov[seg].iov_base = &c_tx->trailer.pad[4 - c_tx->pad];
		iov[seg].iov_len = trl_len = MAX_TRAILER - (4 - c_tx->pad);
	} else {
		iov[seg].iov_base = &c_tx->trailer.pad[c_tx->ctrl_sent];
		iov[seg].iov_len = trl_len = MAX_TRAILER - c_tx->ctrl_sent;
	}

	if (c_tx->pad) {
		*(u32 *)c_tx->trailer.pad = 0;
		if (do_crc)
			crypto_shash_update(c_tx->mpa_crc_hd,
				(u8 *)&c_tx->trailer.crc - c_tx->pad,
				c_tx->pad);
	}
	if (!c_tx->mpa_crc_hd)
		c_tx->trailer.crc = 0;
	else if (do_crc)
		crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)&c_tx->trailer.crc);

	data_len = c_tx->bytes_unsent;

	if (c_tx->use_sendpage) {
		rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
				  c_tx->sge_off, data_len);
		if (rv == data_len) {
			rv = kernel_sendmsg(s, &msg, &iov[seg], 1, trl_len);
			if (rv > 0)
				rv += data_len;
			else
				rv = data_len;
		}
	} else {
		rv = kernel_sendmsg(s, &msg, iov, seg + 1,
				    hdr_len + data_len + trl_len);
		siw_unmap_pages(page_array, kmap_mask);
	}
	if (rv < (int)hdr_len) {
		/* Not even complete hdr pushed or negative rv */
		wqe->processed -= data_len;
		if (rv >= 0) {
			c_tx->ctrl_sent += rv;
			rv = -EAGAIN;
		}
		goto done_crc;
	}
	rv -= hdr_len;

	if (rv >= (int)data_len) {
		/* all user data pushed to TCP or no data to push */
		if (data_len > 0 && wqe->processed < wqe->bytes) {
			/* Save the current state for next tx */
			c_tx->sge_idx = sge_idx;
			c_tx->sge_off = sge_off;
			c_tx->pbl_idx = pbl_idx;
		}
		rv -= data_len;

		if (rv == trl_len) /* all pushed */
			rv = 0;
		else {
			c_tx->state = SIW_SEND_TRAILER;
			c_tx->ctrl_len = MAX_TRAILER;
			c_tx->ctrl_sent = rv + 4 - c_tx->pad;
			c_tx->bytes_unsent = 0;
			rv = -EAGAIN;
		}

	} else if (data_len > 0) {
		/* Maybe some user data pushed to TCP */
		c_tx->state = SIW_SEND_DATA;
		wqe->processed -= data_len - rv;

		if (rv) {
			/*
			 * Some bytes out. Recompute tx state based
			 * on old state and bytes pushed
			 */
			unsigned int sge_unsent;

			c_tx->bytes_unsent -= rv;
			sge = &wqe->sqe.sge[c_tx->sge_idx];
			sge_unsent = sge->length - c_tx->sge_off;

			while (sge_unsent <= rv) {
				rv -= sge_unsent;
				c_tx->sge_idx++;
				c_tx->sge_off = 0;
				sge++;
				sge_unsent = sge->length;
			}
			c_tx->sge_off += rv;
		}
		rv = -EAGAIN;
	}
done_crc:
	c_tx->do_crc = 0;
done:
	return rv;
}

static void siw_update_tcpseg(struct siw_iwarp_tx *c_tx,
			      struct socket *s)
{
	struct tcp_sock *tp = tcp_sk(s->sk);

	if (tp->gso_segs) {
		if (c_tx->gso_seg_limit == 0)
			c_tx->tcp_seglen = tp->mss_cache * tp->gso_segs;
		else
			c_tx->tcp_seglen =
				tp->mss_cache *
				min_t(u16, c_tx->gso_seg_limit, tp->gso_segs);
	} else {
		c_tx->tcp_seglen = tp->mss_cache;
	}
	/* Loopback may give odd numbers */
	c_tx->tcp_seglen &= 0xfffffff8;
}

/*
 * siw_prepare_fpdu()
 *
 * Prepares transmit context to send out one FPDU if the FPDU will
 * contain user data and the user data are not immediate data.
 * Computes maximum FPDU length to fill up TCP MSS if possible.
 *
 * @qp:		QP from which to transmit
 * @wqe:	Current WQE causing transmission
 *
 * TODO: Take into account real available sendspace on socket
 *       to avoid header misalignment due to send pausing within
 *       fpdu transmission
 */
static void siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)
{
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	int data_len;

	c_tx->ctrl_len =
		iwarp_pktinfo[__rdmap_get_opcode(&c_tx->pkt.ctrl)].hdr_len;
	c_tx->ctrl_sent = 0;

	/*
	 * Update target buffer offset if any
	 */
	if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
		/* Untagged message */
		c_tx->pkt.c_untagged.ddp_mo = cpu_to_be32(wqe->processed);
	else /* Tagged message */
		c_tx->pkt.c_tagged.ddp_to =
			cpu_to_be64(wqe->sqe.raddr + wqe->processed);

	data_len = wqe->bytes - wqe->processed;
	if (data_len + c_tx->ctrl_len + MPA_CRC_SIZE > c_tx->tcp_seglen) {
		/* Trim DDP payload to fit into current TCP segment */
		data_len = c_tx->tcp_seglen - (c_tx->ctrl_len + MPA_CRC_SIZE);
		c_tx->pkt.ctrl.ddp_rdmap_ctrl &= ~DDP_FLAG_LAST;
		c_tx->pad = 0;
	} else {
		c_tx->pkt.ctrl.ddp_rdmap_ctrl |= DDP_FLAG_LAST;
		c_tx->pad = -data_len & 0x3;
	}
	c_tx->bytes_unsent = data_len;

	c_tx->pkt.ctrl.mpa_len =
		htons(c_tx->ctrl_len + data_len - MPA_HDR_SIZE);

	/*
	 * Init MPA CRC computation
	 */
	if (c_tx->mpa_crc_hd) {
		crypto_shash_init(c_tx->mpa_crc_hd);
		crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
				    c_tx->ctrl_len);
		c_tx->do_crc = 1;
	}
}

/*
 * siw_check_sgl_tx()
 *
 * Check permissions for a list of SGE's (SGL).
 * A successful check will have all memory referenced
 * for transmission resolved and assigned to the WQE.
 *
 * @pd:		Protection Domain SGL should belong to
 * @wqe:	WQE to be checked
 * @perms:	requested access permissions
 */
static int siw_check_sgl_tx(struct ib_pd *pd, struct siw_wqe *wqe,
			    enum ib_access_flags perms)
{
	struct siw_sge *sge = &wqe->sqe.sge[0];
	int i, len, num_sge = wqe->sqe.num_sge;

	if (unlikely(num_sge > SIW_MAX_SGE))
		return -EINVAL;

	for (i = 0, len = 0; num_sge; num_sge--, i++, sge++) {
		/*
		 * rdma verbs: do not check stag for a zero length sge
		 */
		if (sge->length) {
			int rv = siw_check_sge(pd, sge, &wqe->mem[i], perms, 0,
					       sge->length);

			if (unlikely(rv != E_ACCESS_OK))
				return rv;
		}
		len += sge->length;
	}
	return len;
}

/*
 * siw_qp_sq_proc_tx()
 *
 * Process one WQE which needs transmission on the wire.
 */
static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
{
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	struct socket *s = qp->attrs.sk;
	int rv = 0, burst_len = qp->tx_ctx.burst;
	enum rdmap_ecode ecode = RDMAP_ECODE_CATASTROPHIC_STREAM;

	if (unlikely(wqe->wr_status == SIW_WR_IDLE))
		return 0;

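	/*
	 * A zero burst count indicates a fresh scheduling round: refill
	 * the per-QP budget of segments to send before yielding (any
	 * remainder is saved back at tx_done).
	 */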
	if (!burst_len)
		burst_len = SQ_USER_MAXBURST;

	if (wqe->wr_status == SIW_WR_QUEUED) {
		if (!(wqe->sqe.flags & SIW_WQE_INLINE)) {
			if (tx_type(wqe) == SIW_OP_READ_RESPONSE)
				wqe->sqe.num_sge = 1;

			if (tx_type(wqe) != SIW_OP_READ &&
			    tx_type(wqe) != SIW_OP_READ_LOCAL_INV) {
				/*
				 * Reference memory to be tx'd w/o checking
				 * access for LOCAL_READ permission, since
				 * not defined in RDMA core.
				 */
				rv = siw_check_sgl_tx(qp->pd, wqe, 0);
				if (rv < 0) {
					if (tx_type(wqe) ==
					    SIW_OP_READ_RESPONSE)
						ecode = siw_rdmap_error(-rv);
					rv = -EINVAL;
					goto tx_error;
				}
				wqe->bytes = rv;
			} else {
				wqe->bytes = 0;
			}
		} else {
			wqe->bytes = wqe->sqe.sge[0].length;
			if (!rdma_is_kernel_res(&qp->base_qp.res)) {
				if (wqe->bytes > SIW_MAX_INLINE) {
					rv = -EINVAL;
					goto tx_error;
				}
				wqe->sqe.sge[0].laddr =
					(u64)(uintptr_t)&wqe->sqe.sge[1];
			}
		}
		wqe->wr_status = SIW_WR_INPROGRESS;
		wqe->processed = 0;

		siw_update_tcpseg(c_tx, s);

		rv = siw_qp_prepare_tx(c_tx);
		if (rv == PKT_FRAGMENTED) {
			c_tx->state = SIW_SEND_HDR;
			siw_prepare_fpdu(qp, wqe);
		} else if (rv == PKT_COMPLETE) {
			c_tx->state = SIW_SEND_SHORT_FPDU;
		} else {
			goto tx_error;
		}
	}

next_segment:
	siw_dbg_qp(qp, "wr type %d, state %d, data %u, sent %u, id %llx\n",
		   tx_type(wqe), wqe->wr_status, wqe->bytes, wqe->processed,
		   wqe->sqe.id);

	if (--burst_len == 0) {
		rv = -EINPROGRESS;
		goto tx_done;
	}
	if (c_tx->state == SIW_SEND_SHORT_FPDU) {
		enum siw_opcode tx_type = tx_type(wqe);
		unsigned int msg_flags;

		if (siw_sq_empty(qp) || !siw_tcp_nagle || burst_len == 1)
			/*
			 * End current TCP segment, if SQ runs empty,
			 * or siw_tcp_nagle is not set, or we bail out
			 * soon due to no burst credit left.
			 */
			msg_flags = MSG_DONTWAIT;
		else
			msg_flags = MSG_DONTWAIT | MSG_MORE;

		rv = siw_tx_ctrl(c_tx, s, msg_flags);

		if (!rv && tx_type != SIW_OP_READ &&
		    tx_type != SIW_OP_READ_LOCAL_INV)
			wqe->processed = wqe->bytes;

		goto tx_done;

	} else {
		rv = siw_tx_hdt(c_tx, s);
	}
	if (!rv) {
		/*
		 * One segment sent. Processing completed if last
		 * segment; do next segment otherwise.
		 */
		if (unlikely(c_tx->tx_suspend)) {
			/*
			 * Verbs, 6.4.: Try stopping sending after a full
			 * DDP segment if the connection goes down
			 * (== peer halfclose)
			 */
			rv = -ECONNABORTED;
			goto tx_done;
		}
		if (c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST) {
			siw_dbg_qp(qp, "WQE completed\n");
			goto tx_done;
		}
		c_tx->state = SIW_SEND_HDR;

		siw_update_tcpseg(c_tx, s);

		siw_prepare_fpdu(qp, wqe);
		goto next_segment;
	}
tx_done:
	qp->tx_ctx.burst = burst_len;
	return rv;

tx_error:
	if (ecode != RDMAP_ECODE_CATASTROPHIC_STREAM)
		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_REMOTE_PROTECTION, ecode, 1);
	else
		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_CATASTROPHIC,
				   RDMAP_ECODE_UNSPECIFIED, 1);
	return rv;
}

static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{
	struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mem *mem;
	int rv = 0;

	siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);

	if (unlikely(!base_mr)) {
		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
		return -EINVAL;
	}

	if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
		pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
		return -EINVAL;
	}

	mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
	if (unlikely(!mem)) {
		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
		return -EINVAL;
	}

	if (unlikely(mem->pd != pd)) {
		pr_warn("siw: fastreg: PD mismatch\n");
		rv = -EINVAL;
		goto out;
	}
	if (unlikely(mem->stag_valid)) {
		pr_warn("siw: fastreg: STag 0x%08x already valid\n", sqe->rkey);
		rv = -EINVAL;
		goto out;
	}
	/* Refresh STag since user may have changed key part */
	mem->stag = sqe->rkey;
	mem->perms = sqe->access;

	siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
	mem->va = base_mr->iova;
	mem->stag_valid = 1;
out:
	siw_mem_put(mem);
	return rv;
}

static int siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe)
{
	int rv;

	switch (tx_type(wqe)) {
	case SIW_OP_REG_MR:
		rv = siw_fastreg_mr(qp->pd, &wqe->sqe);
		break;

	case SIW_OP_INVAL_STAG:
		rv = siw_invalidate_stag(qp->pd, wqe->sqe.rkey);
		break;

	default:
		rv = -EINVAL;
	}
	return rv;
}

/*
 * siw_qp_sq_process()
 *
 * Core TX path routine for RDMAP/DDP/MPA using a TCP kernel socket.
 * Sends RDMAP payload for the current SQ WR @wqe of @qp in one or more
 * MPA FPDUs, each containing a DDP segment.
 *
 * SQ processing may occur in user context as a result of posting
 * new WQE's or from siw_sq_work_handler() context. Processing in
 * user context is limited to non-kernel verbs users.
 *
 * SQ processing may get paused anytime, possibly in the middle of a WR
 * or FPDU, if insufficient send space is available. SQ processing
 * gets resumed from siw_sq_work_handler(), if send space becomes
 * available again.
 *
 * Must be called with the QP state read-locked.
 *
 * Note:
 * An outbound RREQ can be satisfied by the corresponding RRESP
 * _before_ it gets assigned to the ORQ. This happens regularly
 * in RDMA READ via loopback case. Since both outbound RREQ and
 * inbound RRESP can be handled by the same CPU, locking the ORQ
 * is dead-lock prone and thus not an option. With that, the
 * RREQ gets assigned to the ORQ _before_ being sent - see
 * siw_activate_tx() - and pulled back in case of send failure.
 */
int siw_qp_sq_process(struct siw_qp *qp)
{
	struct siw_wqe *wqe = tx_wqe(qp);
	enum siw_opcode tx_type;
	unsigned long flags;
	int rv = 0;

	siw_dbg_qp(qp, "enter for type %d\n", tx_type(wqe));

next_wqe:
	/*
	 * Stop QP processing if SQ state changed
	 */
	if (unlikely(qp->tx_ctx.tx_suspend)) {
		siw_dbg_qp(qp, "tx suspended\n");
		goto done;
	}
	tx_type = tx_type(wqe);

	if (tx_type <= SIW_OP_READ_RESPONSE)
		rv = siw_qp_sq_proc_tx(qp, wqe);
	else
		rv = siw_qp_sq_proc_local(qp, wqe);

	if (!rv) {
		/*
		 * WQE processing done
		 */
		switch (tx_type) {
		case SIW_OP_SEND:
		case SIW_OP_SEND_REMOTE_INV:
		case SIW_OP_WRITE:
			siw_wqe_put_mem(wqe, tx_type);
			fallthrough;

		case SIW_OP_INVAL_STAG:
		case SIW_OP_REG_MR:
			if (tx_flags(wqe) & SIW_WQE_SIGNALLED)
				siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
						 SIW_WC_SUCCESS);
			break;

		case SIW_OP_READ:
		case SIW_OP_READ_LOCAL_INV:
			/*
			 * already enqueued to ORQ queue
			 */
			break;

		case SIW_OP_READ_RESPONSE:
			siw_wqe_put_mem(wqe, tx_type);
			break;

		default:
			WARN(1, "undefined WQE type %d\n", tx_type);
			rv = -EINVAL;
			goto done;
		}

		spin_lock_irqsave(&qp->sq_lock, flags);
		wqe->wr_status = SIW_WR_IDLE;
		rv = siw_activate_tx(qp);
		spin_unlock_irqrestore(&qp->sq_lock, flags);

		if (rv <= 0)
			goto done;

		goto next_wqe;

	} else if (rv == -EAGAIN) {
		siw_dbg_qp(qp, "sq paused: hd/tr %d of %d, data %d\n",
			   qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len,
			   qp->tx_ctx.bytes_unsent);
		rv = 0;
		goto done;
	} else if (rv == -EINPROGRESS) {
		rv = siw_sq_start(qp);
		goto done;
	} else {
		/*
		 * WQE processing failed.
		 * Verbs 8.3.2:
		 * o It turns any WQE into a signalled WQE.
		 * o Local catastrophic error must be surfaced
		 * o QP must be moved into Terminate state: done by code
		 *   doing socket state change processing
		 *
		 * o TODO: Termination message must be sent.
		 * o TODO: Implement more precise work completion errors,
		 *         see enum ib_wc_status in ib_verbs.h
		 */
		siw_dbg_qp(qp, "wqe type %d processing failed: %d\n",
			   tx_type(wqe), rv);

		spin_lock_irqsave(&qp->sq_lock, flags);
		/*
		 * RREQ may have already been completed by inbound RRESP!
		 */
		if ((tx_type == SIW_OP_READ ||
		     tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
			/* Cleanup pending entry in ORQ */
			qp->orq_put--;
			qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
		}
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		/*
		 * immediately suspends further TX processing
		 */
		if (!qp->tx_ctx.tx_suspend)
			siw_qp_cm_drop(qp, 0);

		switch (tx_type) {
		case SIW_OP_SEND:
		case SIW_OP_SEND_REMOTE_INV:
		case SIW_OP_SEND_WITH_IMM:
		case SIW_OP_WRITE:
		case SIW_OP_READ:
		case SIW_OP_READ_LOCAL_INV:
			siw_wqe_put_mem(wqe, tx_type);
			fallthrough;

		case SIW_OP_INVAL_STAG:
		case SIW_OP_REG_MR:
			siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
					 SIW_WC_LOC_QP_OP_ERR);

			siw_qp_event(qp, IB_EVENT_QP_FATAL);

			break;

		case SIW_OP_READ_RESPONSE:
			siw_dbg_qp(qp, "proc. read.response failed: %d\n", rv);

			siw_qp_event(qp, IB_EVENT_QP_REQ_ERR);

			siw_wqe_put_mem(wqe, SIW_OP_READ_RESPONSE);

			break;

		default:
			WARN(1, "undefined WQE type %d\n", tx_type);
			rv = -EINVAL;
		}
		wqe->wr_status = SIW_WR_IDLE;
	}
done:
	return rv;
}

static void siw_sq_resume(struct siw_qp *qp)
{
	if (down_read_trylock(&qp->state_lock)) {
		if (likely(qp->attrs.state == SIW_QP_STATE_RTS &&
			   !qp->tx_ctx.tx_suspend)) {
			int rv = siw_qp_sq_process(qp);

			up_read(&qp->state_lock);

			if (unlikely(rv < 0)) {
				siw_dbg_qp(qp, "SQ task failed: err %d\n", rv);

				if (!qp->tx_ctx.tx_suspend)
					siw_qp_cm_drop(qp, 0);
			}
		} else {
			up_read(&qp->state_lock);
		}
	} else {
		siw_dbg_qp(qp, "Resume SQ while QP locked\n");
	}
	siw_qp_put(qp);
}

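/*
 * Per-CPU TX worker state: a lock-free list of QPs with pending
 * SQ work, and a waitqueue the TX kthread sleeps on.
 */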
struct tx_task_t {
	struct llist_head active;
	wait_queue_head_t waiting;
};

static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);

void siw_stop_tx_thread(int nr_cpu)
{
	kthread_stop(siw_tx_thread[nr_cpu]);
	wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
}

int siw_run_sq(void *data)
{
	const int nr_cpu = (unsigned int)(long)data;
	struct llist_node *active;
	struct siw_qp *qp;
	struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);

	init_llist_head(&tx_task->active);
	init_waitqueue_head(&tx_task->waiting);

	while (1) {
		struct llist_node *fifo_list = NULL;

		wait_event_interruptible(tx_task->waiting,
					 !llist_empty(&tx_task->active) ||
						 kthread_should_stop());

		if (kthread_should_stop())
			break;

		active = llist_del_all(&tx_task->active);
		/*
		 * llist_del_all returns a list with newest entry first.
		 * Re-order list for fairness among QP's.
		 */
		while (active) {
			struct llist_node *tmp = active;

			active = llist_next(active);
			tmp->next = fifo_list;
			fifo_list = tmp;
		}
		while (fifo_list) {
			qp = container_of(fifo_list, struct siw_qp, tx_list);
			fifo_list = llist_next(fifo_list);
			qp->tx_list.next = NULL;

			siw_sq_resume(qp);
		}
	}
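	/*
	 * Drain QPs that got queued between the stop signal and thread
	 * exit, so the references taken in siw_sq_start() are dropped.
	 */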
	active = llist_del_all(&tx_task->active);
	if (active) {
		llist_for_each_entry(qp, active, tx_list) {
			qp->tx_list.next = NULL;
			siw_sq_resume(qp);
		}
	}
	return 0;
}

int siw_sq_start(struct siw_qp *qp)
{
	if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
		return 0;

	if (unlikely(!cpu_online(qp->tx_cpu))) {
		siw_put_tx_cpu(qp->tx_cpu);
		qp->tx_cpu = siw_get_tx_cpu(qp->sdev);
		if (qp->tx_cpu < 0) {
			pr_warn("siw: no tx cpu available\n");

			return -EIO;
		}
	}
	siw_qp_get(qp);

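	/*
	 * The QP reference taken above is held while the QP sits on the
	 * per-CPU tx list; siw_sq_resume() drops it after processing.
	 */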
	llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);

	wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);

	return 0;
}