/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "rc.h"
#include "verbs_txreq.h"
#include "trace.h"

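/**
 * find_prev_entry - find the ack queue entry containing a given PSN
 * @qp: the QP
 * @psn: the packet sequence number to search for
 * @prev: optional return of the index of the entry found
 * @prev_ack: optional return of the index of the entry following the
 *	      one found
 * @scheduled: optional return of whether a response for the entry has
 *	       already been scheduled
 *
 * Walk the ack queue backwards from r_head_ack_queue looking for the
 * entry whose starting PSN is at or before @psn.
 *
 * Return: the matching ack queue entry, or NULL if the search wraps
 * around or reaches an unused entry.
 */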
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
				      u8 *prev_ack, bool *scheduled)
	__must_hold(&qp->s_lock)
{
	struct rvt_ack_entry *e = NULL;
	u8 i, p;
	bool s = true;

	for (i = qp->r_head_ack_queue; ; i = p) {
		if (i == qp->s_tail_ack_queue)
			s = false;
		if (i)
			p = i - 1;
		else
			p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
		if (p == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[p];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (p == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				s = false;
			break;
		}
	}
	if (prev)
		*prev = p;
	if (prev_ack)
		*prev_ack = i;
	if (scheduled)
		*scheduled = s;
	return e;
}

/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords, hdrlen;
	u32 len = 0;
	u32 bth0 = 0, bth2 = 0;
	u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *qpriv = qp->priv;
	bool last_pkt;
	u32 delta;
	u8 next = qp->s_tail_ack_queue;
	struct tid_rdma_request *req;

	trace_hfi1_rsp_make_rc_ack(qp, 0);
	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
	else
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;

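	/*
	 * qp->s_ack_state holds the opcode of the last response packet
	 * constructed, so the switch below either continues a partially
	 * sent response or starts building a new one.
	 */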
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		release_rdma_sge_mr(e);
		fallthrough;
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++next > rvt_size_atomic(&dev->rdi))
			next = 0;
		/*
		 * Only advance the s_acked_ack_queue pointer if there
		 * have been no TID RDMA requests.
		 */
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode != TID_OP(WRITE_REQ) &&
		    qp->s_acked_ack_queue == qp->s_tail_ack_queue)
			qp->s_acked_ack_queue = next;
		qp->s_tail_ack_queue = next;
		trace_hfi1_rsp_make_rc_ack(qp, e->psn);
		fallthrough;
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		/* Check for tid write fence */
		if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
		    hfi1_tid_rdma_ack_interlock(qp, e)) {
			iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
			goto bail;
		}
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * stop sending the remaining responses until
			 * the requester resends the original request.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				if (qp->s_acked_ack_queue ==
				    qp->s_tail_ack_queue)
					qp->s_acked_ack_queue =
						qp->r_head_ack_queue;
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else if (e->opcode == TID_OP(WRITE_REQ)) {
			/*
			 * If a TID RDMA WRITE RESP is being resent, we have to
			 * wait for the actual request. All requests that are to
			 * be resent will have their state set to
			 * TID_REQUEST_RESEND. When the new request arrives, the
			 * state will be changed to TID_REQUEST_RESEND_ACTIVE.
			 */
			req = ack_to_tid_req(e);
			if (req->state == TID_REQUEST_RESEND ||
			    req->state == TID_REQUEST_INIT_RESEND)
				goto bail;
			qp->s_ack_state = TID_OP(WRITE_RESP);
			qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
			goto write_resp;
		} else if (e->opcode == TID_OP(READ_REQ)) {
			/*
			 * If a TID RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * stop sending the remaining responses until
			 * the requester resends the original request.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				if (qp->s_acked_ack_queue ==
				    qp->s_tail_ack_queue)
					qp->s_acked_ack_queue =
						qp->r_head_ack_queue;
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_ack_state = TID_OP(READ_RESP);
			goto read_resp;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		trace_hfi1_tid_write_rsp_make_rc_ack(qp);
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		fallthrough;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	case TID_OP(WRITE_RESP):
write_resp:
		/*
		 * 1. Check if RVT_S_ACK_PENDING is set. If yes,
		 *    goto normal.
		 * 2. Attempt to allocate TID resources.
		 * 3. Remove RVT_S_RESP_PENDING flag from s_flags.
		 * 4. If resources are not available:
		 *    4.1 Set RVT_S_WAIT_TID_SPACE
		 *    4.2 Queue QP on RCD TID queue
		 *    4.3 Put QP on iowait list.
		 *    4.4 Build IB RNR NAK with appropriate timeout value
		 *    4.5 Return an indication that progress was made.
		 * 5. If resources are available:
		 *    5.1 Program HW flow CSRs
		 *    5.2 Build TID RDMA WRITE RESP packet
		 *    5.3 If more resources are needed, do 2.1 - 2.3.
		 *    5.4 Wake up next QP on RCD TID queue.
		 *    5.5 Return an indication that progress was made.
		 */

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		req = ack_to_tid_req(e);

		/*
		 * Send scheduled RNR NAKs. RNR NAKs need to be sent at
		 * segment boundaries, not at request boundaries. Don't change
		 * s_ack_state because we are still in the middle of a request.
		 */
		if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
		    qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
		    req->cur_seg == req->alloc_seg) {
			qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
			goto normal_no_state;
		}

		bth2 = mask_psn(qp->s_ack_rdma_psn);
		hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
							bth2, &len,
							&ps->s_txreq->ss);
		if (!hdrlen)
			return 0;

		hwords += hdrlen;
		bth0 = qp->s_ack_state << 24;
		qp->s_ack_rdma_psn++;
		trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
						     e->lpsn, req);
		if (req->cur_seg != req->total_segs)
			break;

		e->sent = 1;
		/* Do not free e->rdma_sge until all data are received */
		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
		break;

	case TID_OP(READ_RESP):
read_resp:
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
						      &bth1, &bth2, &len,
						      &last_pkt);
		if (delta == 0)
			goto error_qp;
		hwords += delta;
		if (last_pkt) {
			e->sent = 1;
			/*
			 * Increment qp->s_tail_ack_queue through s_ack_state
			 * transition.
			 */
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		break;
	case TID_OP(READ_REQ):
		goto bail;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
normal_no_state:
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
		ps->s_txreq->ss = NULL;
	}
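	/* Fill in the common txreq fields and build the RUC header. */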
	qp->s_rdma_ack_cnt++;
	ps->s_txreq->sde = qpriv->s_sde;
	ps->s_txreq->s_cur_size = len;
	ps->s_txreq->hdr_dwords = hwords;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
	return 1;
error_qp:
	spin_unlock_irqrestore(&qp->s_lock, ps->flags);
	spin_lock_irqsave(&qp->r_lock, ps->flags);
	spin_lock(&qp->s_lock);
	rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, ps->flags);
	spin_lock_irqsave(&qp->s_lock, ps->flags);
bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| HFI1_S_AHG_VALID);
	return 0;
}

/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss = NULL;
	struct rvt_swqe *wqe;
	struct hfi1_swqe_priv *wpriv;
	struct tid_rdma_request *req = NULL;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len = 0;
	u32 bth0 = 0, bth2 = 0;
	u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;
	struct tid_rdma_flow *flow = NULL;
	struct tid_rdma_params *remote;

	trace_hfi1_sender_make_rc_req(qp);
	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
					 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
		goto bail;

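	/*
	 * A retry may have moved qp->s_psn back into the range of PSNs
	 * that are still being transmitted. If packets in that range are
	 * still in flight, wait for them to clear before constructing
	 * more; otherwise reset the sending window.
	 */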
	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
check_s_state:
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head)) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 * However, there is no need to guard against
			 * TID RDMA READ after TID RDMA READ.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic &&
			    (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
			     priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				rvt_send_complete(qp, wqe,
						  err ? IB_WC_LOC_PROT_ERR
						      : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);

		/*
		 * Interlock between various IB requests and TID RDMA
		 * if necessary.
		 */
		if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
		    hfi1_tid_rdma_wqe_interlock(qp, wqe))
			goto bail;

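		/*
		 * Build the first packet of a new or restarted request
		 * based on the WR opcode.
		 */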
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
no_flow_control:
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_TID_RDMA_WRITE:
			if (newreq) {
				/*
				 * Limit the number of TID RDMA WRITE requests.
				 */
				if (atomic_read(&priv->n_tid_requests) >=
				    HFI1_TID_RDMA_WRITE_CNT)
					goto bail;

				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}

			hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
								&bth1, &bth2,
								&len);
			ss = NULL;
			if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
				priv->s_tid_cur = qp->s_cur;
				if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
					priv->s_tid_tail = qp->s_cur;
					priv->s_state = TID_OP(WRITE_RESP);
				}
			} else if (priv->s_tid_cur == priv->s_tid_head) {
				struct rvt_swqe *__w;
				struct tid_rdma_request *__r;

				__w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
				__r = wqe_to_tid_req(__w);

				/*
				 * The s_tid_cur pointer is advanced to s_cur
				 * if any of the following conditions about
				 * the WQE to which s_tid_cur currently points
				 * are satisfied:
				 * 1. The request is not a TID RDMA WRITE
				 *    request,
				 * 2. The request is in the INACTIVE or
				 *    COMPLETE states (TID RDMA READ requests
				 *    stay at INACTIVE and TID RDMA WRITE
				 *    transition to COMPLETE when done),
				 * 3. The request is in the ACTIVE or SYNC
				 *    state and the number of completed
				 *    segments is equal to the total segment
				 *    count.
				 *    (If ACTIVE, the request is waiting for
				 *    ACKs. If SYNC, the request has not
				 *    received any responses because it's
				 *    waiting on a sync point.)
				 */
				if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
				    __r->state == TID_REQUEST_INACTIVE ||
				    __r->state == TID_REQUEST_COMPLETE ||
				    ((__r->state == TID_REQUEST_ACTIVE ||
				      __r->state == TID_REQUEST_SYNC) &&
				     __r->comp_seg == __r->total_segs)) {
					if (priv->s_tid_tail ==
					    priv->s_tid_cur &&
					    priv->s_state ==
					    TID_OP(WRITE_DATA_LAST)) {
						priv->s_tid_tail = qp->s_cur;
						priv->s_state =
							TID_OP(WRITE_RESP);
					}
					priv->s_tid_cur = qp->s_cur;
				}
				/*
				 * A corner case: when the last TID RDMA WRITE
				 * request was completed, s_tid_head,
				 * s_tid_cur, and s_tid_tail all point to the
				 * same location. Other requests are posted and
				 * s_cur wraps around to the same location,
				 * where a new TID RDMA WRITE is posted. In
				 * this case, none of the indices need to be
				 * updated. However, the priv->s_state should.
				 */
				if (priv->s_tid_tail == qp->s_cur &&
				    priv->s_state == TID_OP(WRITE_DATA_LAST))
					priv->s_state = TID_OP(WRITE_RESP);
			}
			req = wqe_to_tid_req(wqe);
			if (newreq) {
				priv->s_tid_head = qp->s_cur;
				priv->pending_tid_w_resp += req->total_segs;
				atomic_inc(&priv->n_tid_requests);
				atomic_dec(&priv->n_requests);
			} else {
				req->state = TID_REQUEST_RESEND;
				req->comp_seg = delta_psn(bth2, wqe->psn);
				/*
				 * Pull back any segments since we are going
				 * to re-receive them.
				 */
				req->setup_head = req->clear_tail;
				priv->pending_tid_w_resp +=
					delta_psn(wqe->lpsn, bth2) + 1;
			}

			trace_hfi1_tid_write_sender_make_req(qp, newreq);
			trace_hfi1_tid_req_make_req_write(qp, newreq,
							  wqe->wr.opcode,
							  wqe->psn, wqe->lpsn,
							  req);
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (qp->s_num_rd_atomic >=
			    qp->s_max_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_RDMAR;
				goto bail;
			}
			qp->s_num_rd_atomic++;
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_TID_RDMA_READ:
			trace_hfi1_tid_read_sender_make_req(qp, newreq);
			wpriv = wqe->priv;
			req = wqe_to_tid_req(wqe);
			trace_hfi1_tid_req_make_req_read(qp, newreq,
							 wqe->wr.opcode,
							 wqe->psn, wqe->lpsn,
							 req);
			delta = cmp_psn(qp->s_psn, wqe->psn);

			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow. We could get here under
			 * three conditions: (1) it's a new request; (2) we
			 * are sending the second or later segment of a
			 * request, but the qp->s_state was set to
			 * OP(RDMA_READ_REQUEST) when the last segment of a
			 * previous request was received just before this;
			 * (3) we are resending a request.
			 */
			if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_RDMAR;
				goto bail;
			}
			if (newreq) {
				struct tid_rdma_flow *flow =
					&req->flows[req->setup_head];

				/*
				 * Set up s_sge as it is needed for TID
				 * allocation. However, if the pages have
				 * already been walked and mapped, skip it;
				 * an earlier attempt failed to allocate
				 * the TID entries.
				 */
				if (!flow->npagesets) {
					qp->s_sge.sge = wqe->sg_list[0];
					qp->s_sge.sg_list = wqe->sg_list + 1;
					qp->s_sge.num_sge = wqe->wr.num_sge;
					qp->s_sge.total_len = wqe->length;
					qp->s_len = wqe->length;
					req->isge = 0;
					req->clear_tail = req->setup_head;
					req->flow_idx = req->setup_head;
					req->state = TID_REQUEST_ACTIVE;
				}
			} else if (delta == 0) {
				/* Re-send a request */
				req->cur_seg = 0;
				req->comp_seg = 0;
				req->ack_pending = 0;
				req->flow_idx = req->clear_tail;
				req->state = TID_REQUEST_RESEND;
			}
			req->s_next_psn = qp->s_psn;
			/* Read one segment at a time */
			len = min_t(u32, req->seg_len,
				    wqe->length - req->seg_len * req->cur_seg);
			delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
							     &bth1, &bth2,
							     &len);
			if (delta <= 0) {
				/* Wait for TID space */
				goto bail;
			}
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			hwords += delta;
			ss = &wpriv->ss;
			/* Check if this is the last segment */
			if (req->cur_seg >= req->total_segs &&
			    ++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (qp->s_num_rd_atomic >=
			    qp->s_max_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_RDMAR;
				goto bail;
			}
			qp->s_num_rd_atomic++;
			fallthrough;
		case IB_WR_OPFN:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			    wqe->wr.opcode == IB_WR_OPFN) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
			qp->s_sge.sge = wqe->sg_list[0];
			qp->s_sge.sg_list = wqe->sg_list + 1;
			qp->s_sge.num_sge = wqe->wr.num_sge;
			qp->s_sge.total_len = wqe->length;
			qp->s_len = wqe->length;
		}
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
			qp->s_psn = wqe->lpsn + 1;
		else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
			qp->s_psn = req->s_next_psn;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		fallthrough;
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		fallthrough;
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		fallthrough;
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		fallthrough;
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;

	case TID_OP(WRITE_RESP):
		/*
		 * This value for s_state is used for restarting a TID RDMA
		 * WRITE request. See the comment in
		 * OP(RDMA_READ_RESPONSE_MIDDLE) above for more.
		 */
		req = wqe_to_tid_req(wqe);
		req->state = TID_REQUEST_RESEND;
		rcu_read_lock();
		remote = rcu_dereference(priv->tid_rdma.remote);
		req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
		len = wqe->length - (req->comp_seg * remote->max_len);
		rcu_read_unlock();

		bth2 = mask_psn(qp->s_psn);
		hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
							&bth2, &len);
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		qp->s_state = TID_OP(WRITE_REQ);
		priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
		priv->s_tid_cur = qp->s_cur;
		if (++qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
						  wqe->psn, wqe->lpsn, req);
		break;

	case TID_OP(READ_RESP):
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
			goto bail;
		/* This is used to restart a TID read request */
		req = wqe_to_tid_req(wqe);
		wpriv = wqe->priv;
		/*
		 * Back down. The field qp->s_psn has been set to the psn with
		 * which the request should be restarted. It's OK to use
		 * division as this is on the retry path.
		 */
		req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;

		/*
		 * The following function needs to be redefined to return the
		 * status to make sure that we find the flow. At the same
		 * time, we can use the req->state change to check if the
		 * call succeeds or not.
		 */
		req->state = TID_REQUEST_RESEND;
		hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
		if (req->state != TID_REQUEST_ACTIVE) {
			/*
			 * Failed to find the flow. Release all allocated tid
			 * resources.
			 */
			hfi1_kern_exp_rcv_clear_all(req);
			hfi1_kern_clear_hw_flow(priv->rcd, qp);

			hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
			goto bail;
		}
		req->state = TID_REQUEST_RESEND;
		len = min_t(u32, req->seg_len,
			    wqe->length - req->seg_len * req->cur_seg);
		flow = &req->flows[req->flow_idx];
		len -= flow->sent;
		req->s_next_psn = flow->flow_state.ib_lpsn + 1;
		delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
							&bth2, &len);
		if (delta <= 0) {
			/* Wait for TID space */
			goto bail;
		}
		hwords += delta;
		ss = &wpriv->ss;
		/* Check if this is the last segment */
		if (req->cur_seg >= req->total_segs &&
		    ++qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		qp->s_psn = req->s_next_psn;
		trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
						 wqe->psn, wqe->lpsn, req);
		break;
	case TID_OP(READ_REQ):
		req = wqe_to_tid_req(wqe);
		delta = cmp_psn(qp->s_psn, wqe->psn);
		/*
		 * If the current WR is not TID RDMA READ, or this is the start
		 * of a new request, we need to change the qp->s_state so that
		 * the request can be set up properly.
		 */
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
		    qp->s_cur == qp->s_tail) {
			qp->s_state = OP(RDMA_READ_REQUEST);
			if (delta == 0 || qp->s_cur == qp->s_tail)
				goto check_s_state;
			else
				goto bail;
		}

		/* Rate limiting */
		if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
			qp->s_flags |= RVT_S_WAIT_RDMAR;
			goto bail;
		}

		wpriv = wqe->priv;
		/* Read one segment at a time */
		len = min_t(u32, req->seg_len,
			    wqe->length - req->seg_len * req->cur_seg);
		delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
						     &bth2, &len);
		if (delta <= 0) {
			/* Wait for TID space */
			goto bail;
		}
		hwords += delta;
		ss = &wpriv->ss;
		/* Check if this is the last segment */
		if (req->cur_seg >= req->total_segs &&
		    ++qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		qp->s_psn = req->s_next_psn;
		trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
						 wqe->psn, wqe->lpsn, req);
		break;
	}
1176*4882a593Smuzhiyun qp->s_sending_hpsn = bth2;
1177*4882a593Smuzhiyun delta = delta_psn(bth2, wqe->psn);
1178*4882a593Smuzhiyun if (delta && delta % HFI1_PSN_CREDIT == 0 &&
1179*4882a593Smuzhiyun wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1180*4882a593Smuzhiyun bth2 |= IB_BTH_REQ_ACK;
1181*4882a593Smuzhiyun if (qp->s_flags & RVT_S_SEND_ONE) {
1182*4882a593Smuzhiyun qp->s_flags &= ~RVT_S_SEND_ONE;
1183*4882a593Smuzhiyun qp->s_flags |= RVT_S_WAIT_ACK;
1184*4882a593Smuzhiyun bth2 |= IB_BTH_REQ_ACK;
1185*4882a593Smuzhiyun }
1186*4882a593Smuzhiyun qp->s_len -= len;
1187*4882a593Smuzhiyun ps->s_txreq->hdr_dwords = hwords;
1188*4882a593Smuzhiyun ps->s_txreq->sde = priv->s_sde;
1189*4882a593Smuzhiyun ps->s_txreq->ss = ss;
1190*4882a593Smuzhiyun ps->s_txreq->s_cur_size = len;
1191*4882a593Smuzhiyun hfi1_make_ruc_header(
1192*4882a593Smuzhiyun qp,
1193*4882a593Smuzhiyun ohdr,
1194*4882a593Smuzhiyun bth0 | (qp->s_state << 24),
1195*4882a593Smuzhiyun bth1,
1196*4882a593Smuzhiyun bth2,
1197*4882a593Smuzhiyun middle,
1198*4882a593Smuzhiyun ps);
1199*4882a593Smuzhiyun return 1;
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun done_free_tx:
1202*4882a593Smuzhiyun hfi1_put_txreq(ps->s_txreq);
1203*4882a593Smuzhiyun ps->s_txreq = NULL;
1204*4882a593Smuzhiyun return 1;
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun bail:
1207*4882a593Smuzhiyun hfi1_put_txreq(ps->s_txreq);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun bail_no_tx:
1210*4882a593Smuzhiyun ps->s_txreq = NULL;
1211*4882a593Smuzhiyun qp->s_flags &= ~RVT_S_BUSY;
1212*4882a593Smuzhiyun /*
1213*4882a593Smuzhiyun * If we didn't get a txreq, the QP will be woken up later to try
1214*4882a593Smuzhiyun * again. Set the flags to indicate which work item to wake
1215*4882a593Smuzhiyun * up.
1216*4882a593Smuzhiyun */
1217*4882a593Smuzhiyun iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
1218*4882a593Smuzhiyun return 0;
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun
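/*
 * Fill in the BTH and AETH for an RC ACK/NAK packet. If the receiver
 * has a NAK pending (qp->r_nak_state), the AETH carries the NAK code
 * and MSN; otherwise rvt_compute_aeth() supplies the credit/MSN AETH.
 */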
static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
				      struct ib_other_headers *ohdr,
				      u32 bth0, u32 bth1)
{
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					   (qp->r_nak_state <<
					    IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);

	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
}

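/*
 * Defer sending the ACK to the send engine: flag the ACK as pending in
 * s_flags and schedule the QP so the engine emits it the next time it
 * runs. Used when the ACK cannot be sent inline immediately.
 */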
static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	ibp = rcd_to_iport(packet->rcd);
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send tasklet. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

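/*
 * Build a 9B (IB-style) header for an RC ACK. On return, *hwords holds
 * the header length in 32-bit words and pbc_flags carries the
 * PBC_DC_INFO bit derived from sc5.
 */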
static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
				       struct hfi1_opa_header *opa_hdr,
				       u8 sc5, bool is_fecn,
				       u64 *pbc_flags, u32 *hwords,
				       u32 *nwords)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = &opa_hdr->ibh;
	struct ib_other_headers *ohdr;
	u16 lrh0 = HFI1_LRH_BTH;
	u16 pkey;
	u32 bth0, bth1;

	opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	*hwords = 6;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 2, SIZE_OF_CRC);
		ohdr = &hdr->u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	}
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	*pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);

	/* read pkey_index w/o lock (it's atomic) */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);

	lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
		 IB_SL_SHIFT;

	hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	bth1 = (!!is_fecn) << IB_BECN_SHIFT;
	/*
	 * Inline ACKs go out without the use of the Verbs send engine, so
	 * we need to set the STL Verbs Extended bit here
	 */
	bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}

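/*
 * Build a 16B (OPA-style) header for an RC ACK. In addition to *hwords,
 * *nwords returns the tail (pad/LT/CRC) length in 32-bit words so the
 * caller can size the PIO buffer correctly.
 */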
static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
					struct hfi1_opa_header *opa_hdr,
					u8 sc5, bool is_fecn,
					u64 *pbc_flags, u32 *hwords,
					u32 *nwords)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_16b_header *hdr = &opa_hdr->opah;
	struct ib_other_headers *ohdr;
	u32 bth0, bth1 = 0;
	u16 len, pkey;
	bool becn = is_fecn;
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	u8 extra_bytes;

	opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
	*hwords = 8;
	extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
	*nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 4, *nwords);
		ohdr = &hdr->u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	}
	*pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;

	/* read pkey_index w/o lock (it's atomic) */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);

	/* Convert dwords to flits */
	len = (*hwords + *nwords) >> 1;

	hfi1_make_16b_hdr(hdr, ppd->lid |
			  (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			   ((1 << ppd->lmc) - 1)),
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B), len, pkey, becn, 0, l4, sc5);

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 = OPA_BTH_MIG_REQ;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}

typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
				 struct hfi1_opa_header *opa_hdr,
				 u8 sc5, bool is_fecn,
				 u64 *pbc_flags, u32 *hwords,
				 u32 *nwords);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
};
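/*
 * The table is indexed by the per-QP header type; e.g. the call site in
 * hfi1_send_rc_ack() below dispatches with:
 *
 *	hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
 *					     &pbc_flags, &hwords, &nwords);
 */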

/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @packet: data packet information
 * @is_fecn: whether the FECN bit was set in the received packet
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u64 pbc, pbc_flags = 0;
	u32 hwords = 0;
	u32 nwords = 0;
	u32 plen;
	struct pio_buf *pbuf;
	struct hfi1_opa_header opa_hdr;

	/* clear the defer count */
	qp->r_adefered = 0;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING) {
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}

	/* Ensure s_rdma_ack_cnt changes are committed */
	if (qp->s_rdma_ack_cnt) {
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	/* Make the appropriate header */
	hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
					     &pbc_flags, &hwords, &nwords);

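	/*
	 * Total length in 32-bit words: two words for the PBC itself plus
	 * the header (hwords) and pad/CRC tail (nwords).
	 */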
	plen = 2 /* PBC */ + hwords + nwords;
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
			 sc_to_vlt(ppd->dd, sc5), plen);
	pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
	if (IS_ERR_OR_NULL(pbuf)) {
		/*
		 * We have no room to send at the moment. Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}
	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &opa_hdr, ib_is_sc5(sc5));

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
				 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
				  (void *)&opa_hdr.ibh :
				  (void *)&opa_hdr.opah), hwords);
	return;
}

/**
 * update_num_rd_atomic - update the qp->s_num_rd_atomic
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 * @wqe: the wqe
 *
 * This is called from reset_psn() to update qp->s_num_rd_atomic
 * for the current wqe.
 * Called at interrupt level with the QP s_lock held.
 */
static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
				 struct rvt_swqe *wqe)
{
	u32 opcode = wqe->wr.opcode;

	if (opcode == IB_WR_RDMA_READ ||
	    opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
		qp->s_num_rd_atomic++;
	} else if (opcode == IB_WR_TID_RDMA_READ) {
		struct tid_rdma_request *req = wqe_to_tid_req(wqe);
		struct hfi1_qp_priv *priv = qp->priv;

		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			u32 cur_seg;

			cur_seg = (psn - wqe->psn) / priv->pkts_ps;
			req->ack_pending = cur_seg - req->comp_seg;
			priv->pending_tid_r_segs += req->ack_pending;
			qp->s_num_rd_atomic += req->ack_pending;
			trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
								wqe->wr.opcode,
								wqe->psn,
								wqe->lpsn,
								req);
		} else {
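			/*
			 * The restart point is past this request's last
			 * PSN, so all of its read segments are still
			 * outstanding.
			 */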
			priv->pending_tid_r_segs += req->total_segs;
			qp->s_num_rd_atomic += req->total_segs;
		}
	}
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;
	priv->pending_tid_r_segs = 0;
	priv->pending_tid_w_resp = 0;
	qp->s_num_rd_atomic = 0;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}
	update_num_rd_atomic(qp, psn, wqe);

	/* Find the work request opcode corresponding to the given PSN. */
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0) {
			/* Point wqe back to the previous one */
			wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
			break;
		}
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}

		update_num_rd_atomic(qp, psn, wqe);
	}
	opcode = wqe->wr.opcode;

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_TID_RDMA_WRITE:
		qp->s_state = TID_OP(WRITE_RESP);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	case IB_WR_TID_RDMA_READ:
		qp->s_state = TID_OP(READ_RESP);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one PSN per request.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~HFI1_S_AHG_VALID;
	trace_hfi1_sender_reset_psn(qp);
}

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	trace_hfi1_sender_restart_rc(qp);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			/*
			 * We need special handling for the OPFN request WQEs as
			 * they are not allowed to generate real user errors
			 */
			if (wqe->wr.opcode == IB_WR_OPFN) {
				struct hfi1_ibport *ibp =
					to_iport(qp->ibqp.device, qp->port_num);
				/*
				 * Call opfn_conn_reply() with capcode and
				 * remaining data as 0 to close out the
				 * current request
				 */
				opfn_conn_reply(qp, priv->opfn.curr);
				wqe = do_rc_completion(qp, wqe, ibp);
				qp->s_flags &= ~RVT_S_WAIT_ACK;
			} else {
				trace_hfi1_tid_write_sender_restart_rc(qp, 0);
				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
					struct tid_rdma_request *req;

					req = wqe_to_tid_req(wqe);
					hfi1_kern_exp_rcv_clear_all(req);
					hfi1_kern_clear_hw_flow(priv->rcd, qp);
				}

				hfi1_trdma_send_complete(qp, wqe,
							 IB_WC_RETRY_EXC_ERR);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ ||
	    wqe->wr.opcode == IB_WR_TID_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads or TID RDMA ops
 * are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ ||
			    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
			    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/**
 * hfi1_rc_verbs_aborted - handle abort status
 * @qp: the QP
 * @opah: the opa header
 *
 * This code modifies both the ACK bit in BTH[2]
 * and the s_flags to go into send-one mode.
 *
 * This serves to throttle the send engine to only
 * send a single packet in the likely case that a
 * link has gone down.
 */
void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
{
	struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
	u8 opcode = ib_bth_get_opcode(ohdr);
	u32 psn;

	/* ignore responses */
	if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	     opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
	    opcode == TID_OP(READ_RESP) ||
	    opcode == TID_OP(WRITE_RESP))
		return;

	psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
	ohdr->bth[2] = cpu_to_be32(psn);
	qp->s_flags |= RVT_S_SEND_ONE;
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
{
	struct ib_other_headers *ohdr;
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe;
	u32 opcode, head, tail;
	u32 psn;
	struct tid_rdma_request *req;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	ohdr = hfi1_get_rc_ohdr(opah);
	opcode = ib_bth_get_opcode(ohdr);
	if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	     opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
	    opcode == TID_OP(READ_RESP) ||
	    opcode == TID_OP(WRITE_RESP)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = ib_bth_get_psn(ohdr);
	/*
	 * Don't attempt to reset the sending PSN for packets in the
	 * KDETH PSN space since the PSN does not match anything.
	 */
	if (opcode != TID_OP(WRITE_DATA) &&
	    opcode != TID_OP(WRITE_DATA_LAST) &&
	    opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
		reset_sending_psn(qp, psn);

	/* Handle TID RDMA WRITE packets differently */
	if (opcode >= TID_OP(WRITE_REQ) &&
	    opcode <= TID_OP(WRITE_DATA_LAST)) {
		head = priv->s_tid_head;
		tail = priv->s_tid_cur;
		/*
		 * s_tid_cur is set to s_tid_head in the case where
		 * a new TID RDMA request is being started and all
		 * previous ones have been completed.
		 * Therefore, we need to do a secondary check in order
		 * to properly determine whether we should start the
		 * RC timer.
		 */
		wqe = rvt_get_swqe_ptr(qp, tail);
		req = wqe_to_tid_req(wqe);
		if (head == tail && req->comp_seg < req->total_segs) {
			if (tail == 0)
				tail = qp->s_size - 1;
			else
				tail -= 1;
		}
	} else {
		head = qp->s_tail;
		tail = qp->s_acked;
	}

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && tail != head &&
	    opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
	    opcode != TID_OP(RESYNC) &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		if (opcode == TID_OP(READ_REQ))
			rvt_add_retry_timer_ext(qp, priv->timeout_shift);
		else
			rvt_add_retry_timer(qp);
	}

	/* Start TID RDMA ACK timer */
	if ((opcode == TID_OP(WRITE_DATA) ||
	     opcode == TID_OP(WRITE_DATA_LAST) ||
	     opcode == TID_OP(RESYNC)) &&
	    (psn & IB_BTH_REQ_ACK) &&
	    !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		/*
		 * The TID RDMA ACK packet could be received before this
		 * function is called. Therefore, add the timer only if TID
		 * RDMA ACK packets are actually pending.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		req = wqe_to_tid_req(wqe);
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
		    req->ack_seg < req->cur_seg)
			hfi1_add_tid_retry_timer(qp);
	}

	while (qp->s_last != qp->s_acked) {
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		trdma_clean_swqe(qp, wqe);
		trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
				  struct rvt_swqe *wqe,
				  struct hfi1_ibport *ibp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	trace_hfi1_rc_completion(qp, wqe->lpsn);
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		trdma_clean_swqe(qp, wqe);
		trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	/*
	 * Don't update the last PSN if the request being completed is
	 * a TID RDMA WRITE request.
	 * Completion of TID RDMA WRITE requests is done by the
	 * TID RDMA ACKs and as such could be for a request that has
	 * already been ACKed as far as the IB state machine is
	 * concerned.
	 */
	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
		update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
		priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
		hfi1_schedule_send(qp);
	}
	return wqe;
}

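/*
 * Restart the request from the PSN following the last ACKed one, unless
 * a restart is already pending (RVT_R_RDMAR_SEQ), and queue the QP on
 * the receive context's wait list so a send gets scheduled afterwards.
 */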
static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
{
	/* Retry this request. */
	if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
		qp->r_flags |= RVT_R_RDMAR_SEQ;
		hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
		if (list_empty(&qp->rspwait)) {
			qp->r_flags |= RVT_R_RSP_SEND;
			rvt_get_qp(qp);
			list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
		}
	}
}

/**
 * update_qp_retry_state - Update qp retry state.
 * @qp: the QP
 * @psn: the packet sequence number of the TID RDMA WRITE RESP.
 * @spsn: The start psn for the given TID RDMA WRITE swqe.
 * @lpsn: The last psn for the given TID RDMA WRITE swqe.
 *
 * This function is called to update the qp retry state upon
 * receiving a TID WRITE RESP after the qp is scheduled to retry
 * a request.
 */
static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
				  u32 lpsn)
{
	struct hfi1_qp_priv *qpriv = qp->priv;

	qp->s_psn = psn + 1;
	/*
	 * If this is the first TID RDMA WRITE RESP packet for the current
	 * request, change the s_state so that the retry will be processed
	 * correctly. Similarly, if this is the last TID RDMA WRITE RESP
	 * packet, change the s_state and advance the s_cur.
	 */
	if (cmp_psn(psn, lpsn) >= 0) {
		qp->s_cur = qpriv->s_tid_cur + 1;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_state = TID_OP(WRITE_REQ);
	} else if (!cmp_psn(psn, spsn)) {
		qp->s_cur = qpriv->s_tid_cur;
		qp->s_state = TID_OP(WRITE_RESP);
	}
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH carried by the ACK
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the payload value carried by the ACK, if any (atomic result or
 *	 OPFN data)
 * @rcd: the receive context
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
	      u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	struct rvt_dev_info *rdi;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request. The MSN won't include the NAK'ed
	 * request but will include any ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = rcd_to_iport(rcd);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic. In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops. Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
		     (opcode != TID_OP(READ_RESP) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
		    (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
		     (delta_psn(psn, qp->s_last_psn) != 1))) {
			set_restart_qp(qp, rcd);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (wqe->wr.opcode == IB_WR_OPFN)
			opfn_conn_reply(qp, val);

		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}

		/*
		 * TID RDMA WRITE requests will be completed by the TID RDMA
		 * ACK packet handler (see tid_rdma.c).
		 */
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
			break;

		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
	trace_hfi1_sender_do_rc_ack(qp);
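	/*
	 * The top three bits of the AETH syndrome distinguish ACK (0),
	 * RNR NAK (1), and NAK (3); the value 2 is reserved.
	 */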
	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0: /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
			if (wqe_to_tid_req(wqe)->ack_pending)
				rvt_mod_retry_timer_ext(qp,
							qpriv->timeout_shift);
			else
				rvt_stop_rc_timers(qp);
		} else if (qp->s_acked != qp->s_tail) {
			struct rvt_swqe *__w = NULL;

			if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
				__w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);

			/*
			 * Stop timers if we've received all of the TID RDMA
			 * WRITE responses.
			 */
			if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
			    opcode == TID_OP(WRITE_RESP)) {
				/*
				 * Normally, the loop above would correctly
				 * process all WQEs from s_acked onward and
				 * either complete them or check for correct
				 * PSN sequencing.
				 * However, for TID RDMA, due to pipelining,
				 * the response may not be for the request at
				 * s_acked so the above loop would just be
2142*4882a593Smuzhiyun * skipped. This does not allow for checking
2143*4882a593Smuzhiyun * the PSN sequencing. It has to be done
2144*4882a593Smuzhiyun * separately.
2145*4882a593Smuzhiyun */
2146*4882a593Smuzhiyun if (cmp_psn(psn, qp->s_last_psn + 1)) {
2147*4882a593Smuzhiyun set_restart_qp(qp, rcd);
2148*4882a593Smuzhiyun goto bail_stop;
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun /*
2151*4882a593Smuzhiyun * If the psn is being resent, stop the
2152*4882a593Smuzhiyun * resending.
2153*4882a593Smuzhiyun */
2154*4882a593Smuzhiyun if (qp->s_cur != qp->s_tail &&
2155*4882a593Smuzhiyun cmp_psn(qp->s_psn, psn) <= 0)
2156*4882a593Smuzhiyun update_qp_retry_state(qp, psn,
2157*4882a593Smuzhiyun __w->psn,
2158*4882a593Smuzhiyun __w->lpsn);
2159*4882a593Smuzhiyun else if (--qpriv->pending_tid_w_resp)
2160*4882a593Smuzhiyun rvt_mod_retry_timer(qp);
2161*4882a593Smuzhiyun else
2162*4882a593Smuzhiyun rvt_stop_rc_timers(qp);
2163*4882a593Smuzhiyun } else {
2164*4882a593Smuzhiyun /*
2165*4882a593Smuzhiyun * We are expecting more ACKs so
2166*4882a593Smuzhiyun * mod the retry timer.
2167*4882a593Smuzhiyun */
2168*4882a593Smuzhiyun rvt_mod_retry_timer(qp);
2169*4882a593Smuzhiyun /*
2170*4882a593Smuzhiyun * We can stop re-sending the earlier packets
2171*4882a593Smuzhiyun * and continue with the next packet the
2172*4882a593Smuzhiyun * receiver wants.
2173*4882a593Smuzhiyun */
2174*4882a593Smuzhiyun if (cmp_psn(qp->s_psn, psn) <= 0)
2175*4882a593Smuzhiyun reset_psn(qp, psn + 1);
2176*4882a593Smuzhiyun }
2177*4882a593Smuzhiyun } else {
2178*4882a593Smuzhiyun /* No more acks - kill all timers */
2179*4882a593Smuzhiyun rvt_stop_rc_timers(qp);
2180*4882a593Smuzhiyun if (cmp_psn(qp->s_psn, psn) <= 0) {
2181*4882a593Smuzhiyun qp->s_state = OP(SEND_LAST);
2182*4882a593Smuzhiyun qp->s_psn = psn + 1;
2183*4882a593Smuzhiyun }
2184*4882a593Smuzhiyun }
2185*4882a593Smuzhiyun if (qp->s_flags & RVT_S_WAIT_ACK) {
2186*4882a593Smuzhiyun qp->s_flags &= ~RVT_S_WAIT_ACK;
2187*4882a593Smuzhiyun hfi1_schedule_send(qp);
2188*4882a593Smuzhiyun }
2189*4882a593Smuzhiyun rvt_get_credit(qp, aeth);
2190*4882a593Smuzhiyun qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2191*4882a593Smuzhiyun qp->s_retry = qp->s_retry_cnt;
2192*4882a593Smuzhiyun /*
2193*4882a593Smuzhiyun * If the current request is a TID RDMA WRITE request and the
2194*4882a593Smuzhiyun * response is not a TID RDMA WRITE RESP packet, s_last_psn
2195*4882a593Smuzhiyun * can't be advanced.
2196*4882a593Smuzhiyun */
2197*4882a593Smuzhiyun if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2198*4882a593Smuzhiyun opcode != TID_OP(WRITE_RESP) &&
2199*4882a593Smuzhiyun cmp_psn(psn, wqe->psn) >= 0)
2200*4882a593Smuzhiyun return 1;
2201*4882a593Smuzhiyun update_last_psn(qp, psn);
2202*4882a593Smuzhiyun return 1;
2203*4882a593Smuzhiyun
2204*4882a593Smuzhiyun case 1: /* RNR NAK */
2205*4882a593Smuzhiyun ibp->rvp.n_rnr_naks++;
2206*4882a593Smuzhiyun if (qp->s_acked == qp->s_tail)
2207*4882a593Smuzhiyun goto bail_stop;
2208*4882a593Smuzhiyun if (qp->s_flags & RVT_S_WAIT_RNR)
2209*4882a593Smuzhiyun goto bail_stop;
2210*4882a593Smuzhiyun rdi = ib_to_rvt(qp->ibqp.device);
2211*4882a593Smuzhiyun if (!(rdi->post_parms[wqe->wr.opcode].flags &
2212*4882a593Smuzhiyun RVT_OPERATION_IGN_RNR_CNT)) {
2213*4882a593Smuzhiyun if (qp->s_rnr_retry == 0) {
2214*4882a593Smuzhiyun status = IB_WC_RNR_RETRY_EXC_ERR;
2215*4882a593Smuzhiyun goto class_b;
2216*4882a593Smuzhiyun }
2217*4882a593Smuzhiyun if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
2218*4882a593Smuzhiyun qp->s_rnr_retry--;
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun /*
2222*4882a593Smuzhiyun * The last valid PSN is the previous PSN. For TID RDMA WRITE
2223*4882a593Smuzhiyun * request, s_last_psn should be incremented only when a TID
2224*4882a593Smuzhiyun * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
2225*4882a593Smuzhiyun * WRITE RESP packets.
2226*4882a593Smuzhiyun */
2227*4882a593Smuzhiyun if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
2228*4882a593Smuzhiyun reset_psn(qp, qp->s_last_psn + 1);
2229*4882a593Smuzhiyun } else {
2230*4882a593Smuzhiyun update_last_psn(qp, psn - 1);
2231*4882a593Smuzhiyun reset_psn(qp, psn);
2232*4882a593Smuzhiyun }
2233*4882a593Smuzhiyun
2234*4882a593Smuzhiyun ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
2235*4882a593Smuzhiyun qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:		/* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			hfi1_restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1:	/* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2:	/* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3:	/* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
					hfi1_kern_read_tid_flow_free(qp);

				hfi1_trdma_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:		/* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached */
bail_stop:
	rvt_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @packet: data packet information
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp;
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u64 val;
	u32 aeth;
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pmtu = qp->pmtu;
	u16 hdrsize = packet->hlen;
	u8 opcode = packet->opcode;
	u8 pad = packet->pad;
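	/* padding + any 16B trailing byte + the ICRC (SIZE_OF_CRC dwords) */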
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	spin_lock_irqsave(&qp->s_lock, flags);
	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

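		/*
		 * A MIDDLE response indicates forward progress, so reset
		 * the retry count.
		 */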
		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, pmtu, false, false);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + extra_bytes)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + extra_bytes;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

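/* The labels below are reached only via the gotos in the switch above. */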
ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	ibp = rcd_to_iport(rcd);
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		rvt_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context data
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 prev;
	u8 mra; /* most recent ACK */
	bool old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = true;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	e = find_prev_entry(qp, psn, &prev, &mra, &old_req);

	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		release_rdma_sge_mr(e);
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
			qp->s_acked_ack_queue = prev;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
			qp->s_acked_ack_queue = prev;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (mra == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
			qp->s_acked_ack_queue = mra;
		qp->s_tail_ack_queue = mra;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

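	/* Mark this SL in the congestion event bitmap and bump the counter. */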
	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_get_ns() / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

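	/* Snapshot the CCTI so the trigger threshold check can run unlocked. */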
	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL_PINNED);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: data packet information
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 opcode = packet->opcode;
	u32 hdrsize = packet->hlen;
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pad = packet->pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;
	bool copy_last = false, fecn;
	u32 rkey;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	lockdep_assert_held(&qp->r_lock);

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

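	/* Latch FECN now so a BECN can be returned in the eventual ACK. */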
	fecn = process_ecn(qp, packet);
	opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));

	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(packet);
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		fallthrough;
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		/*
		 * There is no padding for 9B packets, but 16B packets
		 * will come in with some padding since we always add
		 * CRC and LT bytes, which need to be flit aligned.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		fallthrough;	/* for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = rvt_is_user_qp(qp);
		fallthrough;
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto nack_inv;
		/* Don't count the CRC (and padding and LT byte for 16B). */
		tlen -= (hdrsize + extra_bytes);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
		rvt_put_ss(&qp->r_sge);
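		/* The message is complete; advance the message sequence number. */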
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = rvt_is_user_qp(qp);
		fallthrough;
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			/* peer will send again */
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
		if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_acked_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		release_rdma_sge_mr(e);
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN. We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;
		qpriv->r_tid_alloc = qp->r_head_ack_queue;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		if (fecn)
			qp->s_flags |= RVT_S_ECN;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
		u64 vaddr = get_ib_ateth_vaddr(ateth);
		bool opfn = opcode == OP(COMPARE_SWAP) &&
			vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
		struct rvt_ack_entry *e;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
			     !opfn))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_acked_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		release_rdma_sge_mr(e);
		/* Process OPFN special virtual address */
		if (opfn) {
			opfn_conn_response(qp, e, ateth);
			goto ack;
		}
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
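		/* Either way, e->atomic_data holds the pre-update value. */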
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
ack:
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;
		qpriv->r_tid_alloc = qp->r_head_ack_queue;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		if (fecn)
			qp->s_flags |= RVT_S_ECN;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK || fecn) {
		if (packet->numpkt == 0 || fecn ||
		    qp->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		qp->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(packet, fecn);
}

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct hfi1_packet *packet,
	struct rvt_qp *qp)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	int diff;
	u32 opcode;
	u32 psn;

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	psn = ib_bth_get_psn(packet->ohdr);
	opcode = ib_bth_get_opcode(packet->ohdr);

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}