xref: /OK3568_Linux_fs/kernel/drivers/infiniband/hw/hfi1/rc.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */

#ifndef HFI1_RC_H
#define HFI1_RC_H

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

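/**
 * update_ack_queue - advance the responder ACK queue past entry @n
 * @qp: the QP whose ACK queue is being updated
 * @n: index of the ACK queue entry just consumed
 *
 * Advance both the tail and acked indices to the entry after @n,
 * wrapping to zero once the index passes the number of responder
 * resources reported by rvt_size_atomic(), and reset the send-side
 * ACK state machine to ACKNOWLEDGE.
 */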
static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
{
	unsigned int next;

	next = n + 1;
	if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_acked_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

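/**
 * rc_defered_ack - defer an ACK/NAK response for a QP
 * @rcd: the receive context on whose wait list the QP is queued
 * @qp: the QP that owes a response
 *
 * If the QP is not already waiting, mark it as owing a NAK response,
 * take a reference on it, and append it to the context's wait list so
 * the response is generated later.
 */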
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

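/**
 * restart_sge - reposition an SGE state for a retransmit
 * @ss: the SGE state to reset
 * @wqe: the work request being retransmitted
 * @psn: the packet sequence number to restart from
 * @pmtu: the path MTU
 *
 * Convert the PSN delta from the start of @wqe into a byte offset
 * (one pmtu per packet) and return the remaining length via
 * rvt_restart_sge().
 */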
static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
			      u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	return rvt_restart_sge(ss, wqe, len);
}

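/**
 * release_rdma_sge_mr - release the MR referenced by an ACK queue entry
 * @e: the ACK queue entry
 *
 * Drop the memory region reference held by @e's RDMA SGE, if any, and
 * clear the pointer so the reference cannot be dropped twice.
 */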
static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
{
	if (e->rdma_sge.mr) {
		rvt_put_mr(e->rdma_sge.mr);
		e->rdma_sge.mr = NULL;
	}
}

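/*
 * Non-inline RC protocol helpers shared with the rest of the driver.
 */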
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
				      u8 *prev_ack, bool *scheduled);
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
	      struct hfi1_ctxtdata *rcd);
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct hfi1_ibport *ibp);

#endif /* HFI1_RC_H */