/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */
void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);
/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);
/* rxe_mmap.c */
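/*
 * Describes a queue buffer offered to user space; entries wait on the
 * device's pending_mmaps list until the matching mmap() call arrives.
 */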
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
					   struct ib_udata *udata, void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
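/*
 * Direction of a payload copy relative to the memory object:
 * to_mem_obj writes data into the region (receive side),
 * from_mem_obj reads data out of it (send side).
 */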
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};

void rxe_mem_init_dma(struct rxe_pd *pd,
		      int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

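/* memory regions may be looked up by their local key (lkey) or remote key (rkey) */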
enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
/* rxe_net.c */
void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);
/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd, struct ib_udata *udata);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

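/*
 * Note that qp_mtu() returns an enum ib_mtu, not a byte count; RC and UC
 * QPs use the negotiated path MTU, all other QP types assume the
 * 4096-byte maximum.
 */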
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return IB_MTU_4096;
}

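/* size of a receive WQE including its inline array of max_sge SGEs */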
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

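/*
 * The responder's rd/atomic resources form a circular ring sized by the
 * negotiated max_dest_rd_atomic, so the head index wraps at that bound.
 */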
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
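/* every SRQ attribute except the limit is checked at create time */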
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);

void rxe_dealloc(struct ib_device *ib_dev);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

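/*
 * Look up the opcode's behavior mask for this QP's type; a zero mask
 * means the work request opcode is not valid for this type of QP.
 */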
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

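/*
 * Transmit one packet on behalf of a QP. Loopback packets are handed
 * straight back to the receive path; everything else goes out through
 * rxe_send(). A packet sent while the QP is not ready is silently
 * dropped, and for non-RC QPs the completion task is kicked once the
 * final packet of a WQE has been posted, since no ACK will arrive.
 */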
static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
				  struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		rxe_loopback(skb);
		err = 0;
	} else {
		err = rxe_send(pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}
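/*
 * A minimal caller sketch (illustrative only, label names are
 * hypothetical): the requester builds the rxe_pkt_info/skb pair and
 * treats a non-zero return as a transmit failure to be retried later:
 *
 *	err = rxe_xmit_packet(qp, &pkt, skb);
 *	if (err)
 *		goto err_requeue;	// leave the WQE pending for retry
 */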

#endif /* RXE_LOC_H */