/*
 * Copyright (c) 2012 - 2018 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/rdmavt_cq.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT	5
#define QIB_PSN_SHIFT		8

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG		cpu_to_be16(0xFFA0)

#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
	switch (vls) {
	default:
	case IB_VL_VL0:
		return 1;
	case IB_VL_VL0_1:
		return 2;
	case IB_VL_VL0_3:
		return 4;
	case IB_VL_VL0_7:
		return 8;
	case IB_VL_VL0_14:
		return 15;
	}
}
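
/*
 * Usage sketch (illustrative only, not part of the driver): with the
 * VLCap/OperationalVLs encoding from PortInfo held in a hypothetical
 * variable "vl_cap", the number of data VLs it advertises is
 *
 *	int nvls = qib_num_vls(vl_cap);
 *
 * which yields 1, 2, 4, 8 or 15 VLs respectively.
 */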

struct qib_pio_header {
	__le32 pbc[2];
	struct ib_header hdr;
} __packed;
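
/*
 * Note: the structure is __packed so the two little-endian pbc words and
 * the IB header form one contiguous unit in memory; struct qib_ibdev
 * below keeps a DMA-mapped array of these (pio_hdrs / pio_hdrs_phys).
 */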

/*
 * qib specific data structure that will be hidden from rvt after the queue pair
 * is made common.
 */
struct qib_qp_priv {
	struct ib_header *s_hdr;        /* next packet header to send */
	struct list_head iowait;        /* link for wait PIO buf */
	atomic_t s_dma_busy;
	struct qib_verbs_txreq *s_tx;
	struct work_struct s_work;
	wait_queue_head_t wait_dma;
	struct rvt_qp *owner;
};

#define QIB_PSN_CREDIT  16

struct qib_opcode_stats {
	u64 n_packets;          /* number of packets */
	u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
	struct qib_opcode_stats stats[128];
};

struct qib_pma_counters {
	u64 n_unicast_xmit;     /* total unicast packets sent */
	u64 n_unicast_rcv;      /* total unicast packets received */
	u64 n_multicast_xmit;   /* total multicast packets sent */
	u64 n_multicast_rcv;    /* total multicast packets received */
};

struct qib_ibport {
	struct rvt_ibport rvp;
	struct rvt_ah *smi_ah;
	__be64 guids[QIB_GUIDS_PER_PORT	- 1];	/* writable GUIDs */
	struct qib_pma_counters __percpu *pmastats;
	u64 z_unicast_xmit;     /* starting count for PMA */
	u64 z_unicast_rcv;      /* starting count for PMA */
	u64 z_multicast_xmit;   /* starting count for PMA */
	u64 z_multicast_rcv;    /* starting count for PMA */
	u64 z_symbol_error_counter;             /* starting count for PMA */
	u64 z_link_error_recovery_counter;      /* starting count for PMA */
	u64 z_link_downed_counter;              /* starting count for PMA */
	u64 z_port_rcv_errors;                  /* starting count for PMA */
	u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
	u64 z_port_xmit_discards;               /* starting count for PMA */
	u64 z_port_xmit_data;                   /* starting count for PMA */
	u64 z_port_rcv_data;                    /* starting count for PMA */
	u64 z_port_xmit_packets;                /* starting count for PMA */
	u64 z_port_rcv_packets;                 /* starting count for PMA */
	u32 z_local_link_integrity_errors;      /* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
	u32 z_vl15_dropped;                     /* starting count for PMA */
	u8 sl_to_vl[16];
};
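
/*
 * The z_* fields above are baselines ("starting counts") captured when
 * the PMA counters are reset; a PMA query is reported as the current
 * hardware count minus the matching z_* value, so the hardware counters
 * themselves never have to be cleared.
 */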

struct qib_ibdev {
	struct rvt_dev_info rdi;

	struct list_head piowait;       /* list for wait PIO buf */
	struct list_head dmawait;	/* list for wait DMA */
	struct list_head txwait;        /* list for wait qib_verbs_txreq */
	struct list_head memwait;       /* list for wait kernel memory */
	struct list_head txreq_free;
	struct timer_list mem_timer;
	struct qib_pio_header *pio_hdrs;
	dma_addr_t pio_hdrs_phys;

	u32 n_piowait;
	u32 n_txwait;

#ifdef CONFIG_DEBUG_FS
	/* per HCA debugfs */
	struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
	struct rvt_dev_info *rdi;

	rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
	return container_of(rdi, struct qib_ibdev, rdi);
}
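
/*
 * The two container_of() steps above reflect the embedding: struct
 * ib_device lives inside struct rvt_dev_info, which in turn is the rdi
 * member of struct qib_ibdev.
 */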

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct rvt_qp *qp)
{
	return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
		(qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
		 !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}

bool _qib_schedule_send(struct rvt_qp *qp);
bool qib_schedule_send(struct rvt_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
	u16 p1 = pkey1 & 0x7FFF;
	u16 p2 = pkey2 & 0x7FFF;

	/*
	 * Low 15 bits must be non-zero and match, and
	 * one of the two must be a full member.
	 */
	return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
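
/*
 * In the check above, casting to __s16 and testing for a negative value
 * tests P_Key bit 15, the full-membership bit: at least one of the two
 * keys must have it set.
 */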

void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
		  u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		    const struct ib_mad *in, struct ib_mad *out,
		    size_t *out_mad_size, u16 *out_mad_pkey_index);
void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
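
/*
 * The shift by 8 discards the top 8 bits of the 32-bit difference, so
 * only the low 24 bits of (a - b) contribute; their sign ends up in the
 * int sign bit, giving a wrap-safe, serial-number style comparison of
 * 24-bit quantities such as PSNs.
 */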

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs);

/*
 * Functions provided by qib driver for rdmavt to use
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_notify_qp_reset(struct rvt_qp *qp);
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port);
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
#ifdef CONFIG_DEBUG_FS

void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);

#endif


unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len);

void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);

int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       bool *call_send);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);

int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp);

void mr_rcu_callback(struct rcu_head *list);

void qib_migrate_qp(struct rvt_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 const struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			 u32 bth0, u32 bth2);

void _qib_do_send(struct work_struct *work);

void qib_do_send(struct rvt_qp *qp);

void qib_send_rc_ack(struct rvt_qp *qp);

int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);

int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags);

int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are the HCA-independent IB PhysPortState values, returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_rvt_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_rvt_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern const struct rvt_operation_params qib_post_parms[];

#endif                          /* QIB_VERBS_H */