/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 */

#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>

#define RVT_MAX_PKEY_VALUES 16

#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */

struct trap_list {
	u32 list_len;
	struct list_head list;
};

struct rvt_qp;
struct rvt_qpn_table;
struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;
	spinlock_t lock;		/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	u64 tid;
	u32 port_cap_flags;
	u16 port_cap3_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u32 sm_lid;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u64 n_rc_crwaits;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per CPU counters to avoid cacheline trading to update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv; /* driver private data */

	/*
	 * The pkey table is allocated and maintained by the driver. Drivers
	 * need to have access to this before registering with rdmavt.
	 * However, rdmavt will also need access to it, so drivers must
	 * provide it during the attach port API call; see the sketch after
	 * this struct.
	 */
	u16 *pkey_table;

	struct rvt_ah *sm_ah;

	/*
	 * Keep a list of traps that have not been repressed. They will be
	 * resent based on trap_timer.
	 */
	struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
	struct timer_list trap_timer;
};
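
/*
 * Example (illustrative only): a minimal sketch of a driver handing its
 * pkey table to rdmavt when attaching a port. "struct my_port" and
 * my_attach_port() are hypothetical; rvt_init_port() is the real call
 * declared at the bottom of this header.
 *
 *	struct my_port {
 *		struct rvt_ibport rvp;
 *		u16 pkeys[RVT_MAX_PKEY_VALUES];
 *	};
 *
 *	static int my_attach_port(struct rvt_dev_info *rdi,
 *				  struct my_port *port, int port_index)
 *	{
 *		port->pkeys[0] = 0xffff;	// full-membership default
 *		return rvt_init_port(rdi, &port->rvp, port_index,
 *				     port->pkeys);
 *	}
 */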

#define RVT_CQN_MAX 16 /* maximum length of cq name */

#define RVT_SGE_COPY_MEMCPY 0
#define RVT_SGE_COPY_CACHELESS 1
#define RVT_SGE_COPY_ADAPTIVE 2

/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
	struct ib_device_attr props;

	/*
	 * Anything driver specific that is not covered by props, for
	 * instance special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
	unsigned int qp_table_size;
	unsigned int sge_copy_mode;
	unsigned int wss_threshold;
	unsigned int wss_clean_period;
	int qpn_start;
	int qpn_inc;
	int qpn_res_start;
	int qpn_res_end;
	int nports;
	int npkeys;
	int node;
	int psn_mask;
	int psn_shift;
	int psn_modify_mask;
	u32 core_cap_flags;
	u32 max_mad_size;
	u8 qos_shift;
	u8 max_rdma_atomic;
	u8 extra_rdma_atomic;
	u8 reserved_operations;
};
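
/*
 * Example (illustrative only): a minimal sketch of filling in dparms before
 * registration. The field names and RVT_SGE_COPY_* constants are real; the
 * values are hypothetical, not prescriptive.
 *
 *	rdi->dparms.qp_table_size = 256;
 *	rdi->dparms.lkey_table_size = 16;	// log2: 65536 LKEY entries
 *	rdi->dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
 *	rdi->dparms.nports = 1;
 *	rdi->dparms.npkeys = RVT_MAX_PKEY_VALUES;
 *	rdi->dparms.node = NUMA_NO_NODE;	// no NUMA preference
 */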

/* User context */
struct rvt_ucontext {
	struct ib_ucontext ibucontext;
};

/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	bool user;
};

/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
	u8 vl;
	u8 log_pmtu;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made. The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	u32 size;
};

/* memory working set size */
struct rvt_wss {
	unsigned long *entries;
	atomic_t total_count;
	atomic_t clean_counter;
	atomic_t clean_entry;

	int threshold;
	int num_entries;
	long pages_mask;
	unsigned int clean_period;
};

struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
	/*
	 * Which functions are required depends on which verbs rdmavt is
	 * providing and which verbs the driver is overriding. See
	 * check_support() for details.
	 */

	/* hot path calldowns in a single cacheline */

	/*
	 * Give the driver a notice that there is send work to do. It is up to
	 * the driver to generally push the packets out; this just queues the
	 * work with the driver. There are two variants here. The no_lock
	 * version requires the s_lock not to be held. The other assumes the
	 * s_lock is held.
	 */
	bool (*schedule_send)(struct rvt_qp *qp);
	bool (*schedule_send_no_lock)(struct rvt_qp *qp);

	/*
	 * Driver specific work request setup and checking.
	 * This function is allowed to perform any setup, checks, or
	 * adjustments required to the SWQE in order to be usable by
	 * underlying protocols. This includes private data structure
	 * allocations.
	 */
	int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
			 bool *call_send);

	/*
	 * Sometimes rdmavt needs to kick the driver's send progress. That is
	 * done by this call back.
	 */
	void (*do_send)(struct rvt_qp *qp);

	/*
	 * Returns a pointer to the underlying hardware's PCI device. This is
	 * used to display information as to what hardware is being referenced
	 * in an output message.
	 */
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*
	 * Allocate a private queue pair data structure for driver specific
	 * information which is opaque to rdmavt. Errors are returned via
	 * ERR_PTR(err). The driver is free to return NULL or a valid
	 * pointer.
	 */
	void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Initialize a structure allocated with qp_priv_alloc(). This should
	 * be called after all qp fields have been initialized in rdmavt.
	 */
	int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			    struct ib_qp_init_attr *init_attr);

	/*
	 * Free the driver's private qp structure.
	 */
	void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

	/*
	 * Inform the driver the particular qp in question has been reset so
	 * that it can clean up anything it needs to.
	 */
	void (*notify_qp_reset)(struct rvt_qp *qp);

	/*
	 * Get a path mtu from the driver based on qp attributes.
	 */
	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
				  struct ib_qp_attr *attr);

	/*
	 * Notify driver that it needs to flush any outstanding IO requests
	 * that are waiting on a qp.
	 */
	void (*flush_qp_waiters)(struct rvt_qp *qp);

	/*
	 * Notify driver to stop its queue of sending packets. Nothing else
	 * should be posted to the queue pair after this has been called.
	 */
	void (*stop_send_queue)(struct rvt_qp *qp);

	/*
	 * Have the driver drain any in progress operations.
	 */
	void (*quiesce_qp)(struct rvt_qp *qp);

	/*
	 * Inform the driver a qp has gone to the error state.
	 */
	void (*notify_error_qp)(struct rvt_qp *qp);

	/*
	 * Get an MTU for a qp.
	 */
	u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   u32 pmtu);
	/*
	 * Convert an mtu to a path mtu.
	 */
	int (*mtu_to_path_mtu)(u32 mtu);

	/*
	 * Get the guid of a port in big endian byte order.
	 */
	int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid);

	/*
	 * Query driver for the state of the port.
	 */
	int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
				struct ib_port_attr *props);

	/*
	 * Tell driver to shut down a port.
	 */
	int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);

	/* Tell driver to send a trap for changed port capabilities */
	void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);

	/*
	 * The following functions can be safely ignored completely. Any use
	 * of these is checked for NULL before blindly calling. Rdmavt should
	 * also be functional if drivers omit these.
	 */

	/* Called to inform the driver that all qps should now be freed. */
	unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

	/* Driver specific AH validation */
	int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

	/* Inform the driver a new AH has been created */
	void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
			      struct rvt_ah *);

	/* Let the driver pick the next queue pair number */
	int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
			 enum ib_qp_type type, u8 port_num);

	/* Determine if it is safe or allowed to modify the qp */
	int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			       int attr_mask, struct ib_udata *udata);

	/* Driver specific QP modification/notification-of */
	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_udata *udata);

	/* Notify driver a mad agent has been created */
	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver a mad agent has been removed */
	void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

	/* Notify driver to restart rc */
	void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);

	/* Get and return CPU to pin CQ processing thread */
	int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);
};
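
/*
 * Example (illustrative only): a minimal sketch of wiring up the mandatory
 * hot-path calldowns before registering. The driver_f fields are real; the
 * my_*() functions are hypothetical driver routines.
 *
 *	rdi->driver_f.schedule_send = my_schedule_send;
 *	rdi->driver_f.schedule_send_no_lock = my_schedule_send_no_lock;
 *	rdi->driver_f.do_send = my_do_send;
 *	rdi->driver_f.qp_priv_alloc = my_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = my_qp_priv_free;
 *	// Optional calldowns such as check_ah may stay NULL; rdmavt
 *	// checks for NULL before calling them.
 */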

struct rvt_dev_info {
	struct ib_device ibdev; /* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration, the driver is responsible for
	 * allocating space for this structure.
	 *
	 * The driver is also responsible for filling in certain members of
	 * dparms.props. The driver needs to fill in dparms exactly as it
	 * would want values reported to a ULP. This will be returned to the
	 * caller in rdmavt's device. The driver should therefore refrain
	 * from modifying this directly after registration with rdmavt.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	/* post send table */
	const struct rvt_operation_params *post_parms;

	/* opcode translation table */
	const enum ib_wc_opcode *wc_opcode;

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock; /* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock; /* Protect ah allocated count */

	u32 n_srqs_allocated;
	spinlock_t n_srqs_lock; /* Protect srqs allocated count */

	int flags;
	struct rvt_ibport **ports;

	/* QP */
	struct rvt_qp_ibdev *qp_dev;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	u32 n_rc_qps;		/* number of RC QPs allocated for device */
	u32 busy_jiffies;	/* timeout scaling based on RC QP count */
	spinlock_t n_qps_lock;	/* protect qps, rc qps and busy jiffy counts */

	/* memory maps */
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock; /* protect mmap_offset */
	u32 mmap_offset;
	spinlock_t pending_lock; /* protect pending mmap list */

	/* CQ */
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;	/* protect count of in use cqs */

	/* Multicast */
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	/* Memory Working Set Size */
	struct rvt_wss *wss;
};
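
/*
 * Example (illustrative only): since ibdev must remain the first member, a
 * driver can embed rvt_dev_info at the top of its own device structure and
 * recover it with container_of(). "struct my_devdata" is hypothetical.
 *
 *	struct my_devdata {
 *		struct rvt_dev_info rdi;	// must be first
 *		// ... driver private state ...
 *	};
 *
 *	static inline struct my_devdata *rdi_to_dd(struct rvt_dev_info *rdi)
 *	{
 *		return container_of(rdi, struct my_devdata, rdi);
 *	}
 */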

/**
 * rvt_set_ibdev_name - Craft an IB device name from client info
 * @rdi: pointer to the client rvt_dev_info structure
 * @fmt: format string used to build the name
 * @name: client specific name
 * @unit: client specific unit number.
 */
static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
				      const char *fmt, const char *name,
				      const int unit)
{
	/*
	 * FIXME: rvt and its users want to touch the ibdev before
	 * registration and have things like the name work. We don't have the
	 * infrastructure in the core to support this directly today, hack it
	 * to work by setting the name manually here.
	 */
	dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
	strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
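
/*
 * Example (illustrative only): a driver named "hfi1" registering unit 0
 * would get the device name "hfi1_0"; the format string is the driver's
 * choice.
 *
 *	rvt_set_ibdev_name(rdi, "%s_%d", "hfi1", 0);
 */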

/**
 * rvt_get_ibdev_name - return the IB name
 * @rdi: rdmavt device
 *
 * Return the registered name of the device.
 */
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
	return dev_name(&rdi->ibdev.dev);
}

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
	return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
	/*
	 * All ports have the same number of pkeys.
	 */
	return rdi->dparms.npkeys;
}

/*
 * Return the max atomic suitable for determining
 * the size of the ack ring buffer in a QP.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic +
	       rdi->dparms.extra_rdma_atomic + 1;
}

static inline unsigned int rvt_size_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic +
	       rdi->dparms.extra_rdma_atomic;
}
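
/*
 * Worked example (illustrative only): with hypothetical dparms values
 * max_rdma_atomic = 16 and extra_rdma_atomic = 1, rvt_size_atomic() yields
 * 17 responder resources while rvt_max_atomic() yields 18, the extra slot
 * sizing the QP's ack ring buffer.
 */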

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
			       int port_index,
			       unsigned index)
{
	if (index >= rvt_get_npkeys(rdi))
		return 0;
	else
		return rdi->ports[port_index]->pkey_table[index];
}
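
/*
 * Example (illustrative only): fetching the default (index 0) pkey of the
 * first port. Note that an out-of-range index returns 0, not an error.
 *
 *	u16 pkey = rvt_get_pkey(rdi, 0, 0);
 */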

struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid);
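
/*
 * Example (illustrative only): a minimal sketch of the registration
 * lifetime using the entry points above, assuming a hypothetical
 * "struct my_devdata" that embeds rvt_dev_info as its first member.
 *
 *	struct rvt_dev_info *rdi;
 *
 *	rdi = rvt_alloc_device(sizeof(struct my_devdata), 1);
 *	if (!rdi)
 *		return -ENOMEM;
 *	// fill in rdi->dparms and rdi->driver_f, init each port ...
 *	if (rvt_register_device(rdi)) {
 *		rvt_dealloc_device(rdi);	// registration failed
 *		return -EINVAL;
 *	}
 *	...
 *	rvt_unregister_device(rdi);
 *	rvt_dealloc_device(rdi);
 */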

#endif          /* DEF_RDMA_VT_H */