/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)
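
/*
 * Illustrative use of the logging helpers above (hypothetical message):
 *
 *	mlx5_ib_dbg(dev, "create QP failed, err %d\n", err);
 *
 * Each message is prefixed with the function name, source line and the PID
 * of the calling process.
 */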

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT = 8,
	MLX5_IB_MMAP_CMD_MASK = 0xff,
};
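
/*
 * A user-space mmap offset encodes a command in its upper bits and a
 * command-specific argument in the low bits; elsewhere in the driver
 * (get_command()/get_index() in main.c) it is decoded roughly as:
 *
 *	cmd = (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *	idx = offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 */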

enum {
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW = 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
	MLX5_TM_MAX_SGE = 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX = BIT(31),
	MLX5_IB_INVALID_BFREG = BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN = 6,
	MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;

	u64 lib_caps;
	u16 devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
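
/*
 * All of the to_*() helpers in this file follow the same pattern: use
 * container_of() to map an IB core object to the mlx5 structure that
 * embeds it.
 */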

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u16 uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS 2
#define MLX5_IB_NUM_EGRESS_FTS 1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_handle *rule;
	struct ib_counters *ibcounters;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_flow_matcher *flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int mask_len;
	enum mlx5_ib_flow_type flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16 priority;
	struct mlx5_core_dev *mdev;
	atomic_t usecnt;
	u8 match_criteria_enable;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio fdb;
	struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect the flow steering bypass flow tables
	 * when flow rules are added/deleted.
	 * Only a single add/removal of a flow steering rule can be
	 * done at a time.
	 */
	struct mutex lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD 16
#define MLX5_IB_UMR_XLT_ALIGNMENT 64

#define MLX5_IB_UPD_XLT_ZAP BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
#define MLX5_IB_UPD_XLT_ADDR BIT(3)
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
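
/*
 * The XLT update flags above are passed to mlx5_ib_update_xlt() and may be
 * combined; for example, the ODP invalidation path typically zaps entries
 * with MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC (illustrative, see
 * odp.c).
 */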

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16 opcode;
	u16 next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
	void *cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	struct mlx5_core_qp core_qp;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	u32 log_num_strides;
	u32 two_byte_shift_en;
	u32 single_stride_log_num_of_bytes;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
	u16 uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
	u32 flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	struct mlx5_flow_handle *flow_rule;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int buf_size;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct mdct;
	u32 *in;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp *rx_qp;
	u8 port_num;
	struct ib_qp_cap cap;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 next_fence;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */
	struct mutex mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32 flags;
	u8 port;
	u8 state;
	int max_inline_data;
	struct mlx5_bf bf;
	u8 has_rq:1;
	u8 is_rss:1;

	/* Only for user-space QPs. For kernel QPs
	 * we get it from the bf object.
	 */
	int bfregn;

	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;
	u32 underlay_qpn;
	u32 flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 * IB_QPT_DRIVER is broken down into the DCI/DCT subtypes.
	 */
	enum ib_qp_type type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect.
	 */
	u32 counter_pending;
	u16 gsi_lag_port;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	u64 virt_addr;
	u64 offset;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int xlt_size;
	u64 length;
	int access_flags;
	u32 mkey;
	u8 ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem *umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ
	 */
	spinlock_t lock;

	/* protect resize cq
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
	u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming an SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

struct mlx5_ib_dm {
	struct ib_dm ibdm;
	phys_addr_t dev_addr;
	u32 type;
	size_t size;
	union {
		struct {
			u32 obj_id;
		} icm_dm;
		/* other dm types specific params should be added here */
	};
	struct mlx5_user_mmap_entry mentry;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
					 IB_ACCESS_REMOTE_WRITE |\
					 IB_ACCESS_REMOTE_READ |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
					  IB_ACCESS_REMOTE_WRITE |\
					  IB_ACCESS_REMOTE_READ |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value) \
	atomic64_add(value, &((mr)->odp_stats.counter_name))
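
/*
 * Illustrative use (counter names live in struct ib_odp_counters), e.g. as
 * in the page-fault path in odp.c:
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 */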

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
	int data_length;
	int meta_ndescs;
	int meta_length;
	int max_descs;
	int desc_size;
	int access_mode;
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
	unsigned int order;
	struct mlx5_cache_ent *cache_ent;
	int npages;
	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr *parent;
	/* Needed for IB_MR_TYPE_INTEGRITY */
	struct mlx5_ib_mr *pi_mr;
	struct mlx5_ib_mr *klm_mr;
	struct mlx5_ib_mr *mtt_mr;
	u64 data_iova;
	u64 pi_iova;

	/* For ODP and implicit */
	atomic_t num_deferred_work;
	wait_queue_head_t q_deferred_work;
	struct xarray implicit_children;
	union {
		struct rcu_head rcu;
		struct list_head elm;
		struct work_struct work;
	} odp_destroy;
	struct ib_odp_counters odp_stats;
	bool is_odp_implicit;

	struct mlx5_async_work cb_work;
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
	int ndescs;
};

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey mmkey;
	int ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to the UMR QP
	 */
	struct semaphore sem;
};

struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */
	spinlock_t lock;

	char name[4];
	u32 order;
	u32 xlt;
	u32 access_mode;
	u32 page;

	u8 disabled:1;
	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of the list head, i.e. the number
	 *   of MRs available for immediate allocation.
	 * - total_mrs is available_mrs plus all in-use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2 * limit is the
	 *   upper water mark.
	 * - pending is the number of MRs currently being created.
	 */
	u32 total_mrs;
	u32 available_mrs;
	u32 limit;
	u32 pending;

	/* Statistics */
	u32 miss;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	struct dentry *root;
	unsigned long last_add;
};

struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
	atomic_t tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev *dev;
	u8 native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep *rep;
};

struct mlx5_ib_dbg_param {
	int offset;
	struct mlx5_ib_dev *dev;
	struct dentry *dentry;
	u8 port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry *root;
	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex lock;
	u32 timeout;
	bool activate;
	atomic_t events_cnt;
	atomic_t rqs_cnt;
	struct dentry *dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}
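
/*
 * Illustrative use when defining a profile (see the profile tables in
 * main.c; the stage/function names here are examples):
 *
 *	STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *		     mlx5_ib_stage_init_init,
 *		     mlx5_ib_stage_init_cleanup),
 */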

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action ib_action;
	union {
		struct {
			u64 ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex mutex;
	u32 user_td;
	int qps;
	bool enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	int num_ports;
	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;
	u8 ib_active:1;
	u8 is_rep:1;
	u8 lag_active:1;
	u8 wc_support:1;
	u8 fill_delay;
	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;

	atomic_t mkey_var;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	struct ib_odp_caps odp_caps;
	u64 odp_max_size;
	struct mlx5_ib_pf_eq odp_pf_eq;

	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct odp_srcu;
	struct xarray odp_mkeys;

	u32 null_mkey;
	struct mlx5_ib_flow_db *flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg wc_bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	const struct mlx5_ib_profile *profile;

	struct mlx5_ib_lb_state lb;
	u8 umr_fence;
	struct list_head ib_dev_list;
	u64 sys_image_guid;
	struct mlx5_dm dm;
	u16 devx_whitelist_uid;
	struct mlx5_srq_table srq_table;
	struct mlx5_qp_table qp_table;
	struct mlx5_async_ctx async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
			struct ib_udata *udata, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
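/*
 * mlx5 address handles are purely software objects, so destroying one
 * requires no device command and is a no-op here.
 */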
mlx5_ib_destroy_ah(struct ib_ah * ah,u32 flags)1136*4882a593Smuzhiyun static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
1137*4882a593Smuzhiyun {
1138*4882a593Smuzhiyun return 0;
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
1141*4882a593Smuzhiyun struct ib_udata *udata);
1142*4882a593Smuzhiyun int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1143*4882a593Smuzhiyun enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
1144*4882a593Smuzhiyun int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
1145*4882a593Smuzhiyun int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
1146*4882a593Smuzhiyun int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1147*4882a593Smuzhiyun const struct ib_recv_wr **bad_wr);
1148*4882a593Smuzhiyun int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1149*4882a593Smuzhiyun void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1150*4882a593Smuzhiyun struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1151*4882a593Smuzhiyun struct ib_qp_init_attr *init_attr,
1152*4882a593Smuzhiyun struct ib_udata *udata);
1153*4882a593Smuzhiyun int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1154*4882a593Smuzhiyun int attr_mask, struct ib_udata *udata);
1155*4882a593Smuzhiyun int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
1156*4882a593Smuzhiyun struct ib_qp_init_attr *qp_init_attr);
1157*4882a593Smuzhiyun int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
1158*4882a593Smuzhiyun void mlx5_ib_drain_sq(struct ib_qp *qp);
1159*4882a593Smuzhiyun void mlx5_ib_drain_rq(struct ib_qp *qp);
1160*4882a593Smuzhiyun int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1161*4882a593Smuzhiyun size_t buflen, size_t *bc);
1162*4882a593Smuzhiyun int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1163*4882a593Smuzhiyun size_t buflen, size_t *bc);
1164*4882a593Smuzhiyun int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
1165*4882a593Smuzhiyun size_t buflen, size_t *bc);
1166*4882a593Smuzhiyun int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1167*4882a593Smuzhiyun struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     struct ib_udata *udata,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

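/*
 * MR cache: pools of pre-created mkeys, grouped by cache entry, that can
 * be handed out without waiting for firmware on the allocation path.
 * mlx5_mr_cache_invalidate() revokes the mkey's current translation so
 * the MR can safely be reused or returned to the cache.
 */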
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry, int access_flags);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

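/*
 * On-demand paging (ODP). When CONFIG_INFINIBAND_ON_DEMAND_PAGING is
 * disabled, the stubs below compile the feature away: init/cleanup become
 * no-ops and the prefetch/ODP-MR entry points return -EOPNOTSUPP.
 */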
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) {}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];

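/*
 * Prepare an SMP MAD as a LID-routed SubnGet() query; the caller is
 * expected to set the attribute ID (and modifier) before issuing it.
 */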
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

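/* QP1 is the GSI QP: either the IB-spec IB_QPT_GSI or mlx5's internal HW GSI type */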
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

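/*
 * Upper bound on the number of pages a single UMR operation can describe;
 * used by mlx5_ib_can_load_pas_with_umr() below.
 */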
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags and
	 * zero otherwise.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

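/*
 * With CQE version 1 the command must carry a real index: the default
 * sentinel is rejected and the value must fit within the qpc user_index
 * mask. With CQE version 0 the index is forced to MLX5_IB_DEFAULT_UIDX.
 */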
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

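/*
 * A create command long enough to carry the uidx field must come from a
 * context that negotiated CQE version 1, and vice versa. The only
 * exception is a legacy (version 0) command that explicitly carries the
 * default index; any other mismatch is rejected with -EINVAL.
 */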
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

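/* Same validation as get_qp_user_index(), for the SRQ create command */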
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

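/*
 * With the uar_4k capability (and a userspace library that supports it),
 * several 4K UARs share one system page; otherwise each system page holds
 * a single UAR. get_num_static_uars() scales this by the number of
 * statically allocated system pages.
 */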
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

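/*
 * True when UMR may be used to populate or update the page address list
 * (PAS) of an MR of the given length; see the capability checks below.
 */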
static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
						 size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a
	 * mkey can never be enabled without this capability. Simplify
	 * handling of this quirky hardware by saying it cannot use PAS
	 * lists with UMR at all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}

/*
 * True if an existing MR can be reconfigured to new access_flags using UMR.
 * Older HW cannot use UMR to update certain elements of the MKC. See
 * umr_check_mkey_mask() and get_umr_update_access_mask().
 */
static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
						 unsigned int current_access_flags,
						 unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}
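
/*
 * Illustrative call pattern for mlx5_ib_can_reconfig_with_umr() above.
 * This is a sketch only: umr_rewrite_access() and recreate_mkey() are
 * hypothetical helpers, not driver functions. A rereg path keeps the
 * existing mkey only when UMR can apply the access-flag change:
 *
 *	if (mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags, new_flags))
 *		err = umr_rewrite_access(dev, mr, new_flags);
 *	else
 *		err = recreate_mkey(dev, mr, new_flags);
 */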

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

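/*
 * Tx port affinity should be assigned when LAG is already active or the
 * device exposes multiple LAG ports with the tx port affinity capability.
 */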
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	return dev->lag_active ||
	       (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}
#endif /* MLX5_IB_H */