xref: /OK3568_Linux_fs/kernel/include/uapi/rdma/mlx5-abi.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * This software is available to you under a choice of one of two
6*4882a593Smuzhiyun  * licenses.  You may choose to be licensed under the terms of the GNU
7*4882a593Smuzhiyun  * General Public License (GPL) Version 2, available from the file
8*4882a593Smuzhiyun  * COPYING in the main directory of this source tree, or the
9*4882a593Smuzhiyun  * OpenIB.org BSD license below:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *     Redistribution and use in source and binary forms, with or
12*4882a593Smuzhiyun  *     without modification, are permitted provided that the following
13*4882a593Smuzhiyun  *     conditions are met:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *      - Redistributions of source code must retain the above
16*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
17*4882a593Smuzhiyun  *        disclaimer.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *      - Redistributions in binary form must reproduce the above
20*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
21*4882a593Smuzhiyun  *        disclaimer in the documentation and/or other materials
22*4882a593Smuzhiyun  *        provided with the distribution.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31*4882a593Smuzhiyun  * SOFTWARE.
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #ifndef MLX5_ABI_USER_H
35*4882a593Smuzhiyun #define MLX5_ABI_USER_H
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include <linux/types.h>
38*4882a593Smuzhiyun #include <linux/if_ether.h>	/* For ETH_ALEN. */
39*4882a593Smuzhiyun #include <rdma/ib_user_ioctl_verbs.h>
40*4882a593Smuzhiyun 
/*
 * QP creation flags, OR-ed into mlx5_ib_create_qp.flags.
 * Bit values are ABI and must never be renumbered.
 */
enum {
	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
	MLX5_QP_FLAG_TUNNEL_OFFLOADS	= 1 << 2,
	MLX5_QP_FLAG_BFREG_INDEX	= 1 << 3,
	MLX5_QP_FLAG_TYPE_DCT		= 1 << 4,
	MLX5_QP_FLAG_TYPE_DCI		= 1 << 5,
	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
	MLX5_QP_FLAG_ALLOW_SCATTER_CQE	= 1 << 8,
	MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE	= 1 << 9,
	MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
};

/* SRQ creation flags, OR-ed into mlx5_ib_create_srq.flags. */
enum {
	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0,
};

/* WQ creation flags — presumably for mlx5_ib_create_wq.flags; confirm against driver. */
enum {
	MLX5_WQ_FLAG_SIGNATURE		= 1 << 0,
};
62*4882a593Smuzhiyun 
/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX5_IB_UVERBS_ABI_VERSION	1

/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
74*4882a593Smuzhiyun 
/* Legacy (v1) ucontext allocation request: bfreg (blue-flame register) counts. */
struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
};

/* Userspace library capability bits, reported via lib_caps below. */
enum mlx5_lib_caps {
	MLX5_LIB_CAP_4K_UAR	= (__u64)1 << 0,
	MLX5_LIB_CAP_DYN_UAR	= (__u64)1 << 1,
};

/* Flags for mlx5_ib_alloc_ucontext_req_v2.flags. */
enum mlx5_ib_alloc_uctx_v2_flags {
	MLX5_IB_ALLOC_UCTX_DEVX	= 1 << 0,
};

/* v2 ucontext allocation request; extends v1 with flags and capabilities. */
struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
	__u32	flags;
	__u32	comp_mask;
	__u8	max_cqe_version;
	__u8	reserved0;
	__u16	reserved1;
	__u32	reserved2;
	__aligned_u64 lib_caps;	/* OR of enum mlx5_lib_caps */
};
99*4882a593Smuzhiyun 
/* Bits for mlx5_ib_alloc_ucontext_resp.comp_mask: which optional
 * response fields the kernel filled in.
 */
enum mlx5_ib_alloc_ucontext_resp_mask {
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY    = 1UL << 1,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE               = 1UL << 2,
};

/* Bits for mlx5_ib_alloc_ucontext_resp.cmds_supp_uhw: verbs commands
 * that accept vendor-specific (UHW) input/output.
 */
enum mlx5_user_cmds_supp_uhw {
	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
};

/* The eth_min_inline response value is set to off-by-one vs the FW
 * returned value to allow user-space to deal with older kernels.
 */
enum mlx5_user_inline_mode {
	MLX5_USER_INLINE_MODE_NA,
	MLX5_USER_INLINE_MODE_NONE,
	MLX5_USER_INLINE_MODE_L2,
	MLX5_USER_INLINE_MODE_IP,
	MLX5_USER_INLINE_MODE_TCP_UDP,
};

/* Bits for mlx5_ib_alloc_ucontext_resp.flow_action_flags. */
enum {
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
129*4882a593Smuzhiyun 
/* Response to ucontext allocation. Optional trailing fields are only
 * valid when the corresponding comp_mask bit (see
 * enum mlx5_ib_alloc_ucontext_resp_mask) is set and response_length
 * covers them.
 */
struct mlx5_ib_alloc_ucontext_resp {
	__u32	qp_tab_size;
	__u32	bf_reg_size;
	__u32	tot_bfregs;
	__u32	cache_line_size;
	__u16	max_sq_desc_sz;
	__u16	max_rq_desc_sz;
	__u32	max_send_wqebb;
	__u32	max_recv_wr;
	__u32	max_srq_recv_wr;
	__u16	num_ports;
	__u16	flow_action_flags;	/* see the ESP_AES_GCM flag enum above */
	__u32	comp_mask;		/* enum mlx5_ib_alloc_ucontext_resp_mask */
	__u32	response_length;
	__u8	cqe_version;
	__u8	cmds_supp_uhw;		/* enum mlx5_user_cmds_supp_uhw */
	__u8	eth_min_inline;		/* enum mlx5_user_inline_mode (off-by-one vs FW) */
	__u8	clock_info_versions;	/* bitmap; bit index MLX5_IB_CLOCK_INFO_V1 */
	__aligned_u64 hca_core_clock_offset;
	__u32	log_uar_size;
	__u32	num_uars_per_page;
	__u32	num_dyn_bfregs;
	__u32	dump_fill_mkey;
};

/* Response to PD allocation: the protection domain number. */
struct mlx5_ib_alloc_pd_resp {
	__u32	pdn;
};
158*4882a593Smuzhiyun 
/* TSO capabilities, reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_tso_caps {
	__u32 max_tso; /* Maximum tso payload size in bytes */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	__u32 supported_qpts;
};

/* RSS capabilities, reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_rss_caps {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 reserved[7];
};

/* Supported CQE compression result formats (bitmask). */
enum mlx5_ib_cqe_comp_res_format {
	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
	MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};

/* CQE compression capabilities. */
struct mlx5_ib_cqe_comp_caps {
	__u32 max_num;
	__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};

/* Bits for mlx5_packet_pacing_caps.cap_flags. */
enum mlx5_ib_packet_pacing_cap_flags {
	MLX5_IB_PP_SUPPORT_BURST	= 1 << 0,
};

/* Packet-pacing (rate limiting) capabilities. */
struct mlx5_packet_pacing_caps {
	__u32 qp_rate_limit_min;
	__u32 qp_rate_limit_max; /* In kpbs */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u8  cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
	__u8  reserved[3];
};

/* Multi-packet WQE capability bits. */
enum mlx5_ib_mpw_caps {
	MPW_RESERVED		= 1 << 0,
	MLX5_IB_ALLOW_MPW	= 1 << 1,
	MLX5_IB_SUPPORT_EMPW	= 1 << 2,
};
208*4882a593Smuzhiyun 
/* Software parsing offload bits. */
enum mlx5_ib_sw_parsing_offloads {
	MLX5_IB_SW_PARSING = 1 << 0,
	MLX5_IB_SW_PARSING_CSUM = 1 << 1,
	MLX5_IB_SW_PARSING_LSO = 1 << 2,
};

/* Software parsing capabilities. */
struct mlx5_ib_sw_parsing_caps {
	__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
};

/* Striding RQ capabilities (log2 bounds on stride size and count). */
struct mlx5_ib_striding_rq_caps {
	__u32 min_single_stride_log_num_of_bytes;
	__u32 max_single_stride_log_num_of_bytes;
	__u32 min_single_wqe_log_num_of_strides;
	__u32 max_single_wqe_log_num_of_strides;

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u32 reserved;	/* explicit pad keeps 32/64-bit layout identical */
};

/* Bits for mlx5_ib_query_device_resp.flags. */
enum mlx5_ib_query_dev_resp_flags {
	/* Support 128B CQE compression */
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD  = 1 << 1,
	MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
	MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};

/* Tunnel stateless-offload bits. */
enum mlx5_ib_tunnel_offloads {
	MLX5_IB_TUNNELED_OFFLOADS_VXLAN  = 1 << 0,
	MLX5_IB_TUNNELED_OFFLOADS_GRE    = 1 << 1,
	MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
254*4882a593Smuzhiyun 
/* Vendor part of the query-device response (UHW output). */
struct mlx5_ib_query_device_resp {
	__u32	comp_mask;
	__u32	response_length;
	struct	mlx5_ib_tso_caps tso_caps;
	struct	mlx5_ib_rss_caps rss_caps;
	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
	struct	mlx5_packet_pacing_caps packet_pacing_caps;
	__u32	mlx5_ib_support_multi_pkt_send_wqes;
	__u32	flags; /* Use enum mlx5_ib_query_dev_resp_flags */
	struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
	struct mlx5_ib_striding_rq_caps striding_rq_caps;
	__u32	tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
	__u32	reserved;
};

/* Bits for mlx5_ib_create_cq.flags. */
enum mlx5_ib_create_cq_flags {
	MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD	= 1 << 0,
	MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX  = 1 << 1,
};
274*4882a593Smuzhiyun 
/* CQ creation request. buf_addr/db_addr are userspace virtual
 * addresses passed as __u64 per the layout rule above.
 */
struct mlx5_ib_create_cq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	cqe_size;
	__u8    cqe_comp_en;
	__u8    cqe_comp_res_format;	/* enum mlx5_ib_cqe_comp_res_format */
	__u16	flags;			/* enum mlx5_ib_create_cq_flags */
	__u16	uar_page_index;
	__u16	reserved0;
	__u32	reserved1;
};

/* CQ creation response. */
struct mlx5_ib_create_cq_resp {
	__u32	cqn;
	__u32	reserved;
};

/* CQ resize request. */
struct mlx5_ib_resize_cq {
	__aligned_u64 buf_addr;
	__u16	cqe_size;
	__u16	reserved0;
	__u32	reserved1;
};

/* SRQ creation request. */
struct mlx5_ib_create_srq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	flags;	/* MLX5_SRQ_FLAG_* */
	__u32	reserved0; /* explicit padding (optional on i386) */
	__u32	uidx;
	__u32	reserved1;
};

/* SRQ creation response. */
struct mlx5_ib_create_srq_resp {
	__u32	srqn;
	__u32	reserved;
};
312*4882a593Smuzhiyun 
/* QP creation request. The union reuses the SQ buffer slot: access_key
 * presumably applies to DCT QPs, which have no SQ buffer — confirm
 * against the driver's create-QP path.
 */
struct mlx5_ib_create_qp {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	flags;		/* MLX5_QP_FLAG_* */
	__u32	uidx;
	__u32	bfreg_index;	/* valid when MLX5_QP_FLAG_BFREG_INDEX is set */
	union {
		__aligned_u64 sq_buf_addr;
		__aligned_u64 access_key;
	};
	__u32  ece_options;
	__u32  reserved;
};
329*4882a593Smuzhiyun 
/* RX Hash function flags */
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};

/*
 * RX Hash flags. These flags allow setting which incoming packet field
 * participates in RX Hash. Each flag represents a certain packet field;
 * when the flag is set, the field it represents will participate in the
 * RX Hash calculation.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP,
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 */
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4	= 1 << 0,
	MLX5_RX_HASH_DST_IPV4	= 1 << 1,
	MLX5_RX_HASH_SRC_IPV6	= 1 << 2,
	MLX5_RX_HASH_DST_IPV6	= 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7,
	MLX5_RX_HASH_IPSEC_SPI		= 1 << 8,
	/* Save bits for future fields */
	MLX5_RX_HASH_INNER		= (1UL << 31),
};
356*4882a593Smuzhiyun 
/* RSS QP creation request. */
struct mlx5_ib_create_qp_rss {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 rx_key_len; /* valid only for Toeplitz */
	__u8 reserved[6];
	__u8 rx_hash_key[128]; /* valid only for Toeplitz */
	__u32   comp_mask;
	__u32	flags;
};

/* Bits for mlx5_ib_create_qp_resp.comp_mask: which optional object
 * numbers below are valid.
 */
enum mlx5_ib_create_qp_resp_mask {
	MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
	MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
	MLX5_IB_CREATE_QP_RESP_MASK_RQN  = 1UL << 2,
	MLX5_IB_CREATE_QP_RESP_MASK_SQN  = 1UL << 3,
	MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR  = 1UL << 4,
};

/* QP creation response. tir_icm_addr is at offset 32 (eight __u32s),
 * so plain __u64 is already naturally aligned here.
 */
struct mlx5_ib_create_qp_resp {
	__u32	bfreg_index;
	__u32   ece_options;
	__u32	comp_mask;	/* enum mlx5_ib_create_qp_resp_mask */
	__u32	tirn;
	__u32	tisn;
	__u32	rqn;
	__u32	sqn;
	__u32   reserved1;
	__u64	tir_icm_addr;
};

/* Memory-window allocation request. */
struct mlx5_ib_alloc_mw {
	__u32	comp_mask;
	__u8	num_klms;
	__u8	reserved1;
	__u16	reserved2;
};
393*4882a593Smuzhiyun 
/* Bits for mlx5_ib_create_wq.comp_mask. */
enum mlx5_ib_create_wq_mask {
	MLX5_IB_CREATE_WQ_STRIDING_RQ	= (1 << 0),
};

/* WQ creation request. The single_stride/single_wqe fields are only
 * meaningful with MLX5_IB_CREATE_WQ_STRIDING_RQ set in comp_mask —
 * presumably; confirm against the driver.
 */
struct mlx5_ib_create_wq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u32   rq_wqe_count;
	__u32   rq_wqe_shift;
	__u32   user_index;
	__u32   flags;
	__u32   comp_mask;	/* enum mlx5_ib_create_wq_mask */
	__u32	single_stride_log_num_of_bytes;
	__u32	single_wqe_log_num_of_strides;
	__u32	two_byte_shift_en;
};

/* AH creation response: resolved destination MAC for RoCE. */
struct mlx5_ib_create_ah_resp {
	__u32	response_length;
	__u8	dmac[ETH_ALEN];
	__u8	reserved[6];
};
416*4882a593Smuzhiyun 
/* Burst shaping parameters for packet pacing (see
 * MLX5_IB_PP_SUPPORT_BURST).
 */
struct mlx5_ib_burst_info {
	__u32       max_burst_sz;
	__u16       typical_pkt_sz;
	__u16       reserved;
};

/* Vendor part of the modify-QP request. */
struct mlx5_ib_modify_qp {
	__u32			   comp_mask;
	struct mlx5_ib_burst_info  burst_info;
	__u32			   ece_options;
};

/* Vendor part of the modify-QP response. */
struct mlx5_ib_modify_qp_resp {
	__u32	response_length;
	__u32	dctn;
	__u32   ece_options;
	__u32   reserved;
};

/* WQ creation response. */
struct mlx5_ib_create_wq_resp {
	__u32	response_length;
	__u32	reserved;
};

/* RWQ indirection-table creation response. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;
};

/* WQ modification request. */
struct mlx5_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;
};
450*4882a593Smuzhiyun 
/* Shared clock-info page mapped read-only into userspace via
 * MLX5_IB_MMAP_CLOCK_INFO. `sign` is presumably a seqlock-style
 * generation counter guarded by MLX5_IB_CLOCK_INFO_KERNEL_UPDATING —
 * confirm against the driver's update path.
 */
struct mlx5_ib_clock_info {
	__u32 sign;
	__u32 resv;
	__aligned_u64 nsec;
	__aligned_u64 cycles;
	__aligned_u64 frac;
	__u32 mult;
	__u32 shift;
	__aligned_u64 mask;
	__aligned_u64 overflow_period;
};

/* mmap offset "command" values encoded in the mmap offset. */
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE               = 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES       = 1,
	MLX5_IB_MMAP_WC_PAGE                    = 2,
	MLX5_IB_MMAP_NC_PAGE                    = 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK                 = 5,
	MLX5_IB_MMAP_ALLOC_WC                   = 6,
	MLX5_IB_MMAP_CLOCK_INFO                 = 7,
	MLX5_IB_MMAP_DEVICE_MEM                 = 8,
};

/* Flag in mlx5_ib_clock_info.sign while the kernel updates the page. */
enum {
	MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};

/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
enum {
	MLX5_IB_CLOCK_INFO_V1              = 0,
};
483*4882a593Smuzhiyun 
/* One flow-counter binding: a description tag and a hardware index. */
struct mlx5_ib_flow_counters_desc {
	__u32	description;
	__u32	index;
};

/* Array of counter descriptors passed by pointer (RDMA_UAPI_PTR keeps
 * the layout identical on 32- and 64-bit userspace).
 */
struct mlx5_ib_flow_counters_data {
	RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
	__u32   ncounters;
	__u32   reserved;
};

/* Vendor part of the create-flow request. */
struct mlx5_ib_create_flow {
	__u32   ncounters_data;
	__u32   reserved;
	/*
	 * Following are counters data based on ncounters_data, each
	 * entry in the data[] should match a corresponding counter object
	 * that was pointed by a counters spec upon the flow creation
	 */
	struct mlx5_ib_flow_counters_data data[];
};
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun #endif /* MLX5_ABI_USER_H */
507