/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_ABI_USER_H
#define MLX4_ABI_USER_H

#include <linux/types.h>

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */

#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION	3
#define MLX4_IB_UVERBS_ABI_VERSION		4

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
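
/*
 * Illustrative userspace sketch (not part of this ABI): a buffer
 * pointer is carried in an __aligned_u64 field, cast through uintptr_t,
 * so the struct layout stays identical on 32-bit and 64-bit builds.
 * cq_buf and db_rec below are hypothetical placeholders.
 *
 *	struct mlx4_ib_create_cq cmd;
 *
 *	cmd.buf_addr = (__u64)(uintptr_t)cq_buf;
 *	cmd.db_addr  = (__u64)(uintptr_t)db_rec;
 */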

struct mlx4_ib_alloc_ucontext_resp_v3 {
	__u32	qp_tab_size;
	__u16	bf_reg_size;
	__u16	bf_regs_per_page;
};

enum {
	MLX4_USER_DEV_CAP_LARGE_CQE	= 1L << 0,
};

struct mlx4_ib_alloc_ucontext_resp {
	__u32	dev_caps;
	__u32	qp_tab_size;
	__u16	bf_reg_size;
	__u16	bf_regs_per_page;
	__u32	cqe_size;
};
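
/*
 * Illustrative sketch (assumption, not part of this ABI): userspace
 * typically honours cqe_size only when the kernel reports large-CQE
 * support in dev_caps; the 32-byte fallback below is a hypothetical
 * provider default, not something this header defines.
 *
 *	struct mlx4_ib_alloc_ucontext_resp resp;
 *	int cqe_size = 32;
 *
 *	if (resp.dev_caps & MLX4_USER_DEV_CAP_LARGE_CQE)
 *		cqe_size = resp.cqe_size;
 */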

struct mlx4_ib_alloc_pd_resp {
	__u32	pdn;
	__u32	reserved;
};

struct mlx4_ib_create_cq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
};

struct mlx4_ib_create_cq_resp {
	__u32	cqn;
	__u32	reserved;
};

struct mlx4_ib_resize_cq {
	__aligned_u64 buf_addr;
};

struct mlx4_ib_create_srq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
};

struct mlx4_ib_create_srq_resp {
	__u32	srqn;
	__u32	reserved;
};

struct mlx4_ib_create_qp_rss {
	__aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
	__u8    rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
	__u8    reserved[7];
	__u8    rx_hash_key[40];
	__u32   comp_mask;
	__u32   reserved1;
};

struct mlx4_ib_create_qp {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u8	log_sq_bb_count;
	__u8	log_sq_stride;
	__u8	sq_no_prefetch;
	__u8	reserved;
	__u32	inl_recv_sz;
};

struct mlx4_ib_create_wq {
	__aligned_u64 buf_addr;
	__aligned_u64 db_addr;
	__u8	log_range_size;
	__u8	reserved[3];
	__u32   comp_mask;
};

struct mlx4_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;
};

struct mlx4_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;
};

/* RX Hash function flags */
enum mlx4_ib_rx_hash_function_flags {
	MLX4_IB_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};
/*
 * RX hash flags: these flags select which fields of an incoming packet
 * participate in the RX hash calculation.  Each flag represents one
 * packet field; when the flag is set, that field is included in the
 * hash.  An illustrative example follows the enum below.
 */
enum mlx4_ib_rx_hash_fields {
	MLX4_IB_RX_HASH_SRC_IPV4	= 1 << 0,
	MLX4_IB_RX_HASH_DST_IPV4	= 1 << 1,
	MLX4_IB_RX_HASH_SRC_IPV6	= 1 << 2,
	MLX4_IB_RX_HASH_DST_IPV6	= 1 << 3,
	MLX4_IB_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX4_IB_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX4_IB_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX4_IB_RX_HASH_DST_PORT_UDP	= 1 << 7,
	MLX4_IB_RX_HASH_INNER		= 1ULL << 31,
};
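
/*
 * Illustrative sketch (not part of this ABI): hashing on the IPv4
 * addresses and UDP ports would be requested by OR-ing the
 * corresponding flags into rx_hash_fields_mask:
 *
 *	struct mlx4_ib_create_qp_rss cmd;
 *
 *	cmd.rx_hash_function    = MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
 *	cmd.rx_hash_fields_mask = MLX4_IB_RX_HASH_SRC_IPV4 |
 *				  MLX4_IB_RX_HASH_DST_IPV4 |
 *				  MLX4_IB_RX_HASH_SRC_PORT_UDP |
 *				  MLX4_IB_RX_HASH_DST_PORT_UDP;
 */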

struct mlx4_ib_rss_caps {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
	__u8 reserved[7];
};

enum query_device_resp_mask {
	MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};

struct mlx4_ib_tso_caps {
	__u32 max_tso; /* Maximum TSO payload size in bytes */
	/* The corresponding bit is set if the QP type from
	 * 'enum ib_qp_type' is supported (see the sketch after
	 * this struct).
	 */
	__u32 supported_qpts;
};
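
/*
 * Illustrative sketch (assumption): supported_qpts is a bitmap indexed
 * by QP type, so userspace would test the bit for the QP type it
 * intends to use.  IBV_QPT_RAW_PACKET below stands in for whatever
 * QP-type constant the caller's verbs library provides.
 *
 *	if (tso_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET))
 *		max_payload = tso_caps.max_tso;
 */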

struct mlx4_uverbs_ex_query_device_resp {
	__u32			comp_mask;
	__u32			response_length;
	__aligned_u64		hca_core_clock_offset;
	__u32			max_inl_recv_sz;
	__u32			reserved;
	struct mlx4_ib_rss_caps	rss_caps;
	struct mlx4_ib_tso_caps tso_caps;
};
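
/*
 * Illustrative sketch (not part of this ABI): hca_core_clock_offset is
 * only meaningful when the kernel sets the corresponding comp_mask bit:
 *
 *	struct mlx4_uverbs_ex_query_device_resp resp;
 *
 *	if (resp.comp_mask & MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET)
 *		clock_offset = resp.hca_core_clock_offset;
 */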

#endif /* MLX4_ABI_USER_H */