/* SPDX-License-Identifier: GPL-2.0-only */
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_

/* Description:
 * This header file describes the VF-PF communication protocol used
 * by the drivers for all devices starting from our 40G product line
 *
 * Admin queue buffer usage:
 * desc->opcode is always aqc_opc_send_msg_to_pf
 * flags, retval, datalen, and data addr are all used normally.
 * The Firmware copies the cookie fields when sending messages between the
 * PF and VF, but uses all other fields internally. Due to this limitation,
 * we must send all messages as "indirect", i.e. using an external buffer.
 *
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
 * have a maximum of sixteen queues for all of its VSIs.
 *
 * The PF is required to return a status code in v_retval for all messages
 * except RESET_VF, which does not require any response. The return value
 * is of status_code type, defined in the shared type.h.
 *
 * In general, VF driver initialization should roughly follow the order of
 * these opcodes. The VF driver must first validate the API version of the
 * PF driver, then request a reset, then get resources, then configure
 * queues and interrupts. After these operations are complete, the VF
 * driver may start its queues, optionally add MAC and VLAN filters, and
 * process traffic.
 */

/* START GENERIC DEFINES
 * Need to ensure the following enums and defines hold the same meaning and
 * value in current and future projects
 */

/* Error Codes */
enum virtchnl_status_code {
	VIRTCHNL_STATUS_SUCCESS = 0,
	VIRTCHNL_STATUS_ERR_PARAM = -5,
	VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
	VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};

/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED

#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7

enum virtchnl_link_speed {
	VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
	VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
	VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
	VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
	VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
	VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
	VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
	VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
	VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};

/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
	VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
	VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
	VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};

/* END GENERIC DEFINES */

/* Opcodes for VF-PF communication. These are placed in the v_opcode field
 * of the virtchnl_msg structure.
 */
enum virtchnl_ops {
/* The PF sends status change events to VFs using
 * the VIRTCHNL_OP_EVENT opcode.
 * VFs send requests to the PF using the other ops.
 * Use of "advanced opcode" features must be negotiated as part of the
 * capabilities exchange and is not considered part of the base mode feature set.
 */
	VIRTCHNL_OP_UNKNOWN = 0,
	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	VIRTCHNL_OP_RESET_VF = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
	VIRTCHNL_OP_ENABLE_QUEUES = 8,
	VIRTCHNL_OP_DISABLE_QUEUES = 9,
	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
	VIRTCHNL_OP_ADD_VLAN = 12,
	VIRTCHNL_OP_DEL_VLAN = 13,
	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
	VIRTCHNL_OP_GET_STATS = 15,
	VIRTCHNL_OP_RSVD = 16,
	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
	VIRTCHNL_OP_SET_RSS_HENA = 26,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
	VIRTCHNL_OP_REQUEST_QUEUES = 29,
	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
};

/* These macros are used to generate compilation errors if a structure/union
 * is not exactly the correct length. It gives a divide by zero error if the
 * structure/union is not of the correct size, otherwise it creates an enum
 * that is never used.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }

/* Virtual channel message descriptor. This overlays the admin queue
 * descriptor. All other data is passed in external buffers.
 */

struct virtchnl_msg {
	u8 pad[8]; /* AQ flags/opcode/len/retval fields */
	enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
	enum virtchnl_status_code v_retval; /* ditto for desc->retval */
	u32 vfid; /* used by PF when sending to VF */
};

VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);

/* Message descriptions and data structures. */

/* VIRTCHNL_OP_VERSION
 * VF posts its version number to the PF. PF responds with its version number
 * in the same format, along with a return code.
 * Reply from PF has its major/minor versions also in param0 and param1.
 * If there is a major version mismatch, then the VF cannot operate.
 * If there is a minor version mismatch, then the VF can operate but should
 * add a warning to the system log.
 *
 * This enum element MUST always be specified as == 1, regardless of other
 * changes in the API. The PF must always respond to this message without
 * error regardless of version mismatch.
 */
#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0

struct virtchnl_version_info {
	u32 major;
	u32 minor;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);

#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
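
/* Illustrative sketch (not part of the wire ABI): after the PF replies to
 * VIRTCHNL_OP_VERSION, the VF can use the reported version to decide whether
 * its VIRTCHNL_OP_GET_VF_RESOURCES request may carry the 1.1 capability
 * bitmap. The helper name below is hypothetical and only for illustration.
 */
static inline bool
virtchnl_example_pf_supports_caps(struct virtchnl_version_info *pf_ver)
{
	/* A major version mismatch means the VF cannot operate at all. */
	if (pf_ver->major != VIRTCHNL_VERSION_MAJOR)
		return false;
	/* Minor 0 is the 1.0 protocol, which takes no capability bitmap. */
	return pf_ver->minor > VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
}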

/* VIRTCHNL_OP_RESET_VF
 * VF sends this request to PF with no parameters
 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
 * until reset completion is indicated. The admin queue must be reinitialized
 * after this operation.
 *
 * When reset is complete, PF must ensure that all queues in all VSIs associated
 * with the VF are stopped, all queue configurations in the HMC are set to 0,
 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
 * are cleared.
 */

/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
 * vsi_type should always be 6 for backward compatibility. Add other fields
 * as needed.
 */
enum virtchnl_vsi_type {
	VIRTCHNL_VSI_TYPE_INVALID = 0,
	VIRTCHNL_VSI_SRIOV = 6,
};

/* VIRTCHNL_OP_GET_VF_RESOURCES
 * Version 1.0 VF sends this request to PF with no parameters
 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
 * PF responds with an indirect message containing
 * virtchnl_vf_resource and one or more
 * virtchnl_vsi_resource structures.
 */

struct virtchnl_vsi_resource {
	u16 vsi_id;
	u16 num_queue_pairs;
	enum virtchnl_vsi_type vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[ETH_ALEN];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);

/* VF capability flags
 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
 */
#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000

/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)

struct virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_cap_flags;
	u32 rss_key_size;
	u32 rss_lut_size;

	struct virtchnl_vsi_resource vsi_res[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
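
/* Illustrative sketch: the GET_VF_RESOURCES reply is variable length; the PF
 * appends one struct virtchnl_vsi_resource per VSI beyond the single entry
 * declared above. A hypothetical helper that locates the SR-IOV VSI in such
 * a reply might look like this (not part of this header's ABI).
 */
static inline struct virtchnl_vsi_resource *
virtchnl_example_find_sriov_vsi(struct virtchnl_vf_resource *res)
{
	u16 i;

	for (i = 0; i < res->num_vsis; i++)
		if (res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			return &res->vsi_res[i];
	return NULL;
}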

/* VIRTCHNL_OP_CONFIG_TX_QUEUE
 * VF sends this message to set up parameters for one TX queue.
 * External data buffer contains one instance of virtchnl_txq_info.
 * PF configures requested queue and returns a status code.
 */

/* Tx queue config info */
struct virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len; /* number of descriptors, multiple of 8 */
	u16 headwb_enabled; /* deprecated with AVF 1.0 */
	u64 dma_ring_addr;
	u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);

/* VIRTCHNL_OP_CONFIG_RX_QUEUE
 * VF sends this message to set up parameters for one RX queue.
 * External data buffer contains one instance of virtchnl_rxq_info.
 * PF configures requested queue and returns a status code.
 */

/* Rx queue config info */
struct virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len; /* number of descriptors, multiple of 32 */
	u16 hdr_size;
	u16 splithdr_enabled; /* deprecated with AVF 1.0 */
	u32 databuffer_size;
	u32 max_pkt_size;
	u32 pad1;
	u64 dma_ring_addr;
	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
	u32 pad2;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);

/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * VF sends this message to set parameters for all active TX and RX queues
 * associated with the specified VSI.
 * PF configures queues and returns status.
 * If the number of queues specified is greater than the number of queues
 * associated with the VSI, an error is returned and no queues are configured.
 */
struct virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct virtchnl_txq_info txq;
	struct virtchnl_rxq_info rxq;
};

VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);

struct virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	u32 pad;
	struct virtchnl_queue_pair_info qpair[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
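
/* Illustrative sketch: the indirect buffer for VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * holds the header above followed by num_queue_pairs queue pair entries.
 * This mirrors the length check in virtchnl_vc_validate_vf_msg() at the end
 * of this file; the helper name is hypothetical.
 */
static inline u32 virtchnl_example_vsi_queue_config_len(u16 num_queue_pairs)
{
	return sizeof(struct virtchnl_vsi_queue_config_info) +
	       num_queue_pairs * sizeof(struct virtchnl_queue_pair_info);
}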

/* VIRTCHNL_OP_REQUEST_QUEUES
 * VF sends this message to request the PF to allocate additional queues to
 * this VF. Each VF gets a guaranteed number of queues on init but asking for
 * additional queues must be negotiated. This is a best effort request as it
 * is possible the PF does not have enough queues left to support the request.
 * If the PF cannot support the number requested it will respond with the
 * maximum number it is able to support. If the request is successful, PF will
 * then reset the VF to institute required changes.
 */

/* VF resource request */
struct virtchnl_vf_res_request {
	u16 num_queue_pairs;
};

/* VIRTCHNL_OP_CONFIG_IRQ_MAP
 * VF uses this message to map vectors to queues.
 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
 * are to be associated with the specified vector.
 * The "other" causes are always mapped to vector 0.
 * PF configures interrupt mapping and returns status.
 */
struct virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;
	u16 rxq_map;
	u16 txq_map;
	u16 rxitr_idx;
	u16 txitr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);

struct virtchnl_irq_map_info {
	u16 num_vectors;
	struct virtchnl_vector_map vecmap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
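
/* Illustrative sketch: mapping RX/TX queue 0 of a VSI to MSI-X vector 1,
 * leaving vector 0 for the "other" causes as described above. BIT() is
 * assumed to be provided by the including driver, as it is for the link
 * speed values earlier in this file; the helper name and the choice of ITR
 * indexes are illustrative only.
 */
static inline void
virtchnl_example_map_queue0_to_vector1(struct virtchnl_vector_map *vecmap,
				       u16 vsi_id)
{
	vecmap->vsi_id = vsi_id;
	vecmap->vector_id = 1;
	vecmap->rxq_map = BIT(0);
	vecmap->txq_map = BIT(0);
	vecmap->rxitr_idx = 0;	/* e.g. ITR0 for RX */
	vecmap->txitr_idx = 1;	/* e.g. ITR1 for TX */
}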

/* VIRTCHNL_OP_ENABLE_QUEUES
 * VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
 * The queues fields are bitmaps indicating which queues to act upon.
 * (Currently, we only support 16 queues per VF, but we make the field
 * u32 to allow for expansion.)
 * PF performs requested action and returns status.
 */
struct virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;
	u32 tx_queues;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
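
/* Illustrative sketch: rx_queues and tx_queues are bitmaps, so selecting the
 * first two queue pairs of a VSI for VIRTCHNL_OP_ENABLE_QUEUES can be written
 * as below. The helper name is hypothetical.
 */
static inline void
virtchnl_example_select_queue_pairs(struct virtchnl_queue_select *vqs,
				    u16 vsi_id, u32 pair_mask)
{
	vqs->vsi_id = vsi_id;
	vqs->pad = 0;
	vqs->rx_queues = pair_mask;	/* e.g. BIT(0) | BIT(1) */
	vqs->tx_queues = pair_mask;
}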

/* VIRTCHNL_OP_ADD_ETH_ADDR
 * VF sends this message in order to add one or more unicast or multicast
 * address filters for the specified VSI.
 * PF adds the filters and returns status.
 */

/* VIRTCHNL_OP_DEL_ETH_ADDR
 * VF sends this message in order to remove one or more unicast or multicast
 * filters for the specified VSI.
 * PF removes the filters and returns status.
 */

struct virtchnl_ether_addr {
	u8 addr[ETH_ALEN];
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);

struct virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	struct virtchnl_ether_addr list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);

/* VIRTCHNL_OP_ADD_VLAN
 * VF sends this message to add one or more VLAN tag filters for receives.
 * PF adds the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* VIRTCHNL_OP_DEL_VLAN
 * VF sends this message to remove one or more VLAN tag filters for receives.
 * PF removes the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

struct virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	u16 vlan_id[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);

/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002

/* VIRTCHNL_OP_GET_STATS
 * VF sends this message to request stats for the selected VSI. VF uses
 * the virtchnl_queue_select struct to specify the VSI. The queue_id
 * field is ignored by the PF.
 *
 * PF replies with struct eth_stats in an external buffer.
 */

/* VIRTCHNL_OP_CONFIG_RSS_KEY
 * VIRTCHNL_OP_CONFIG_RSS_LUT
 * VF sends these messages to configure RSS. Only supported if both PF
 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation. If this is the case, then the RSS fields in
 * the VF resource struct are valid.
 * Both the key and LUT are initialized to 0 by the PF, meaning that
 * RSS is effectively disabled until set up by the VF.
 */
struct virtchnl_rss_key {
	u16 vsi_id;
	u16 key_len;
	u8 key[1]; /* RSS hash key, packed bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);

struct virtchnl_rss_lut {
	u16 vsi_id;
	u16 lut_entries;
	u8 lut[1]; /* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
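
/* Illustrative sketch: unlike the queue and MAC lists above, key[] and lut[]
 * are byte arrays declared with a single element, so a CONFIG_RSS_KEY or
 * CONFIG_RSS_LUT message adds only (length - 1) bytes on top of the struct
 * size. This mirrors the length checks in virtchnl_vc_validate_vf_msg() at
 * the end of this file; the helper names are hypothetical.
 */
static inline u32 virtchnl_example_rss_key_msg_len(u16 key_len)
{
	return sizeof(struct virtchnl_rss_key) + key_len - 1;
}

static inline u32 virtchnl_example_rss_lut_msg_len(u16 lut_entries)
{
	return sizeof(struct virtchnl_rss_lut) + lut_entries - 1;
}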

/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hena {
	u64 hena;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);

/* VIRTCHNL_OP_ENABLE_CHANNELS
 * VIRTCHNL_OP_DISABLE_CHANNELS
 * VF sends these messages to enable or disable channels based on
 * the user specified queue count and queue offset for each traffic class.
 * This struct encompasses all the information that the PF needs from
 * the VF to create a channel.
 */
struct virtchnl_channel_info {
	u16 count; /* number of queues in a channel */
	u16 offset; /* queues in a channel start from 'offset' */
	u32 pad;
	u64 max_tx_rate;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);

struct virtchnl_tc_info {
	u32 num_tc;
	u32 pad;
	struct virtchnl_channel_info list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);

/* VIRTCHNL_ADD_CLOUD_FILTER
 * VIRTCHNL_DEL_CLOUD_FILTER
 * VF sends these messages to add or delete a cloud filter based on the
 * user specified match and action filters. These structures encompass
 * all the information that the PF needs from the VF to add/delete a
 * cloud filter.
 */

struct virtchnl_l4_spec {
	u8 src_mac[ETH_ALEN];
	u8 dst_mac[ETH_ALEN];
	__be16 vlan_id;
	__be16 pad; /* reserved for future use */
	__be32 src_ip[4];
	__be32 dst_ip[4];
	__be16 src_port;
	__be16 dst_port;
};

VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);

union virtchnl_flow_spec {
	struct virtchnl_l4_spec tcp_spec;
	u8 buffer[128]; /* reserved for future use */
};

VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);

enum virtchnl_action {
	/* action types */
	VIRTCHNL_ACTION_DROP = 0,
	VIRTCHNL_ACTION_TC_REDIRECT,
};

enum virtchnl_flow_type {
	/* flow types */
	VIRTCHNL_TCP_V4_FLOW = 0,
	VIRTCHNL_TCP_V6_FLOW,
};

struct virtchnl_filter {
	union virtchnl_flow_spec data;
	union virtchnl_flow_spec mask;
	enum virtchnl_flow_type flow_type;
	enum virtchnl_action action;
	u32 action_meta;
	u8 field_flags;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);

/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};

#define PF_EVENT_SEVERITY_INFO 0
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255

struct virtchnl_pf_event {
	enum virtchnl_event_codes event;
	union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities then use link_event else use link_event_adv to
		 * get the speed and link information. The ability to understand
		 * new speeds is indicated by setting the capability flag
		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
		 * in virtchnl_vf_resource struct and can be used to determine
		 * which link event struct to use below.
		 */
		struct {
			enum virtchnl_link_speed link_speed;
			bool link_status;
		} link_event;
		struct {
			/* link_speed provided in Mbps */
			u32 link_speed;
			u8 link_status;
			u8 pad[3];
		} link_event_adv;
	} event_data;

	int severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
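
/* Illustrative sketch: decoding a VIRTCHNL_EVENT_LINK_CHANGE event. Which
 * union member is valid depends on whether VIRTCHNL_VF_CAP_ADV_LINK_SPEED
 * was negotiated, as described in the comment above; vf_cap_flags is the
 * value reported in struct virtchnl_vf_resource. The helper name is
 * hypothetical.
 */
static inline bool
virtchnl_example_link_is_up(struct virtchnl_pf_event *vpe, u32 vf_cap_flags)
{
	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return vpe->event_data.link_event_adv.link_status != 0;
	return vpe->event_data.link_event.link_status;
}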

/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
 * VF uses this message to request PF to map IWARP vectors to IWARP queues.
 * The request for this originates from the VF IWARP driver through
 * a client interface between VF LAN and VF IWARP driver.
 * A vector could have an AEQ and CEQ attached to it although
 * there is a single AEQ per VF IWARP instance in which case
 * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
 * There will never be a case where there will be multiple CEQs attached
 * to a single vector.
 * PF configures interrupt mapping and returns status.
 */

struct virtchnl_iwarp_qv_info {
	u32 v_idx; /* msix_vector */
	u16 ceq_idx;
	u16 aeq_idx;
	u8 itr_idx;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);

struct virtchnl_iwarp_qvlist_info {
	u32 num_vectors;
	struct virtchnl_iwarp_qv_info qv_info[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);

/* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked
 * will result in 3.
 */
enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0,
	VIRTCHNL_VFR_COMPLETED,
	VIRTCHNL_VFR_VFACTIVE,
};
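
/* Illustrative sketch: interpreting a VFGEN_RSTAT read after requesting
 * VIRTCHNL_OP_RESET_VF, assuming the two-bit state mask used by the Intel VF
 * drivers (a read taken while the hardware is still in reset yields
 * 0xDEADBEEF, which masks to 3). The register read itself is device specific
 * and left to the including driver; the helper name is hypothetical.
 */
static inline bool virtchnl_example_vf_reset_done(u32 vfgen_rstat)
{
	u32 state = vfgen_rstat & 0x3;

	return state == VIRTCHNL_VFR_COMPLETED || state == VIRTCHNL_VFR_VFACTIVE;
}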

/**
 * virtchnl_vc_validate_vf_msg
 * @ver: Virtchnl version info
 * @v_opcode: Opcode for the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg format against struct for each opcode
 */
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len = 0;

	/* Validate message length. */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct virtchnl_version_info);
		break;
	case VIRTCHNL_OP_RESET_VF:
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(ver))
			valid_len = sizeof(u32);
		break;
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct virtchnl_txq_info);
		break;
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct virtchnl_rxq_info);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct virtchnl_vsi_queue_config_info *vqc =
			    (struct virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct virtchnl_irq_map_info *vimi =
			    (struct virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
	case VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		valid_len = sizeof(struct virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct virtchnl_ether_addr_list *veal =
			    (struct virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ADD_VLAN:
	case VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list *vfl =
			    (struct virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct virtchnl_promisc_info);
		break;
	case VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_IWARP:
		/* These messages are opaque to us and will be validated in
		 * the RDMA client code. We just need to check for nonzero
		 * length. The firmware will enforce max length restrictions.
		 */
		if (msglen)
			valid_len = msglen;
		else
			err_msg_format = true;
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
		if (msglen >= valid_len) {
			struct virtchnl_iwarp_qvlist_info *qv =
				(struct virtchnl_iwarp_qvlist_info *)msg;
			if (qv->num_vectors == 0) {
				err_msg_format = true;
				break;
			}
			valid_len += ((qv->num_vectors - 1) *
				sizeof(struct virtchnl_iwarp_qv_info));
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		valid_len = sizeof(struct virtchnl_rss_key);
		if (msglen >= valid_len) {
			struct virtchnl_rss_key *vrk =
				(struct virtchnl_rss_key *)msg;
			valid_len += vrk->key_len - 1;
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		valid_len = sizeof(struct virtchnl_rss_lut);
		if (msglen >= valid_len) {
			struct virtchnl_rss_lut *vrl =
				(struct virtchnl_rss_lut *)msg;
			valid_len += vrl->lut_entries - 1;
		}
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		valid_len = sizeof(struct virtchnl_rss_hena);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		valid_len = sizeof(struct virtchnl_vf_res_request);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		valid_len = sizeof(struct virtchnl_tc_info);
		if (msglen >= valid_len) {
			struct virtchnl_tc_info *vti =
				(struct virtchnl_tc_info *)msg;
			valid_len += (vti->num_tc - 1) *
				     sizeof(struct virtchnl_channel_info);
			if (vti->num_tc == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		valid_len = sizeof(struct virtchnl_filter);
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		valid_len = sizeof(struct virtchnl_filter);
		break;
	/* These are always errors coming from the VF. */
	case VIRTCHNL_OP_EVENT:
	case VIRTCHNL_OP_UNKNOWN:
	default:
		return VIRTCHNL_STATUS_ERR_PARAM;
	}
	/* few more checks */
	if (err_msg_format || valid_len != msglen)
		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

	return 0;
}
#endif /* _VIRTCHNL_H_ */