/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u8 num_cids;
	u8 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE                 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
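
/* Illustrative sketch (not part of the driver): a VF request is laid out in
 * the mailbox buffer as a chain of TLVs - a first TLV whose header names the
 * request type and whose reply_address tells the PF where to DMA the
 * response, followed by optional extended TLVs, terminated by a
 * CHANNEL_TLV_LIST_END entry. Assuming a hypothetical helper that appends a
 * TLV at the current offset and advances it, a request could be built
 * roughly like this:
 *
 *	struct vfpf_first_tlv *req;
 *	struct channel_list_end_tlv *end;
 *
 *	req = buf_add_tlv(buf, &offset, CHANNEL_TLV_CLOSE, sizeof(*req));
 *	req->reply_address = (u64)pf2vf_reply_phys;
 *	end = buf_add_tlv(buf, &offset, CHANNEL_TLV_LIST_END, sizeof(*end));
 *
 * buf_add_tlv(), buf and offset are illustrative names only; the real driver
 * keeps the running offset in struct qed_vf_iov (see below).
 */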

#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
	/* A requirement for supporting multi-Tx queues on a single queue-zone,
	 * VF would pass qids as additional information whenever passing queue
	 * references.
	 */
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS     BIT(2)

	/* The VF is using the physical bar. While this is mostly internal
	 * to the VF, might affect the number of CIDs supported assuming
	 * QUEUE_QIDS is set.
	 */
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR   BIT(3)
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};
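
/* Illustrative sketch (not part of the driver): the capability bits above
 * are simply OR'd into vfdev_info.capabilities before the acquire request is
 * sent. For instance, a VF that supports per-queue qids and runs over the
 * physical doorbell bar might advertise:
 *
 *	struct vfpf_acquire_tlv *req;	(illustrative request pointer)
 *
 *	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_QUEUE_QIDS |
 *					VFPF_ACQUIRE_CAP_PHYSICAL_BAR;
 *	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
 */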

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
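
/* Illustrative note: since the indirection table size is
 * 2 ^ rss_table_size_log, a VF using the full
 * T_ETH_INDIRECTION_TABLE_SIZE (128) entry table would set
 * rss_table_size_log = 7, e.g.:
 *
 *	p_rss->rss_table_size_log = ilog2(T_ETH_INDIRECTION_TABLE_SIZE);
 *	p_rss->update_rss_flags |= VFPF_UPDATE_RSS_IND_TABLE_FLAG;
 *
 * p_rss here is an illustrative pointer to the TLV being filled.
 */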

struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

	/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS             BIT(3)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		/* Doorbell bar size configured in HW: log(size) or 0 */
		u8 bar_size;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 num_cids;
		u8 padding;
	} resc;

	u32 bulletin_size;
	u32 padding;
};
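
/* Illustrative sketch (not part of the driver): after the acquire response
 * is received, the VF typically inspects pfdev_info before committing to the
 * resources. A hedged example of such checks, using only fields above:
 *
 *	struct pfvf_acquire_resp_tlv *resp;	(illustrative response pointer)
 *
 *	if (!(resp->pfdev_info.capabilities &
 *	      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE))
 *		return -EINVAL;	(PF predates the fixed sanity mechanism)
 *
 *	if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS)
 *		... (queue references must carry a CHANNEL_TLV_QID extension)
 *
 * The authoritative handling lives in the acquire flow in qed_vf.c.
 */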

struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Extended queue information - additional index for reference inside qzone.
 * If communicated between VF/PF, each TLV relating to queues should be
 * extended by one such [or have a future base TLV that already contains info].
 */
struct vfpf_qid_tlv {
	struct channel_tlv tl;
	u8 qid;
	u8 padding[3];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	/* There are only 256 approx bins, and in HSI they're divided into
	 * 32-bit values. As old VFs used to set-bit to the values on its side,
	 * the upper half of the array is never expected to contain any data.
	 */
	u64 bins[4];
	u64 obsolete_bins[4];
};
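
/* Illustrative sketch: bins[] forms a 256-bit map of approximate multicast
 * bins, so marking bin number 'bin' (0..255) is plain bit arithmetic:
 *
 *	p_mcast->bins[bin / 64] |= 1ULL << (bin % 64);
 *
 * p_mcast is an illustrative pointer to the TLV being filled; how 'bin' is
 * derived from the multicast MAC is outside the scope of this header.
 */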

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 tun_mode_update_mask;
	u8 tunn_mode;
	u8 update_tun_cls;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u8 update_geneve_port;
	u8 update_vxlan_port;
	u16 geneve_port;
	u16 vxlan_port;
	u8 padding[2];
};

struct pfvf_update_tunn_param_tlv {
	struct pfvf_tlv hdr;

	u16 tunn_feature_mask;
	u8 vxlan_mode;
	u8 l2geneve_mode;
	u8 ipgeneve_mode;
	u8 l2gre_mode;
	u8 ipgre_mode;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

struct vfpf_update_coalesce {
	struct vfpf_first_tlv first_tlv;
	u16 rx_coal;
	u16 tx_coal;
	u16 qid;
	u8 padding[2];
};

struct vfpf_read_coal_req_tlv {
	struct vfpf_first_tlv first_tlv;
	u16 qid;
	u8 is_rx;
	u8 padding[5];
};

struct pfvf_read_coal_resp_tlv {
	struct pfvf_tlv hdr;
	u16 coal;
	u8 padding[6];
};

struct vfpf_bulletin_update_mac_tlv {
	struct vfpf_first_tlv first_tlv;
	u8 mac[ETH_ALEN];
	u8 padding[2];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct vfpf_update_coalesce update_coalesce;
	struct vfpf_read_coal_req_tlv read_coal_req;
	struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
	struct pfvf_update_tunn_param_tlv tunn_param_resp;
	struct pfvf_read_coal_resp_tlv read_coal_resp;
};

enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that a suggested MAC was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};
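
/* Illustrative sketch: the bits above index into the valid_bitmap of
 * struct qed_bulletin_content (below). A VF checking for a PF-forced MAC
 * would do roughly:
 *
 *	if (bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED))
 *		... (adopt bulletin->mac and refuse local MAC changes)
 *
 * bulletin is an illustrative pointer to the VF's bulletin shadow copy.
 */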

struct qed_bulletin_content {
	/* crc of structure to ensure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
	 * possible those structs will grow further along the road, we cannot
	 * embed them here; instead we need to carry all of their fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 padding4[2];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};
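
/* Illustrative sketch (not part of the driver): because the PF may rewrite
 * the bulletin at any time, the VF reads it into a shadow copy and only
 * accepts it when the crc over the remainder of the structure matches and
 * the version has advanced, roughly:
 *
 *	memcpy(&shadow, bulletin.p_virt, bulletin.size);
 *	crc = crc32(0, (u8 *)&shadow + sizeof(shadow.crc),
 *		    bulletin.size - sizeof(shadow.crc));
 *	if (crc != shadow.crc || shadow.version == old_version)
 *		... (discard this snapshot and try again later)
 *
 * The exact crc seed/coverage is an assumption here; the authoritative
 * implementation is qed_vf_read_bulletin() in qed_vf.c.
 */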

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_COALESCE_UPDATE,
	CHANNEL_TLV_QID,
	CHANNEL_TLV_COALESCE_READ,
	CHANNEL_TLV_BULLETIN_UPDATE_MAC,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case of non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
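
/* Illustrative sketch: thanks to the sequential numbering, code that walks
 * all possible vport-update extension TLVs can simply loop over the range:
 *
 *	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 *	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++)
 *		... (look up / handle the extension TLV of type 'tlv')
 */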

/* Default number of CIDs [total of both Rx and Tx] to be requested,
 * and the maximum possible number.
 */
#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
#define QED_ETH_VF_MAX_NUM_CIDS (250)

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;

	/* Current day VFs are passing the SBs physical address on vport
	 * start, and as they lack an IGU mapping they need to store the
	 * addresses of previously registered SBs.
	 * Even if we were to change configuration flow, due to backward
	 * compatibility [with older PFs] we'd still need to store these.
	 */
	struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];

	/* Determines whether VF utilizes doorbells via limited register
	 * bar or via the doorbell bar.
	 */
	bool b_doorbell_bar;
};
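
/* Illustrative sketch (not part of the driver): any use of the mailbox
 * buffers is expected to be serialized by the mutex above, e.g.:
 *
 *	struct qed_vf_iov *iov = p_hwfn->vf_iov_info;	(assumed field name)
 *
 *	mutex_lock(&iov->mutex);
 *	... (fill iov->vf2pf_request, send it, parse iov->pf2vf_reply)
 *	mutex_unlock(&iov->mutex);
 */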

/**
 * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
 *             Coalesce value '0' will omit the configuration.
 *
 * @param p_hwfn
 * @param rx_coal - coalesce value in microseconds for the rx queue
 * @param tx_coal - coalesce value in microseconds for the tx queue
 * @param p_cid   - queue cid
 *
 **/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
			   u16 rx_coal,
			   u16 tx_coal, struct qed_queue_cid *p_cid);
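
/* Illustrative usage (not part of the driver): set only the Rx coalescing
 * of a queue to 64us, leaving Tx untouched by passing '0':
 *
 *	rc = qed_vf_pf_set_coalesce(p_hwfn, 64, 0, p_cid);
 *	if (rc)
 *		... (the PF rejected or failed the request)
 */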

/**
 * @brief VF - Get coalesce per VF's relative queue.
 *
 * @param p_hwfn
 * @param p_coal - coalesce value in microseconds for VF queues.
 * @param p_cid  - queue cid
 *
 **/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid);

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return enum _qed_status
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get number of Tx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_txqs - allocated TX queues
 */
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);

/**
 * @brief Get number of available connections [both Rx and Tx] for VF
 *
 * @param p_hwfn
 * @param num_cids - allocated number of connections
 */
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Get number of MAC filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_mac_filters - allocated MAC filters
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from VF's acquire
 *        response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF
 *        sends ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param p_cid			- Only relative fields are relevant
 * @param bd_max_bytes          - maximum number of bytes per bd
 * @param bd_chain_phys_addr    - physical address of bd chain
 * @param cqe_pbl_addr          - physical address of pbl
 * @param cqe_pbl_size          - pbl size
 * @param pp_prod               - pointer to the producer to be
 *				  used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);
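
/* Illustrative usage (not part of the driver): the caller provides the DMA
 * addresses of its BD chain and CQE PBL, and on success receives a pointer
 * through which the Rx producer is updated in the fastpath:
 *
 *	void __iomem *prod;
 *
 *	rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, rx_buf_size,
 *				 bd_chain_phys, cqe_pbl_phys,
 *				 cqe_pbl_size, &prod);
 *
 * rx_buf_size, bd_chain_phys, cqe_pbl_phys and cqe_pbl_size are illustrative
 * variables owned by the caller.
 */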

/**
 * @brief VF - start the TX queue by sending a message to the
 *        PF.
 *
 * @param p_hwfn
 * @param p_cid                 - Only relative fields are relevant
 * @param pbl_addr              - physical address of the tx chain's pbl
 * @param pbl_size              - pbl size
 * @param pp_doorbell           - pointer to address to which to
 *                      write the doorbell to
 *
 * @return int
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 *
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return INLINE u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief Stores [or removes] a configured sb_info.
 *
 * @param p_hwfn
 * @param sb_id - zero-based SB index [for fastpath]
 * @param p_sb - may be NULL [during removal].
 */
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
			u16 sb_id, struct qed_sb_info *p_sb);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link_caps - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_tunn);

u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);

/**
 * @brief - Ask PF to update the MAC address in its bulletin board
 *
 * @param p_hwfn
 * @param p_mac - mac address to be updated in bulletin board
 */
int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);

#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
}

static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
					      u8 *num_mac_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_adr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
				      struct qed_sb_info *p_sb)
{
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}

static inline void
qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
}

static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
						struct qed_tunnel_info *p_tunn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
						u8 *p_mac)
{
	return -EINVAL;
}

static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
		   enum BAR_ID bar_id)
{
	return 0;
}
#endif

#endif