/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_IF_H
#define _QED_IF_H

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <net/devlink.h>

enum dcbx_protocol_type {
	DCBX_PROTOCOL_ISCSI,
	DCBX_PROTOCOL_FCOE,
	DCBX_PROTOCOL_ROCE,
	DCBX_PROTOCOL_ROCE_V2,
	DCBX_PROTOCOL_ETH,
	DCBX_MAX_PROTOCOL_TYPE
};

#define QED_ROCE_PROTOCOL_INDEX (3)

#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
#define QED_MAX_PFC_PRIORITIES 8
#define QED_DCBX_DSCP_SIZE 64

struct qed_dcbx_lldp_remote {
	u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
	bool enable_rx;
	bool enable_tx;
	u32 tx_interval;
	u32 max_credit;
};

struct qed_dcbx_lldp_local {
	u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

struct qed_dcbx_app_prio {
	u8 roce;
	u8 roce_v2;
	u8 fcoe;
	u8 iscsi;
	u8 eth;
};

struct qed_dbcx_pfc_params {
	bool willing;
	bool enabled;
	u8 prio[QED_MAX_PFC_PRIORITIES];
	u8 max_tc;
};

enum qed_dcbx_sf_ieee_type {
	QED_DCBX_SF_IEEE_ETHTYPE,
	QED_DCBX_SF_IEEE_TCP_PORT,
	QED_DCBX_SF_IEEE_UDP_PORT,
	QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

struct qed_app_entry {
	bool ethtype;
	enum qed_dcbx_sf_ieee_type sf_ieee;
	bool enabled;
	u8 prio;
	u16 proto_id;
	enum dcbx_protocol_type proto_type;
};

struct qed_dcbx_params {
	struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
	u16 num_app_entries;
	bool app_willing;
	bool app_valid;
	bool app_error;
	bool ets_willing;
	bool ets_enabled;
	bool ets_cbs;
	bool valid;
	u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
	struct qed_dbcx_pfc_params pfc;
	u8 max_ets_tc;
};

struct qed_dcbx_admin_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_remote_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_operational_params {
	struct qed_dcbx_app_prio app_prio;
	struct qed_dcbx_params params;
	bool valid;
	bool enabled;
	bool ieee;
	bool cee;
	bool local;
	u32 err;
};

struct qed_dcbx_get {
	struct qed_dcbx_operational_params operational;
	struct qed_dcbx_lldp_remote lldp_remote;
	struct qed_dcbx_lldp_local lldp_local;
	struct qed_dcbx_remote_params remote;
	struct qed_dcbx_admin_params local;
};

enum qed_nvm_images {
	QED_NVM_IMAGE_ISCSI_CFG,
	QED_NVM_IMAGE_FCOE_CFG,
	QED_NVM_IMAGE_MDUMP,
	QED_NVM_IMAGE_NVM_CFG1,
	QED_NVM_IMAGE_DEFAULT_CFG,
	QED_NVM_IMAGE_NVM_META,
};

struct qed_link_eee_params {
	u32 tx_lpi_timer;
#define QED_EEE_1G_ADV		BIT(0)
#define QED_EEE_10G_ADV		BIT(1)

	/* Capabilities are represented using QED_EEE_*_ADV values */
	u8 adv_caps;
	u8 lp_adv_caps;
	bool enable;
	bool tx_lpi_enable;
};

enum qed_led_mode {
	QED_LED_MODE_OFF,
	QED_LED_MODE_ON,
	QED_LED_MODE_RESTORE
};

struct qed_mfw_tlv_eth {
	u16 lso_maxoff_size;
	bool lso_maxoff_size_set;
	u16 lso_minseg_size;
	bool lso_minseg_size_set;
	u8 prom_mode;
	bool prom_mode_set;
	u16 tx_descr_size;
	bool tx_descr_size_set;
	u16 rx_descr_size;
	bool rx_descr_size_set;
	u16 netq_count;
	bool netq_count_set;
	u32 tcp4_offloads;
	bool tcp4_offloads_set;
	u32 tcp6_offloads;
	bool tcp6_offloads_set;
	u16 tx_descr_qdepth;
	bool tx_descr_qdepth_set;
	u16 rx_descr_qdepth;
	bool rx_descr_qdepth_set;
	u8 iov_offload;
#define QED_MFW_TLV_IOV_OFFLOAD_NONE            (0)
#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE      (1)
#define QED_MFW_TLV_IOV_OFFLOAD_VEB             (2)
#define QED_MFW_TLV_IOV_OFFLOAD_VEPA            (3)
	bool iov_offload_set;
	u8 txqs_empty;
	bool txqs_empty_set;
	u8 rxqs_empty;
	bool rxqs_empty_set;
	u8 num_txqs_full;
	bool num_txqs_full_set;
	u8 num_rxqs_full;
	bool num_rxqs_full_set;
};

#define QED_MFW_TLV_TIME_SIZE	14
struct qed_mfw_tlv_time {
	bool b_set;
	u8 month;
	u8 day;
	u8 hour;
	u8 min;
	u16 msec;
	u16 usec;
};

struct qed_mfw_tlv_fcoe {
	u8 scsi_timeout;
	bool scsi_timeout_set;
	u32 rt_tov;
	bool rt_tov_set;
	u32 ra_tov;
	bool ra_tov_set;
	u32 ed_tov;
	bool ed_tov_set;
	u32 cr_tov;
	bool cr_tov_set;
	u8 boot_type;
	bool boot_type_set;
	u8 npiv_state;
	bool npiv_state_set;
	u32 num_npiv_ids;
	bool num_npiv_ids_set;
	u8 switch_name[8];
	bool switch_name_set;
	u16 switch_portnum;
	bool switch_portnum_set;
	u8 switch_portid[3];
	bool switch_portid_set;
	u8 vendor_name[8];
	bool vendor_name_set;
	u8 switch_model[8];
	bool switch_model_set;
	u8 switch_fw_version[8];
	bool switch_fw_version_set;
	u8 qos_pri;
	bool qos_pri_set;
	u8 port_alias[3];
	bool port_alias_set;
	u8 port_state;
#define QED_MFW_TLV_PORT_STATE_OFFLINE  (0)
#define QED_MFW_TLV_PORT_STATE_LOOP             (1)
#define QED_MFW_TLV_PORT_STATE_P2P              (2)
#define QED_MFW_TLV_PORT_STATE_FABRIC           (3)
	bool port_state_set;
	u16 fip_tx_descr_size;
	bool fip_tx_descr_size_set;
	u16 fip_rx_descr_size;
	bool fip_rx_descr_size_set;
	u16 link_failures;
	bool link_failures_set;
	u8 fcoe_boot_progress;
	bool fcoe_boot_progress_set;
	u64 rx_bcast;
	bool rx_bcast_set;
	u64 tx_bcast;
	bool tx_bcast_set;
	u16 fcoe_txq_depth;
	bool fcoe_txq_depth_set;
	u16 fcoe_rxq_depth;
	bool fcoe_rxq_depth_set;
	u64 fcoe_rx_frames;
	bool fcoe_rx_frames_set;
	u64 fcoe_rx_bytes;
	bool fcoe_rx_bytes_set;
	u64 fcoe_tx_frames;
	bool fcoe_tx_frames_set;
	u64 fcoe_tx_bytes;
	bool fcoe_tx_bytes_set;
	u16 crc_count;
	bool crc_count_set;
	u32 crc_err_src_fcid[5];
	bool crc_err_src_fcid_set[5];
	struct qed_mfw_tlv_time crc_err[5];
	u16 losync_err;
	bool losync_err_set;
	u16 losig_err;
	bool losig_err_set;
	u16 primtive_err;
	bool primtive_err_set;
	u16 disparity_err;
	bool disparity_err_set;
	u16 code_violation_err;
	bool code_violation_err_set;
	u32 flogi_param[4];
	bool flogi_param_set[4];
	struct qed_mfw_tlv_time flogi_tstamp;
	u32 flogi_acc_param[4];
	bool flogi_acc_param_set[4];
	struct qed_mfw_tlv_time flogi_acc_tstamp;
	u32 flogi_rjt;
	bool flogi_rjt_set;
	struct qed_mfw_tlv_time flogi_rjt_tstamp;
	u32 fdiscs;
	bool fdiscs_set;
	u8 fdisc_acc;
	bool fdisc_acc_set;
	u8 fdisc_rjt;
	bool fdisc_rjt_set;
	u8 plogi;
	bool plogi_set;
	u8 plogi_acc;
	bool plogi_acc_set;
	u8 plogi_rjt;
	bool plogi_rjt_set;
	u32 plogi_dst_fcid[5];
	bool plogi_dst_fcid_set[5];
	struct qed_mfw_tlv_time plogi_tstamp[5];
	u32 plogi_acc_src_fcid[5];
	bool plogi_acc_src_fcid_set[5];
	struct qed_mfw_tlv_time plogi_acc_tstamp[5];
	u8 tx_plogos;
	bool tx_plogos_set;
	u8 plogo_acc;
	bool plogo_acc_set;
	u8 plogo_rjt;
	bool plogo_rjt_set;
	u32 plogo_src_fcid[5];
	bool plogo_src_fcid_set[5];
	struct qed_mfw_tlv_time plogo_tstamp[5];
	u8 rx_logos;
	bool rx_logos_set;
	u8 tx_accs;
	bool tx_accs_set;
	u8 tx_prlis;
	bool tx_prlis_set;
	u8 rx_accs;
	bool rx_accs_set;
	u8 tx_abts;
	bool tx_abts_set;
	u8 rx_abts_acc;
	bool rx_abts_acc_set;
	u8 rx_abts_rjt;
	bool rx_abts_rjt_set;
	u32 abts_dst_fcid[5];
	bool abts_dst_fcid_set[5];
	struct qed_mfw_tlv_time abts_tstamp[5];
	u8 rx_rscn;
	bool rx_rscn_set;
	u32 rx_rscn_nport[4];
	bool rx_rscn_nport_set[4];
	u8 tx_lun_rst;
	bool tx_lun_rst_set;
	u8 abort_task_sets;
	bool abort_task_sets_set;
	u8 tx_tprlos;
	bool tx_tprlos_set;
	u8 tx_nos;
	bool tx_nos_set;
	u8 rx_nos;
	bool rx_nos_set;
	u8 ols;
	bool ols_set;
	u8 lr;
	bool lr_set;
	u8 lrr;
	bool lrr_set;
	u8 tx_lip;
	bool tx_lip_set;
	u8 rx_lip;
	bool rx_lip_set;
	u8 eofa;
	bool eofa_set;
	u8 eofni;
	bool eofni_set;
	u8 scsi_chks;
	bool scsi_chks_set;
	u8 scsi_cond_met;
	bool scsi_cond_met_set;
	u8 scsi_busy;
	bool scsi_busy_set;
	u8 scsi_inter;
	bool scsi_inter_set;
	u8 scsi_inter_cond_met;
	bool scsi_inter_cond_met_set;
	u8 scsi_rsv_conflicts;
	bool scsi_rsv_conflicts_set;
	u8 scsi_tsk_full;
	bool scsi_tsk_full_set;
	u8 scsi_aca_active;
	bool scsi_aca_active_set;
	u8 scsi_tsk_abort;
	bool scsi_tsk_abort_set;
	u32 scsi_rx_chk[5];
	bool scsi_rx_chk_set[5];
	struct qed_mfw_tlv_time scsi_chk_tstamp[5];
};

struct qed_mfw_tlv_iscsi {
	u8 target_llmnr;
	bool target_llmnr_set;
	u8 header_digest;
	bool header_digest_set;
	u8 data_digest;
	bool data_digest_set;
	u8 auth_method;
#define QED_MFW_TLV_AUTH_METHOD_NONE            (1)
#define QED_MFW_TLV_AUTH_METHOD_CHAP            (2)
#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP     (3)
	bool auth_method_set;
	u16 boot_taget_portal;
	bool boot_taget_portal_set;
	u16 frame_size;
	bool frame_size_set;
	u16 tx_desc_size;
	bool tx_desc_size_set;
	u16 rx_desc_size;
	bool rx_desc_size_set;
	u8 boot_progress;
	bool boot_progress_set;
	u16 tx_desc_qdepth;
	bool tx_desc_qdepth_set;
	u16 rx_desc_qdepth;
	bool rx_desc_qdepth_set;
	u64 rx_frames;
	bool rx_frames_set;
	u64 rx_bytes;
	bool rx_bytes_set;
	u64 tx_frames;
	bool tx_frames_set;
	u64 tx_bytes;
	bool tx_bytes_set;
};

enum qed_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum qed_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};

#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val,	\
					      (void __iomem *)(reg_addr))
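
/* Illustrative only (not taken from a qed caller): the DIRECT_REG_* helpers
 * wrap writel()/readl()/writeq() for raw BAR/doorbell access.  'db_addr' and
 * 'db_val' below are hypothetical local names.
 *
 *	void __iomem *db_addr;	// mapped doorbell/register address
 *	u32 db_val;
 *
 *	DIRECT_REG_WR(db_addr, db_val);		// 32-bit write
 *	db_val = DIRECT_REG_RD(db_addr);	// 32-bit read back
 */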

#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48

/* forward */
struct qed_dev;

struct qed_eth_pf_params {
	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;

	/* per-VF number of CIDs */
	u8 num_vf_cons;
#define ETH_PF_PARAMS_VF_CONS_DEFAULT	(32)

	/* To enable arfs, previous to HW-init a positive number needs to be
	 * set [as filters require allocated searcher ILT memory].
	 * This will set the maximal number of configured steering-filters.
	 */
	u32 num_arfs_filters;
};

struct qed_fcoe_pf_params {
	/* The following parameters are used during protocol-init */
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];

	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 sq_num_pbl_pages;

	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u16 rq_buffer_log_size;
	u16 mtu;
	u16 dummy_icid;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 rq_buffer_size;
	u8 num_cqs;		/* num of global CQs */
	u8 log_page_size;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 is_target;
	u8 bdq_pbl_num_entries[2];
};

/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[3];
	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u32 two_msl_timer;
	u16 tx_sws_timer;

	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 half_way_close_timeout;
	u16 bdq_xoff_threshold[3];
	u16 bdq_xon_threshold[3];
	u16 cmdq_xoff_threshold;
	u16 cmdq_xon_threshold;
	u16 rq_buffer_size;

	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 log_page_size;
	u8 rqe_log_size;
	u8 max_fin_rt;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;

	u8 is_target;
	u8 is_soc_en;
	u8 soc_num_of_blocks_log;
	u8 bdq_pbl_num_entries[3];
};

struct qed_rdma_pf_params {
	/* Supplied to QED during resource allocation (may affect the ILT and
	 * the doorbell BAR).
	 */
	u32 min_dpis;		/* number of requested DPIs */
	u32 num_qps;		/* number of requested Queue Pairs */
	u32 num_srqs;		/* number of requested SRQ */
	u8 roce_edpm_mode;	/* see QED_ROCE_EDPM_MODE_ENABLE */
	u8 gl_pi;		/* protocol index */

	/* Will allocate rate limiters to be used with QPs */
	u8 enable_dcqcn;
};

struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
	struct qed_fcoe_pf_params fcoe_pf_params;
	struct qed_iscsi_pf_params iscsi_pf_params;
	struct qed_rdma_pf_params rdma_pf_params;
};
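
/* A minimal usage sketch, not a verbatim driver excerpt: per the comments in
 * the structures above, a protocol driver fills struct qed_pf_params and
 * hands it to the update_pf_params() op before calling slowpath_start().
 * 'ops' and 'cdev' are assumed to come from the qed probe flow.
 *
 *	struct qed_pf_params pf_params;
 *
 *	memset(&pf_params, 0, sizeof(pf_params));
 *	pf_params.eth_pf_params.num_cons = 64;
 *	pf_params.eth_pf_params.num_arfs_filters = 64;	// >0 enables aRFS
 *	ops->common->update_pf_params(cdev, &pf_params);
 *	// ... then ops->common->slowpath_start(cdev, &sp_params);
 */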

enum qed_int_mode {
	QED_INT_MODE_INTA,
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,
};

struct qed_sb_info {
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	u32 sb_ack; /* Last given ack */
	u16 igu_sb_id;
	void __iomem *igu_addr;
	u8 flags;
#define QED_SB_INFO_INIT	0x1
#define QED_SB_INFO_SETUP	0x2

	struct qed_dev *cdev;
};

enum qed_hw_err_type {
	QED_HW_ERR_FAN_FAIL,
	QED_HW_ERR_MFW_RESP_FAIL,
	QED_HW_ERR_HW_ATTN,
	QED_HW_ERR_DMAE_FAIL,
	QED_HW_ERR_RAMROD_FAIL,
	QED_HW_ERR_FW_ASSERT,
	QED_HW_ERR_LAST,
};

enum qed_dev_type {
	QED_DEV_TYPE_BB,
	QED_DEV_TYPE_AH,
	QED_DEV_TYPE_E5,
};

struct qed_dev_info {
	unsigned long	pci_mem_start;
	unsigned long	pci_mem_end;
	unsigned int	pci_irq;
	u8		num_hwfns;

	u8		hw_mac[ETH_ALEN];

	/* FW version */
	u16		fw_major;
	u16		fw_minor;
	u16		fw_rev;
	u16		fw_eng;

	/* MFW version */
	u32		mfw_rev;
#define QED_MFW_VERSION_0_MASK		0x000000FF
#define QED_MFW_VERSION_0_OFFSET	0
#define QED_MFW_VERSION_1_MASK		0x0000FF00
#define QED_MFW_VERSION_1_OFFSET	8
#define QED_MFW_VERSION_2_MASK		0x00FF0000
#define QED_MFW_VERSION_2_OFFSET	16
#define QED_MFW_VERSION_3_MASK		0xFF000000
#define QED_MFW_VERSION_3_OFFSET	24

	u32		flash_size;
	bool		b_arfs_capable;
	bool		b_inter_pf_switch;
	bool		tx_switching;
	bool		rdma_supported;
	u16		mtu;

	bool wol_support;
	bool smart_an;

	/* MBI version */
	u32 mbi_version;
#define QED_MBI_VERSION_0_MASK		0x000000FF
#define QED_MBI_VERSION_0_OFFSET	0
#define QED_MBI_VERSION_1_MASK		0x0000FF00
#define QED_MBI_VERSION_1_OFFSET	8
#define QED_MBI_VERSION_2_MASK		0x00FF0000
#define QED_MBI_VERSION_2_OFFSET	16

	enum qed_dev_type dev_type;

	/* Output parameters for qede */
	bool		vxlan_enable;
	bool		gre_enable;
	bool		geneve_enable;

	u8		abs_pf_id;
};

enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
	QED_SB_TYPE_CNQ,
	QED_SB_TYPE_STORAGE,
};

enum qed_protocol {
	QED_PROTOCOL_ETH,
	QED_PROTOCOL_ISCSI,
	QED_PROTOCOL_FCOE,
};

enum qed_fec_mode {
	QED_FEC_MODE_NONE			= BIT(0),
	QED_FEC_MODE_FIRECODE			= BIT(1),
	QED_FEC_MODE_RS				= BIT(2),
	QED_FEC_MODE_AUTO			= BIT(3),
	QED_FEC_MODE_UNSUPPORTED		= BIT(4),
};

struct qed_link_params {
	bool					link_up;

	u32					override_flags;
#define QED_LINK_OVERRIDE_SPEED_AUTONEG		BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS	BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED	BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG		BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE		BIT(4)
#define QED_LINK_OVERRIDE_EEE_CONFIG		BIT(5)
#define QED_LINK_OVERRIDE_FEC_CONFIG		BIT(6)

	bool					autoneg;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
	u32					forced_speed;

	u32					pause_config;
#define QED_LINK_PAUSE_AUTONEG_ENABLE		BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE		BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE		BIT(2)

	u32					loopback_mode;
#define QED_LINK_LOOPBACK_NONE			BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY		BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY		BIT(2)
#define QED_LINK_LOOPBACK_EXT			BIT(3)
#define QED_LINK_LOOPBACK_MAC			BIT(4)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123	BIT(5)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301	BIT(6)
#define QED_LINK_LOOPBACK_PCS_AH_ONLY		BIT(7)
#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY	BIT(8)
#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY	BIT(9)

	struct qed_link_eee_params		eee;
	u32					fec;
};
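
/* Hedged example of a set_link() request built from this structure; only the
 * aspects flagged in override_flags are meant to be overridden.  'ops' and
 * 'cdev' are assumed driver context, not defined here.
 *
 *	struct qed_link_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.link_up = true;
 *	params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
 *				QED_LINK_OVERRIDE_PAUSE_CONFIG;
 *	params.autoneg = true;
 *	params.pause_config = QED_LINK_PAUSE_AUTONEG_ENABLE;
 *	ops->common->set_link(cdev, &params);
 */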

struct qed_link_output {
	bool					link_up;

	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);

	u32					speed;	   /* In Mb/s */
	u8					duplex;	   /* In DUPLEX defs */
	u8					port;	   /* In PORT defs */
	bool					autoneg;
	u32					pause_config;

	/* EEE - capability & param */
	bool					eee_supported;
	bool					eee_active;
	u8					sup_caps;
	struct qed_link_eee_params		eee;

	u32					sup_fec;
	u32					active_fec;
};

struct qed_probe_params {
	enum qed_protocol protocol;
	u32 dp_module;
	u8 dp_level;
	bool is_vf;
	bool recov_in_prog;
};

#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
	u32	int_mode;
	u8	drv_major;
	u8	drv_minor;
	u8	drv_rev;
	u8	drv_eng;
	u8	name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

struct qed_int_info {
	struct msix_entry	*msix;
	u8			msix_cnt;

	/* This should be updated by the protocol driver */
	u8			used_cnt;
};

struct qed_generic_tlvs {
#define QED_TLV_IP_CSUM         BIT(0)
#define QED_TLV_LSO             BIT(1)
	u16 feat_flags;
#define QED_TLV_MAC_COUNT	3
	u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};

#define QED_I2C_DEV_ADDR_A0 0xA0
#define QED_I2C_DEV_ADDR_A2 0xA2

#define QED_NVM_SIGNATURE 0x12435687

enum qed_nvm_flash_cmd {
	QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
	QED_NVM_FLASH_CMD_FILE_START = 0x3,
	QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
	QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5,
	QED_NVM_FLASH_CMD_NVM_MAX,
};

struct qed_devlink {
	struct qed_dev *cdev;
	struct devlink_health_reporter *fw_reporter;
};

struct qed_common_cb_ops {
	void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
	void (*link_update)(void *dev, struct qed_link_output *link);
	void (*schedule_recovery_handler)(void *dev);
	void (*schedule_hw_err_handler)(void *dev,
					enum qed_hw_err_type err_type);
	void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
	void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
	void (*get_protocol_tlv_data)(void *dev, void *data);
	void (*bw_update)(void *dev);
};

struct qed_selftest_ops {
/**
 * @brief selftest_interrupt - Perform interrupt test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_interrupt)(struct qed_dev *cdev);

/**
 * @brief selftest_memory - Perform memory test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_memory)(struct qed_dev *cdev);

/**
 * @brief selftest_register - Perform register test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_register)(struct qed_dev *cdev);

/**
 * @brief selftest_clock - Perform clock test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_clock)(struct qed_dev *cdev);

/**
 * @brief selftest_nvram - Perform nvram test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_nvram) (struct qed_dev *cdev);
};

struct qed_common_ops {
	struct qed_selftest_ops *selftest;

	struct qed_dev*	(*probe)(struct pci_dev *dev,
				 struct qed_probe_params *params);

	void (*remove)(struct qed_dev *cdev);

	int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);

	void (*set_name) (struct qed_dev *cdev, char name[]);

	/* Client drivers need to make this call before slowpath_start.
	 * PF params required for the call before slowpath_start is
	 * documented within the qed_pf_params structure definition.
	 */
	void (*update_pf_params)(struct qed_dev *cdev,
				 struct qed_pf_params *params);

	int (*slowpath_start)(struct qed_dev *cdev,
			      struct qed_slowpath_params *params);

	int (*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * upon success, returns number of interrupts allocated for fastpath.
	 */
	int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);

	u32 (*sb_init)(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr,
		       u16 sb_id,
		       enum qed_sb_type type);

	u32 (*sb_release)(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type);

	void (*simd_handler_config)(struct qed_dev *cdev,
				    void *token,
				    int index,
				    void (*handler)(void *));

	void (*simd_handler_clean)(struct qed_dev *cdev, int index);

	int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);

	int (*dbg_grc_size)(struct qed_dev *cdev);

	int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);

	int (*dbg_all_data_size)(struct qed_dev *cdev);

	int (*report_fatal_error)(struct devlink *devlink,
				  enum qed_hw_err_type err_type);

/**
 * @brief can_link_change - can the instance change the link or not
 *
 * @param cdev
 *
 * @return true if link-change is allowed, false otherwise.
 */
	bool (*can_link_change)(struct qed_dev *cdev);

/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
	int		(*set_link)(struct qed_dev *cdev,
				    struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
	void		(*get_link)(struct qed_dev *cdev,
				    struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
	int		(*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
	void		(*update_msglvl)(struct qed_dev *cdev,
					 u32 dp_module,
					 u8 dp_level);

	int		(*chain_alloc)(struct qed_dev *cdev,
				       struct qed_chain *chain,
				       struct qed_chain_init_params *params);

	void		(*chain_free)(struct qed_dev *cdev,
				      struct qed_chain *p_chain);

/**
 * @brief nvm_flash - Flash nvm data.
 *
 * @param cdev
 * @param name - file containing the data
 *
 * @return 0 on success, error otherwise.
 */
	int (*nvm_flash)(struct qed_dev *cdev, const char *name);

/**
 * @brief nvm_get_image - reads an entire image from nvram
 *
 * @param cdev
 * @param type - type of the request nvram image
 * @param buf - preallocated buffer to fill with the image
 * @param len - length of the allocated buffer
 *
 * @return 0 on success, error otherwise
 */
	int (*nvm_get_image)(struct qed_dev *cdev,
			     enum qed_nvm_images type, u8 *buf, u16 len);

/**
 * @brief set_coalesce - Configure Rx and Tx coalesce values in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 * @param handle - context handle of the queue to configure
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_coalesce)(struct qed_dev *cdev,
			    u16 rx_coal, u16 tx_coal, void *handle);

/**
 * @brief set_led - Configure LED mode
 *
 * @param cdev
 * @param mode - LED mode
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_led)(struct qed_dev *cdev,
		       enum qed_led_mode mode);

/**
 * @brief attn_clr_enable - Prevent attentions from being reasserted
 *
 * @param cdev
 * @param clr_enable
 */
	void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);

/**
 * @brief db_recovery_add - add doorbell information to the doorbell
 * recovery mechanism.
 *
 * @param cdev
 * @param db_addr - doorbell address
 * @param db_data - address of where db_data is stored
 * @param db_width - doorbell is 32b or 64b
 * @param db_space - doorbell recovery addresses are user or kernel space
 */
	int (*db_recovery_add)(struct qed_dev *cdev,
			       void __iomem *db_addr,
			       void *db_data,
			       enum qed_db_rec_width db_width,
			       enum qed_db_rec_space db_space);

/**
 * @brief db_recovery_del - remove doorbell information from the doorbell
 * recovery mechanism. db_data serves as key (db_addr is not unique).
 *
 * @param cdev
 * @param db_addr - doorbell address
 * @param db_data - address where db_data is stored. Serves as key for the
 *		    entry to delete.
 */
	int (*db_recovery_del)(struct qed_dev *cdev,
			       void __iomem *db_addr, void *db_data);

/**
 * @brief recovery_process - Trigger a recovery process
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*recovery_process)(struct qed_dev *cdev);

/**
 * @brief recovery_prolog - Execute the prolog operations of a recovery process
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*recovery_prolog)(struct qed_dev *cdev);

/**
 * @brief update_drv_state - API to inform the change in the driver state.
 *
 * @param cdev
 * @param active
 *
 */
	int (*update_drv_state)(struct qed_dev *cdev, bool active);

/**
 * @brief update_mac - API to inform the change in the mac address
 *
 * @param cdev
 * @param mac
 *
 */
	int (*update_mac)(struct qed_dev *cdev, u8 *mac);

/**
 * @brief update_mtu - API to inform the change in the mtu
 *
 * @param cdev
 * @param mtu
 *
 */
	int (*update_mtu)(struct qed_dev *cdev, u16 mtu);

/**
 * @brief update_wol - update of changes in the WoL configuration
 *
 * @param cdev
 * @param enabled - true iff WoL should be enabled.
 */
	int (*update_wol) (struct qed_dev *cdev, bool enabled);

/**
 * @brief read_module_eeprom
 *
 * @param cdev
 * @param buf - buffer
 * @param dev_addr - PHY device memory region
 * @param offset - offset into eeprom contents to be read
 * @param len - buffer length, i.e., max bytes to be read
 */
	int (*read_module_eeprom)(struct qed_dev *cdev,
				  char *buf, u8 dev_addr, u32 offset, u32 len);

/**
 * @brief get_affin_hwfn_idx
 *
 * @param cdev
 */
	u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);

/**
 * @brief read_nvm_cfg - Read NVM config attribute value.
 * @param cdev
 * @param buf - buffer
 * @param cmd - NVM CFG command id
 * @param entity_id - Entity id
 *
 */
	int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
			    u32 entity_id);
/**
 * @brief read_nvm_cfg_len - Read NVM config attribute length.
 * @param cdev
 * @param cmd - NVM CFG command id
 *
 * @return config id length, 0 on error.
 */
	int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);

/**
 * @brief set_grc_config - Configure value for grc config id.
 * @param cdev
 * @param cfg_id - grc config id
 * @param val - grc config value
 *
 */
	int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);

	struct devlink* (*devlink_register)(struct qed_dev *cdev);

	void (*devlink_unregister)(struct devlink *devlink);
};
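
/* Doorbell recovery usage, sketched under assumptions: 'qp' is a hypothetical
 * object owning a doorbell; the address/data pair is registered while the
 * doorbell memory is valid and removed (keyed by db_data) before it is freed.
 *
 *	ops->common->db_recovery_add(cdev, qp->db_addr, &qp->db_data,
 *				     DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *	...
 *	ops->common->db_recovery_del(cdev, qp->db_addr, &qp->db_data);
 */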

#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
	((_value & _name ## _MASK) << _name ## _SHIFT)

#define SET_FIELD(value, name, flag)			       \
	do {						       \
		(value) &= ~(name ## _MASK << name ## _SHIFT); \
		(value) |= (((u64)flag) << (name ## _SHIFT));  \
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)
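
/* Example with assumed field definitions (FOO_BAR_* is not a real field; it
 * only illustrates the _MASK/_SHIFT convention these macros expect):
 *
 *	#define FOO_BAR_MASK	0xF
 *	#define FOO_BAR_SHIFT	4
 *
 *	SET_FIELD(reg, FOO_BAR, 0x3);	// sets bits [7:4] of 'reg' to 0x3
 *	val = GET_FIELD(reg, FOO_BAR);	// extracts bits [7:4] of 'reg'
 */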

#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))

#define SET_MFW_FIELD(name, field, value)				 \
	do {								 \
		(name) &= ~(field ## _MASK);				 \
		(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
	} while (0)
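
/* The *_MFW_FIELD variants take _MASK/_OFFSET pairs, e.g. the
 * QED_MFW_VERSION_* definitions in struct qed_dev_info above
 * ('mfw_rev' here is an illustrative local copy of that field):
 *
 *	u8 v3 = GET_MFW_FIELD(mfw_rev, QED_MFW_VERSION_3);
 *	SET_MFW_FIELD(mfw_rev, QED_MFW_VERSION_3, v3 + 1);
 */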

#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...)					\
	do {							\
		pr_err("[%s:%d(%s)]" fmt,			\
		       __func__, __LINE__,			\
		       DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
		       ## __VA_ARGS__);				\
	} while (0)

#define DP_NOTICE(cdev, fmt, ...)				      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
								      \
		}						      \
	} while (0)

#define DP_INFO(cdev, fmt, ...)					      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
		}						      \
	} while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)				\
	do {								\
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) &&	\
			     ((cdev)->dp_module & module))) {		\
			pr_notice("[%s:%d(%s)]" fmt,			\
				  __func__, __LINE__,			\
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
				  ## __VA_ARGS__);			\
		}							\
	} while (0)
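
/* Typical use of the debug print helpers; module masks come from
 * enum DP_MODULE below (message text is illustrative only):
 *
 *	DP_NOTICE(cdev, "link is down\n");
 *	DP_VERBOSE(cdev, QED_MSG_SPQ, "posted ramrod %d\n", cmd);
 */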

enum DP_LEVEL {
	QED_LEVEL_VERBOSE	= 0x0,
	QED_LEVEL_INFO		= 0x1,
	QED_LEVEL_NOTICE	= 0x2,
	QED_LEVEL_ERR		= 0x3,
};

#define QED_LOG_LEVEL_SHIFT     (30)
#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
#define QED_LOG_INFO_MASK       (0x40000000)
#define QED_LOG_NOTICE_MASK     (0x80000000)

enum DP_MODULE {
	QED_MSG_SPQ	= 0x10000,
	QED_MSG_STATS	= 0x20000,
	QED_MSG_DCB	= 0x40000,
	QED_MSG_IOV	= 0x80000,
	QED_MSG_SP	= 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT	= 0x800000,
	QED_MSG_LL2	= 0x1000000,
	QED_MSG_ILT	= 0x2000000,
	QED_MSG_RDMA	= 0x4000000,
	QED_MSG_DEBUG	= 0x8000000,
	/* to be added...up to 0x8000000 */
};

enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};

struct qed_eth_stats_common {
	u64	no_buff_discards;
	u64	packet_too_big_discard;
	u64	ttl0_discard;
	u64	rx_ucast_bytes;
	u64	rx_mcast_bytes;
	u64	rx_bcast_bytes;
	u64	rx_ucast_pkts;
	u64	rx_mcast_pkts;
	u64	rx_bcast_pkts;
	u64	mftag_filter_discards;
	u64	mac_filter_discards;
	u64	gft_filter_drop;
	u64	tx_ucast_bytes;
	u64	tx_mcast_bytes;
	u64	tx_bcast_bytes;
	u64	tx_ucast_pkts;
	u64	tx_mcast_pkts;
	u64	tx_bcast_pkts;
	u64	tx_err_drop_pkts;
	u64	tpa_coalesced_pkts;
	u64	tpa_coalesced_events;
	u64	tpa_aborts_num;
	u64	tpa_not_coalesced_pkts;
	u64	tpa_coalesced_bytes;

	/* port */
	u64	rx_64_byte_packets;
	u64	rx_65_to_127_byte_packets;
	u64	rx_128_to_255_byte_packets;
	u64	rx_256_to_511_byte_packets;
	u64	rx_512_to_1023_byte_packets;
	u64	rx_1024_to_1518_byte_packets;
	u64	rx_crc_errors;
	u64	rx_mac_crtl_frames;
	u64	rx_pause_frames;
	u64	rx_pfc_frames;
	u64	rx_align_errors;
	u64	rx_carrier_errors;
	u64	rx_oversize_packets;
	u64	rx_jabbers;
	u64	rx_undersize_packets;
	u64	rx_fragments;
	u64	tx_64_byte_packets;
	u64	tx_65_to_127_byte_packets;
	u64	tx_128_to_255_byte_packets;
	u64	tx_256_to_511_byte_packets;
	u64	tx_512_to_1023_byte_packets;
	u64	tx_1024_to_1518_byte_packets;
	u64	tx_pause_frames;
	u64	tx_pfc_frames;
	u64	brb_truncates;
	u64	brb_discards;
	u64	rx_mac_bytes;
	u64	rx_mac_uc_packets;
	u64	rx_mac_mc_packets;
	u64	rx_mac_bc_packets;
	u64	rx_mac_frames_ok;
	u64	tx_mac_bytes;
	u64	tx_mac_uc_packets;
	u64	tx_mac_mc_packets;
	u64	tx_mac_bc_packets;
	u64	tx_mac_ctrl_frames;
	u64	link_change_count;
};

struct qed_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
1325*4882a593Smuzhiyun 	u64 tx_1519_to_2047_byte_packets;
1326*4882a593Smuzhiyun 	u64 tx_2048_to_4095_byte_packets;
1327*4882a593Smuzhiyun 	u64 tx_4096_to_9216_byte_packets;
1328*4882a593Smuzhiyun 	u64 tx_9217_to_16383_byte_packets;
1329*4882a593Smuzhiyun 	u64 tx_lpi_entry_count;
1330*4882a593Smuzhiyun 	u64 tx_total_collisions;
1331*4882a593Smuzhiyun };
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun struct qed_eth_stats_ah {
1334*4882a593Smuzhiyun 	u64 rx_1519_to_max_byte_packets;
1335*4882a593Smuzhiyun 	u64 tx_1519_to_max_byte_packets;
1336*4882a593Smuzhiyun };
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun struct qed_eth_stats {
1339*4882a593Smuzhiyun 	struct qed_eth_stats_common common;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	union {
1342*4882a593Smuzhiyun 		struct qed_eth_stats_bb bb;
1343*4882a593Smuzhiyun 		struct qed_eth_stats_ah ah;
1344*4882a593Smuzhiyun 	};
1345*4882a593Smuzhiyun };
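
/* Sketch: the union above carries the per-chip extension of the common
 * statistics. A caller that knows the chip family (BB vs. AH) picks the
 * matching member; the 'is_bb' flag below is a hypothetical stand-in
 * for whatever device-type query the caller actually has:
 *
 *	static u64 example_rx_jumbo_packets(const struct qed_eth_stats *stats,
 *					    bool is_bb)
 *	{
 *		if (is_bb)
 *			return stats->bb.rx_1519_to_2047_byte_packets +
 *			       stats->bb.rx_2048_to_4095_byte_packets +
 *			       stats->bb.rx_4096_to_9216_byte_packets +
 *			       stats->bb.rx_9217_to_16383_byte_packets;
 *
 *		return stats->ah.rx_1519_to_max_byte_packets;
 *	}
 */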
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun #define QED_SB_IDX              0x0002
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun #define RX_PI           0
1350*4882a593Smuzhiyun #define TX_PI(tc)       (RX_PI + 1 + tc)
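
/* Sketch: RX_PI and TX_PI() index the protocol-index (PI) array of a
 * status block - entry 0 is the Rx consumer and the per-traffic-class
 * Tx consumers follow it. A hypothetical fragment, assuming the caller
 * holds a struct qed_sb_info whose sb_virt exposes the HSI pi_array:
 *
 *	rx_hw_cons_ptr = &sb_info->sb_virt->pi_array[RX_PI];
 *	tx_hw_cons_ptr = &sb_info->sb_virt->pi_array[TX_PI(tc)];
 */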
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun struct qed_sb_cnt_info {
1353*4882a593Smuzhiyun 	/* Original, current, and free SBs for PF */
1354*4882a593Smuzhiyun 	int orig;
1355*4882a593Smuzhiyun 	int cnt;
1356*4882a593Smuzhiyun 	int free_cnt;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	/* Original, current, and free SBs for child VFs */
1359*4882a593Smuzhiyun 	int iov_orig;
1360*4882a593Smuzhiyun 	int iov_cnt;
1361*4882a593Smuzhiyun 	int free_cnt_iov;
1362*4882a593Smuzhiyun };
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
1365*4882a593Smuzhiyun {
1366*4882a593Smuzhiyun 	u32 prod = 0;
1367*4882a593Smuzhiyun 	u16 rc = 0;
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
1370*4882a593Smuzhiyun 	       STATUS_BLOCK_E4_PROD_INDEX_MASK;
1371*4882a593Smuzhiyun 	if (sb_info->sb_ack != prod) {
1372*4882a593Smuzhiyun 		sb_info->sb_ack = prod;
1373*4882a593Smuzhiyun 		rc |= QED_SB_IDX;
1374*4882a593Smuzhiyun 	}
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	/* Let SB update */
1377*4882a593Smuzhiyun 	return rc;
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun /**
1381*4882a593Smuzhiyun  *
1382*4882a593Smuzhiyun  * @brief This function creates an update command for interrupts that is
1383*4882a593Smuzhiyun  *        written to the IGU.
1384*4882a593Smuzhiyun  *
1385*4882a593Smuzhiyun  * @param sb_info       - The structure allocated and
1386*4882a593Smuzhiyun  *                 initialized per status block. The assumption is
1387*4882a593Smuzhiyun  *                 that it was initialized using qed_sb_init().
1388*4882a593Smuzhiyun  * @param int_cmd       - Enable/Disable/Nop.
1389*4882a593Smuzhiyun  * @param upd_flg       - Whether the IGU consumer should be
1390*4882a593Smuzhiyun  *                 updated.
1391*4882a593Smuzhiyun  *
1392*4882a593Smuzhiyun  * @return void
1393*4882a593Smuzhiyun  */
1394*4882a593Smuzhiyun static inline void qed_sb_ack(struct qed_sb_info *sb_info,
1395*4882a593Smuzhiyun 			      enum igu_int_cmd int_cmd,
1396*4882a593Smuzhiyun 			      u8 upd_flg)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun 	u32 igu_ack;
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1401*4882a593Smuzhiyun 		   (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1402*4882a593Smuzhiyun 		   (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1403*4882a593Smuzhiyun 		   (IGU_SEG_ACCESS_REG <<
1404*4882a593Smuzhiyun 		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	DIRECT_REG_WR(sb_info->igu_addr, igu_ack);
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	/* Both segments (interrupts & acks) are written to the same address;
1409*4882a593Smuzhiyun 	 * we need to guarantee that all commands are received (in order) by the HW.
1410*4882a593Smuzhiyun 	 */
1411*4882a593Smuzhiyun 	barrier();
1412*4882a593Smuzhiyun }
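
/* Sketch of the interrupt-handling pattern built from the two helpers
 * above (a simplified version of what an L2 driver's ISR/poll pair
 * does; everything other than the two helpers is illustrative):
 *
 *	// In the MSI-X handler: disable the line and schedule polling.
 *	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
 *	napi_schedule(&napi);
 *
 *	// In the poll routine, once all work is done: latch the new
 *	// producer index and re-enable the interrupt while updating
 *	// the consumer in the IGU.
 *	qed_sb_update_sb_idx(sb_info);
 *	rmb();
 *	if (!more_work)
 *		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 */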
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun static inline void __internal_ram_wr(void *p_hwfn,
1415*4882a593Smuzhiyun 				     void __iomem *addr,
1416*4882a593Smuzhiyun 				     int size,
1417*4882a593Smuzhiyun 				     u32 *data)
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun {
1420*4882a593Smuzhiyun 	unsigned int i;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	for (i = 0; i < size / sizeof(*data); i++)
1423*4882a593Smuzhiyun 		DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
1424*4882a593Smuzhiyun }
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun static inline void internal_ram_wr(void __iomem *addr,
1427*4882a593Smuzhiyun 				   int size,
1428*4882a593Smuzhiyun 				   u32 *data)
1429*4882a593Smuzhiyun {
1430*4882a593Smuzhiyun 	__internal_ram_wr(NULL, addr, size, data);
1431*4882a593Smuzhiyun }
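
/* Sketch: internal_ram_wr() copies a small, dword-aligned structure
 * into device internal RAM one 32-bit word at a time. A hypothetical
 * example, modeled on publishing new Rx buffer/CQE producers; the
 * rx_prods structure and prod_addr are assumptions, not definitions
 * from this header:
 *
 *	struct eth_rx_prod_data rx_prods = { ... };
 *
 *	// Make sure the producer values are written before the doorbell.
 *	wmb();
 *	internal_ram_wr(prod_addr, sizeof(rx_prods), (u32 *)&rx_prods);
 */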
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun enum qed_rss_caps {
1434*4882a593Smuzhiyun 	QED_RSS_IPV4		= 0x1,
1435*4882a593Smuzhiyun 	QED_RSS_IPV6		= 0x2,
1436*4882a593Smuzhiyun 	QED_RSS_IPV4_TCP	= 0x4,
1437*4882a593Smuzhiyun 	QED_RSS_IPV6_TCP	= 0x8,
1438*4882a593Smuzhiyun 	QED_RSS_IPV4_UDP	= 0x10,
1439*4882a593Smuzhiyun 	QED_RSS_IPV6_UDP	= 0x20,
1440*4882a593Smuzhiyun };
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun #define QED_RSS_IND_TABLE_SIZE 128
1443*4882a593Smuzhiyun #define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
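
/* Sketch: the RSS capability flags are meant to be OR-ed together, and
 * the two size macros dimension the indirection table and the hash key
 * (the key size is in 32-bit words). A hypothetical configuration
 * fragment, with variable names chosen for illustration rather than
 * taken from a specific qed structure:
 *
 *	u8  rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
 *		       QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
 *	u16 ind_table[QED_RSS_IND_TABLE_SIZE];
 *	u32 rss_key[QED_RSS_KEY_SIZE];
 *	int i;
 *
 *	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
 *		ind_table[i] = i % num_rx_queues;
 *	netdev_rss_key_fill(rss_key, sizeof(rss_key));
 */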
1444*4882a593Smuzhiyun #endif
1445