/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_MCP_H
#define _QED_MCP_H

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
#include "qed_dev_api.h"

struct qed_mcp_link_speed_params {
	bool autoneg;

	u32 advertised_speeds;
#define QED_EXT_SPEED_MASK_RES		0x1
#define QED_EXT_SPEED_MASK_1G		0x2
#define QED_EXT_SPEED_MASK_10G		0x4
#define QED_EXT_SPEED_MASK_20G		0x8
#define QED_EXT_SPEED_MASK_25G		0x10
#define QED_EXT_SPEED_MASK_40G		0x20
#define QED_EXT_SPEED_MASK_50G_R	0x40
#define QED_EXT_SPEED_MASK_50G_R2	0x80
#define QED_EXT_SPEED_MASK_100G_R2	0x100
#define QED_EXT_SPEED_MASK_100G_R4	0x200
#define QED_EXT_SPEED_MASK_100G_P4	0x400

	u32 forced_speed;	/* In Mb/s */
#define QED_EXT_SPEED_1G	0x1
#define QED_EXT_SPEED_10G	0x2
#define QED_EXT_SPEED_20G	0x4
#define QED_EXT_SPEED_25G	0x8
#define QED_EXT_SPEED_40G	0x10
#define QED_EXT_SPEED_50G_R	0x20
#define QED_EXT_SPEED_50G_R2	0x40
#define QED_EXT_SPEED_100G_R2	0x80
#define QED_EXT_SPEED_100G_R4	0x100
#define QED_EXT_SPEED_100G_P4	0x200
};

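/* Illustrative sketch only (not part of the upstream API): shows how the
 * extended-speed mask bits above might be combined when asking the MFW to
 * autonegotiate 25G and 100G-R4. The helper name is hypothetical; only the
 * structure and the QED_EXT_SPEED_MASK_* defines come from this header.
 */
static inline void
qed_mcp_example_fill_ext_speed(struct qed_mcp_link_speed_params *p_speed)
{
	p_speed->autoneg = true;
	p_speed->advertised_speeds = QED_EXT_SPEED_MASK_25G |
				     QED_EXT_SPEED_MASK_100G_R4;
	p_speed->forced_speed = 0;	/* no forced speed when autonegotiating */
}
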
struct qed_mcp_link_pause_params {
	bool autoneg;
	bool forced_rx;
	bool forced_tx;
};

enum qed_mcp_eee_mode {
	QED_MCP_EEE_DISABLED,
	QED_MCP_EEE_ENABLED,
	QED_MCP_EEE_UNSUPPORTED
};

struct qed_mcp_link_params {
	struct qed_mcp_link_speed_params speed;
	struct qed_mcp_link_pause_params pause;
	u32 loopback_mode;
	struct qed_link_eee_params eee;
	u32 fec;

	struct qed_mcp_link_speed_params ext_speed;
	u32 ext_fec_mode;
};

struct qed_mcp_link_capabilities {
	u32 speed_capabilities;
	bool default_speed_autoneg;
	u32 fec_default;
	enum qed_mcp_eee_mode default_eee;
	u32 eee_lpi_timer;
	u8 eee_speed_caps;

	u32 default_ext_speed_caps;
	u32 default_ext_autoneg;
	u32 default_ext_speed;
	u32 default_ext_fec;
};

struct qed_mcp_link_state {
	bool link_up;
	u32 min_pf_rate;

	/* Actual link speed in Mb/s */
	u32 line_speed;

	/* PF max speed in Mb/s, deduced from line_speed
	 * according to PF max bandwidth configuration.
	 */
	u32 speed;

	bool full_duplex;
	bool an;
	bool an_complete;
	bool parallel_detection;
	bool pfc_enabled;

	u32 partner_adv_speed;
#define QED_LINK_PARTNER_SPEED_1G_HD	BIT(0)
#define QED_LINK_PARTNER_SPEED_1G_FD	BIT(1)
#define QED_LINK_PARTNER_SPEED_10G	BIT(2)
#define QED_LINK_PARTNER_SPEED_20G	BIT(3)
#define QED_LINK_PARTNER_SPEED_25G	BIT(4)
#define QED_LINK_PARTNER_SPEED_40G	BIT(5)
#define QED_LINK_PARTNER_SPEED_50G	BIT(6)
#define QED_LINK_PARTNER_SPEED_100G	BIT(7)

	bool partner_tx_flow_ctrl_en;
	bool partner_rx_flow_ctrl_en;

	u8 partner_adv_pause;
#define QED_LINK_PARTNER_SYMMETRIC_PAUSE	0x1
#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE	0x2
#define QED_LINK_PARTNER_BOTH_PAUSE		0x3

	bool sfp_tx_fault;
	bool eee_active;
	u8 eee_adv_caps;
	u8 eee_lp_adv_caps;

	u32 fec_active;
};

struct qed_mcp_function_info {
	u8 pause_on_host;

	enum qed_pci_personality protocol;

	u8 bandwidth_min;
	u8 bandwidth_max;

	u8 mac[ETH_ALEN];

	u64 wwn_port;
	u64 wwn_node;

#define QED_MCP_VLAN_UNSET	(0xffff)
	u16 ovlan;

	u16 mtu;
};

struct qed_mcp_nvm_common {
	u32 offset;
	u32 param;
	u32 resp;
	u32 cmd;
};

struct qed_mcp_drv_version {
	u32 version;
	u8 name[MCP_DRV_VER_STR_SIZE - 4];
};

struct qed_mcp_lan_stats {
	u64 ucast_rx_pkts;
	u64 ucast_tx_pkts;
	u32 fcs_err;
};

struct qed_mcp_fcoe_stats {
	u64 rx_pkts;
	u64 tx_pkts;
	u32 fcs_err;
	u32 login_failure;
};

struct qed_mcp_iscsi_stats {
	u64 rx_pdus;
	u64 tx_pdus;
	u64 rx_bytes;
	u64 tx_bytes;
};

struct qed_mcp_rdma_stats {
	u64 rx_pkts;
	u64 tx_pkts;
	u64 rx_bytes;
	u64 tx_byts;
};

enum qed_mcp_protocol_type {
	QED_MCP_LAN_STATS,
	QED_MCP_FCOE_STATS,
	QED_MCP_ISCSI_STATS,
	QED_MCP_RDMA_STATS
};

union qed_mcp_protocol_stats {
	struct qed_mcp_lan_stats lan_stats;
	struct qed_mcp_fcoe_stats fcoe_stats;
	struct qed_mcp_iscsi_stats iscsi_stats;
	struct qed_mcp_rdma_stats rdma_stats;
};

enum qed_ov_eswitch {
	QED_OV_ESWITCH_NONE,
	QED_OV_ESWITCH_VEB,
	QED_OV_ESWITCH_VEPA
};

enum qed_ov_client {
	QED_OV_CLIENT_DRV,
	QED_OV_CLIENT_USER,
	QED_OV_CLIENT_VENDOR_SPEC
};

enum qed_ov_driver_state {
	QED_OV_DRIVER_STATE_NOT_LOADED,
	QED_OV_DRIVER_STATE_DISABLED,
	QED_OV_DRIVER_STATE_ACTIVE
};

enum qed_ov_wol {
	QED_OV_WOL_DEFAULT,
	QED_OV_WOL_DISABLED,
	QED_OV_WOL_ENABLED
};

enum qed_mfw_tlv_type {
	QED_MFW_TLV_GENERIC = 0x1,	/* Core driver TLVs */
	QED_MFW_TLV_ETH = 0x2,		/* L2 driver TLVs */
	QED_MFW_TLV_FCOE = 0x4,		/* FCoE protocol TLVs */
	QED_MFW_TLV_ISCSI = 0x8,	/* iSCSI protocol TLVs */
	QED_MFW_TLV_MAX = 0x16,
};

struct qed_mfw_tlv_generic {
#define QED_MFW_TLV_FLAGS_SIZE	2
	struct {
		u8 ipv4_csum_offload;
		u8 lso_supported;
		bool b_set;
	} flags;

#define QED_MFW_TLV_MAC_COUNT 3
	/* First entry for primary MAC, 2 secondary MACs possible */
	u8 mac[QED_MFW_TLV_MAC_COUNT][6];
	bool mac_set[QED_MFW_TLV_MAC_COUNT];

	u64 rx_frames;
	bool rx_frames_set;
	u64 rx_bytes;
	bool rx_bytes_set;
	u64 tx_frames;
	bool tx_frames_set;
	u64 tx_bytes;
	bool tx_bytes_set;
};

union qed_mfw_tlv_data {
	struct qed_mfw_tlv_generic generic;
	struct qed_mfw_tlv_eth eth;
	struct qed_mfw_tlv_fcoe fcoe;
	struct qed_mfw_tlv_iscsi iscsi;
};

#define QED_NVM_CFG_OPTION_ALL		BIT(0)
#define QED_NVM_CFG_OPTION_INIT		BIT(1)
#define QED_NVM_CFG_OPTION_COMMIT	BIT(2)
#define QED_NVM_CFG_OPTION_FREE		BIT(3)
#define QED_NVM_CFG_OPTION_ENTITY_SEL	BIT(4)

/**
 * @brief - returns the link params of the hw function
 *
 * @param p_hwfn
 *
 * @returns pointer to link params
 */
struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);

/**
 * @brief - return the link state of the hw function
 *
 * @param p_hwfn
 *
 * @returns pointer to link state
 */
struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);

/**
 * @brief - return the link capabilities of the hw function
 *
 * @param p_hwfn
 *
 * @returns pointer to link capabilities
 */
struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);

/**
 * @brief Request the MFW to set the link according to 'link_input'.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param b_up - raise link if `true'. Reset link if `false'.
 *
 * @return int
 */
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     bool b_up);

/**
 * @brief Get the management firmware version value
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_mfw_ver - mfw version value
 * @param p_running_bundle_id - image id in nvram; Optional.
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id);

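/* Illustrative sketch only (hypothetical helper): the 32-bit value filled in
 * by qed_mcp_get_mfw_ver() is conventionally displayed as four dot-separated
 * bytes (major.minor.rev.eng). Treat that byte layout as an assumption of
 * this example rather than a documented contract.
 */
static inline void qed_mcp_example_unpack_mfw_ver(u32 mfw_ver, u8 *major,
						  u8 *minor, u8 *rev, u8 *eng)
{
	*major = (mfw_ver >> 24) & 0xff;
	*minor = (mfw_ver >> 16) & 0xff;
	*rev = (mfw_ver >> 8) & 0xff;
	*eng = mfw_ver & 0xff;
}
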
/**
 * @brief Get the MBI version value
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver);

/**
 * @brief Get media type value of the port.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param media_type - media type value
 *
 * @return int -
 *      0 - Operation was successful.
 *      -EBUSY - Operation failed
 */
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *media_type);

/**
 * @brief Get transceiver data of the port.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_transceiver_state - transceiver state.
 * @param p_transceiver_type - media type value
 *
 * @return int -
 *      0 - Operation was successful.
 *      -EBUSY - Operation failed
 */
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *p_transceiver_state,
				 u32 *p_transceiver_type);

/**
 * @brief Get transceiver supported speed mask.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_speed_mask - Bit mask of all supported speeds.
 *
 * @return int -
 *      0 - Operation was successful.
 *      -EBUSY - Operation failed
 */
int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_speed_mask);

/**
 * @brief Get board configuration.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_board_config - Board config.
 *
 * @return int -
 *      0 - Operation was successful.
 *      -EBUSY - Operation failed
 */
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_board_config);

/**
 * @brief General function for sending commands to the MCP
 *        mailbox. It acquires a mutex lock for the entire
 *        operation, from sending the request until the MCP
 *        response is received. The MCP response is polled for
 *        up to 5 seconds, every 5 ms.
 *
 * @param p_hwfn - hw function
 * @param p_ptt - PTT required for register access
 * @param cmd - command to be sent to the MCP.
 * @param param - Optional param
 * @param o_mcp_resp - The MCP response code (exclude sequence).
 * @param o_mcp_param - Optional parameter provided by the MCP
 *                      response
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param);

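/* Illustrative sketch only (hypothetical wrapper): a typical mailbox exchange
 * through qed_mcp_cmd(). Real callers pass an opcode and parameter taken from
 * qed_hsi.h; none is assumed here. The call blocks while polling for the MCP
 * response as described above.
 */
static inline int qed_mcp_example_simple_cmd(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 cmd, u32 param)
{
	u32 mcp_resp = 0, mcp_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* mcp_resp carries the response code, mcp_param any returned data;
	 * interpreting them is command specific.
	 */
	return 0;
}
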
/**
 * @brief - drains the nig, allowing completion to pass in case of pauses.
 *          (Should be called only from sleepable context)
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt);

/**
 * @brief Get the flash size value
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_flash_size - flash size in bytes to be filled.
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   u32 *p_flash_size);

/**
 * @brief Send driver version to MFW
 *
 * @param p_hwfn
 * @param p_ptt
 * @param version - Version value
 * @param name - Protocol driver name
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver);

/**
 * @brief Read the MFW process kill counter
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return u32
 */
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt);

/**
 * @brief Trigger a recovery process
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int
 */
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief A recovery handler must call this function as its first step.
 *        It is assumed that the handler is not run from an interrupt context.
 *
 * @param cdev
 *
 * @return int
 */
int qed_recovery_prolog(struct qed_dev *cdev);

/**
 * @brief Notify MFW about the change in base device properties
 *
 * @param p_hwfn
 * @param p_ptt
 * @param client - qed client type
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client);

/**
 * @brief Notify MFW about the driver state
 *
 * @param p_hwfn
 * @param p_ptt
 * @param drv_state - Driver state
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state);

/**
 * @brief Send MTU size to MFW
 *
 * @param p_hwfn
 * @param p_ptt
 * @param mtu - MTU size
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu);

/**
 * @brief Send MAC address to MFW
 *
 * @param p_hwfn
 * @param p_ptt
 * @param mac - MAC address
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac);

/**
 * @brief Send WOL mode to MFW
 *
 * @param p_hwfn
 * @param p_ptt
 * @param wol - WOL mode
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  enum qed_ov_wol wol);

/**
 * @brief Set LED status
 *
 * @param p_hwfn
 * @param p_ptt
 * @param mode - LED mode
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    enum qed_led_mode mode);

/**
 * @brief Read from nvm
 *
 * @param cdev
 * @param addr - nvm offset
 * @param p_buf - nvm read buffer
 * @param len - buffer len
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);

/**
 * @brief Write to nvm
 *
 * @param cdev
 * @param addr - nvm offset
 * @param cmd - nvm command
 * @param p_buf - nvm write buffer
 * @param len - buffer len
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_nvm_write(struct qed_dev *cdev,
		      u32 cmd, u32 addr, u8 *p_buf, u32 len);

/**
 * @brief Check latest response
 *
 * @param cdev
 * @param p_buf - nvm write buffer
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);

struct qed_nvm_image_att {
	u32 start_addr;
	u32 length;
};

/**
 * @brief Allows reading a whole nvram image
 *
 * @param p_hwfn
 * @param image_id - image to get attributes for
 * @param p_image_att - image attributes structure into which to fill data
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att);

/**
 * @brief Allows reading a whole nvram image
 *
 * @param p_hwfn
 * @param image_id - image requested for reading
 * @param p_buffer - allocated buffer into which to fill data
 * @param buffer_len - length of the allocated buffer.
 *
 * @return 0 iff p_buffer now contains the nvram image.
 */
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len);

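/* Illustrative sketch only (hypothetical helper): query the attributes of an
 * NVM image and then read it into a freshly allocated buffer. kzalloc()/
 * kfree() are available through the linux/slab.h include at the top of this
 * header; the caller picks the image id.
 */
static inline int qed_mcp_example_read_image(struct qed_hwfn *p_hwfn,
					     enum qed_nvm_images image_id)
{
	struct qed_nvm_image_att image_att;
	u8 *p_buf;
	int rc;

	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
	if (rc)
		return rc;

	p_buf = kzalloc(image_att.length, GFP_KERNEL);
	if (!p_buf)
		return -ENOMEM;

	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, p_buf, image_att.length);

	/* ... consume the image here before freeing it ... */
	kfree(p_buf);

	return rc;
}
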
/**
 * @brief Bist register test
 *
 * @param p_hwfn - hw function
 * @param p_ptt - PTT required for register access
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt);

/**
 * @brief Bist clock test
 *
 * @param p_hwfn - hw function
 * @param p_ptt - PTT required for register access
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt);

/**
 * @brief Bist nvm test - get number of images
 *
 * @param p_hwfn - hw function
 * @param p_ptt - PTT required for register access
 * @param num_images - number of images if operation was
 *                     successful. 0 if not.
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *num_images);

/**
 * @brief Bist nvm test - get image attributes by index
 *
 * @param p_hwfn - hw function
 * @param p_ptt - PTT required for register access
 * @param p_image_att - Attributes of image
 * @param image_index - Index of image to get information for
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct bist_nvm_image_att *p_image_att,
				   u32 image_index);

/**
 * @brief - Processes the TLV request from MFW i.e., get the required TLV info
 *          from the qed client and send it to the MFW.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Send raw debug data to the MFW
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_buf - raw debug data buffer
 * @param size - buffer size
 */
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size);

/* Using hwfn number (and not pf_num) is required since in CMT mode,
 * the same pf_num may be used by two different hwfns.
 * TODO - this shouldn't really be in .h file, but until all fields
 * required during hw-init will be placed in their correct place in shmem
 * we need it in qed_dev.c [for reading the nvram reflection in shmem].
 */
#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ?	       \
					    ((rel_pfid) |		       \
					     ((p_hwfn)->abs_pf_id & 1) << 3) : \
					    rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)

struct qed_mcp_info {
	/* List for mailbox commands which were sent and wait for a response */
	struct list_head cmd_list;

	/* Spinlock used for protecting the access to the mailbox commands list
	 * and the sending of the commands.
	 */
	spinlock_t cmd_lock;

	/* Flag to indicate whether sending a MFW mailbox command is blocked */
	bool b_block_cmd;

	/* Spinlock used for syncing SW link-changes and link-changes
	 * originating from attention context.
	 */
	spinlock_t link_lock;

	u32 public_base;
	u32 drv_mb_addr;
	u32 mfw_mb_addr;
	u32 port_addr;
	u16 drv_mb_seq;
	u16 drv_pulse_seq;
	struct qed_mcp_link_params link_input;
	struct qed_mcp_link_state link_output;
	struct qed_mcp_link_capabilities link_capabilities;
	struct qed_mcp_function_info func_info;
	u8 *mfw_mb_cur;
	u8 *mfw_mb_shadow;
	u16 mfw_mb_length;
	u32 mcp_hist;

	/* Capabilities negotiated with the MFW */
	u32 capabilities;

	/* S/N for debug data mailbox commands */
	atomic_t dbg_data_seq;
};

struct qed_mcp_mb_params {
	u32 cmd;
	u32 param;
	void *p_data_src;
	void *p_data_dst;
	u8 data_src_size;
	u8 data_dst_size;
	u32 mcp_resp;
	u32 mcp_param;
	u32 flags;
#define QED_MB_FLAG_CAN_SLEEP	(0x1 << 0)
#define QED_MB_FLAG_AVOID_BLOCK	(0x1 << 1)
#define QED_MB_FLAGS_IS_SET(params, flag) \
	({ typeof(params) __params = (params); \
	   (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
};

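/* Illustrative sketch only (hypothetical helper): how QED_MB_FLAGS_IS_SET()
 * is typically used to choose between sleeping and busy-waiting while polling
 * for an MCP response. The delay values are examples, not the driver's actual
 * timing; msleep()/udelay() come from linux/delay.h, included above.
 */
static inline void
qed_mcp_example_wait_quantum(struct qed_mcp_mb_params *p_mb_params)
{
	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
		msleep(1);	/* sleepable context */
	else
		udelay(100);	/* atomic context */
}
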
struct qed_drv_tlv_hdr {
	u8 tlv_type;
	u8 tlv_length;	/* In dwords - not including this header */
	u8 tlv_reserved;
#define QED_DRV_TLV_FLAGS_CHANGED 0x01
	u8 tlv_flags;
};

/**
 * qed_mcp_is_ext_speed_supported() - Check if management firmware supports
 *                                    extended speeds.
 * @p_hwfn: HW device data.
 *
 * Return: true if supported, false otherwise.
 */
static inline bool
qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL);
}

/**
 * @brief Initialize the interface with the MCP
 *
 * @param p_hwfn - HW func
 * @param p_ptt - PTT required for register access
 *
 * @return int
 */
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);

/**
 * @brief Initialize the port interface with the MCP
 *
 * @param p_hwfn
 * @param p_ptt
 * Can only be called after `num_ports_in_engines' is set
 */
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt);
/**
 * @brief Releases resources allocated during the init process.
 *
 * @param p_hwfn - HW func
 * @param p_ptt - PTT required for register access
 *
 * @return int
 */
int qed_mcp_free(struct qed_hwfn *p_hwfn);

/**
 * @brief This function is called from the DPC context. After
 *        pointing PTT to the mfw mb, check for events sent by the MCP
 *        to the driver and ack them. In case a critical event
 *        is detected, it will be handled here, otherwise the work will be
 *        queued to a sleepable work-queue.
 *
 * @param p_hwfn - HW function
 * @param p_ptt - PTT required for register access
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt);

enum qed_drv_role {
	QED_DRV_ROLE_OS,
	QED_DRV_ROLE_KDUMP,
};

struct qed_load_req_params {
	/* Input params */
	enum qed_drv_role drv_role;
	u8 timeout_val;
	bool avoid_eng_reset;
	enum qed_override_force_load override_force_load;

	/* Output params */
	u32 load_code;
};

/**
 * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
 *        returns whether this PF is the first on the engine/port or function.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_params
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params);

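/* Illustrative sketch only (hypothetical helper): the request half of the
 * load handshake. A zeroed parameter block is assumed to select the default
 * timeout/override behaviour; real init code inspects p_params.load_code
 * (engine/port/function first) before deciding how much initialization to do.
 */
static inline int qed_mcp_example_load(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
{
	struct qed_load_req_params load_params = {};
	int rc;

	load_params.drv_role = QED_DRV_ROLE_OS;

	rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_params);
	if (rc)
		return rc;

	/* load_params.load_code now tells the caller its load role */
	return 0;
}
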
/**
 * @brief Sends a LOAD_DONE message to the MFW
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Sends a UNLOAD_REQ message to the MFW
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Sends a UNLOAD_DONE message to the MFW
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - Operation was successful.
 */
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Read the MFW mailbox into Current buffer.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);

/**
 * @brief Ack to mfw that driver finished FLR process for VFs
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
 *
 * @return int - 0 upon success.
 */
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack);

/**
 * @brief - calls during init to read shmem of all function-related info.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt);

/**
 * @brief - Reset the MCP using mailbox command.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt);

/**
 * @brief - Sends an NVM read command request to the MFW to get
 *          a buffer.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
 *              DRV_MSG_CODE_NVM_READ_NVRAM commands
 * @param param - [0:23] - Offset [24:31] - Size
 * @param o_mcp_resp - MCP response
 * @param o_mcp_param - MCP response param
 * @param o_txn_size - Buffer size output
 * @param o_buf - Pointer to the buffer returned by the MFW.
 *
 * @return 0 upon success.
 */
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);

/**
 * @brief Read from sfp
 *
 * @param p_hwfn - hw function
 * @param p_ptt - PTT required for register access
 * @param port - transceiver port
 * @param addr - I2C address
 * @param offset - offset in sfp
 * @param len - buffer length
 * @param p_buf - buffer to read into
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);

/**
 * @brief indicates whether the MFW objects [under mcp_info] are accessible
 *
 * @param p_hwfn
 *
 * @return true iff MFW is running and mcp_info is initialized
 */
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);

/**
 * @brief request MFW to configure MSI-X for a VF
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf_id - absolute inside engine
 * @param num - number of entries to request
 *
 * @return int
 */
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num);

/**
 * @brief - Halt the MCP.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief - Wake up the MCP.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 upon success.
 */
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw);
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw);

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities);

/**
 * @brief - Gets the mdump retained data from the MFW.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_mdump_retain
 *
 * @return 0 upon success.
 */
int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct mdump_retain_data_stc *p_mdump_retain);

/**
 * @brief - Sets the MFW's max value for the given resource
 *
 * @param p_hwfn
 * @param p_ptt
 * @param res_id
 * @param resc_max_val
 * @param p_mcp_resp
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp);

/**
 * @brief - Gets the MFW allocation info for the given resource
 *
 * @param p_hwfn
 * @param p_ptt
 * @param res_id
 * @param p_mcp_resp
 * @param p_resc_num
 * @param p_resc_start
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);

/**
 * @brief Send eswitch mode to MFW
 *
 * @param p_hwfn
 * @param p_ptt
 * @param eswitch - eswitch mode
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch);

#define QED_MCP_RESC_LOCK_MIN_VAL	RESOURCE_DUMP
#define QED_MCP_RESC_LOCK_MAX_VAL	31

enum qed_resc_lock {
	QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
	QED_RESC_LOCK_PTP_PORT0,
	QED_RESC_LOCK_PTP_PORT1,
	QED_RESC_LOCK_PTP_PORT2,
	QED_RESC_LOCK_PTP_PORT3,
	QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
	QED_RESC_LOCK_RESC_INVALID
};

/**
 * @brief - Initiates PF FLR
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return int - 0 - operation was successful.
 */
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_resc_lock_params {
	/* Resource number [valid values are 0..31] */
	u8 resource;

	/* Lock timeout value in seconds [default, none or 1..254] */
	u8 timeout;
#define QED_MCP_RESC_LOCK_TO_DEFAULT	0
#define QED_MCP_RESC_LOCK_TO_NONE	255

	/* Number of times to retry locking */
	u8 retry_num;
#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT	10

	/* The interval in usec between retries */
	u16 retry_interval;
#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT	10000

	/* Use sleep or delay between retries */
	bool sleep_b4_retry;

	/* Will be set as true if the resource is free and granted */
	bool b_granted;

	/* Will be filled with the resource owner.
	 * [0..15 = PF0-15, 16 = MFW]
	 */
	u8 owner;
};

/**
 * @brief Acquires MFW generic resource lock
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_params
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);

struct qed_resc_unlock_params {
	/* Resource number [valid values are 0..31] */
	u8 resource;

	/* Allow to release a resource even if belongs to another PF */
	bool b_force;

	/* Will be set as true if the resource is released */
	bool b_released;
};

/**
 * @brief Releases MFW generic resource lock
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_params
 *
 * @return int - 0 - operation was successful.
 */
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params);

/**
 * @brief - default initialization for lock/unlock resource structs
 *
 * @param p_lock - lock params struct to be initialized; Can be NULL
 * @param p_unlock - unlock params struct to be initialized; Can be NULL
 * @param resource - the requested resource
 * @param b_is_permanent - disable retries & aging when set
 */
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent);

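/* Illustrative sketch only (hypothetical helper): the usual acquire/release
 * pattern for an MFW resource lock, built on the default-init helper above.
 * The choice of QED_RESC_LOCK_PTP_PORT0 is just an example resource.
 */
static inline int qed_mcp_example_with_resc_lock(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params lock_params;
	struct qed_resc_unlock_params unlock_params;
	int rc;

	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
				       QED_RESC_LOCK_PTP_PORT0, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc)
		return rc;
	if (!lock_params.b_granted)
		return -EBUSY;

	/* ... access the MFW-arbitrated resource here ... */

	return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}
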
/**
 * @brief - Return whether management firmware supports smart AN
 *
 * @param p_hwfn
 *
 * @return bool - true if feature is supported.
 */
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);

/**
 * @brief Learn of supported MFW features; To be done during early init
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Inform MFW of set of features supported by driver. Should be done
 *        inside the content of the LOAD_REQ.
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Read ufp config from the shared memory.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Populate the nvm info shadow in the given hardware function
 *
 * @param p_hwfn
 */
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);

/**
 * @brief Delete nvm info shadow in the given hardware function
 *
 * @param p_hwfn
 */
void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);

/**
 * @brief Get the engine affinity configuration.
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Get the PPFID bitmap.
 *
 * @param p_hwfn
 * @param p_ptt
 */
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief Get NVM config attribute value.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param option_id
 * @param entity_id
 * @param flags
 * @param p_buf
 * @param p_len
 */
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 *p_len);

/**
 * @brief Set NVM config attribute value.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param option_id
 * @param entity_id
 * @param flags
 * @param p_buf
 * @param len
 */
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 len);
#endif