xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/emulex/benet/be_cmds.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

const char * const be_misconfig_evt_port_state[] = {
	"Physical Link is functional",
	"Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
	"Optics of two types installed – Remove one optic or install matching pair of optics.",
	"Incompatible optics – Replace with compatible optics for card to function.",
	"Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics – Replace with Avago-certified optics to enable link operation."
};

static char *be_port_misconfig_evt_severity[] = {
	"KERN_WARN",
	"KERN_INFO",
	"KERN_ERR",
	"KERN_WARN"
};

static char *phy_state_oper_desc[] = {
	"Link is non-operational",
	"Link is operational",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_HOST_DDR_DMA,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_LOOPBACK_TEST,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_HSW_CONFIG,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_DEVCFG | BE_PRIV_VHADM |
		BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_DEVCFG
	}
};

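/* A cmd is permitted unless it appears in cmd_priv_map and the function
 * holds none of the privileges listed for it there.
 */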
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

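/* Ring the MCCQ doorbell to tell the FW that one more WRB has been posted */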
static int be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return -EIO;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);

	return 0;
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

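/* Rebuild the resp hdr pointer that fill_wrb_tags() split across tag0/tag1 */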
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

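/* Failures that are expected in normal operation; skip logging them */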
static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;
			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

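/* Process one cmd completion: swap status to host endian, wake any async
 * waiters and log unexpected failures.
 */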
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
		    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
	u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
	u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
	struct device *dev = &adapter->pdev->dev;
	u8 msg_severity = DEFAULT_MSG_SEVERITY;
	u8 phy_state_info;
	u8 new_phy_state;

	new_phy_state =
		(sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;

	if (new_phy_state == adapter->phy_state)
		return;

	adapter->phy_state = new_phy_state;

	/* for older fw that doesn't populate link effect data */
	if (!sfp_misconfig_evt_word2)
		goto log_message;

	phy_state_info =
		(sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;

	if (phy_state_info & PHY_STATE_INFO_VALID) {
		msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;

		if (be_phy_unqualified(new_phy_state))
			phy_oper_state = (phy_state_info & PHY_STATE_OPER);
	}

log_message:
	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	if (be_phy_state_unknown(new_phy_state))
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: Unrecognized Optics state: 0x%x. %s",
			   adapter->port_name,
			   new_phy_state,
			   phy_state_oper_desc[phy_oper_state]);
	else
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: %s %s",
			   adapter->port_name,
			   be_misconfig_evt_port_state[new_phy_state],
			   phy_state_oper_desc[phy_oper_state]);

	/* Log Vendor name and part no. if a misconfigured SFP is detected */
	if (be_phy_misconfigured(new_phy_state))
		adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio_bits =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_SLIPORT;
}

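/* Dispatch an async completion to the handler for its event code */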
static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

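/* Return the next new completion on the MCC CQ, or NULL if none is pending */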
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

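/* Drain the MCC CQ: handle async events, process cmd completions and
 * notify/re-arm the CQ; returns the status of the last completed cmd.
 */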
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		12000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		usleep_range(500, 1000);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u32 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);
	if (status)
		goto out;

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

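/* Poll the mailbox doorbell ready bit; gives up after roughly 4 seconds */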
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

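/* Stash the req hdr address in the WRB tags so the completion handler can
 * recover it via be_decode_resp_hdr()
 */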
static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

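/* Grab the next free WRB slot in the MCCQ; returns NULL if the queue is not
 * created yet or is full
 */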
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		mutex_lock(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		return mutex_unlock(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

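/* Creates an EQ; uses mbox */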
be_cmd_eq_create(struct be_adapter * adapter,struct be_eq_obj * eqo)994*4882a593Smuzhiyun int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
995*4882a593Smuzhiyun {
996*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
997*4882a593Smuzhiyun 	struct be_cmd_req_eq_create *req;
998*4882a593Smuzhiyun 	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
999*4882a593Smuzhiyun 	int status, ver = 0;
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1002*4882a593Smuzhiyun 		return -1;
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
1005*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1008*4882a593Smuzhiyun 			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
1009*4882a593Smuzhiyun 			       NULL);
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	/* Support for EQ_CREATEv2 available only SH-R onwards */
1012*4882a593Smuzhiyun 	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
1013*4882a593Smuzhiyun 		ver = 2;
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	req->hdr.version = ver;
1016*4882a593Smuzhiyun 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
1019*4882a593Smuzhiyun 	/* 4byte eqe*/
1020*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
1021*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
1022*4882a593Smuzhiyun 		      __ilog2_u32(eqo->q.len / 256));
1023*4882a593Smuzhiyun 	be_dws_cpu_to_le(req->context, sizeof(req->context));
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
1028*4882a593Smuzhiyun 	if (!status) {
1029*4882a593Smuzhiyun 		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 		eqo->q.id = le16_to_cpu(resp->eq_id);
1032*4882a593Smuzhiyun 		eqo->msix_idx =
1033*4882a593Smuzhiyun 			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
1034*4882a593Smuzhiyun 		eqo->q.created = true;
1035*4882a593Smuzhiyun 	}
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
1038*4882a593Smuzhiyun 	return status;
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun /* Use MCC */
be_cmd_mac_addr_query(struct be_adapter * adapter,u8 * mac_addr,bool permanent,u32 if_handle,u32 pmac_id)1042*4882a593Smuzhiyun int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1043*4882a593Smuzhiyun 			  bool permanent, u32 if_handle, u32 pmac_id)
1044*4882a593Smuzhiyun {
1045*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1046*4882a593Smuzhiyun 	struct be_cmd_req_mac_query *req;
1047*4882a593Smuzhiyun 	int status;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1052*4882a593Smuzhiyun 	if (!wrb) {
1053*4882a593Smuzhiyun 		status = -EBUSY;
1054*4882a593Smuzhiyun 		goto err;
1055*4882a593Smuzhiyun 	}
1056*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1059*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
1060*4882a593Smuzhiyun 			       NULL);
1061*4882a593Smuzhiyun 	req->type = MAC_ADDRESS_TYPE_NETWORK;
1062*4882a593Smuzhiyun 	if (permanent) {
1063*4882a593Smuzhiyun 		req->permanent = 1;
1064*4882a593Smuzhiyun 	} else {
1065*4882a593Smuzhiyun 		req->if_id = cpu_to_le16((u16)if_handle);
1066*4882a593Smuzhiyun 		req->pmac_id = cpu_to_le32(pmac_id);
1067*4882a593Smuzhiyun 		req->permanent = 0;
1068*4882a593Smuzhiyun 	}
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1071*4882a593Smuzhiyun 	if (!status) {
1072*4882a593Smuzhiyun 		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
1075*4882a593Smuzhiyun 	}
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun err:
1078*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1079*4882a593Smuzhiyun 	return status;
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun /* Uses synchronous MCCQ */
be_cmd_pmac_add(struct be_adapter * adapter,u8 * mac_addr,u32 if_id,u32 * pmac_id,u32 domain)1083*4882a593Smuzhiyun int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1084*4882a593Smuzhiyun 		    u32 if_id, u32 *pmac_id, u32 domain)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1087*4882a593Smuzhiyun 	struct be_cmd_req_pmac_add *req;
1088*4882a593Smuzhiyun 	int status;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1093*4882a593Smuzhiyun 	if (!wrb) {
1094*4882a593Smuzhiyun 		status = -EBUSY;
1095*4882a593Smuzhiyun 		goto err;
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1100*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1101*4882a593Smuzhiyun 			       NULL);
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	req->hdr.domain = domain;
1104*4882a593Smuzhiyun 	req->if_id = cpu_to_le32(if_id);
1105*4882a593Smuzhiyun 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1108*4882a593Smuzhiyun 	if (!status) {
1109*4882a593Smuzhiyun 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 		*pmac_id = le32_to_cpu(resp->pmac_id);
1112*4882a593Smuzhiyun 	}
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun err:
1115*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	 if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
1118*4882a593Smuzhiyun 		status = -EPERM;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	return status;
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun /* Uses synchronous MCCQ */
be_cmd_pmac_del(struct be_adapter * adapter,u32 if_id,int pmac_id,u32 dom)1124*4882a593Smuzhiyun int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1125*4882a593Smuzhiyun {
1126*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1127*4882a593Smuzhiyun 	struct be_cmd_req_pmac_del *req;
1128*4882a593Smuzhiyun 	int status;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	if (pmac_id == -1)
1131*4882a593Smuzhiyun 		return 0;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1136*4882a593Smuzhiyun 	if (!wrb) {
1137*4882a593Smuzhiyun 		status = -EBUSY;
1138*4882a593Smuzhiyun 		goto err;
1139*4882a593Smuzhiyun 	}
1140*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1143*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1144*4882a593Smuzhiyun 			       wrb, NULL);
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	req->hdr.domain = dom;
1147*4882a593Smuzhiyun 	req->if_id = cpu_to_le32(if_id);
1148*4882a593Smuzhiyun 	req->pmac_id = cpu_to_le32(pmac_id);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun err:
1153*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1154*4882a593Smuzhiyun 	return status;
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun /* Uses Mbox */
1158*4882a593Smuzhiyun int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1159*4882a593Smuzhiyun 		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1162*4882a593Smuzhiyun 	struct be_cmd_req_cq_create *req;
1163*4882a593Smuzhiyun 	struct be_dma_mem *q_mem = &cq->dma_mem;
1164*4882a593Smuzhiyun 	void *ctxt;
1165*4882a593Smuzhiyun 	int status;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1168*4882a593Smuzhiyun 		return -1;
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
1171*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1172*4882a593Smuzhiyun 	ctxt = &req->context;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1175*4882a593Smuzhiyun 			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1176*4882a593Smuzhiyun 			       NULL);
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	if (BEx_chip(adapter)) {
1181*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1182*4882a593Smuzhiyun 			      coalesce_wm);
1183*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1184*4882a593Smuzhiyun 			      ctxt, no_delay);
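		/* The count field carries the CQ size as a log2 multiple of
		 * 256 entries: e.g. a 1024-entry CQ stores
		 * __ilog2_u32(1024 / 256) = 2.
		 */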
1185*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1186*4882a593Smuzhiyun 			      __ilog2_u32(cq->len / 256));
1187*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1188*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1189*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1190*4882a593Smuzhiyun 	} else {
1191*4882a593Smuzhiyun 		req->hdr.version = 2;
1192*4882a593Smuzhiyun 		req->page_size = 1; /* 1 for 4K */
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 		/* coalesce-wm field in this cmd is not relevant to Lancer.
1195*4882a593Smuzhiyun 		 * Lancer uses COMMON_MODIFY_CQ to set this field
1196*4882a593Smuzhiyun 		 */
1197*4882a593Smuzhiyun 		if (!lancer_chip(adapter))
1198*4882a593Smuzhiyun 			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1199*4882a593Smuzhiyun 				      ctxt, coalesce_wm);
1200*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1201*4882a593Smuzhiyun 			      no_delay);
1202*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1203*4882a593Smuzhiyun 			      __ilog2_u32(cq->len / 256));
1204*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1205*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1206*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1207*4882a593Smuzhiyun 	}
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
1214*4882a593Smuzhiyun 	if (!status) {
1215*4882a593Smuzhiyun 		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 		cq->id = le16_to_cpu(resp->cq_id);
1218*4882a593Smuzhiyun 		cq->created = true;
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	return status;
1224*4882a593Smuzhiyun }
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun static u32 be_encoded_q_len(int q_len)
1227*4882a593Smuzhiyun {
1228*4882a593Smuzhiyun 	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
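	/* e.g. fls(256) = 9 and fls(2048) = 12; the largest 32K-entry ring
	 * gives fls(32768) = 16, which is wrapped to 0 below (assumed to be
	 * the encoding the ring_size field expects for the maximum size).
	 */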
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	if (len_encoded == 16)
1231*4882a593Smuzhiyun 		len_encoded = 0;
1232*4882a593Smuzhiyun 	return len_encoded;
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1236*4882a593Smuzhiyun 				  struct be_queue_info *mccq,
1237*4882a593Smuzhiyun 				  struct be_queue_info *cq)
1238*4882a593Smuzhiyun {
1239*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1240*4882a593Smuzhiyun 	struct be_cmd_req_mcc_ext_create *req;
1241*4882a593Smuzhiyun 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1242*4882a593Smuzhiyun 	void *ctxt;
1243*4882a593Smuzhiyun 	int status;
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1246*4882a593Smuzhiyun 		return -1;
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
1249*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1250*4882a593Smuzhiyun 	ctxt = &req->context;
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1253*4882a593Smuzhiyun 			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1254*4882a593Smuzhiyun 			       NULL);
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1257*4882a593Smuzhiyun 	if (BEx_chip(adapter)) {
1258*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1259*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1260*4882a593Smuzhiyun 			      be_encoded_q_len(mccq->len));
1261*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1262*4882a593Smuzhiyun 	} else {
1263*4882a593Smuzhiyun 		req->hdr.version = 1;
1264*4882a593Smuzhiyun 		req->cq_id = cpu_to_le16(cq->id);
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1267*4882a593Smuzhiyun 			      be_encoded_q_len(mccq->len));
1268*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1269*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1270*4882a593Smuzhiyun 			      ctxt, cq->id);
1271*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1272*4882a593Smuzhiyun 			      ctxt, 1);
1273*4882a593Smuzhiyun 	}
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	/* Subscribe to Link State, Group 5, QnQ and Sliport async events
1276*4882a593Smuzhiyun 	 * (the four event-code bits set below)
1277*4882a593Smuzhiyun 	 */
1278*4882a593Smuzhiyun 	req->async_event_bitmap[0] =
1279*4882a593Smuzhiyun 			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1280*4882a593Smuzhiyun 				    BIT(ASYNC_EVENT_CODE_GRP_5) |
1281*4882a593Smuzhiyun 				    BIT(ASYNC_EVENT_CODE_QNQ) |
1282*4882a593Smuzhiyun 				    BIT(ASYNC_EVENT_CODE_SLIPORT));
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
1289*4882a593Smuzhiyun 	if (!status) {
1290*4882a593Smuzhiyun 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 		mccq->id = le16_to_cpu(resp->id);
1293*4882a593Smuzhiyun 		mccq->created = true;
1294*4882a593Smuzhiyun 	}
1295*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	return status;
1298*4882a593Smuzhiyun }
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1301*4882a593Smuzhiyun 				  struct be_queue_info *mccq,
1302*4882a593Smuzhiyun 				  struct be_queue_info *cq)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1305*4882a593Smuzhiyun 	struct be_cmd_req_mcc_create *req;
1306*4882a593Smuzhiyun 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1307*4882a593Smuzhiyun 	void *ctxt;
1308*4882a593Smuzhiyun 	int status;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1311*4882a593Smuzhiyun 		return -1;
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
1314*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1315*4882a593Smuzhiyun 	ctxt = &req->context;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1318*4882a593Smuzhiyun 			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1319*4882a593Smuzhiyun 			       NULL);
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1324*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1325*4882a593Smuzhiyun 		      be_encoded_q_len(mccq->len));
1326*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
1333*4882a593Smuzhiyun 	if (!status) {
1334*4882a593Smuzhiyun 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 		mccq->id = le16_to_cpu(resp->id);
1337*4882a593Smuzhiyun 		mccq->created = true;
1338*4882a593Smuzhiyun 	}
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
1341*4882a593Smuzhiyun 	return status;
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun int be_cmd_mccq_create(struct be_adapter *adapter,
1345*4882a593Smuzhiyun 		       struct be_queue_info *mccq, struct be_queue_info *cq)
1346*4882a593Smuzhiyun {
1347*4882a593Smuzhiyun 	int status;
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1350*4882a593Smuzhiyun 	if (status && BEx_chip(adapter)) {
1351*4882a593Smuzhiyun 		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1352*4882a593Smuzhiyun 			"or newer to avoid conflicting priorities between NIC "
1353*4882a593Smuzhiyun 			"and FCoE traffic");
1354*4882a593Smuzhiyun 		status = be_cmd_mccq_org_create(adapter, mccq, cq);
1355*4882a593Smuzhiyun 	}
1356*4882a593Smuzhiyun 	return status;
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun 	struct be_mcc_wrb wrb = {0};
1362*4882a593Smuzhiyun 	struct be_cmd_req_eth_tx_create *req;
1363*4882a593Smuzhiyun 	struct be_queue_info *txq = &txo->q;
1364*4882a593Smuzhiyun 	struct be_queue_info *cq = &txo->cq;
1365*4882a593Smuzhiyun 	struct be_dma_mem *q_mem = &txq->dma_mem;
1366*4882a593Smuzhiyun 	int status, ver = 0;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	req = embedded_payload(&wrb);
1369*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1370*4882a593Smuzhiyun 			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	if (lancer_chip(adapter)) {
1373*4882a593Smuzhiyun 		req->hdr.version = 1;
1374*4882a593Smuzhiyun 	} else if (BEx_chip(adapter)) {
1375*4882a593Smuzhiyun 		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1376*4882a593Smuzhiyun 			req->hdr.version = 2;
1377*4882a593Smuzhiyun 	} else { /* For SH */
1378*4882a593Smuzhiyun 		req->hdr.version = 2;
1379*4882a593Smuzhiyun 	}
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	if (req->hdr.version > 0)
1382*4882a593Smuzhiyun 		req->if_id = cpu_to_le16(adapter->if_handle);
1383*4882a593Smuzhiyun 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1384*4882a593Smuzhiyun 	req->ulp_num = BE_ULP1_NUM;
1385*4882a593Smuzhiyun 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1386*4882a593Smuzhiyun 	req->cq_id = cpu_to_le16(cq->id);
1387*4882a593Smuzhiyun 	req->queue_size = be_encoded_q_len(txq->len);
1388*4882a593Smuzhiyun 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1389*4882a593Smuzhiyun 	ver = req->hdr.version;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	status = be_cmd_notify_wait(adapter, &wrb);
1392*4882a593Smuzhiyun 	if (!status) {
1393*4882a593Smuzhiyun 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 		txq->id = le16_to_cpu(resp->cid);
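		/* v2 of the command returns the TX doorbell offset; older
		 * versions fall back to the fixed ULP1 doorbell below.
		 */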
1396*4882a593Smuzhiyun 		if (ver == 2)
1397*4882a593Smuzhiyun 			txo->db_offset = le32_to_cpu(resp->db_offset);
1398*4882a593Smuzhiyun 		else
1399*4882a593Smuzhiyun 			txo->db_offset = DB_TXULP1_OFFSET;
1400*4882a593Smuzhiyun 		txq->created = true;
1401*4882a593Smuzhiyun 	}
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	return status;
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun /* Uses MCC */
1407*4882a593Smuzhiyun int be_cmd_rxq_create(struct be_adapter *adapter,
1408*4882a593Smuzhiyun 		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1409*4882a593Smuzhiyun 		      u32 if_id, u32 rss, u8 *rss_id)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1412*4882a593Smuzhiyun 	struct be_cmd_req_eth_rx_create *req;
1413*4882a593Smuzhiyun 	struct be_dma_mem *q_mem = &rxq->dma_mem;
1414*4882a593Smuzhiyun 	int status;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1419*4882a593Smuzhiyun 	if (!wrb) {
1420*4882a593Smuzhiyun 		status = -EBUSY;
1421*4882a593Smuzhiyun 		goto err;
1422*4882a593Smuzhiyun 	}
1423*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1426*4882a593Smuzhiyun 			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	req->cq_id = cpu_to_le16(cq_id);
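	/* frag_size is programmed as a log2: e.g. a 2048-byte fragment
	 * size becomes fls(2048) - 1 = 11.
	 */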
1429*4882a593Smuzhiyun 	req->frag_size = fls(frag_size) - 1;
1430*4882a593Smuzhiyun 	req->num_pages = 2;
1431*4882a593Smuzhiyun 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1432*4882a593Smuzhiyun 	req->interface_id = cpu_to_le32(if_id);
1433*4882a593Smuzhiyun 	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1434*4882a593Smuzhiyun 	req->rss_queue = cpu_to_le32(rss);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1437*4882a593Smuzhiyun 	if (!status) {
1438*4882a593Smuzhiyun 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 		rxq->id = le16_to_cpu(resp->id);
1441*4882a593Smuzhiyun 		rxq->created = true;
1442*4882a593Smuzhiyun 		*rss_id = resp->rss_id;
1443*4882a593Smuzhiyun 	}
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun err:
1446*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1447*4882a593Smuzhiyun 	return status;
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun /* Generic destroyer function for all types of queues
1451*4882a593Smuzhiyun  * Uses Mbox
1452*4882a593Smuzhiyun  */
1453*4882a593Smuzhiyun int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1454*4882a593Smuzhiyun 		     int queue_type)
1455*4882a593Smuzhiyun {
1456*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1457*4882a593Smuzhiyun 	struct be_cmd_req_q_destroy *req;
1458*4882a593Smuzhiyun 	u8 subsys = 0, opcode = 0;
1459*4882a593Smuzhiyun 	int status;
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1462*4882a593Smuzhiyun 		return -1;
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
1465*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	switch (queue_type) {
1468*4882a593Smuzhiyun 	case QTYPE_EQ:
1469*4882a593Smuzhiyun 		subsys = CMD_SUBSYSTEM_COMMON;
1470*4882a593Smuzhiyun 		opcode = OPCODE_COMMON_EQ_DESTROY;
1471*4882a593Smuzhiyun 		break;
1472*4882a593Smuzhiyun 	case QTYPE_CQ:
1473*4882a593Smuzhiyun 		subsys = CMD_SUBSYSTEM_COMMON;
1474*4882a593Smuzhiyun 		opcode = OPCODE_COMMON_CQ_DESTROY;
1475*4882a593Smuzhiyun 		break;
1476*4882a593Smuzhiyun 	case QTYPE_TXQ:
1477*4882a593Smuzhiyun 		subsys = CMD_SUBSYSTEM_ETH;
1478*4882a593Smuzhiyun 		opcode = OPCODE_ETH_TX_DESTROY;
1479*4882a593Smuzhiyun 		break;
1480*4882a593Smuzhiyun 	case QTYPE_RXQ:
1481*4882a593Smuzhiyun 		subsys = CMD_SUBSYSTEM_ETH;
1482*4882a593Smuzhiyun 		opcode = OPCODE_ETH_RX_DESTROY;
1483*4882a593Smuzhiyun 		break;
1484*4882a593Smuzhiyun 	case QTYPE_MCCQ:
1485*4882a593Smuzhiyun 		subsys = CMD_SUBSYSTEM_COMMON;
1486*4882a593Smuzhiyun 		opcode = OPCODE_COMMON_MCC_DESTROY;
1487*4882a593Smuzhiyun 		break;
1488*4882a593Smuzhiyun 	default:
1489*4882a593Smuzhiyun 		BUG();
1490*4882a593Smuzhiyun 	}
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1493*4882a593Smuzhiyun 			       NULL);
1494*4882a593Smuzhiyun 	req->id = cpu_to_le16(q->id);
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
1497*4882a593Smuzhiyun 	q->created = false;
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
1500*4882a593Smuzhiyun 	return status;
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun /* Uses MCC */
1504*4882a593Smuzhiyun int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1507*4882a593Smuzhiyun 	struct be_cmd_req_q_destroy *req;
1508*4882a593Smuzhiyun 	int status;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1513*4882a593Smuzhiyun 	if (!wrb) {
1514*4882a593Smuzhiyun 		status = -EBUSY;
1515*4882a593Smuzhiyun 		goto err;
1516*4882a593Smuzhiyun 	}
1517*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1520*4882a593Smuzhiyun 			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1521*4882a593Smuzhiyun 	req->id = cpu_to_le16(q->id);
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1524*4882a593Smuzhiyun 	q->created = false;
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun err:
1527*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1528*4882a593Smuzhiyun 	return status;
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun /* Create an rx filtering policy configuration on an i/f
1532*4882a593Smuzhiyun  * Will use MBOX only if MCCQ has not been created.
1533*4882a593Smuzhiyun  */
1534*4882a593Smuzhiyun int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1535*4882a593Smuzhiyun 		     u32 *if_handle, u32 domain)
1536*4882a593Smuzhiyun {
1537*4882a593Smuzhiyun 	struct be_mcc_wrb wrb = {0};
1538*4882a593Smuzhiyun 	struct be_cmd_req_if_create *req;
1539*4882a593Smuzhiyun 	int status;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	req = embedded_payload(&wrb);
1542*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1543*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1544*4882a593Smuzhiyun 			       sizeof(*req), &wrb, NULL);
1545*4882a593Smuzhiyun 	req->hdr.domain = domain;
1546*4882a593Smuzhiyun 	req->capability_flags = cpu_to_le32(cap_flags);
1547*4882a593Smuzhiyun 	req->enable_flags = cpu_to_le32(en_flags);
1548*4882a593Smuzhiyun 	req->pmac_invalid = true;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	status = be_cmd_notify_wait(adapter, &wrb);
1551*4882a593Smuzhiyun 	if (!status) {
1552*4882a593Smuzhiyun 		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 		*if_handle = le32_to_cpu(resp->interface_id);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 		/* Hack to retrieve VF's pmac-id on BE3 */
1557*4882a593Smuzhiyun 		if (BE3_chip(adapter) && be_virtfn(adapter))
1558*4882a593Smuzhiyun 			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1559*4882a593Smuzhiyun 	}
1560*4882a593Smuzhiyun 	return status;
1561*4882a593Smuzhiyun }
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun /* Uses MCCQ if available else MBOX */
1564*4882a593Smuzhiyun int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1565*4882a593Smuzhiyun {
1566*4882a593Smuzhiyun 	struct be_mcc_wrb wrb = {0};
1567*4882a593Smuzhiyun 	struct be_cmd_req_if_destroy *req;
1568*4882a593Smuzhiyun 	int status;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	if (interface_id == -1)
1571*4882a593Smuzhiyun 		return 0;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	req = embedded_payload(&wrb);
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1576*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1577*4882a593Smuzhiyun 			       sizeof(*req), &wrb, NULL);
1578*4882a593Smuzhiyun 	req->hdr.domain = domain;
1579*4882a593Smuzhiyun 	req->interface_id = cpu_to_le32(interface_id);
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	status = be_cmd_notify_wait(adapter, &wrb);
1582*4882a593Smuzhiyun 	return status;
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun /* Get stats is a non-embedded command: the request is not embedded inside
1586*4882a593Smuzhiyun  * the WRB but is carried in a separate DMA memory block.
1587*4882a593Smuzhiyun  * Uses asynchronous MCC
1588*4882a593Smuzhiyun  */
1589*4882a593Smuzhiyun int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1592*4882a593Smuzhiyun 	struct be_cmd_req_hdr *hdr;
1593*4882a593Smuzhiyun 	int status = 0;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1598*4882a593Smuzhiyun 	if (!wrb) {
1599*4882a593Smuzhiyun 		status = -EBUSY;
1600*4882a593Smuzhiyun 		goto err;
1601*4882a593Smuzhiyun 	}
1602*4882a593Smuzhiyun 	hdr = nonemb_cmd->va;
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1605*4882a593Smuzhiyun 			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1606*4882a593Smuzhiyun 			       nonemb_cmd);
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	/* version 1 of the cmd is supported by all chips except BE2 */
1609*4882a593Smuzhiyun 	if (BE2_chip(adapter))
1610*4882a593Smuzhiyun 		hdr->version = 0;
1611*4882a593Smuzhiyun 	if (BE3_chip(adapter) || lancer_chip(adapter))
1612*4882a593Smuzhiyun 		hdr->version = 1;
1613*4882a593Smuzhiyun 	else
1614*4882a593Smuzhiyun 		hdr->version = 2;
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
1617*4882a593Smuzhiyun 	if (status)
1618*4882a593Smuzhiyun 		goto err;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	adapter->stats_cmd_sent = true;
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun err:
1623*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1624*4882a593Smuzhiyun 	return status;
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun /* Lancer Stats */
1628*4882a593Smuzhiyun int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1629*4882a593Smuzhiyun 			       struct be_dma_mem *nonemb_cmd)
1630*4882a593Smuzhiyun {
1631*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1632*4882a593Smuzhiyun 	struct lancer_cmd_req_pport_stats *req;
1633*4882a593Smuzhiyun 	int status = 0;
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1636*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_ETH))
1637*4882a593Smuzhiyun 		return -EPERM;
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1642*4882a593Smuzhiyun 	if (!wrb) {
1643*4882a593Smuzhiyun 		status = -EBUSY;
1644*4882a593Smuzhiyun 		goto err;
1645*4882a593Smuzhiyun 	}
1646*4882a593Smuzhiyun 	req = nonemb_cmd->va;
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1649*4882a593Smuzhiyun 			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1650*4882a593Smuzhiyun 			       wrb, nonemb_cmd);
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1653*4882a593Smuzhiyun 	req->cmd_params.params.reset_stats = 0;
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
1656*4882a593Smuzhiyun 	if (status)
1657*4882a593Smuzhiyun 		goto err;
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	adapter->stats_cmd_sent = true;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun err:
1662*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1663*4882a593Smuzhiyun 	return status;
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun static int be_mac_to_link_speed(int mac_speed)
1667*4882a593Smuzhiyun {
1668*4882a593Smuzhiyun 	switch (mac_speed) {
1669*4882a593Smuzhiyun 	case PHY_LINK_SPEED_ZERO:
1670*4882a593Smuzhiyun 		return 0;
1671*4882a593Smuzhiyun 	case PHY_LINK_SPEED_10MBPS:
1672*4882a593Smuzhiyun 		return 10;
1673*4882a593Smuzhiyun 	case PHY_LINK_SPEED_100MBPS:
1674*4882a593Smuzhiyun 		return 100;
1675*4882a593Smuzhiyun 	case PHY_LINK_SPEED_1GBPS:
1676*4882a593Smuzhiyun 		return 1000;
1677*4882a593Smuzhiyun 	case PHY_LINK_SPEED_10GBPS:
1678*4882a593Smuzhiyun 		return 10000;
1679*4882a593Smuzhiyun 	case PHY_LINK_SPEED_20GBPS:
1680*4882a593Smuzhiyun 		return 20000;
1681*4882a593Smuzhiyun 	case PHY_LINK_SPEED_25GBPS:
1682*4882a593Smuzhiyun 		return 25000;
1683*4882a593Smuzhiyun 	case PHY_LINK_SPEED_40GBPS:
1684*4882a593Smuzhiyun 		return 40000;
1685*4882a593Smuzhiyun 	}
1686*4882a593Smuzhiyun 	return 0;
1687*4882a593Smuzhiyun }
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun /* Uses synchronous mcc
1690*4882a593Smuzhiyun  * Returns link_speed in Mbps
1691*4882a593Smuzhiyun  */
1692*4882a593Smuzhiyun int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1693*4882a593Smuzhiyun 			     u8 *link_status, u32 dom)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1696*4882a593Smuzhiyun 	struct be_cmd_req_link_status *req;
1697*4882a593Smuzhiyun 	int status;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	if (link_status)
1702*4882a593Smuzhiyun 		*link_status = LINK_DOWN;
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1705*4882a593Smuzhiyun 	if (!wrb) {
1706*4882a593Smuzhiyun 		status = -EBUSY;
1707*4882a593Smuzhiyun 		goto err;
1708*4882a593Smuzhiyun 	}
1709*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1712*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1713*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	/* version 1 of the cmd is supported by all chips except BE2 */
1716*4882a593Smuzhiyun 	if (!BE2_chip(adapter))
1717*4882a593Smuzhiyun 		req->hdr.version = 1;
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 	req->hdr.domain = dom;
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1722*4882a593Smuzhiyun 	if (!status) {
1723*4882a593Smuzhiyun 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1724*4882a593Smuzhiyun 
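		/* resp->link_speed, when non-zero, is in units of 10 Mbps
		 * (e.g. 1000 => 10000 Mbps); otherwise the mac_speed enum is
		 * translated via be_mac_to_link_speed() below.
		 */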
1725*4882a593Smuzhiyun 		if (link_speed) {
1726*4882a593Smuzhiyun 			*link_speed = resp->link_speed ?
1727*4882a593Smuzhiyun 				      le16_to_cpu(resp->link_speed) * 10 :
1728*4882a593Smuzhiyun 				      be_mac_to_link_speed(resp->mac_speed);
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 			if (!resp->logical_link_status)
1731*4882a593Smuzhiyun 				*link_speed = 0;
1732*4882a593Smuzhiyun 		}
1733*4882a593Smuzhiyun 		if (link_status)
1734*4882a593Smuzhiyun 			*link_status = resp->logical_link_status;
1735*4882a593Smuzhiyun 	}
1736*4882a593Smuzhiyun 
1737*4882a593Smuzhiyun err:
1738*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1739*4882a593Smuzhiyun 	return status;
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun /* Uses asynchronous mcc */
1743*4882a593Smuzhiyun int be_cmd_get_die_temperature(struct be_adapter *adapter)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1746*4882a593Smuzhiyun 	struct be_cmd_req_get_cntl_addnl_attribs *req;
1747*4882a593Smuzhiyun 	int status = 0;
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1752*4882a593Smuzhiyun 	if (!wrb) {
1753*4882a593Smuzhiyun 		status = -EBUSY;
1754*4882a593Smuzhiyun 		goto err;
1755*4882a593Smuzhiyun 	}
1756*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1759*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1760*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
1763*4882a593Smuzhiyun err:
1764*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1765*4882a593Smuzhiyun 	return status;
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun /* Uses synchronous mcc */
1769*4882a593Smuzhiyun int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
1770*4882a593Smuzhiyun {
1771*4882a593Smuzhiyun 	struct be_mcc_wrb wrb = {0};
1772*4882a593Smuzhiyun 	struct be_cmd_req_get_fat *req;
1773*4882a593Smuzhiyun 	int status;
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	req = embedded_payload(&wrb);
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1778*4882a593Smuzhiyun 			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
1779*4882a593Smuzhiyun 			       &wrb, NULL);
1780*4882a593Smuzhiyun 	req->fat_operation = cpu_to_le32(QUERY_FAT);
1781*4882a593Smuzhiyun 	status = be_cmd_notify_wait(adapter, &wrb);
1782*4882a593Smuzhiyun 	if (!status) {
1783*4882a593Smuzhiyun 		struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 		if (dump_size && resp->log_size)
1786*4882a593Smuzhiyun 			*dump_size = le32_to_cpu(resp->log_size) -
1787*4882a593Smuzhiyun 					sizeof(u32);
1788*4882a593Smuzhiyun 	}
1789*4882a593Smuzhiyun 	return status;
1790*4882a593Smuzhiyun }
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
1793*4882a593Smuzhiyun {
1794*4882a593Smuzhiyun 	struct be_dma_mem get_fat_cmd;
1795*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1796*4882a593Smuzhiyun 	struct be_cmd_req_get_fat *req;
1797*4882a593Smuzhiyun 	u32 offset = 0, total_size, buf_size,
1798*4882a593Smuzhiyun 				log_offset = sizeof(u32), payload_len;
1799*4882a593Smuzhiyun 	int status;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	if (buf_len == 0)
1802*4882a593Smuzhiyun 		return 0;
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 	total_size = buf_len;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1807*4882a593Smuzhiyun 	get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1808*4882a593Smuzhiyun 					    get_fat_cmd.size,
1809*4882a593Smuzhiyun 					    &get_fat_cmd.dma, GFP_ATOMIC);
1810*4882a593Smuzhiyun 	if (!get_fat_cmd.va)
1811*4882a593Smuzhiyun 		return -ENOMEM;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1814*4882a593Smuzhiyun 
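	/* The FAT log is pulled in chunks of at most 60KB: e.g. a 150KB
	 * dump takes three RETRIEVE_FAT commands of 60KB, 60KB and 30KB.
	 * read_log_offset starts past the leading u32, assumed to be the
	 * size word already excluded by be_cmd_get_fat_dump_len().
	 */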
1815*4882a593Smuzhiyun 	while (total_size) {
1816*4882a593Smuzhiyun 		buf_size = min(total_size, (u32)60*1024);
1817*4882a593Smuzhiyun 		total_size -= buf_size;
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 		wrb = wrb_from_mccq(adapter);
1820*4882a593Smuzhiyun 		if (!wrb) {
1821*4882a593Smuzhiyun 			status = -EBUSY;
1822*4882a593Smuzhiyun 			goto err;
1823*4882a593Smuzhiyun 		}
1824*4882a593Smuzhiyun 		req = get_fat_cmd.va;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1827*4882a593Smuzhiyun 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1828*4882a593Smuzhiyun 				       OPCODE_COMMON_MANAGE_FAT, payload_len,
1829*4882a593Smuzhiyun 				       wrb, &get_fat_cmd);
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1832*4882a593Smuzhiyun 		req->read_log_offset = cpu_to_le32(log_offset);
1833*4882a593Smuzhiyun 		req->read_log_length = cpu_to_le32(buf_size);
1834*4882a593Smuzhiyun 		req->data_buffer_size = cpu_to_le32(buf_size);
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 		status = be_mcc_notify_wait(adapter);
1837*4882a593Smuzhiyun 		if (!status) {
1838*4882a593Smuzhiyun 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 			memcpy(buf + offset,
1841*4882a593Smuzhiyun 			       resp->data_buffer,
1842*4882a593Smuzhiyun 			       le32_to_cpu(resp->read_log_length));
1843*4882a593Smuzhiyun 		} else {
1844*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1845*4882a593Smuzhiyun 			goto err;
1846*4882a593Smuzhiyun 		}
1847*4882a593Smuzhiyun 		offset += buf_size;
1848*4882a593Smuzhiyun 		log_offset += buf_size;
1849*4882a593Smuzhiyun 	}
1850*4882a593Smuzhiyun err:
1851*4882a593Smuzhiyun 	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1852*4882a593Smuzhiyun 			  get_fat_cmd.va, get_fat_cmd.dma);
1853*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1854*4882a593Smuzhiyun 	return status;
1855*4882a593Smuzhiyun }
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun /* Uses synchronous mcc */
1858*4882a593Smuzhiyun int be_cmd_get_fw_ver(struct be_adapter *adapter)
1859*4882a593Smuzhiyun {
1860*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1861*4882a593Smuzhiyun 	struct be_cmd_req_get_fw_version *req;
1862*4882a593Smuzhiyun 	int status;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1867*4882a593Smuzhiyun 	if (!wrb) {
1868*4882a593Smuzhiyun 		status = -EBUSY;
1869*4882a593Smuzhiyun 		goto err;
1870*4882a593Smuzhiyun 	}
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1875*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1876*4882a593Smuzhiyun 			       NULL);
1877*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1878*4882a593Smuzhiyun 	if (!status) {
1879*4882a593Smuzhiyun 		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 		strlcpy(adapter->fw_ver, resp->firmware_version_string,
1882*4882a593Smuzhiyun 			sizeof(adapter->fw_ver));
1883*4882a593Smuzhiyun 		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1884*4882a593Smuzhiyun 			sizeof(adapter->fw_on_flash));
1885*4882a593Smuzhiyun 	}
1886*4882a593Smuzhiyun err:
1887*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1888*4882a593Smuzhiyun 	return status;
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun /* Set the delay interval of the given EQs to the specified values.
1892*4882a593Smuzhiyun  * Uses async mcc
1893*4882a593Smuzhiyun  */
1894*4882a593Smuzhiyun static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1895*4882a593Smuzhiyun 			       struct be_set_eqd *set_eqd, int num)
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1898*4882a593Smuzhiyun 	struct be_cmd_req_modify_eq_delay *req;
1899*4882a593Smuzhiyun 	int status = 0, i;
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1904*4882a593Smuzhiyun 	if (!wrb) {
1905*4882a593Smuzhiyun 		status = -EBUSY;
1906*4882a593Smuzhiyun 		goto err;
1907*4882a593Smuzhiyun 	}
1908*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1911*4882a593Smuzhiyun 			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1912*4882a593Smuzhiyun 			       NULL);
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	req->num_eq = cpu_to_le32(num);
1915*4882a593Smuzhiyun 	for (i = 0; i < num; i++) {
1916*4882a593Smuzhiyun 		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1917*4882a593Smuzhiyun 		req->set_eqd[i].phase = 0;
1918*4882a593Smuzhiyun 		req->set_eqd[i].delay_multiplier =
1919*4882a593Smuzhiyun 				cpu_to_le32(set_eqd[i].delay_multiplier);
1920*4882a593Smuzhiyun 	}
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
1923*4882a593Smuzhiyun err:
1924*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1925*4882a593Smuzhiyun 	return status;
1926*4882a593Smuzhiyun }
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1929*4882a593Smuzhiyun 		      int num)
1930*4882a593Smuzhiyun {
1931*4882a593Smuzhiyun 	int num_eqs, i = 0;
1932*4882a593Smuzhiyun 
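	/* EQ delay updates are issued in batches of at most 8 entries:
	 * e.g. 18 EQs are covered by three commands of 8, 8 and 2.
	 */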
1933*4882a593Smuzhiyun 	while (num) {
1934*4882a593Smuzhiyun 		num_eqs = min(num, 8);
1935*4882a593Smuzhiyun 		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1936*4882a593Smuzhiyun 		i += num_eqs;
1937*4882a593Smuzhiyun 		num -= num_eqs;
1938*4882a593Smuzhiyun 	}
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 	return 0;
1941*4882a593Smuzhiyun }
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun /* Uses synchronous mcc */
1944*4882a593Smuzhiyun int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1945*4882a593Smuzhiyun 		       u32 num, u32 domain)
1946*4882a593Smuzhiyun {
1947*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1948*4882a593Smuzhiyun 	struct be_cmd_req_vlan_config *req;
1949*4882a593Smuzhiyun 	int status;
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1954*4882a593Smuzhiyun 	if (!wrb) {
1955*4882a593Smuzhiyun 		status = -EBUSY;
1956*4882a593Smuzhiyun 		goto err;
1957*4882a593Smuzhiyun 	}
1958*4882a593Smuzhiyun 	req = embedded_payload(wrb);
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1961*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1962*4882a593Smuzhiyun 			       wrb, NULL);
1963*4882a593Smuzhiyun 	req->hdr.domain = domain;
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	req->interface_id = if_id;
1966*4882a593Smuzhiyun 	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1967*4882a593Smuzhiyun 	req->num_vlan = num;
1968*4882a593Smuzhiyun 	memcpy(req->normal_vlan, vtag_array,
1969*4882a593Smuzhiyun 	       req->num_vlan * sizeof(vtag_array[0]));
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
1972*4882a593Smuzhiyun err:
1973*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
1974*4882a593Smuzhiyun 	return status;
1975*4882a593Smuzhiyun }
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1978*4882a593Smuzhiyun {
1979*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
1980*4882a593Smuzhiyun 	struct be_dma_mem *mem = &adapter->rx_filter;
1981*4882a593Smuzhiyun 	struct be_cmd_req_rx_filter *req = mem->va;
1982*4882a593Smuzhiyun 	int status;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
1987*4882a593Smuzhiyun 	if (!wrb) {
1988*4882a593Smuzhiyun 		status = -EBUSY;
1989*4882a593Smuzhiyun 		goto err;
1990*4882a593Smuzhiyun 	}
1991*4882a593Smuzhiyun 	memset(req, 0, sizeof(*req));
1992*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1993*4882a593Smuzhiyun 			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1994*4882a593Smuzhiyun 			       wrb, mem);
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	req->if_id = cpu_to_le32(adapter->if_handle);
1997*4882a593Smuzhiyun 	req->if_flags_mask = cpu_to_le32(flags);
1998*4882a593Smuzhiyun 	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	if (flags & BE_IF_FLAGS_MULTICAST) {
2001*4882a593Smuzhiyun 		int i;
2002*4882a593Smuzhiyun 
2003*4882a593Smuzhiyun 		/* Reset mcast promisc mode if already set by setting mask
2004*4882a593Smuzhiyun 		 * and not setting flags field
2005*4882a593Smuzhiyun 		 */
2006*4882a593Smuzhiyun 		req->if_flags_mask |=
2007*4882a593Smuzhiyun 			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
2008*4882a593Smuzhiyun 				    be_if_cap_flags(adapter));
2009*4882a593Smuzhiyun 		req->mcast_num = cpu_to_le32(adapter->mc_count);
2010*4882a593Smuzhiyun 		for (i = 0; i < adapter->mc_count; i++)
2011*4882a593Smuzhiyun 			ether_addr_copy(req->mcast_mac[i].byte,
2012*4882a593Smuzhiyun 					adapter->mc_list[i].mac);
2013*4882a593Smuzhiyun 	}
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2016*4882a593Smuzhiyun err:
2017*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2018*4882a593Smuzhiyun 	return status;
2019*4882a593Smuzhiyun }
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
2022*4882a593Smuzhiyun {
2023*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun 	if ((flags & be_if_cap_flags(adapter)) != flags) {
2026*4882a593Smuzhiyun 		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
2027*4882a593Smuzhiyun 		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
2028*4882a593Smuzhiyun 			 be_if_cap_flags(adapter));
2029*4882a593Smuzhiyun 	}
2030*4882a593Smuzhiyun 	flags &= be_if_cap_flags(adapter);
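	/* e.g. a filter flag the interface does not advertise is silently
	 * dropped above; -ENOTSUPP is returned only if nothing usable
	 * remains.
	 */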
2031*4882a593Smuzhiyun 	if (!flags)
2032*4882a593Smuzhiyun 		return -ENOTSUPP;
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	return __be_cmd_rx_filter(adapter, flags, value);
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun /* Uses synchronous mcc */
2038*4882a593Smuzhiyun int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
2039*4882a593Smuzhiyun {
2040*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2041*4882a593Smuzhiyun 	struct be_cmd_req_set_flow_control *req;
2042*4882a593Smuzhiyun 	int status;
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
2045*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_COMMON))
2046*4882a593Smuzhiyun 		return -EPERM;
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2051*4882a593Smuzhiyun 	if (!wrb) {
2052*4882a593Smuzhiyun 		status = -EBUSY;
2053*4882a593Smuzhiyun 		goto err;
2054*4882a593Smuzhiyun 	}
2055*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2058*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
2059*4882a593Smuzhiyun 			       wrb, NULL);
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	req->hdr.version = 1;
2062*4882a593Smuzhiyun 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2063*4882a593Smuzhiyun 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun err:
2068*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2071*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun 	return status;
2074*4882a593Smuzhiyun }
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun /* Uses sync mcc */
2077*4882a593Smuzhiyun int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
2078*4882a593Smuzhiyun {
2079*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2080*4882a593Smuzhiyun 	struct be_cmd_req_get_flow_control *req;
2081*4882a593Smuzhiyun 	int status;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2084*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_COMMON))
2085*4882a593Smuzhiyun 		return -EPERM;
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2090*4882a593Smuzhiyun 	if (!wrb) {
2091*4882a593Smuzhiyun 		status = -EBUSY;
2092*4882a593Smuzhiyun 		goto err;
2093*4882a593Smuzhiyun 	}
2094*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2097*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2098*4882a593Smuzhiyun 			       wrb, NULL);
2099*4882a593Smuzhiyun 
2100*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2101*4882a593Smuzhiyun 	if (!status) {
2102*4882a593Smuzhiyun 		struct be_cmd_resp_get_flow_control *resp =
2103*4882a593Smuzhiyun 						embedded_payload(wrb);
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 		*tx_fc = le16_to_cpu(resp->tx_flow_control);
2106*4882a593Smuzhiyun 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
2107*4882a593Smuzhiyun 	}
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun err:
2110*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2111*4882a593Smuzhiyun 	return status;
2112*4882a593Smuzhiyun }
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun /* Uses mbox */
2115*4882a593Smuzhiyun int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2116*4882a593Smuzhiyun {
2117*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2118*4882a593Smuzhiyun 	struct be_cmd_req_query_fw_cfg *req;
2119*4882a593Smuzhiyun 	int status;
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2122*4882a593Smuzhiyun 		return -1;
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
2125*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2128*4882a593Smuzhiyun 			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2129*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
2132*4882a593Smuzhiyun 	if (!status) {
2133*4882a593Smuzhiyun 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 		adapter->port_num = le32_to_cpu(resp->phys_port);
2136*4882a593Smuzhiyun 		adapter->function_mode = le32_to_cpu(resp->function_mode);
2137*4882a593Smuzhiyun 		adapter->function_caps = le32_to_cpu(resp->function_caps);
2138*4882a593Smuzhiyun 		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2139*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev,
2140*4882a593Smuzhiyun 			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2141*4882a593Smuzhiyun 			 adapter->function_mode, adapter->function_caps);
2142*4882a593Smuzhiyun 	}
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
2145*4882a593Smuzhiyun 	return status;
2146*4882a593Smuzhiyun }
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun /* Uses mbox */
2149*4882a593Smuzhiyun int be_cmd_reset_function(struct be_adapter *adapter)
2150*4882a593Smuzhiyun {
2151*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2152*4882a593Smuzhiyun 	struct be_cmd_req_hdr *req;
2153*4882a593Smuzhiyun 	int status;
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	if (lancer_chip(adapter)) {
2156*4882a593Smuzhiyun 		iowrite32(SLI_PORT_CONTROL_IP_MASK,
2157*4882a593Smuzhiyun 			  adapter->db + SLIPORT_CONTROL_OFFSET);
2158*4882a593Smuzhiyun 		status = lancer_wait_ready(adapter);
2159*4882a593Smuzhiyun 		if (status)
2160*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
2161*4882a593Smuzhiyun 				"Adapter in non recoverable error\n");
2162*4882a593Smuzhiyun 		return status;
2163*4882a593Smuzhiyun 	}
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2166*4882a593Smuzhiyun 		return -1;
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
2169*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2172*4882a593Smuzhiyun 			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2173*4882a593Smuzhiyun 			       NULL);
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
2178*4882a593Smuzhiyun 	return status;
2179*4882a593Smuzhiyun }
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2182*4882a593Smuzhiyun 		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2183*4882a593Smuzhiyun {
2184*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2185*4882a593Smuzhiyun 	struct be_cmd_req_rss_config *req;
2186*4882a593Smuzhiyun 	int status;
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun 	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2189*4882a593Smuzhiyun 		return 0;
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2194*4882a593Smuzhiyun 	if (!wrb) {
2195*4882a593Smuzhiyun 		status = -EBUSY;
2196*4882a593Smuzhiyun 		goto err;
2197*4882a593Smuzhiyun 	}
2198*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2201*4882a593Smuzhiyun 			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2202*4882a593Smuzhiyun 
2203*4882a593Smuzhiyun 	req->if_id = cpu_to_le32(adapter->if_handle);
2204*4882a593Smuzhiyun 	req->enable_rss = cpu_to_le16(rss_hash_opts);
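	/* The RSS indirection table size is sent as a log2: e.g. a
	 * 128-entry table gives fls(128) - 1 = 7.
	 */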
2205*4882a593Smuzhiyun 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	if (!BEx_chip(adapter))
2208*4882a593Smuzhiyun 		req->hdr.version = 1;
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 	memcpy(req->cpu_table, rsstable, table_size);
2211*4882a593Smuzhiyun 	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2212*4882a593Smuzhiyun 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2215*4882a593Smuzhiyun err:
2216*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2217*4882a593Smuzhiyun 	return status;
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun /* Uses sync mcc */
2221*4882a593Smuzhiyun int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2222*4882a593Smuzhiyun 			    u8 bcn, u8 sts, u8 state)
2223*4882a593Smuzhiyun {
2224*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2225*4882a593Smuzhiyun 	struct be_cmd_req_enable_disable_beacon *req;
2226*4882a593Smuzhiyun 	int status;
2227*4882a593Smuzhiyun 
2228*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2231*4882a593Smuzhiyun 	if (!wrb) {
2232*4882a593Smuzhiyun 		status = -EBUSY;
2233*4882a593Smuzhiyun 		goto err;
2234*4882a593Smuzhiyun 	}
2235*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2238*4882a593Smuzhiyun 			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2239*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	req->port_num = port_num;
2242*4882a593Smuzhiyun 	req->beacon_state = state;
2243*4882a593Smuzhiyun 	req->beacon_duration = bcn;
2244*4882a593Smuzhiyun 	req->status_duration = sts;
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun err:
2249*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2250*4882a593Smuzhiyun 	return status;
2251*4882a593Smuzhiyun }
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun /* Uses sync mcc */
2254*4882a593Smuzhiyun int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2255*4882a593Smuzhiyun {
2256*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2257*4882a593Smuzhiyun 	struct be_cmd_req_get_beacon_state *req;
2258*4882a593Smuzhiyun 	int status;
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2263*4882a593Smuzhiyun 	if (!wrb) {
2264*4882a593Smuzhiyun 		status = -EBUSY;
2265*4882a593Smuzhiyun 		goto err;
2266*4882a593Smuzhiyun 	}
2267*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2270*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2271*4882a593Smuzhiyun 			       wrb, NULL);
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	req->port_num = port_num;
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2276*4882a593Smuzhiyun 	if (!status) {
2277*4882a593Smuzhiyun 		struct be_cmd_resp_get_beacon_state *resp =
2278*4882a593Smuzhiyun 						embedded_payload(wrb);
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 		*state = resp->beacon_state;
2281*4882a593Smuzhiyun 	}
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun err:
2284*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2285*4882a593Smuzhiyun 	return status;
2286*4882a593Smuzhiyun }
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun /* Uses sync mcc */
2289*4882a593Smuzhiyun int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2290*4882a593Smuzhiyun 				      u8 page_num, u32 off, u32 len, u8 *data)
2291*4882a593Smuzhiyun {
2292*4882a593Smuzhiyun 	struct be_dma_mem cmd;
2293*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2294*4882a593Smuzhiyun 	struct be_cmd_req_port_type *req;
2295*4882a593Smuzhiyun 	int status;
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun 	if (page_num > TR_PAGE_A2)
2298*4882a593Smuzhiyun 		return -EINVAL;
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	cmd.size = sizeof(struct be_cmd_resp_port_type);
2301*4882a593Smuzhiyun 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2302*4882a593Smuzhiyun 				    GFP_ATOMIC);
2303*4882a593Smuzhiyun 	if (!cmd.va) {
2304*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2305*4882a593Smuzhiyun 		return -ENOMEM;
2306*4882a593Smuzhiyun 	}
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2309*4882a593Smuzhiyun 
2310*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2311*4882a593Smuzhiyun 	if (!wrb) {
2312*4882a593Smuzhiyun 		status = -EBUSY;
2313*4882a593Smuzhiyun 		goto err;
2314*4882a593Smuzhiyun 	}
2315*4882a593Smuzhiyun 	req = cmd.va;
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2318*4882a593Smuzhiyun 			       OPCODE_COMMON_READ_TRANSRECV_DATA,
2319*4882a593Smuzhiyun 			       cmd.size, wrb, &cmd);
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 	req->port = cpu_to_le32(adapter->hba_port_num);
2322*4882a593Smuzhiyun 	req->page_num = cpu_to_le32(page_num);
2323*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2324*4882a593Smuzhiyun 	if (!status && len > 0) {
2325*4882a593Smuzhiyun 		struct be_cmd_resp_port_type *resp = cmd.va;
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 		memcpy(data, resp->page_data + off, len);
2328*4882a593Smuzhiyun 	}
2329*4882a593Smuzhiyun err:
2330*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2331*4882a593Smuzhiyun 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2332*4882a593Smuzhiyun 	return status;
2333*4882a593Smuzhiyun }
2334*4882a593Smuzhiyun 
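/* lancer_cmd_write_object() pushes one chunk of a flash object to the Lancer
 * firmware over the MCC queue. The write length and an EOF flag are encoded
 * in the AMAP context; a zero-length write marks end-of-file and commits the
 * object. The completion is signalled asynchronously via
 * adapter->et_cmd_compl and the caller waits for it for up to 60 seconds.
 */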
2335*4882a593Smuzhiyun static int lancer_cmd_write_object(struct be_adapter *adapter,
2336*4882a593Smuzhiyun 				   struct be_dma_mem *cmd, u32 data_size,
2337*4882a593Smuzhiyun 				   u32 data_offset, const char *obj_name,
2338*4882a593Smuzhiyun 				   u32 *data_written, u8 *change_status,
2339*4882a593Smuzhiyun 				   u8 *addn_status)
2340*4882a593Smuzhiyun {
2341*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2342*4882a593Smuzhiyun 	struct lancer_cmd_req_write_object *req;
2343*4882a593Smuzhiyun 	struct lancer_cmd_resp_write_object *resp;
2344*4882a593Smuzhiyun 	void *ctxt = NULL;
2345*4882a593Smuzhiyun 	int status;
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2348*4882a593Smuzhiyun 	adapter->flash_status = 0;
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2351*4882a593Smuzhiyun 	if (!wrb) {
2352*4882a593Smuzhiyun 		status = -EBUSY;
2353*4882a593Smuzhiyun 		goto err_unlock;
2354*4882a593Smuzhiyun 	}
2355*4882a593Smuzhiyun 
2356*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2359*4882a593Smuzhiyun 			       OPCODE_COMMON_WRITE_OBJECT,
2360*4882a593Smuzhiyun 			       sizeof(struct lancer_cmd_req_write_object), wrb,
2361*4882a593Smuzhiyun 			       NULL);
2362*4882a593Smuzhiyun 
2363*4882a593Smuzhiyun 	ctxt = &req->context;
2364*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2365*4882a593Smuzhiyun 		      write_length, ctxt, data_size);
2366*4882a593Smuzhiyun 
2367*4882a593Smuzhiyun 	if (data_size == 0)
2368*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2369*4882a593Smuzhiyun 			      eof, ctxt, 1);
2370*4882a593Smuzhiyun 	else
2371*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2372*4882a593Smuzhiyun 			      eof, ctxt, 0);
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
2375*4882a593Smuzhiyun 	req->write_offset = cpu_to_le32(data_offset);
2376*4882a593Smuzhiyun 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2377*4882a593Smuzhiyun 	req->descriptor_count = cpu_to_le32(1);
2378*4882a593Smuzhiyun 	req->buf_len = cpu_to_le32(data_size);
2379*4882a593Smuzhiyun 	req->addr_low = cpu_to_le32((cmd->dma +
2380*4882a593Smuzhiyun 				     sizeof(struct lancer_cmd_req_write_object))
2381*4882a593Smuzhiyun 				    & 0xFFFFFFFF);
2382*4882a593Smuzhiyun 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2383*4882a593Smuzhiyun 				sizeof(struct lancer_cmd_req_write_object)));
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
2386*4882a593Smuzhiyun 	if (status)
2387*4882a593Smuzhiyun 		goto err_unlock;
2388*4882a593Smuzhiyun 
2389*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2392*4882a593Smuzhiyun 					 msecs_to_jiffies(60000)))
2393*4882a593Smuzhiyun 		status = -ETIMEDOUT;
2394*4882a593Smuzhiyun 	else
2395*4882a593Smuzhiyun 		status = adapter->flash_status;
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 	resp = embedded_payload(wrb);
2398*4882a593Smuzhiyun 	if (!status) {
2399*4882a593Smuzhiyun 		*data_written = le32_to_cpu(resp->actual_write_len);
2400*4882a593Smuzhiyun 		*change_status = resp->change_status;
2401*4882a593Smuzhiyun 	} else {
2402*4882a593Smuzhiyun 		*addn_status = resp->additional_status;
2403*4882a593Smuzhiyun 	}
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun 	return status;
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun err_unlock:
2408*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2409*4882a593Smuzhiyun 	return status;
2410*4882a593Smuzhiyun }
2411*4882a593Smuzhiyun 
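/* Reads page A0 of the transceiver EEPROM and caches the cable type for
 * QSFP and SFP+ modules in adapter->phy.cable_type; other interface types
 * report a cable type of 0.
 */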
2412*4882a593Smuzhiyun int be_cmd_query_cable_type(struct be_adapter *adapter)
2413*4882a593Smuzhiyun {
2414*4882a593Smuzhiyun 	u8 page_data[PAGE_DATA_LEN];
2415*4882a593Smuzhiyun 	int status;
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2418*4882a593Smuzhiyun 						   0, PAGE_DATA_LEN, page_data);
2419*4882a593Smuzhiyun 	if (!status) {
2420*4882a593Smuzhiyun 		switch (adapter->phy.interface_type) {
2421*4882a593Smuzhiyun 		case PHY_TYPE_QSFP:
2422*4882a593Smuzhiyun 			adapter->phy.cable_type =
2423*4882a593Smuzhiyun 				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2424*4882a593Smuzhiyun 			break;
2425*4882a593Smuzhiyun 		case PHY_TYPE_SFP_PLUS_10GB:
2426*4882a593Smuzhiyun 			adapter->phy.cable_type =
2427*4882a593Smuzhiyun 				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2428*4882a593Smuzhiyun 			break;
2429*4882a593Smuzhiyun 		default:
2430*4882a593Smuzhiyun 			adapter->phy.cable_type = 0;
2431*4882a593Smuzhiyun 			break;
2432*4882a593Smuzhiyun 		}
2433*4882a593Smuzhiyun 	}
2434*4882a593Smuzhiyun 	return status;
2435*4882a593Smuzhiyun }
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun int be_cmd_query_sfp_info(struct be_adapter *adapter)
2438*4882a593Smuzhiyun {
2439*4882a593Smuzhiyun 	u8 page_data[PAGE_DATA_LEN];
2440*4882a593Smuzhiyun 	int status;
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2443*4882a593Smuzhiyun 						   0, PAGE_DATA_LEN, page_data);
2444*4882a593Smuzhiyun 	if (!status) {
2445*4882a593Smuzhiyun 		strlcpy(adapter->phy.vendor_name, page_data +
2446*4882a593Smuzhiyun 			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2447*4882a593Smuzhiyun 		strlcpy(adapter->phy.vendor_pn,
2448*4882a593Smuzhiyun 			page_data + SFP_VENDOR_PN_OFFSET,
2449*4882a593Smuzhiyun 			SFP_VENDOR_NAME_LEN - 1);
2450*4882a593Smuzhiyun 	}
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 	return status;
2453*4882a593Smuzhiyun }
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun static int lancer_cmd_delete_object(struct be_adapter *adapter,
2456*4882a593Smuzhiyun 				    const char *obj_name)
2457*4882a593Smuzhiyun {
2458*4882a593Smuzhiyun 	struct lancer_cmd_req_delete_object *req;
2459*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2460*4882a593Smuzhiyun 	int status;
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2465*4882a593Smuzhiyun 	if (!wrb) {
2466*4882a593Smuzhiyun 		status = -EBUSY;
2467*4882a593Smuzhiyun 		goto err;
2468*4882a593Smuzhiyun 	}
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2473*4882a593Smuzhiyun 			       OPCODE_COMMON_DELETE_OBJECT,
2474*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2479*4882a593Smuzhiyun err:
2480*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2481*4882a593Smuzhiyun 	return status;
2482*4882a593Smuzhiyun }
2483*4882a593Smuzhiyun 
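/* Reads up to data_size bytes of the named flash object into the caller's
 * DMA buffer. On success the actual read length and an EOF indication are
 * returned; on failure the additional status from the response is reported
 * via addn_status.
 */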
2484*4882a593Smuzhiyun int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2485*4882a593Smuzhiyun 			   u32 data_size, u32 data_offset, const char *obj_name,
2486*4882a593Smuzhiyun 			   u32 *data_read, u32 *eof, u8 *addn_status)
2487*4882a593Smuzhiyun {
2488*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2489*4882a593Smuzhiyun 	struct lancer_cmd_req_read_object *req;
2490*4882a593Smuzhiyun 	struct lancer_cmd_resp_read_object *resp;
2491*4882a593Smuzhiyun 	int status;
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2496*4882a593Smuzhiyun 	if (!wrb) {
2497*4882a593Smuzhiyun 		status = -EBUSY;
2498*4882a593Smuzhiyun 		goto err_unlock;
2499*4882a593Smuzhiyun 	}
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2502*4882a593Smuzhiyun 
2503*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2504*4882a593Smuzhiyun 			       OPCODE_COMMON_READ_OBJECT,
2505*4882a593Smuzhiyun 			       sizeof(struct lancer_cmd_req_read_object), wrb,
2506*4882a593Smuzhiyun 			       NULL);
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	req->desired_read_len = cpu_to_le32(data_size);
2509*4882a593Smuzhiyun 	req->read_offset = cpu_to_le32(data_offset);
2510*4882a593Smuzhiyun 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2511*4882a593Smuzhiyun 	req->descriptor_count = cpu_to_le32(1);
2512*4882a593Smuzhiyun 	req->buf_len = cpu_to_le32(data_size);
2513*4882a593Smuzhiyun 	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2514*4882a593Smuzhiyun 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2515*4882a593Smuzhiyun 
2516*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2517*4882a593Smuzhiyun 
2518*4882a593Smuzhiyun 	resp = embedded_payload(wrb);
2519*4882a593Smuzhiyun 	if (!status) {
2520*4882a593Smuzhiyun 		*data_read = le32_to_cpu(resp->actual_read_len);
2521*4882a593Smuzhiyun 		*eof = le32_to_cpu(resp->eof);
2522*4882a593Smuzhiyun 	} else {
2523*4882a593Smuzhiyun 		*addn_status = resp->additional_status;
2524*4882a593Smuzhiyun 	}
2525*4882a593Smuzhiyun 
2526*4882a593Smuzhiyun err_unlock:
2527*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2528*4882a593Smuzhiyun 	return status;
2529*4882a593Smuzhiyun }
2530*4882a593Smuzhiyun 
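/* Issues a WRITE_FLASHROM command for one data chunk. The MCC completion is
 * delivered asynchronously via adapter->et_cmd_compl; the caller drops
 * mcc_lock before waiting for up to 40 seconds and then returns
 * adapter->flash_status as the result.
 */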
2531*4882a593Smuzhiyun static int be_cmd_write_flashrom(struct be_adapter *adapter,
2532*4882a593Smuzhiyun 				 struct be_dma_mem *cmd, u32 flash_type,
2533*4882a593Smuzhiyun 				 u32 flash_opcode, u32 img_offset, u32 buf_size)
2534*4882a593Smuzhiyun {
2535*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2536*4882a593Smuzhiyun 	struct be_cmd_write_flashrom *req;
2537*4882a593Smuzhiyun 	int status;
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2540*4882a593Smuzhiyun 	adapter->flash_status = 0;
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2543*4882a593Smuzhiyun 	if (!wrb) {
2544*4882a593Smuzhiyun 		status = -EBUSY;
2545*4882a593Smuzhiyun 		goto err_unlock;
2546*4882a593Smuzhiyun 	}
2547*4882a593Smuzhiyun 	req = cmd->va;
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2550*4882a593Smuzhiyun 			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2551*4882a593Smuzhiyun 			       cmd);
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 	req->params.op_type = cpu_to_le32(flash_type);
2554*4882a593Smuzhiyun 	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2555*4882a593Smuzhiyun 		req->params.offset = cpu_to_le32(img_offset);
2556*4882a593Smuzhiyun 
2557*4882a593Smuzhiyun 	req->params.op_code = cpu_to_le32(flash_opcode);
2558*4882a593Smuzhiyun 	req->params.data_buf_size = cpu_to_le32(buf_size);
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
2561*4882a593Smuzhiyun 	if (status)
2562*4882a593Smuzhiyun 		goto err_unlock;
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2567*4882a593Smuzhiyun 					 msecs_to_jiffies(40000)))
2568*4882a593Smuzhiyun 		status = -ETIMEDOUT;
2569*4882a593Smuzhiyun 	else
2570*4882a593Smuzhiyun 		status = adapter->flash_status;
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun 	return status;
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun err_unlock:
2575*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2576*4882a593Smuzhiyun 	return status;
2577*4882a593Smuzhiyun }
2578*4882a593Smuzhiyun 
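/* Reads the 4-byte CRC stored at the end of a flash region (callers pass the
 * region size minus 4 as crc_offset). The result lets the flashing code skip
 * a section whose on-flash contents already match the new image.
 */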
2579*4882a593Smuzhiyun static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2580*4882a593Smuzhiyun 				u16 img_optype, u32 img_offset, u32 crc_offset)
2581*4882a593Smuzhiyun {
2582*4882a593Smuzhiyun 	struct be_cmd_read_flash_crc *req;
2583*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
2584*4882a593Smuzhiyun 	int status;
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
2587*4882a593Smuzhiyun 
2588*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
2589*4882a593Smuzhiyun 	if (!wrb) {
2590*4882a593Smuzhiyun 		status = -EBUSY;
2591*4882a593Smuzhiyun 		goto err;
2592*4882a593Smuzhiyun 	}
2593*4882a593Smuzhiyun 	req = embedded_payload(wrb);
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2596*4882a593Smuzhiyun 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2597*4882a593Smuzhiyun 			       wrb, NULL);
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	req->params.op_type = cpu_to_le32(img_optype);
2600*4882a593Smuzhiyun 	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2601*4882a593Smuzhiyun 		req->params.offset = cpu_to_le32(img_offset + crc_offset);
2602*4882a593Smuzhiyun 	else
2603*4882a593Smuzhiyun 		req->params.offset = cpu_to_le32(crc_offset);
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2606*4882a593Smuzhiyun 	req->params.data_buf_size = cpu_to_le32(0x4);
2607*4882a593Smuzhiyun 
2608*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
2609*4882a593Smuzhiyun 	if (!status)
2610*4882a593Smuzhiyun 		memcpy(flashed_crc, req->crc, 4);
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun err:
2613*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
2614*4882a593Smuzhiyun 	return status;
2615*4882a593Smuzhiyun }
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun static bool phy_flashing_required(struct be_adapter *adapter)
2620*4882a593Smuzhiyun {
2621*4882a593Smuzhiyun 	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
2622*4882a593Smuzhiyun 		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2623*4882a593Smuzhiyun }
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun static bool is_comp_in_ufi(struct be_adapter *adapter,
2626*4882a593Smuzhiyun 			   struct flash_section_info *fsec, int type)
2627*4882a593Smuzhiyun {
2628*4882a593Smuzhiyun 	int i = 0, img_type = 0;
2629*4882a593Smuzhiyun 	struct flash_section_info_g2 *fsec_g2 = NULL;
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	if (BE2_chip(adapter))
2632*4882a593Smuzhiyun 		fsec_g2 = (struct flash_section_info_g2 *)fsec;
2633*4882a593Smuzhiyun 
2634*4882a593Smuzhiyun 	for (i = 0; i < MAX_FLASH_COMP; i++) {
2635*4882a593Smuzhiyun 		if (fsec_g2)
2636*4882a593Smuzhiyun 			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2637*4882a593Smuzhiyun 		else
2638*4882a593Smuzhiyun 			img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 		if (img_type == type)
2641*4882a593Smuzhiyun 			return true;
2642*4882a593Smuzhiyun 	}
2643*4882a593Smuzhiyun 	return false;
2644*4882a593Smuzhiyun }
2645*4882a593Smuzhiyun 
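/* Walks the firmware image past the file and image headers, scanning in
 * 32-byte steps for the flash directory cookie. Returns the matching
 * flash_section_info, or NULL if the cookie is not found (corrupt image).
 */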
2646*4882a593Smuzhiyun static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2647*4882a593Smuzhiyun 						int header_size,
2648*4882a593Smuzhiyun 						const struct firmware *fw)
2649*4882a593Smuzhiyun {
2650*4882a593Smuzhiyun 	struct flash_section_info *fsec = NULL;
2651*4882a593Smuzhiyun 	const u8 *p = fw->data;
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	p += header_size;
2654*4882a593Smuzhiyun 	while (p < (fw->data + fw->size)) {
2655*4882a593Smuzhiyun 		fsec = (struct flash_section_info *)p;
2656*4882a593Smuzhiyun 		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2657*4882a593Smuzhiyun 			return fsec;
2658*4882a593Smuzhiyun 		p += 32;
2659*4882a593Smuzhiyun 	}
2660*4882a593Smuzhiyun 	return NULL;
2661*4882a593Smuzhiyun }
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
2664*4882a593Smuzhiyun 			      u32 img_offset, u32 img_size, int hdr_size,
2665*4882a593Smuzhiyun 			      u16 img_optype, bool *crc_match)
2666*4882a593Smuzhiyun {
2667*4882a593Smuzhiyun 	u32 crc_offset;
2668*4882a593Smuzhiyun 	int status;
2669*4882a593Smuzhiyun 	u8 crc[4];
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
2672*4882a593Smuzhiyun 				      img_size - 4);
2673*4882a593Smuzhiyun 	if (status)
2674*4882a593Smuzhiyun 		return status;
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun 	crc_offset = hdr_size + img_offset + img_size - 4;
2677*4882a593Smuzhiyun 
2678*4882a593Smuzhiyun 	/* Skip flashing, if crc of flashed region matches */
2679*4882a593Smuzhiyun 	if (!memcmp(crc, p + crc_offset, 4))
2680*4882a593Smuzhiyun 		*crc_match = true;
2681*4882a593Smuzhiyun 	else
2682*4882a593Smuzhiyun 		*crc_match = false;
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun 	return status;
2685*4882a593Smuzhiyun }
2686*4882a593Smuzhiyun 
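/* Streams an image section to the flash in chunks of at most 32KB.
 * Intermediate chunks use a SAVE operation and only the final chunk issues
 * the FLASH (or PHY_FLASH) operation that commits the section. For example,
 * a 100KB section is sent as three 32KB SAVE chunks followed by a final 4KB
 * FLASH chunk. An ILLEGAL_REQUEST status for PHY firmware simply ends the
 * loop and is reported as success.
 */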
2687*4882a593Smuzhiyun static int be_flash(struct be_adapter *adapter, const u8 *img,
2688*4882a593Smuzhiyun 		    struct be_dma_mem *flash_cmd, int optype, int img_size,
2689*4882a593Smuzhiyun 		    u32 img_offset)
2690*4882a593Smuzhiyun {
2691*4882a593Smuzhiyun 	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
2692*4882a593Smuzhiyun 	struct be_cmd_write_flashrom *req = flash_cmd->va;
2693*4882a593Smuzhiyun 	int status;
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun 	while (total_bytes) {
2696*4882a593Smuzhiyun 		num_bytes = min_t(u32, 32 * 1024, total_bytes);
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 		total_bytes -= num_bytes;
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun 		if (!total_bytes) {
2701*4882a593Smuzhiyun 			if (optype == OPTYPE_PHY_FW)
2702*4882a593Smuzhiyun 				flash_op = FLASHROM_OPER_PHY_FLASH;
2703*4882a593Smuzhiyun 			else
2704*4882a593Smuzhiyun 				flash_op = FLASHROM_OPER_FLASH;
2705*4882a593Smuzhiyun 		} else {
2706*4882a593Smuzhiyun 			if (optype == OPTYPE_PHY_FW)
2707*4882a593Smuzhiyun 				flash_op = FLASHROM_OPER_PHY_SAVE;
2708*4882a593Smuzhiyun 			else
2709*4882a593Smuzhiyun 				flash_op = FLASHROM_OPER_SAVE;
2710*4882a593Smuzhiyun 		}
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun 		memcpy(req->data_buf, img, num_bytes);
2713*4882a593Smuzhiyun 		img += num_bytes;
2714*4882a593Smuzhiyun 		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
2715*4882a593Smuzhiyun 					       flash_op, img_offset +
2716*4882a593Smuzhiyun 					       bytes_sent, num_bytes);
2717*4882a593Smuzhiyun 		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
2718*4882a593Smuzhiyun 		    optype == OPTYPE_PHY_FW)
2719*4882a593Smuzhiyun 			break;
2720*4882a593Smuzhiyun 		else if (status)
2721*4882a593Smuzhiyun 			return status;
2722*4882a593Smuzhiyun 
2723*4882a593Smuzhiyun 		bytes_sent += num_bytes;
2724*4882a593Smuzhiyun 	}
2725*4882a593Smuzhiyun 	return 0;
2726*4882a593Smuzhiyun }
2727*4882a593Smuzhiyun 
2728*4882a593Smuzhiyun #define NCSI_UPDATE_LOG	"NCSI section update is not supported in FW ver %s\n"
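/* Returns true when the running firmware version string "ver" is at least
 * 3.102.148.0, the minimum version that supports flashing the NCSI section.
 * For example "3.102.148.0" and "10.0.0.0" qualify, while "3.100.20.0" and
 * any unparsable version string do not.
 */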
2729*4882a593Smuzhiyun static bool be_fw_ncsi_supported(char *ver)
2730*4882a593Smuzhiyun {
2731*4882a593Smuzhiyun 	int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */
2732*4882a593Smuzhiyun 	int v2[4];
2733*4882a593Smuzhiyun 	int i;
2734*4882a593Smuzhiyun 
2735*4882a593Smuzhiyun 	if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4)
2736*4882a593Smuzhiyun 		return false;
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun 	for (i = 0; i < 4; i++) {
2739*4882a593Smuzhiyun 		if (v1[i] < v2[i])
2740*4882a593Smuzhiyun 			return true;
2741*4882a593Smuzhiyun 		else if (v1[i] > v2[i])
2742*4882a593Smuzhiyun 			return false;
2743*4882a593Smuzhiyun 	}
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun 	return true;
2746*4882a593Smuzhiyun }
2747*4882a593Smuzhiyun 
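/* Table-driven flashing for the legacy BE2/BE3 UFI layout: each supported
 * component is looked up in the flash section directory and skipped when it
 * is absent, unsupported (NCSI on old firmware, PHY firmware when no PHY
 * needs flashing) or, for the boot code, when its on-flash CRC already
 * matches the image being downloaded.
 */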
2748*4882a593Smuzhiyun /* For BE2, BE3 and BE3-R */
2749*4882a593Smuzhiyun static int be_flash_BEx(struct be_adapter *adapter,
2750*4882a593Smuzhiyun 			const struct firmware *fw,
2751*4882a593Smuzhiyun 			struct be_dma_mem *flash_cmd, int num_of_images)
2752*4882a593Smuzhiyun {
2753*4882a593Smuzhiyun 	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2754*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
2755*4882a593Smuzhiyun 	struct flash_section_info *fsec = NULL;
2756*4882a593Smuzhiyun 	int status, i, filehdr_size, num_comp;
2757*4882a593Smuzhiyun 	const struct flash_comp *pflashcomp;
2758*4882a593Smuzhiyun 	bool crc_match;
2759*4882a593Smuzhiyun 	const u8 *p;
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 	static const struct flash_comp gen3_flash_types[] = {
2762*4882a593Smuzhiyun 		{ BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2763*4882a593Smuzhiyun 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2764*4882a593Smuzhiyun 		{ BE3_REDBOOT_START, OPTYPE_REDBOOT,
2765*4882a593Smuzhiyun 			BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2766*4882a593Smuzhiyun 		{ BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
2767*4882a593Smuzhiyun 			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2768*4882a593Smuzhiyun 		{ BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2769*4882a593Smuzhiyun 			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2770*4882a593Smuzhiyun 		{ BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2771*4882a593Smuzhiyun 			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2772*4882a593Smuzhiyun 		{ BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2773*4882a593Smuzhiyun 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2774*4882a593Smuzhiyun 		{ BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2775*4882a593Smuzhiyun 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2776*4882a593Smuzhiyun 		{ BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2777*4882a593Smuzhiyun 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
2778*4882a593Smuzhiyun 		{ BE3_NCSI_START, OPTYPE_NCSI_FW,
2779*4882a593Smuzhiyun 			BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
2780*4882a593Smuzhiyun 		{ BE3_PHY_FW_START, OPTYPE_PHY_FW,
2781*4882a593Smuzhiyun 			BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
2782*4882a593Smuzhiyun 	};
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun 	static const struct flash_comp gen2_flash_types[] = {
2785*4882a593Smuzhiyun 		{ BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2786*4882a593Smuzhiyun 			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2787*4882a593Smuzhiyun 		{ BE2_REDBOOT_START, OPTYPE_REDBOOT,
2788*4882a593Smuzhiyun 			BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2789*4882a593Smuzhiyun 		{ BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
2790*4882a593Smuzhiyun 			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2791*4882a593Smuzhiyun 		{ BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2792*4882a593Smuzhiyun 			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2793*4882a593Smuzhiyun 		{ BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2794*4882a593Smuzhiyun 			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2795*4882a593Smuzhiyun 		{ BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2796*4882a593Smuzhiyun 			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2797*4882a593Smuzhiyun 		{ BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2798*4882a593Smuzhiyun 			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2799*4882a593Smuzhiyun 		{ BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2800*4882a593Smuzhiyun 			 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
2801*4882a593Smuzhiyun 	};
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 	if (BE3_chip(adapter)) {
2804*4882a593Smuzhiyun 		pflashcomp = gen3_flash_types;
2805*4882a593Smuzhiyun 		filehdr_size = sizeof(struct flash_file_hdr_g3);
2806*4882a593Smuzhiyun 		num_comp = ARRAY_SIZE(gen3_flash_types);
2807*4882a593Smuzhiyun 	} else {
2808*4882a593Smuzhiyun 		pflashcomp = gen2_flash_types;
2809*4882a593Smuzhiyun 		filehdr_size = sizeof(struct flash_file_hdr_g2);
2810*4882a593Smuzhiyun 		num_comp = ARRAY_SIZE(gen2_flash_types);
2811*4882a593Smuzhiyun 		img_hdrs_size = 0;
2812*4882a593Smuzhiyun 	}
2813*4882a593Smuzhiyun 
2814*4882a593Smuzhiyun 	/* Get flash section info */
2815*4882a593Smuzhiyun 	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2816*4882a593Smuzhiyun 	if (!fsec) {
2817*4882a593Smuzhiyun 		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2818*4882a593Smuzhiyun 		return -1;
2819*4882a593Smuzhiyun 	}
2820*4882a593Smuzhiyun 	for (i = 0; i < num_comp; i++) {
2821*4882a593Smuzhiyun 		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2822*4882a593Smuzhiyun 			continue;
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2825*4882a593Smuzhiyun 		    !be_fw_ncsi_supported(adapter->fw_ver)) {
2826*4882a593Smuzhiyun 			dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver);
2827*4882a593Smuzhiyun 			continue;
2828*4882a593Smuzhiyun 		}
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 		if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
2831*4882a593Smuzhiyun 		    !phy_flashing_required(adapter))
2832*4882a593Smuzhiyun 			continue;
2833*4882a593Smuzhiyun 
2834*4882a593Smuzhiyun 		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
2835*4882a593Smuzhiyun 			status = be_check_flash_crc(adapter, fw->data,
2836*4882a593Smuzhiyun 						    pflashcomp[i].offset,
2837*4882a593Smuzhiyun 						    pflashcomp[i].size,
2838*4882a593Smuzhiyun 						    filehdr_size +
2839*4882a593Smuzhiyun 						    img_hdrs_size,
2840*4882a593Smuzhiyun 						    OPTYPE_REDBOOT, &crc_match);
2841*4882a593Smuzhiyun 			if (status) {
2842*4882a593Smuzhiyun 				dev_err(dev,
2843*4882a593Smuzhiyun 					"Could not get CRC for 0x%x region\n",
2844*4882a593Smuzhiyun 					pflashcomp[i].optype);
2845*4882a593Smuzhiyun 				continue;
2846*4882a593Smuzhiyun 			}
2847*4882a593Smuzhiyun 
2848*4882a593Smuzhiyun 			if (crc_match)
2849*4882a593Smuzhiyun 				continue;
2850*4882a593Smuzhiyun 		}
2851*4882a593Smuzhiyun 
2852*4882a593Smuzhiyun 		p = fw->data + filehdr_size + pflashcomp[i].offset +
2853*4882a593Smuzhiyun 			img_hdrs_size;
2854*4882a593Smuzhiyun 		if (p + pflashcomp[i].size > fw->data + fw->size)
2855*4882a593Smuzhiyun 			return -1;
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun 		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
2858*4882a593Smuzhiyun 				  pflashcomp[i].size, 0);
2859*4882a593Smuzhiyun 		if (status) {
2860*4882a593Smuzhiyun 			dev_err(dev, "Flashing section type 0x%x failed\n",
2861*4882a593Smuzhiyun 				pflashcomp[i].img_type);
2862*4882a593Smuzhiyun 			return status;
2863*4882a593Smuzhiyun 		}
2864*4882a593Smuzhiyun 	}
2865*4882a593Smuzhiyun 	return 0;
2866*4882a593Smuzhiyun }
2867*4882a593Smuzhiyun 
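/* Maps a flash section entry to the operation type used to flash it. Newer
 * UFI images carry the optype directly; older images set it to 0xFFFF and
 * the optype is then derived from the image type. Unknown legacy types fall
 * through and keep the 0xFFFF value, which the caller treats as "skip".
 */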
2868*4882a593Smuzhiyun static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
2869*4882a593Smuzhiyun {
2870*4882a593Smuzhiyun 	u32 img_type = le32_to_cpu(fsec_entry.type);
2871*4882a593Smuzhiyun 	u16 img_optype = le16_to_cpu(fsec_entry.optype);
2872*4882a593Smuzhiyun 
2873*4882a593Smuzhiyun 	if (img_optype != 0xFFFF)
2874*4882a593Smuzhiyun 		return img_optype;
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun 	switch (img_type) {
2877*4882a593Smuzhiyun 	case IMAGE_FIRMWARE_ISCSI:
2878*4882a593Smuzhiyun 		img_optype = OPTYPE_ISCSI_ACTIVE;
2879*4882a593Smuzhiyun 		break;
2880*4882a593Smuzhiyun 	case IMAGE_BOOT_CODE:
2881*4882a593Smuzhiyun 		img_optype = OPTYPE_REDBOOT;
2882*4882a593Smuzhiyun 		break;
2883*4882a593Smuzhiyun 	case IMAGE_OPTION_ROM_ISCSI:
2884*4882a593Smuzhiyun 		img_optype = OPTYPE_BIOS;
2885*4882a593Smuzhiyun 		break;
2886*4882a593Smuzhiyun 	case IMAGE_OPTION_ROM_PXE:
2887*4882a593Smuzhiyun 		img_optype = OPTYPE_PXE_BIOS;
2888*4882a593Smuzhiyun 		break;
2889*4882a593Smuzhiyun 	case IMAGE_OPTION_ROM_FCOE:
2890*4882a593Smuzhiyun 		img_optype = OPTYPE_FCOE_BIOS;
2891*4882a593Smuzhiyun 		break;
2892*4882a593Smuzhiyun 	case IMAGE_FIRMWARE_BACKUP_ISCSI:
2893*4882a593Smuzhiyun 		img_optype = OPTYPE_ISCSI_BACKUP;
2894*4882a593Smuzhiyun 		break;
2895*4882a593Smuzhiyun 	case IMAGE_NCSI:
2896*4882a593Smuzhiyun 		img_optype = OPTYPE_NCSI_FW;
2897*4882a593Smuzhiyun 		break;
2898*4882a593Smuzhiyun 	case IMAGE_FLASHISM_JUMPVECTOR:
2899*4882a593Smuzhiyun 		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
2900*4882a593Smuzhiyun 		break;
2901*4882a593Smuzhiyun 	case IMAGE_FIRMWARE_PHY:
2902*4882a593Smuzhiyun 		img_optype = OPTYPE_SH_PHY_FW;
2903*4882a593Smuzhiyun 		break;
2904*4882a593Smuzhiyun 	case IMAGE_REDBOOT_DIR:
2905*4882a593Smuzhiyun 		img_optype = OPTYPE_REDBOOT_DIR;
2906*4882a593Smuzhiyun 		break;
2907*4882a593Smuzhiyun 	case IMAGE_REDBOOT_CONFIG:
2908*4882a593Smuzhiyun 		img_optype = OPTYPE_REDBOOT_CONFIG;
2909*4882a593Smuzhiyun 		break;
2910*4882a593Smuzhiyun 	case IMAGE_UFI_DIR:
2911*4882a593Smuzhiyun 		img_optype = OPTYPE_UFI_DIR;
2912*4882a593Smuzhiyun 		break;
2913*4882a593Smuzhiyun 	default:
2914*4882a593Smuzhiyun 		break;
2915*4882a593Smuzhiyun 	}
2916*4882a593Smuzhiyun 
2917*4882a593Smuzhiyun 	return img_optype;
2918*4882a593Smuzhiyun }
2919*4882a593Smuzhiyun 
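/* Skyhawk flashing first tries offset-based flashing
 * (OPTYPE_OFFSET_SPECIFIED). If the firmware on the card rejects that with
 * ILLEGAL_REQUEST or ILLEGAL_FIELD, the whole section list is retried using
 * the older optype-based mechanism. CRC checks are skipped for entries
 * written in the old (optype == 0xFFFF) format.
 */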
2920*4882a593Smuzhiyun static int be_flash_skyhawk(struct be_adapter *adapter,
2921*4882a593Smuzhiyun 			    const struct firmware *fw,
2922*4882a593Smuzhiyun 			    struct be_dma_mem *flash_cmd, int num_of_images)
2923*4882a593Smuzhiyun {
2924*4882a593Smuzhiyun 	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
2925*4882a593Smuzhiyun 	bool crc_match, old_fw_img, flash_offset_support = true;
2926*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
2927*4882a593Smuzhiyun 	struct flash_section_info *fsec = NULL;
2928*4882a593Smuzhiyun 	u32 img_offset, img_size, img_type;
2929*4882a593Smuzhiyun 	u16 img_optype, flash_optype;
2930*4882a593Smuzhiyun 	int status, i, filehdr_size;
2931*4882a593Smuzhiyun 	const u8 *p;
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun 	filehdr_size = sizeof(struct flash_file_hdr_g3);
2934*4882a593Smuzhiyun 	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2935*4882a593Smuzhiyun 	if (!fsec) {
2936*4882a593Smuzhiyun 		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2937*4882a593Smuzhiyun 		return -EINVAL;
2938*4882a593Smuzhiyun 	}
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun retry_flash:
2941*4882a593Smuzhiyun 	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
2942*4882a593Smuzhiyun 		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
2943*4882a593Smuzhiyun 		img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
2944*4882a593Smuzhiyun 		img_type   = le32_to_cpu(fsec->fsec_entry[i].type);
2945*4882a593Smuzhiyun 		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
2946*4882a593Smuzhiyun 		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
2947*4882a593Smuzhiyun 
2948*4882a593Smuzhiyun 		if (img_optype == 0xFFFF)
2949*4882a593Smuzhiyun 			continue;
2950*4882a593Smuzhiyun 
2951*4882a593Smuzhiyun 		if (flash_offset_support)
2952*4882a593Smuzhiyun 			flash_optype = OPTYPE_OFFSET_SPECIFIED;
2953*4882a593Smuzhiyun 		else
2954*4882a593Smuzhiyun 			flash_optype = img_optype;
2955*4882a593Smuzhiyun 
2956*4882a593Smuzhiyun 		/* Don't bother verifying CRC if an old FW image is being
2957*4882a593Smuzhiyun 		 * flashed
2958*4882a593Smuzhiyun 		 */
2959*4882a593Smuzhiyun 		if (old_fw_img)
2960*4882a593Smuzhiyun 			goto flash;
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun 		status = be_check_flash_crc(adapter, fw->data, img_offset,
2963*4882a593Smuzhiyun 					    img_size, filehdr_size +
2964*4882a593Smuzhiyun 					    img_hdrs_size, flash_optype,
2965*4882a593Smuzhiyun 					    &crc_match);
2966*4882a593Smuzhiyun 		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
2967*4882a593Smuzhiyun 		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
2968*4882a593Smuzhiyun 			/* The current FW image on the card does not support
2969*4882a593Smuzhiyun 			 * OFFSET based flashing. Retry using older mechanism
2970*4882a593Smuzhiyun 			 * of OPTYPE based flashing
2971*4882a593Smuzhiyun 			 */
2972*4882a593Smuzhiyun 			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2973*4882a593Smuzhiyun 				flash_offset_support = false;
2974*4882a593Smuzhiyun 				goto retry_flash;
2975*4882a593Smuzhiyun 			}
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun 			/* The current FW image on the card does not recognize
2978*4882a593Smuzhiyun 			 * the new FLASH op_type. The FW download is partially
2979*4882a593Smuzhiyun 			 * complete. Reboot the server now to enable FW image
2980*4882a593Smuzhiyun 			 * to recognize the new FLASH op_type. To complete the
2981*4882a593Smuzhiyun 			 * remaining process, download the same FW again after
2982*4882a593Smuzhiyun 			 * the reboot.
2983*4882a593Smuzhiyun 			 */
2984*4882a593Smuzhiyun 			dev_err(dev, "Flash incomplete. Reset the server\n");
2985*4882a593Smuzhiyun 			dev_err(dev, "Download FW image again after reset\n");
2986*4882a593Smuzhiyun 			return -EAGAIN;
2987*4882a593Smuzhiyun 		} else if (status) {
2988*4882a593Smuzhiyun 			dev_err(dev, "Could not get CRC for 0x%x region\n",
2989*4882a593Smuzhiyun 				img_optype);
2990*4882a593Smuzhiyun 			return -EFAULT;
2991*4882a593Smuzhiyun 		}
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun 		if (crc_match)
2994*4882a593Smuzhiyun 			continue;
2995*4882a593Smuzhiyun 
2996*4882a593Smuzhiyun flash:
2997*4882a593Smuzhiyun 		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
2998*4882a593Smuzhiyun 		if (p + img_size > fw->data + fw->size)
2999*4882a593Smuzhiyun 			return -1;
3000*4882a593Smuzhiyun 
3001*4882a593Smuzhiyun 		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
3002*4882a593Smuzhiyun 				  img_offset);
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun 		/* The current FW image on the card does not support OFFSET
3005*4882a593Smuzhiyun 		 * based flashing. Retry using older mechanism of OPTYPE based
3006*4882a593Smuzhiyun 		 * flashing
3007*4882a593Smuzhiyun 		 */
3008*4882a593Smuzhiyun 		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
3009*4882a593Smuzhiyun 		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
3010*4882a593Smuzhiyun 			flash_offset_support = false;
3011*4882a593Smuzhiyun 			goto retry_flash;
3012*4882a593Smuzhiyun 		}
3013*4882a593Smuzhiyun 
3014*4882a593Smuzhiyun 		/* For old FW images ignore ILLEGAL_FIELD error or errors on
3015*4882a593Smuzhiyun 		 * UFI_DIR region
3016*4882a593Smuzhiyun 		 */
3017*4882a593Smuzhiyun 		if (old_fw_img &&
3018*4882a593Smuzhiyun 		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
3019*4882a593Smuzhiyun 		     (img_optype == OPTYPE_UFI_DIR &&
3020*4882a593Smuzhiyun 		      base_status(status) == MCC_STATUS_FAILED))) {
3021*4882a593Smuzhiyun 			continue;
3022*4882a593Smuzhiyun 		} else if (status) {
3023*4882a593Smuzhiyun 			dev_err(dev, "Flashing section type 0x%x failed\n",
3024*4882a593Smuzhiyun 				img_type);
3025*4882a593Smuzhiyun 
3026*4882a593Smuzhiyun 			switch (addl_status(status)) {
3027*4882a593Smuzhiyun 			case MCC_ADDL_STATUS_MISSING_SIGNATURE:
3028*4882a593Smuzhiyun 				dev_err(dev,
3029*4882a593Smuzhiyun 					"Digital signature missing in FW\n");
3030*4882a593Smuzhiyun 				return -EINVAL;
3031*4882a593Smuzhiyun 			case MCC_ADDL_STATUS_INVALID_SIGNATURE:
3032*4882a593Smuzhiyun 				dev_err(dev,
3033*4882a593Smuzhiyun 					"Invalid digital signature in FW\n");
3034*4882a593Smuzhiyun 				return -EINVAL;
3035*4882a593Smuzhiyun 			default:
3036*4882a593Smuzhiyun 				return -EFAULT;
3037*4882a593Smuzhiyun 			}
3038*4882a593Smuzhiyun 		}
3039*4882a593Smuzhiyun 	}
3040*4882a593Smuzhiyun 	return 0;
3041*4882a593Smuzhiyun }
3042*4882a593Smuzhiyun 
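/* Lancer firmware download: the image (whose size must be a multiple of 4)
 * is written to LANCER_FW_DOWNLOAD_LOCATION in LANCER_FW_DOWNLOAD_CHUNK
 * sized pieces, then committed with a final zero-length write. Depending on
 * the change_status returned by the commit, the adapter is either reset here
 * to activate the new image or the user is told to reboot.
 */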
3043*4882a593Smuzhiyun int lancer_fw_download(struct be_adapter *adapter,
3044*4882a593Smuzhiyun 		       const struct firmware *fw)
3045*4882a593Smuzhiyun {
3046*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
3047*4882a593Smuzhiyun 	struct be_dma_mem flash_cmd;
3048*4882a593Smuzhiyun 	const u8 *data_ptr = NULL;
3049*4882a593Smuzhiyun 	u8 *dest_image_ptr = NULL;
3050*4882a593Smuzhiyun 	size_t image_size = 0;
3051*4882a593Smuzhiyun 	u32 chunk_size = 0;
3052*4882a593Smuzhiyun 	u32 data_written = 0;
3053*4882a593Smuzhiyun 	u32 offset = 0;
3054*4882a593Smuzhiyun 	int status = 0;
3055*4882a593Smuzhiyun 	u8 add_status = 0;
3056*4882a593Smuzhiyun 	u8 change_status;
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun 	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3059*4882a593Smuzhiyun 		dev_err(dev, "FW image size should be multiple of 4\n");
3060*4882a593Smuzhiyun 		return -EINVAL;
3061*4882a593Smuzhiyun 	}
3062*4882a593Smuzhiyun 
3063*4882a593Smuzhiyun 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3064*4882a593Smuzhiyun 				+ LANCER_FW_DOWNLOAD_CHUNK;
3065*4882a593Smuzhiyun 	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3066*4882a593Smuzhiyun 					  GFP_KERNEL);
3067*4882a593Smuzhiyun 	if (!flash_cmd.va)
3068*4882a593Smuzhiyun 		return -ENOMEM;
3069*4882a593Smuzhiyun 
3070*4882a593Smuzhiyun 	dest_image_ptr = flash_cmd.va +
3071*4882a593Smuzhiyun 				sizeof(struct lancer_cmd_req_write_object);
3072*4882a593Smuzhiyun 	image_size = fw->size;
3073*4882a593Smuzhiyun 	data_ptr = fw->data;
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 	while (image_size) {
3076*4882a593Smuzhiyun 		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3077*4882a593Smuzhiyun 
3078*4882a593Smuzhiyun 		/* Copy the image chunk content. */
3079*4882a593Smuzhiyun 		memcpy(dest_image_ptr, data_ptr, chunk_size);
3080*4882a593Smuzhiyun 
3081*4882a593Smuzhiyun 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3082*4882a593Smuzhiyun 						 chunk_size, offset,
3083*4882a593Smuzhiyun 						 LANCER_FW_DOWNLOAD_LOCATION,
3084*4882a593Smuzhiyun 						 &data_written, &change_status,
3085*4882a593Smuzhiyun 						 &add_status);
3086*4882a593Smuzhiyun 		if (status)
3087*4882a593Smuzhiyun 			break;
3088*4882a593Smuzhiyun 
3089*4882a593Smuzhiyun 		offset += data_written;
3090*4882a593Smuzhiyun 		data_ptr += data_written;
3091*4882a593Smuzhiyun 		image_size -= data_written;
3092*4882a593Smuzhiyun 	}
3093*4882a593Smuzhiyun 
3094*4882a593Smuzhiyun 	if (!status) {
3095*4882a593Smuzhiyun 		/* Commit the FW written */
3096*4882a593Smuzhiyun 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3097*4882a593Smuzhiyun 						 0, offset,
3098*4882a593Smuzhiyun 						 LANCER_FW_DOWNLOAD_LOCATION,
3099*4882a593Smuzhiyun 						 &data_written, &change_status,
3100*4882a593Smuzhiyun 						 &add_status);
3101*4882a593Smuzhiyun 	}
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun 	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3104*4882a593Smuzhiyun 	if (status) {
3105*4882a593Smuzhiyun 		dev_err(dev, "Firmware load error\n");
3106*4882a593Smuzhiyun 		return be_cmd_status(status);
3107*4882a593Smuzhiyun 	}
3108*4882a593Smuzhiyun 
3109*4882a593Smuzhiyun 	dev_info(dev, "Firmware flashed successfully\n");
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun 	if (change_status == LANCER_FW_RESET_NEEDED) {
3112*4882a593Smuzhiyun 		dev_info(dev, "Resetting adapter to activate new FW\n");
3113*4882a593Smuzhiyun 		status = lancer_physdev_ctrl(adapter,
3114*4882a593Smuzhiyun 					     PHYSDEV_CONTROL_FW_RESET_MASK);
3115*4882a593Smuzhiyun 		if (status) {
3116*4882a593Smuzhiyun 			dev_err(dev, "Adapter busy, could not reset FW\n");
3117*4882a593Smuzhiyun 			dev_err(dev, "Reboot server to activate new FW\n");
3118*4882a593Smuzhiyun 		}
3119*4882a593Smuzhiyun 	} else if (change_status != LANCER_NO_RESET_NEEDED) {
3120*4882a593Smuzhiyun 		dev_info(dev, "Reboot server to activate new FW\n");
3121*4882a593Smuzhiyun 	}
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	return 0;
3124*4882a593Smuzhiyun }
3125*4882a593Smuzhiyun 
3126*4882a593Smuzhiyun /* Check if the flash image file is compatible with the adapter that
3127*4882a593Smuzhiyun  * is being flashed.
3128*4882a593Smuzhiyun  */
3129*4882a593Smuzhiyun static bool be_check_ufi_compatibility(struct be_adapter *adapter,
3130*4882a593Smuzhiyun 				       struct flash_file_hdr_g3 *fhdr)
3131*4882a593Smuzhiyun {
3132*4882a593Smuzhiyun 	if (!fhdr) {
3133*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
3134*4882a593Smuzhiyun 		return false;
3135*4882a593Smuzhiyun 	}
3136*4882a593Smuzhiyun 
3137*4882a593Smuzhiyun 	/* First letter of the build version is used to identify
3138*4882a593Smuzhiyun 	 * which chip this image file is meant for.
3139*4882a593Smuzhiyun 	 */
3140*4882a593Smuzhiyun 	switch (fhdr->build[0]) {
3141*4882a593Smuzhiyun 	case BLD_STR_UFI_TYPE_SH:
3142*4882a593Smuzhiyun 		if (!skyhawk_chip(adapter))
3143*4882a593Smuzhiyun 			return false;
3144*4882a593Smuzhiyun 		break;
3145*4882a593Smuzhiyun 	case BLD_STR_UFI_TYPE_BE3:
3146*4882a593Smuzhiyun 		if (!BE3_chip(adapter))
3147*4882a593Smuzhiyun 			return false;
3148*4882a593Smuzhiyun 		break;
3149*4882a593Smuzhiyun 	case BLD_STR_UFI_TYPE_BE2:
3150*4882a593Smuzhiyun 		if (!BE2_chip(adapter))
3151*4882a593Smuzhiyun 			return false;
3152*4882a593Smuzhiyun 		break;
3153*4882a593Smuzhiyun 	default:
3154*4882a593Smuzhiyun 		return false;
3155*4882a593Smuzhiyun 	}
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun 	/* In BE3 FW images the "asic_type_rev" field doesn't track the
3158*4882a593Smuzhiyun 	 * asic_rev of the chips it is compatible with.
3159*4882a593Smuzhiyun 	 * When asic_type_rev is 0 the image is compatible only with
3160*4882a593Smuzhiyun 	 * pre-BE3-R chips (asic_rev < 0x10)
3161*4882a593Smuzhiyun 	 */
3162*4882a593Smuzhiyun 	if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
3163*4882a593Smuzhiyun 		return adapter->asic_rev < 0x10;
3164*4882a593Smuzhiyun 	else
3165*4882a593Smuzhiyun 		return (fhdr->asic_type_rev >= adapter->asic_rev);
3166*4882a593Smuzhiyun }
3167*4882a593Smuzhiyun 
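/* BE2/BE3/Skyhawk firmware download: validates that the UFI file matches the
 * adapter, then walks the image headers and flashes via the chip-specific
 * helper. For non-BE2 chips only image id 1 is processed.
 */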
3168*4882a593Smuzhiyun int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3169*4882a593Smuzhiyun {
3170*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
3171*4882a593Smuzhiyun 	struct flash_file_hdr_g3 *fhdr3;
3172*4882a593Smuzhiyun 	struct image_hdr *img_hdr_ptr;
3173*4882a593Smuzhiyun 	int status = 0, i, num_imgs;
3174*4882a593Smuzhiyun 	struct be_dma_mem flash_cmd;
3175*4882a593Smuzhiyun 
3176*4882a593Smuzhiyun 	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3177*4882a593Smuzhiyun 	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
3178*4882a593Smuzhiyun 		dev_err(dev, "Flash image is not compatible with adapter\n");
3179*4882a593Smuzhiyun 		return -EINVAL;
3180*4882a593Smuzhiyun 	}
3181*4882a593Smuzhiyun 
3182*4882a593Smuzhiyun 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3183*4882a593Smuzhiyun 	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3184*4882a593Smuzhiyun 					  GFP_KERNEL);
3185*4882a593Smuzhiyun 	if (!flash_cmd.va)
3186*4882a593Smuzhiyun 		return -ENOMEM;
3187*4882a593Smuzhiyun 
3188*4882a593Smuzhiyun 	num_imgs = le32_to_cpu(fhdr3->num_imgs);
3189*4882a593Smuzhiyun 	for (i = 0; i < num_imgs; i++) {
3190*4882a593Smuzhiyun 		img_hdr_ptr = (struct image_hdr *)(fw->data +
3191*4882a593Smuzhiyun 				(sizeof(struct flash_file_hdr_g3) +
3192*4882a593Smuzhiyun 				 i * sizeof(struct image_hdr)));
3193*4882a593Smuzhiyun 		if (!BE2_chip(adapter) &&
3194*4882a593Smuzhiyun 		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
3195*4882a593Smuzhiyun 			continue;
3196*4882a593Smuzhiyun 
3197*4882a593Smuzhiyun 		if (skyhawk_chip(adapter))
3198*4882a593Smuzhiyun 			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
3199*4882a593Smuzhiyun 						  num_imgs);
3200*4882a593Smuzhiyun 		else
3201*4882a593Smuzhiyun 			status = be_flash_BEx(adapter, fw, &flash_cmd,
3202*4882a593Smuzhiyun 					      num_imgs);
3203*4882a593Smuzhiyun 	}
3204*4882a593Smuzhiyun 
3205*4882a593Smuzhiyun 	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3206*4882a593Smuzhiyun 	if (!status)
3207*4882a593Smuzhiyun 		dev_info(dev, "Firmware flashed successfully\n");
3208*4882a593Smuzhiyun 
3209*4882a593Smuzhiyun 	return status;
3210*4882a593Smuzhiyun }
3211*4882a593Smuzhiyun 
3212*4882a593Smuzhiyun int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
3213*4882a593Smuzhiyun 			    struct be_dma_mem *nonemb_cmd)
3214*4882a593Smuzhiyun {
3215*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3216*4882a593Smuzhiyun 	struct be_cmd_req_acpi_wol_magic_config *req;
3217*4882a593Smuzhiyun 	int status;
3218*4882a593Smuzhiyun 
3219*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3220*4882a593Smuzhiyun 
3221*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3222*4882a593Smuzhiyun 	if (!wrb) {
3223*4882a593Smuzhiyun 		status = -EBUSY;
3224*4882a593Smuzhiyun 		goto err;
3225*4882a593Smuzhiyun 	}
3226*4882a593Smuzhiyun 	req = nonemb_cmd->va;
3227*4882a593Smuzhiyun 
3228*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3229*4882a593Smuzhiyun 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
3230*4882a593Smuzhiyun 			       wrb, nonemb_cmd);
3231*4882a593Smuzhiyun 	memcpy(req->magic_mac, mac, ETH_ALEN);
3232*4882a593Smuzhiyun 
3233*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun err:
3236*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3237*4882a593Smuzhiyun 	return status;
3238*4882a593Smuzhiyun }
3239*4882a593Smuzhiyun 
3240*4882a593Smuzhiyun int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
3241*4882a593Smuzhiyun 			u8 loopback_type, u8 enable)
3242*4882a593Smuzhiyun {
3243*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3244*4882a593Smuzhiyun 	struct be_cmd_req_set_lmode *req;
3245*4882a593Smuzhiyun 	int status;
3246*4882a593Smuzhiyun 
3247*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
3248*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_LOWLEVEL))
3249*4882a593Smuzhiyun 		return -EPERM;
3250*4882a593Smuzhiyun 
3251*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3252*4882a593Smuzhiyun 
3253*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3254*4882a593Smuzhiyun 	if (!wrb) {
3255*4882a593Smuzhiyun 		status = -EBUSY;
3256*4882a593Smuzhiyun 		goto err_unlock;
3257*4882a593Smuzhiyun 	}
3258*4882a593Smuzhiyun 
3259*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3262*4882a593Smuzhiyun 			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
3263*4882a593Smuzhiyun 			       wrb, NULL);
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun 	req->src_port = port_num;
3266*4882a593Smuzhiyun 	req->dest_port = port_num;
3267*4882a593Smuzhiyun 	req->loopback_type = loopback_type;
3268*4882a593Smuzhiyun 	req->loopback_state = enable;
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
3271*4882a593Smuzhiyun 	if (status)
3272*4882a593Smuzhiyun 		goto err_unlock;
3273*4882a593Smuzhiyun 
3274*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3275*4882a593Smuzhiyun 
3276*4882a593Smuzhiyun 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
3277*4882a593Smuzhiyun 					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
3278*4882a593Smuzhiyun 		status = -ETIMEDOUT;
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 	return status;
3281*4882a593Smuzhiyun 
3282*4882a593Smuzhiyun err_unlock:
3283*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3284*4882a593Smuzhiyun 	return status;
3285*4882a593Smuzhiyun }
3286*4882a593Smuzhiyun 
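/* Runs a firmware-driven loopback test. The request carries a firmware-side
 * timeout value of 15; the MCC completion is delivered asynchronously and
 * the caller waits (without a host-side timeout) on et_cmd_compl before
 * reading the test status from the response.
 */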
3287*4882a593Smuzhiyun int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
3288*4882a593Smuzhiyun 			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
3289*4882a593Smuzhiyun 			 u64 pattern)
3290*4882a593Smuzhiyun {
3291*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3292*4882a593Smuzhiyun 	struct be_cmd_req_loopback_test *req;
3293*4882a593Smuzhiyun 	struct be_cmd_resp_loopback_test *resp;
3294*4882a593Smuzhiyun 	int status;
3295*4882a593Smuzhiyun 
3296*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
3297*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_LOWLEVEL))
3298*4882a593Smuzhiyun 		return -EPERM;
3299*4882a593Smuzhiyun 
3300*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3301*4882a593Smuzhiyun 
3302*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3303*4882a593Smuzhiyun 	if (!wrb) {
3304*4882a593Smuzhiyun 		status = -EBUSY;
3305*4882a593Smuzhiyun 		goto err;
3306*4882a593Smuzhiyun 	}
3307*4882a593Smuzhiyun 
3308*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3309*4882a593Smuzhiyun 
3310*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3311*4882a593Smuzhiyun 			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
3312*4882a593Smuzhiyun 			       NULL);
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun 	req->hdr.timeout = cpu_to_le32(15);
3315*4882a593Smuzhiyun 	req->pattern = cpu_to_le64(pattern);
3316*4882a593Smuzhiyun 	req->src_port = cpu_to_le32(port_num);
3317*4882a593Smuzhiyun 	req->dest_port = cpu_to_le32(port_num);
3318*4882a593Smuzhiyun 	req->pkt_size = cpu_to_le32(pkt_size);
3319*4882a593Smuzhiyun 	req->num_pkts = cpu_to_le32(num_pkts);
3320*4882a593Smuzhiyun 	req->loopback_type = cpu_to_le32(loopback_type);
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun 	status = be_mcc_notify(adapter);
3323*4882a593Smuzhiyun 	if (status)
3324*4882a593Smuzhiyun 		goto err;
3325*4882a593Smuzhiyun 
3326*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3327*4882a593Smuzhiyun 
3328*4882a593Smuzhiyun 	wait_for_completion(&adapter->et_cmd_compl);
3329*4882a593Smuzhiyun 	resp = embedded_payload(wrb);
3330*4882a593Smuzhiyun 	status = le32_to_cpu(resp->status);
3331*4882a593Smuzhiyun 
3332*4882a593Smuzhiyun 	return status;
3333*4882a593Smuzhiyun err:
3334*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3335*4882a593Smuzhiyun 	return status;
3336*4882a593Smuzhiyun }
3337*4882a593Smuzhiyun 
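/* DDR DMA self-test: fills the send buffer with the 64-bit pattern repeated
 * byte by byte, asks the firmware to DMA it back, and fails (-1) if the
 * received buffer differs from what was sent or the firmware reports a send
 * error.
 */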
3338*4882a593Smuzhiyun int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
3339*4882a593Smuzhiyun 			u32 byte_cnt, struct be_dma_mem *cmd)
3340*4882a593Smuzhiyun {
3341*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3342*4882a593Smuzhiyun 	struct be_cmd_req_ddrdma_test *req;
3343*4882a593Smuzhiyun 	int status;
3344*4882a593Smuzhiyun 	int i, j = 0;
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
3347*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_LOWLEVEL))
3348*4882a593Smuzhiyun 		return -EPERM;
3349*4882a593Smuzhiyun 
3350*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3351*4882a593Smuzhiyun 
3352*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3353*4882a593Smuzhiyun 	if (!wrb) {
3354*4882a593Smuzhiyun 		status = -EBUSY;
3355*4882a593Smuzhiyun 		goto err;
3356*4882a593Smuzhiyun 	}
3357*4882a593Smuzhiyun 	req = cmd->va;
3358*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3359*4882a593Smuzhiyun 			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
3360*4882a593Smuzhiyun 			       cmd);
3361*4882a593Smuzhiyun 
3362*4882a593Smuzhiyun 	req->pattern = cpu_to_le64(pattern);
3363*4882a593Smuzhiyun 	req->byte_count = cpu_to_le32(byte_cnt);
3364*4882a593Smuzhiyun 	for (i = 0; i < byte_cnt; i++) {
3365*4882a593Smuzhiyun 		req->snd_buff[i] = (u8)(pattern >> (j*8));
3366*4882a593Smuzhiyun 		j++;
3367*4882a593Smuzhiyun 		if (j > 7)
3368*4882a593Smuzhiyun 			j = 0;
3369*4882a593Smuzhiyun 	}
3370*4882a593Smuzhiyun 
3371*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3372*4882a593Smuzhiyun 
3373*4882a593Smuzhiyun 	if (!status) {
3374*4882a593Smuzhiyun 		struct be_cmd_resp_ddrdma_test *resp;
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun 		resp = cmd->va;
3377*4882a593Smuzhiyun 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
3378*4882a593Smuzhiyun 		    resp->snd_err) {
3379*4882a593Smuzhiyun 			status = -1;
3380*4882a593Smuzhiyun 		}
3381*4882a593Smuzhiyun 	}
3382*4882a593Smuzhiyun 
3383*4882a593Smuzhiyun err:
3384*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3385*4882a593Smuzhiyun 	return status;
3386*4882a593Smuzhiyun }
3387*4882a593Smuzhiyun 
3388*4882a593Smuzhiyun int be_cmd_get_seeprom_data(struct be_adapter *adapter,
3389*4882a593Smuzhiyun 			    struct be_dma_mem *nonemb_cmd)
3390*4882a593Smuzhiyun {
3391*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3392*4882a593Smuzhiyun 	struct be_cmd_req_seeprom_read *req;
3393*4882a593Smuzhiyun 	int status;
3394*4882a593Smuzhiyun 
3395*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3396*4882a593Smuzhiyun 
3397*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3398*4882a593Smuzhiyun 	if (!wrb) {
3399*4882a593Smuzhiyun 		status = -EBUSY;
3400*4882a593Smuzhiyun 		goto err;
3401*4882a593Smuzhiyun 	}
3402*4882a593Smuzhiyun 	req = nonemb_cmd->va;
3403*4882a593Smuzhiyun 
3404*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3405*4882a593Smuzhiyun 			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
3406*4882a593Smuzhiyun 			       nonemb_cmd);
3407*4882a593Smuzhiyun 
3408*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3409*4882a593Smuzhiyun 
3410*4882a593Smuzhiyun err:
3411*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3412*4882a593Smuzhiyun 	return status;
3413*4882a593Smuzhiyun }
3414*4882a593Smuzhiyun 
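/* Queries COMMON_GET_PHY_DETAILS over the MCCQ and caches the result in
 * adapter->phy (PHY/interface type, supported speeds, misc params).
 * On BE2 the fixed speeds are forced to 10G + 1G regardless of the
 * response.
 */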
3415*4882a593Smuzhiyun int be_cmd_get_phy_info(struct be_adapter *adapter)
3416*4882a593Smuzhiyun {
3417*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3418*4882a593Smuzhiyun 	struct be_cmd_req_get_phy_info *req;
3419*4882a593Smuzhiyun 	struct be_dma_mem cmd;
3420*4882a593Smuzhiyun 	int status;
3421*4882a593Smuzhiyun 
3422*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
3423*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_COMMON))
3424*4882a593Smuzhiyun 		return -EPERM;
3425*4882a593Smuzhiyun 
3426*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3429*4882a593Smuzhiyun 	if (!wrb) {
3430*4882a593Smuzhiyun 		status = -EBUSY;
3431*4882a593Smuzhiyun 		goto err;
3432*4882a593Smuzhiyun 	}
3433*4882a593Smuzhiyun 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
3434*4882a593Smuzhiyun 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3435*4882a593Smuzhiyun 				    GFP_ATOMIC);
3436*4882a593Smuzhiyun 	if (!cmd.va) {
3437*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3438*4882a593Smuzhiyun 		status = -ENOMEM;
3439*4882a593Smuzhiyun 		goto err;
3440*4882a593Smuzhiyun 	}
3441*4882a593Smuzhiyun 
3442*4882a593Smuzhiyun 	req = cmd.va;
3443*4882a593Smuzhiyun 
3444*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3445*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
3446*4882a593Smuzhiyun 			       wrb, &cmd);
3447*4882a593Smuzhiyun 
3448*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3449*4882a593Smuzhiyun 	if (!status) {
3450*4882a593Smuzhiyun 		struct be_phy_info *resp_phy_info =
3451*4882a593Smuzhiyun 				cmd.va + sizeof(struct be_cmd_req_hdr);
3452*4882a593Smuzhiyun 
3453*4882a593Smuzhiyun 		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
3454*4882a593Smuzhiyun 		adapter->phy.interface_type =
3455*4882a593Smuzhiyun 			le16_to_cpu(resp_phy_info->interface_type);
3456*4882a593Smuzhiyun 		adapter->phy.auto_speeds_supported =
3457*4882a593Smuzhiyun 			le16_to_cpu(resp_phy_info->auto_speeds_supported);
3458*4882a593Smuzhiyun 		adapter->phy.fixed_speeds_supported =
3459*4882a593Smuzhiyun 			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
3460*4882a593Smuzhiyun 		adapter->phy.misc_params =
3461*4882a593Smuzhiyun 			le32_to_cpu(resp_phy_info->misc_params);
3462*4882a593Smuzhiyun 
3463*4882a593Smuzhiyun 		if (BE2_chip(adapter)) {
3464*4882a593Smuzhiyun 			adapter->phy.fixed_speeds_supported =
3465*4882a593Smuzhiyun 				BE_SUPPORTED_SPEED_10GBPS |
3466*4882a593Smuzhiyun 				BE_SUPPORTED_SPEED_1GBPS;
3467*4882a593Smuzhiyun 		}
3468*4882a593Smuzhiyun 	}
3469*4882a593Smuzhiyun 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3470*4882a593Smuzhiyun err:
3471*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3472*4882a593Smuzhiyun 	return status;
3473*4882a593Smuzhiyun }
3474*4882a593Smuzhiyun 
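/* Programs the NIC rate limit (max_bps_nic) for the given domain via
 * COMMON_SET_QOS on the MCCQ; typically used to cap a VF's TX rate.
 */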
3475*4882a593Smuzhiyun static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
3476*4882a593Smuzhiyun {
3477*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3478*4882a593Smuzhiyun 	struct be_cmd_req_set_qos *req;
3479*4882a593Smuzhiyun 	int status;
3480*4882a593Smuzhiyun 
3481*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3482*4882a593Smuzhiyun 
3483*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3484*4882a593Smuzhiyun 	if (!wrb) {
3485*4882a593Smuzhiyun 		status = -EBUSY;
3486*4882a593Smuzhiyun 		goto err;
3487*4882a593Smuzhiyun 	}
3488*4882a593Smuzhiyun 
3489*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3490*4882a593Smuzhiyun 
3491*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3492*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
3493*4882a593Smuzhiyun 
3494*4882a593Smuzhiyun 	req->hdr.domain = domain;
3495*4882a593Smuzhiyun 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
3496*4882a593Smuzhiyun 	req->max_bps_nic = cpu_to_le32(bps);
3497*4882a593Smuzhiyun 
3498*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3499*4882a593Smuzhiyun 
3500*4882a593Smuzhiyun err:
3501*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3502*4882a593Smuzhiyun 	return status;
3503*4882a593Smuzhiyun }
3504*4882a593Smuzhiyun 
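/* Uses the bootstrap mailbox to fetch controller attributes: the HBA
 * port number, the controller serial-number words and, on BEx (where
 * GET_FUNC_CONFIG is not supported), the PCI function number.
 */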
3505*4882a593Smuzhiyun int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
3506*4882a593Smuzhiyun {
3507*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3508*4882a593Smuzhiyun 	struct be_cmd_req_cntl_attribs *req;
3509*4882a593Smuzhiyun 	struct be_cmd_resp_cntl_attribs *resp;
3510*4882a593Smuzhiyun 	int status, i;
3511*4882a593Smuzhiyun 	int payload_len = max(sizeof(*req), sizeof(*resp));
3512*4882a593Smuzhiyun 	struct mgmt_controller_attrib *attribs;
3513*4882a593Smuzhiyun 	struct be_dma_mem attribs_cmd;
3514*4882a593Smuzhiyun 	u32 *serial_num;
3515*4882a593Smuzhiyun 
3516*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3517*4882a593Smuzhiyun 		return -1;
3518*4882a593Smuzhiyun 
3519*4882a593Smuzhiyun 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
3520*4882a593Smuzhiyun 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
3521*4882a593Smuzhiyun 	attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3522*4882a593Smuzhiyun 					    attribs_cmd.size,
3523*4882a593Smuzhiyun 					    &attribs_cmd.dma, GFP_ATOMIC);
3524*4882a593Smuzhiyun 	if (!attribs_cmd.va) {
3525*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3526*4882a593Smuzhiyun 		status = -ENOMEM;
3527*4882a593Smuzhiyun 		goto err;
3528*4882a593Smuzhiyun 	}
3529*4882a593Smuzhiyun 
3530*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
3531*4882a593Smuzhiyun 	if (!wrb) {
3532*4882a593Smuzhiyun 		status = -EBUSY;
3533*4882a593Smuzhiyun 		goto err;
3534*4882a593Smuzhiyun 	}
3535*4882a593Smuzhiyun 	req = attribs_cmd.va;
3536*4882a593Smuzhiyun 
3537*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3538*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
3539*4882a593Smuzhiyun 			       wrb, &attribs_cmd);
3540*4882a593Smuzhiyun 
3541*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
3542*4882a593Smuzhiyun 	if (!status) {
3543*4882a593Smuzhiyun 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
3544*4882a593Smuzhiyun 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
3545*4882a593Smuzhiyun 		serial_num = attribs->hba_attribs.controller_serial_number;
3546*4882a593Smuzhiyun 		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
3547*4882a593Smuzhiyun 			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
3548*4882a593Smuzhiyun 				(BIT_MASK(16) - 1);
3549*4882a593Smuzhiyun 		/* For BEx, since GET_FUNC_CONFIG command is not
3550*4882a593Smuzhiyun 		 * supported, we read funcnum here as a workaround.
3551*4882a593Smuzhiyun 		 */
3552*4882a593Smuzhiyun 		if (BEx_chip(adapter))
3553*4882a593Smuzhiyun 			adapter->pf_num = attribs->hba_attribs.pci_funcnum;
3554*4882a593Smuzhiyun 	}
3555*4882a593Smuzhiyun 
3556*4882a593Smuzhiyun err:
3557*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
3558*4882a593Smuzhiyun 	if (attribs_cmd.va)
3559*4882a593Smuzhiyun 		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
3560*4882a593Smuzhiyun 				  attribs_cmd.va, attribs_cmd.dma);
3561*4882a593Smuzhiyun 	return status;
3562*4882a593Smuzhiyun }
3563*4882a593Smuzhiyun 
3564*4882a593Smuzhiyun /* Uses mbox */
3565*4882a593Smuzhiyun int be_cmd_req_native_mode(struct be_adapter *adapter)
3566*4882a593Smuzhiyun {
3567*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3568*4882a593Smuzhiyun 	struct be_cmd_req_set_func_cap *req;
3569*4882a593Smuzhiyun 	int status;
3570*4882a593Smuzhiyun 
3571*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3572*4882a593Smuzhiyun 		return -1;
3573*4882a593Smuzhiyun 
3574*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
3575*4882a593Smuzhiyun 	if (!wrb) {
3576*4882a593Smuzhiyun 		status = -EBUSY;
3577*4882a593Smuzhiyun 		goto err;
3578*4882a593Smuzhiyun 	}
3579*4882a593Smuzhiyun 
3580*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3581*4882a593Smuzhiyun 
3582*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3583*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
3584*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
3585*4882a593Smuzhiyun 
3586*4882a593Smuzhiyun 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
3587*4882a593Smuzhiyun 				CAPABILITY_BE3_NATIVE_ERX_API);
3588*4882a593Smuzhiyun 	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
3589*4882a593Smuzhiyun 
3590*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
3591*4882a593Smuzhiyun 	if (!status) {
3592*4882a593Smuzhiyun 		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
3593*4882a593Smuzhiyun 
3594*4882a593Smuzhiyun 		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
3595*4882a593Smuzhiyun 					CAPABILITY_BE3_NATIVE_ERX_API;
3596*4882a593Smuzhiyun 		if (!adapter->be3_native)
3597*4882a593Smuzhiyun 			dev_warn(&adapter->pdev->dev,
3598*4882a593Smuzhiyun 				 "adapter not in advanced mode\n");
3599*4882a593Smuzhiyun 	}
3600*4882a593Smuzhiyun err:
3601*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
3602*4882a593Smuzhiyun 	return status;
3603*4882a593Smuzhiyun }
3604*4882a593Smuzhiyun 
3605*4882a593Smuzhiyun /* Get privilege(s) for a function */
3606*4882a593Smuzhiyun int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
3607*4882a593Smuzhiyun 			     u32 domain)
3608*4882a593Smuzhiyun {
3609*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3610*4882a593Smuzhiyun 	struct be_cmd_req_get_fn_privileges *req;
3611*4882a593Smuzhiyun 	int status;
3612*4882a593Smuzhiyun 
3613*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3614*4882a593Smuzhiyun 
3615*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3616*4882a593Smuzhiyun 	if (!wrb) {
3617*4882a593Smuzhiyun 		status = -EBUSY;
3618*4882a593Smuzhiyun 		goto err;
3619*4882a593Smuzhiyun 	}
3620*4882a593Smuzhiyun 
3621*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3622*4882a593Smuzhiyun 
3623*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3624*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
3625*4882a593Smuzhiyun 			       wrb, NULL);
3626*4882a593Smuzhiyun 
3627*4882a593Smuzhiyun 	req->hdr.domain = domain;
3628*4882a593Smuzhiyun 
3629*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3630*4882a593Smuzhiyun 	if (!status) {
3631*4882a593Smuzhiyun 		struct be_cmd_resp_get_fn_privileges *resp =
3632*4882a593Smuzhiyun 						embedded_payload(wrb);
3633*4882a593Smuzhiyun 
3634*4882a593Smuzhiyun 		*privilege = le32_to_cpu(resp->privilege_mask);
3635*4882a593Smuzhiyun 
3636*4882a593Smuzhiyun 		/* In UMC mode FW does not return right privileges.
3637*4882a593Smuzhiyun 		 * Override with correct privilege equivalent to PF.
3638*4882a593Smuzhiyun 		 */
3639*4882a593Smuzhiyun 		if (BEx_chip(adapter) && be_is_mc(adapter) &&
3640*4882a593Smuzhiyun 		    be_physfn(adapter))
3641*4882a593Smuzhiyun 			*privilege = MAX_PRIVILEGES;
3642*4882a593Smuzhiyun 	}
3643*4882a593Smuzhiyun 
3644*4882a593Smuzhiyun err:
3645*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3646*4882a593Smuzhiyun 	return status;
3647*4882a593Smuzhiyun }
3648*4882a593Smuzhiyun 
3649*4882a593Smuzhiyun /* Set privilege(s) for a function */
3650*4882a593Smuzhiyun int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
3651*4882a593Smuzhiyun 			     u32 domain)
3652*4882a593Smuzhiyun {
3653*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3654*4882a593Smuzhiyun 	struct be_cmd_req_set_fn_privileges *req;
3655*4882a593Smuzhiyun 	int status;
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3658*4882a593Smuzhiyun 
3659*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3660*4882a593Smuzhiyun 	if (!wrb) {
3661*4882a593Smuzhiyun 		status = -EBUSY;
3662*4882a593Smuzhiyun 		goto err;
3663*4882a593Smuzhiyun 	}
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3666*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3667*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
3668*4882a593Smuzhiyun 			       wrb, NULL);
3669*4882a593Smuzhiyun 	req->hdr.domain = domain;
3670*4882a593Smuzhiyun 	if (lancer_chip(adapter))
3671*4882a593Smuzhiyun 		req->privileges_lancer = cpu_to_le32(privileges);
3672*4882a593Smuzhiyun 	else
3673*4882a593Smuzhiyun 		req->privileges = cpu_to_le32(privileges);
3674*4882a593Smuzhiyun 
3675*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3676*4882a593Smuzhiyun err:
3677*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3678*4882a593Smuzhiyun 	return status;
3679*4882a593Smuzhiyun }
3680*4882a593Smuzhiyun 
3681*4882a593Smuzhiyun /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
3682*4882a593Smuzhiyun  * pmac_id_valid: false => pmac_id or MAC address is requested.
3683*4882a593Smuzhiyun  *		  If pmac_id is returned, pmac_id_valid is returned as true
3684*4882a593Smuzhiyun  */
3685*4882a593Smuzhiyun int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3686*4882a593Smuzhiyun 			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
3687*4882a593Smuzhiyun 			     u8 domain)
3688*4882a593Smuzhiyun {
3689*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3690*4882a593Smuzhiyun 	struct be_cmd_req_get_mac_list *req;
3691*4882a593Smuzhiyun 	int status;
3692*4882a593Smuzhiyun 	int mac_count;
3693*4882a593Smuzhiyun 	struct be_dma_mem get_mac_list_cmd;
3694*4882a593Smuzhiyun 	int i;
3695*4882a593Smuzhiyun 
3696*4882a593Smuzhiyun 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3697*4882a593Smuzhiyun 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
3698*4882a593Smuzhiyun 	get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3699*4882a593Smuzhiyun 						 get_mac_list_cmd.size,
3700*4882a593Smuzhiyun 						 &get_mac_list_cmd.dma,
3701*4882a593Smuzhiyun 						 GFP_ATOMIC);
3702*4882a593Smuzhiyun 
3703*4882a593Smuzhiyun 	if (!get_mac_list_cmd.va) {
3704*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
3705*4882a593Smuzhiyun 			"Memory allocation failure during GET_MAC_LIST\n");
3706*4882a593Smuzhiyun 		return -ENOMEM;
3707*4882a593Smuzhiyun 	}
3708*4882a593Smuzhiyun 
3709*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3710*4882a593Smuzhiyun 
3711*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3712*4882a593Smuzhiyun 	if (!wrb) {
3713*4882a593Smuzhiyun 		status = -EBUSY;
3714*4882a593Smuzhiyun 		goto out;
3715*4882a593Smuzhiyun 	}
3716*4882a593Smuzhiyun 
3717*4882a593Smuzhiyun 	req = get_mac_list_cmd.va;
3718*4882a593Smuzhiyun 
3719*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3720*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_MAC_LIST,
3721*4882a593Smuzhiyun 			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
3722*4882a593Smuzhiyun 	req->hdr.domain = domain;
3723*4882a593Smuzhiyun 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3724*4882a593Smuzhiyun 	if (*pmac_id_valid) {
3725*4882a593Smuzhiyun 		req->mac_id = cpu_to_le32(*pmac_id);
3726*4882a593Smuzhiyun 		req->iface_id = cpu_to_le16(if_handle);
3727*4882a593Smuzhiyun 		req->perm_override = 0;
3728*4882a593Smuzhiyun 	} else {
3729*4882a593Smuzhiyun 		req->perm_override = 1;
3730*4882a593Smuzhiyun 	}
3731*4882a593Smuzhiyun 
3732*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3733*4882a593Smuzhiyun 	if (!status) {
3734*4882a593Smuzhiyun 		struct be_cmd_resp_get_mac_list *resp =
3735*4882a593Smuzhiyun 						get_mac_list_cmd.va;
3736*4882a593Smuzhiyun 
3737*4882a593Smuzhiyun 		if (*pmac_id_valid) {
3738*4882a593Smuzhiyun 			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3739*4882a593Smuzhiyun 			       ETH_ALEN);
3740*4882a593Smuzhiyun 			goto out;
3741*4882a593Smuzhiyun 		}
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun 		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3744*4882a593Smuzhiyun 		/* Mac list returned could contain one or more active mac_ids
3745*4882a593Smuzhiyun 		 * or one or more true or pseudo permanent mac addresses.
3746*4882a593Smuzhiyun 		 * If an active mac_id is present, return first active mac_id
3747*4882a593Smuzhiyun 		 * found.
3748*4882a593Smuzhiyun 		 */
3749*4882a593Smuzhiyun 		for (i = 0; i < mac_count; i++) {
3750*4882a593Smuzhiyun 			struct get_list_macaddr *mac_entry;
3751*4882a593Smuzhiyun 			u16 mac_addr_size;
3752*4882a593Smuzhiyun 			u32 mac_id;
3753*4882a593Smuzhiyun 
3754*4882a593Smuzhiyun 			mac_entry = &resp->macaddr_list[i];
3755*4882a593Smuzhiyun 			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3756*4882a593Smuzhiyun 			/* mac_id is a 32 bit value and mac_addr size
3757*4882a593Smuzhiyun 			 * is 6 bytes
3758*4882a593Smuzhiyun 			 */
3759*4882a593Smuzhiyun 			if (mac_addr_size == sizeof(u32)) {
3760*4882a593Smuzhiyun 				*pmac_id_valid = true;
3761*4882a593Smuzhiyun 				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3762*4882a593Smuzhiyun 				*pmac_id = le32_to_cpu(mac_id);
3763*4882a593Smuzhiyun 				goto out;
3764*4882a593Smuzhiyun 			}
3765*4882a593Smuzhiyun 		}
3766*4882a593Smuzhiyun 		/* If no active mac_id found, return first mac addr */
3767*4882a593Smuzhiyun 		*pmac_id_valid = false;
3768*4882a593Smuzhiyun 		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3769*4882a593Smuzhiyun 		       ETH_ALEN);
3770*4882a593Smuzhiyun 	}
3771*4882a593Smuzhiyun 
3772*4882a593Smuzhiyun out:
3773*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3774*4882a593Smuzhiyun 	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3775*4882a593Smuzhiyun 			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
3776*4882a593Smuzhiyun 	return status;
3777*4882a593Smuzhiyun }
3778*4882a593Smuzhiyun 
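/* Resolves the currently active MAC address of an interface. If the
 * caller does not already know the MAC is active, the MAC list is
 * consulted first; BEx chips then fall back to MAC_ADDR_QUERY using the
 * pmac_id.
 */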
3779*4882a593Smuzhiyun int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3780*4882a593Smuzhiyun 			  u8 *mac, u32 if_handle, bool active, u32 domain)
3781*4882a593Smuzhiyun {
3782*4882a593Smuzhiyun 	if (!active)
3783*4882a593Smuzhiyun 		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3784*4882a593Smuzhiyun 					 if_handle, domain);
3785*4882a593Smuzhiyun 	if (BEx_chip(adapter))
3786*4882a593Smuzhiyun 		return be_cmd_mac_addr_query(adapter, mac, false,
3787*4882a593Smuzhiyun 					     if_handle, curr_pmac_id);
3788*4882a593Smuzhiyun 	else
3789*4882a593Smuzhiyun 		/* Fetch the MAC address using pmac_id */
3790*4882a593Smuzhiyun 		return be_cmd_get_mac_from_list(adapter, mac, &active,
3791*4882a593Smuzhiyun 						&curr_pmac_id,
3792*4882a593Smuzhiyun 						if_handle, domain);
3793*4882a593Smuzhiyun }
3794*4882a593Smuzhiyun 
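/* Reads the permanent MAC address: via MAC_ADDR_QUERY on BEx (permanent
 * MAC for the PF, interface MAC for VFs) and via GET_MAC_LIST on newer
 * chips. The buffer is zeroed first so a failure leaves an empty
 * address.
 */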
3795*4882a593Smuzhiyun int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3796*4882a593Smuzhiyun {
3797*4882a593Smuzhiyun 	int status;
3798*4882a593Smuzhiyun 	bool pmac_valid = false;
3799*4882a593Smuzhiyun 
3800*4882a593Smuzhiyun 	eth_zero_addr(mac);
3801*4882a593Smuzhiyun 
3802*4882a593Smuzhiyun 	if (BEx_chip(adapter)) {
3803*4882a593Smuzhiyun 		if (be_physfn(adapter))
3804*4882a593Smuzhiyun 			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3805*4882a593Smuzhiyun 						       0);
3806*4882a593Smuzhiyun 		else
3807*4882a593Smuzhiyun 			status = be_cmd_mac_addr_query(adapter, mac, false,
3808*4882a593Smuzhiyun 						       adapter->if_handle, 0);
3809*4882a593Smuzhiyun 	} else {
3810*4882a593Smuzhiyun 		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3811*4882a593Smuzhiyun 						  NULL, adapter->if_handle, 0);
3812*4882a593Smuzhiyun 	}
3813*4882a593Smuzhiyun 
3814*4882a593Smuzhiyun 	return status;
3815*4882a593Smuzhiyun }
3816*4882a593Smuzhiyun 
3817*4882a593Smuzhiyun /* Uses synchronous MCCQ */
3818*4882a593Smuzhiyun int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3819*4882a593Smuzhiyun 			u8 mac_count, u32 domain)
3820*4882a593Smuzhiyun {
3821*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3822*4882a593Smuzhiyun 	struct be_cmd_req_set_mac_list *req;
3823*4882a593Smuzhiyun 	int status;
3824*4882a593Smuzhiyun 	struct be_dma_mem cmd;
3825*4882a593Smuzhiyun 
3826*4882a593Smuzhiyun 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3827*4882a593Smuzhiyun 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3828*4882a593Smuzhiyun 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3829*4882a593Smuzhiyun 				    GFP_KERNEL);
3830*4882a593Smuzhiyun 	if (!cmd.va)
3831*4882a593Smuzhiyun 		return -ENOMEM;
3832*4882a593Smuzhiyun 
3833*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3834*4882a593Smuzhiyun 
3835*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3836*4882a593Smuzhiyun 	if (!wrb) {
3837*4882a593Smuzhiyun 		status = -EBUSY;
3838*4882a593Smuzhiyun 		goto err;
3839*4882a593Smuzhiyun 	}
3840*4882a593Smuzhiyun 
3841*4882a593Smuzhiyun 	req = cmd.va;
3842*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3843*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3844*4882a593Smuzhiyun 			       wrb, &cmd);
3845*4882a593Smuzhiyun 
3846*4882a593Smuzhiyun 	req->hdr.domain = domain;
3847*4882a593Smuzhiyun 	req->mac_count = mac_count;
3848*4882a593Smuzhiyun 	if (mac_count)
3849*4882a593Smuzhiyun 		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3850*4882a593Smuzhiyun 
3851*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3852*4882a593Smuzhiyun 
3853*4882a593Smuzhiyun err:
3854*4882a593Smuzhiyun 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3855*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3856*4882a593Smuzhiyun 	return status;
3857*4882a593Smuzhiyun }
3858*4882a593Smuzhiyun 
3859*4882a593Smuzhiyun /* Wrapper to delete any active MACs and provision the new mac.
3860*4882a593Smuzhiyun  * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3861*4882a593Smuzhiyun  * current list are active.
3862*4882a593Smuzhiyun  */
3863*4882a593Smuzhiyun int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3864*4882a593Smuzhiyun {
3865*4882a593Smuzhiyun 	bool active_mac = false;
3866*4882a593Smuzhiyun 	u8 old_mac[ETH_ALEN];
3867*4882a593Smuzhiyun 	u32 pmac_id;
3868*4882a593Smuzhiyun 	int status;
3869*4882a593Smuzhiyun 
3870*4882a593Smuzhiyun 	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3871*4882a593Smuzhiyun 					  &pmac_id, if_id, dom);
3872*4882a593Smuzhiyun 
3873*4882a593Smuzhiyun 	if (!status && active_mac)
3874*4882a593Smuzhiyun 		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3875*4882a593Smuzhiyun 
3876*4882a593Smuzhiyun 	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3877*4882a593Smuzhiyun }
3878*4882a593Smuzhiyun 
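/* Programs hyper-switch settings for an interface: an optional PVID,
 * the port forwarding mode (hsw_mode) and, on non-BEx chips, MAC/VLAN
 * spoof checking.
 *
 * Illustrative call only (placeholder names; e.g. setting a PVID for a
 * VF's interface without changing the forwarding mode):
 *
 *	status = be_cmd_set_hsw_config(adapter, pvid, vf_domain,
 *				       if_handle, 0, 0);
 */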
3879*4882a593Smuzhiyun int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3880*4882a593Smuzhiyun 			  u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
3881*4882a593Smuzhiyun {
3882*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3883*4882a593Smuzhiyun 	struct be_cmd_req_set_hsw_config *req;
3884*4882a593Smuzhiyun 	void *ctxt;
3885*4882a593Smuzhiyun 	int status;
3886*4882a593Smuzhiyun 
3887*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
3888*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_COMMON))
3889*4882a593Smuzhiyun 		return -EPERM;
3890*4882a593Smuzhiyun 
3891*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3892*4882a593Smuzhiyun 
3893*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3894*4882a593Smuzhiyun 	if (!wrb) {
3895*4882a593Smuzhiyun 		status = -EBUSY;
3896*4882a593Smuzhiyun 		goto err;
3897*4882a593Smuzhiyun 	}
3898*4882a593Smuzhiyun 
3899*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3900*4882a593Smuzhiyun 	ctxt = &req->context;
3901*4882a593Smuzhiyun 
3902*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3903*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3904*4882a593Smuzhiyun 			       NULL);
3905*4882a593Smuzhiyun 
3906*4882a593Smuzhiyun 	req->hdr.domain = domain;
3907*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3908*4882a593Smuzhiyun 	if (pvid) {
3909*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3910*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3911*4882a593Smuzhiyun 	}
3912*4882a593Smuzhiyun 	if (hsw_mode) {
3913*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3914*4882a593Smuzhiyun 			      ctxt, adapter->hba_port_num);
3915*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3916*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3917*4882a593Smuzhiyun 			      ctxt, hsw_mode);
3918*4882a593Smuzhiyun 	}
3919*4882a593Smuzhiyun 
3920*4882a593Smuzhiyun 	/* Enable/disable both mac and vlan spoof checking */
3921*4882a593Smuzhiyun 	if (!BEx_chip(adapter) && spoofchk) {
3922*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3923*4882a593Smuzhiyun 			      ctxt, spoofchk);
3924*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3925*4882a593Smuzhiyun 			      ctxt, spoofchk);
3926*4882a593Smuzhiyun 	}
3927*4882a593Smuzhiyun 
3928*4882a593Smuzhiyun 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3929*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3930*4882a593Smuzhiyun 
3931*4882a593Smuzhiyun err:
3932*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3933*4882a593Smuzhiyun 	return status;
3934*4882a593Smuzhiyun }
3935*4882a593Smuzhiyun 
3936*4882a593Smuzhiyun /* Get Hyper switch config */
3937*4882a593Smuzhiyun int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3938*4882a593Smuzhiyun 			  u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
3939*4882a593Smuzhiyun {
3940*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
3941*4882a593Smuzhiyun 	struct be_cmd_req_get_hsw_config *req;
3942*4882a593Smuzhiyun 	void *ctxt;
3943*4882a593Smuzhiyun 	int status;
3944*4882a593Smuzhiyun 	u16 vid;
3945*4882a593Smuzhiyun 
3946*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
3947*4882a593Smuzhiyun 
3948*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
3949*4882a593Smuzhiyun 	if (!wrb) {
3950*4882a593Smuzhiyun 		status = -EBUSY;
3951*4882a593Smuzhiyun 		goto err;
3952*4882a593Smuzhiyun 	}
3953*4882a593Smuzhiyun 
3954*4882a593Smuzhiyun 	req = embedded_payload(wrb);
3955*4882a593Smuzhiyun 	ctxt = &req->context;
3956*4882a593Smuzhiyun 
3957*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3958*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3959*4882a593Smuzhiyun 			       NULL);
3960*4882a593Smuzhiyun 
3961*4882a593Smuzhiyun 	req->hdr.domain = domain;
3962*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3963*4882a593Smuzhiyun 		      ctxt, intf_id);
3964*4882a593Smuzhiyun 	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3965*4882a593Smuzhiyun 
3966*4882a593Smuzhiyun 	if (!BEx_chip(adapter) && mode) {
3967*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3968*4882a593Smuzhiyun 			      ctxt, adapter->hba_port_num);
3969*4882a593Smuzhiyun 		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3970*4882a593Smuzhiyun 	}
3971*4882a593Smuzhiyun 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3972*4882a593Smuzhiyun 
3973*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
3974*4882a593Smuzhiyun 	if (!status) {
3975*4882a593Smuzhiyun 		struct be_cmd_resp_get_hsw_config *resp =
3976*4882a593Smuzhiyun 						embedded_payload(wrb);
3977*4882a593Smuzhiyun 
3978*4882a593Smuzhiyun 		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3979*4882a593Smuzhiyun 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3980*4882a593Smuzhiyun 				    pvid, &resp->context);
3981*4882a593Smuzhiyun 		if (pvid)
3982*4882a593Smuzhiyun 			*pvid = le16_to_cpu(vid);
3983*4882a593Smuzhiyun 		if (mode)
3984*4882a593Smuzhiyun 			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3985*4882a593Smuzhiyun 					      port_fwd_type, &resp->context);
3986*4882a593Smuzhiyun 		if (spoofchk)
3987*4882a593Smuzhiyun 			*spoofchk =
3988*4882a593Smuzhiyun 				AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3989*4882a593Smuzhiyun 					      spoofchk, &resp->context);
3990*4882a593Smuzhiyun 	}
3991*4882a593Smuzhiyun 
3992*4882a593Smuzhiyun err:
3993*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
3994*4882a593Smuzhiyun 	return status;
3995*4882a593Smuzhiyun }
3996*4882a593Smuzhiyun 
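/* Returns true when Wake-on-LAN is not applicable: always for VFs, and
 * for the OneConnect subsystem IDs listed below.
 */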
3997*4882a593Smuzhiyun static bool be_is_wol_excluded(struct be_adapter *adapter)
3998*4882a593Smuzhiyun {
3999*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4000*4882a593Smuzhiyun 
4001*4882a593Smuzhiyun 	if (be_virtfn(adapter))
4002*4882a593Smuzhiyun 		return true;
4003*4882a593Smuzhiyun 
4004*4882a593Smuzhiyun 	switch (pdev->subsystem_device) {
4005*4882a593Smuzhiyun 	case OC_SUBSYS_DEVICE_ID1:
4006*4882a593Smuzhiyun 	case OC_SUBSYS_DEVICE_ID2:
4007*4882a593Smuzhiyun 	case OC_SUBSYS_DEVICE_ID3:
4008*4882a593Smuzhiyun 	case OC_SUBSYS_DEVICE_ID4:
4009*4882a593Smuzhiyun 		return true;
4010*4882a593Smuzhiyun 	default:
4011*4882a593Smuzhiyun 		return false;
4012*4882a593Smuzhiyun 	}
4013*4882a593Smuzhiyun }
4014*4882a593Smuzhiyun 
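/* Uses the bootstrap mailbox to query the ACPI WoL magic-packet
 * capability (v1 of ETH_ACPI_WOL_MAGIC_CONFIG). The capability is
 * cached in adapter->wol_cap and WoL is marked enabled when firmware
 * reports a non-zero magic MAC.
 */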
4015*4882a593Smuzhiyun int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
4016*4882a593Smuzhiyun {
4017*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4018*4882a593Smuzhiyun 	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
4019*4882a593Smuzhiyun 	int status = 0;
4020*4882a593Smuzhiyun 	struct be_dma_mem cmd;
4021*4882a593Smuzhiyun 
4022*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
4023*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_ETH))
4024*4882a593Smuzhiyun 		return -EPERM;
4025*4882a593Smuzhiyun 
4026*4882a593Smuzhiyun 	if (be_is_wol_excluded(adapter))
4027*4882a593Smuzhiyun 		return status;
4028*4882a593Smuzhiyun 
4029*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4030*4882a593Smuzhiyun 		return -1;
4031*4882a593Smuzhiyun 
4032*4882a593Smuzhiyun 	memset(&cmd, 0, sizeof(struct be_dma_mem));
4033*4882a593Smuzhiyun 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
4034*4882a593Smuzhiyun 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4035*4882a593Smuzhiyun 				    GFP_ATOMIC);
4036*4882a593Smuzhiyun 	if (!cmd.va) {
4037*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
4038*4882a593Smuzhiyun 		status = -ENOMEM;
4039*4882a593Smuzhiyun 		goto err;
4040*4882a593Smuzhiyun 	}
4041*4882a593Smuzhiyun 
4042*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
4043*4882a593Smuzhiyun 	if (!wrb) {
4044*4882a593Smuzhiyun 		status = -EBUSY;
4045*4882a593Smuzhiyun 		goto err;
4046*4882a593Smuzhiyun 	}
4047*4882a593Smuzhiyun 
4048*4882a593Smuzhiyun 	req = cmd.va;
4049*4882a593Smuzhiyun 
4050*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
4051*4882a593Smuzhiyun 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
4052*4882a593Smuzhiyun 			       sizeof(*req), wrb, &cmd);
4053*4882a593Smuzhiyun 
4054*4882a593Smuzhiyun 	req->hdr.version = 1;
4055*4882a593Smuzhiyun 	req->query_options = BE_GET_WOL_CAP;
4056*4882a593Smuzhiyun 
4057*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
4058*4882a593Smuzhiyun 	if (!status) {
4059*4882a593Smuzhiyun 		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
4060*4882a593Smuzhiyun 
4061*4882a593Smuzhiyun 		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
4062*4882a593Smuzhiyun 
4063*4882a593Smuzhiyun 		adapter->wol_cap = resp->wol_settings;
4064*4882a593Smuzhiyun 
4065*4882a593Smuzhiyun 		/* Non-zero macaddr indicates WOL is enabled */
4066*4882a593Smuzhiyun 		if (adapter->wol_cap & BE_WOL_CAP &&
4067*4882a593Smuzhiyun 		    !is_zero_ether_addr(resp->magic_mac))
4068*4882a593Smuzhiyun 			adapter->wol_en = true;
4069*4882a593Smuzhiyun 	}
4070*4882a593Smuzhiyun err:
4071*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
4072*4882a593Smuzhiyun 	if (cmd.va)
4073*4882a593Smuzhiyun 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4074*4882a593Smuzhiyun 				  cmd.dma);
4075*4882a593Smuzhiyun 	return status;
4076*4882a593Smuzhiyun 
4077*4882a593Smuzhiyun }
4078*4882a593Smuzhiyun 
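/* Sets the firmware FAT trace level for the UART: reads the current
 * extended FAT capabilities, rewrites dbg_lvl of every MODE_UART entry
 * to 'level' and writes the table back via SET_EXT_FAT_CAPABILITIES.
 */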
4079*4882a593Smuzhiyun int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
4080*4882a593Smuzhiyun {
4081*4882a593Smuzhiyun 	struct be_dma_mem extfat_cmd;
4082*4882a593Smuzhiyun 	struct be_fat_conf_params *cfgs;
4083*4882a593Smuzhiyun 	int status;
4084*4882a593Smuzhiyun 	int i, j;
4085*4882a593Smuzhiyun 
4086*4882a593Smuzhiyun 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4087*4882a593Smuzhiyun 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4088*4882a593Smuzhiyun 	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4089*4882a593Smuzhiyun 					   extfat_cmd.size, &extfat_cmd.dma,
4090*4882a593Smuzhiyun 					   GFP_ATOMIC);
4091*4882a593Smuzhiyun 	if (!extfat_cmd.va)
4092*4882a593Smuzhiyun 		return -ENOMEM;
4093*4882a593Smuzhiyun 
4094*4882a593Smuzhiyun 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4095*4882a593Smuzhiyun 	if (status)
4096*4882a593Smuzhiyun 		goto err;
4097*4882a593Smuzhiyun 
4098*4882a593Smuzhiyun 	cfgs = (struct be_fat_conf_params *)
4099*4882a593Smuzhiyun 			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
4100*4882a593Smuzhiyun 	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
4101*4882a593Smuzhiyun 		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
4102*4882a593Smuzhiyun 
4103*4882a593Smuzhiyun 		for (j = 0; j < num_modes; j++) {
4104*4882a593Smuzhiyun 			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
4105*4882a593Smuzhiyun 				cfgs->module[i].trace_lvl[j].dbg_lvl =
4106*4882a593Smuzhiyun 							cpu_to_le32(level);
4107*4882a593Smuzhiyun 		}
4108*4882a593Smuzhiyun 	}
4109*4882a593Smuzhiyun 
4110*4882a593Smuzhiyun 	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
4111*4882a593Smuzhiyun err:
4112*4882a593Smuzhiyun 	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4113*4882a593Smuzhiyun 			  extfat_cmd.dma);
4114*4882a593Smuzhiyun 	return status;
4115*4882a593Smuzhiyun }
4116*4882a593Smuzhiyun 
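/* Returns the current UART trace level from the first FAT module, or 0
 * if the extended FAT capabilities cannot be read.
 */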
4117*4882a593Smuzhiyun int be_cmd_get_fw_log_level(struct be_adapter *adapter)
4118*4882a593Smuzhiyun {
4119*4882a593Smuzhiyun 	struct be_dma_mem extfat_cmd;
4120*4882a593Smuzhiyun 	struct be_fat_conf_params *cfgs;
4121*4882a593Smuzhiyun 	int status, j;
4122*4882a593Smuzhiyun 	int level = 0;
4123*4882a593Smuzhiyun 
4124*4882a593Smuzhiyun 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4125*4882a593Smuzhiyun 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4126*4882a593Smuzhiyun 	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4127*4882a593Smuzhiyun 					   extfat_cmd.size, &extfat_cmd.dma,
4128*4882a593Smuzhiyun 					   GFP_ATOMIC);
4129*4882a593Smuzhiyun 
4130*4882a593Smuzhiyun 	if (!extfat_cmd.va) {
4131*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4132*4882a593Smuzhiyun 			__func__);
4133*4882a593Smuzhiyun 		goto err;
4134*4882a593Smuzhiyun 	}
4135*4882a593Smuzhiyun 
4136*4882a593Smuzhiyun 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4137*4882a593Smuzhiyun 	if (!status) {
4138*4882a593Smuzhiyun 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4139*4882a593Smuzhiyun 						sizeof(struct be_cmd_resp_hdr));
4140*4882a593Smuzhiyun 
4141*4882a593Smuzhiyun 		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4142*4882a593Smuzhiyun 			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4143*4882a593Smuzhiyun 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4144*4882a593Smuzhiyun 		}
4145*4882a593Smuzhiyun 	}
4146*4882a593Smuzhiyun 	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4147*4882a593Smuzhiyun 			  extfat_cmd.dma);
4148*4882a593Smuzhiyun err:
4149*4882a593Smuzhiyun 	return level;
4150*4882a593Smuzhiyun }
4151*4882a593Smuzhiyun 
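/* Uses the bootstrap mailbox to read the extended FAT capabilities
 * (parameter_type 1) into the caller-supplied DMA buffer.
 */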
4152*4882a593Smuzhiyun int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
4153*4882a593Smuzhiyun 				   struct be_dma_mem *cmd)
4154*4882a593Smuzhiyun {
4155*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4156*4882a593Smuzhiyun 	struct be_cmd_req_get_ext_fat_caps *req;
4157*4882a593Smuzhiyun 	int status;
4158*4882a593Smuzhiyun 
4159*4882a593Smuzhiyun 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
4160*4882a593Smuzhiyun 			    CMD_SUBSYSTEM_COMMON))
4161*4882a593Smuzhiyun 		return -EPERM;
4162*4882a593Smuzhiyun 
4163*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4164*4882a593Smuzhiyun 		return -1;
4165*4882a593Smuzhiyun 
4166*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
4167*4882a593Smuzhiyun 	if (!wrb) {
4168*4882a593Smuzhiyun 		status = -EBUSY;
4169*4882a593Smuzhiyun 		goto err;
4170*4882a593Smuzhiyun 	}
4171*4882a593Smuzhiyun 
4172*4882a593Smuzhiyun 	req = cmd->va;
4173*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4174*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
4175*4882a593Smuzhiyun 			       cmd->size, wrb, cmd);
4176*4882a593Smuzhiyun 	req->parameter_type = cpu_to_le32(1);
4177*4882a593Smuzhiyun 
4178*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
4179*4882a593Smuzhiyun err:
4180*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
4181*4882a593Smuzhiyun 	return status;
4182*4882a593Smuzhiyun }
4183*4882a593Smuzhiyun 
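/* Writes a (possibly modified) FAT configuration back to firmware over
 * the MCCQ, reusing the DMA buffer from the capabilities read.
 */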
4184*4882a593Smuzhiyun int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
4185*4882a593Smuzhiyun 				   struct be_dma_mem *cmd,
4186*4882a593Smuzhiyun 				   struct be_fat_conf_params *configs)
4187*4882a593Smuzhiyun {
4188*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4189*4882a593Smuzhiyun 	struct be_cmd_req_set_ext_fat_caps *req;
4190*4882a593Smuzhiyun 	int status;
4191*4882a593Smuzhiyun 
4192*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
4193*4882a593Smuzhiyun 
4194*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
4195*4882a593Smuzhiyun 	if (!wrb) {
4196*4882a593Smuzhiyun 		status = -EBUSY;
4197*4882a593Smuzhiyun 		goto err;
4198*4882a593Smuzhiyun 	}
4199*4882a593Smuzhiyun 
4200*4882a593Smuzhiyun 	req = cmd->va;
4201*4882a593Smuzhiyun 	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
4202*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4203*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES,
4204*4882a593Smuzhiyun 			       cmd->size, wrb, cmd);
4205*4882a593Smuzhiyun 
4206*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
4207*4882a593Smuzhiyun err:
4208*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
4209*4882a593Smuzhiyun 	return status;
4210*4882a593Smuzhiyun }
4211*4882a593Smuzhiyun 
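/* Uses the bootstrap mailbox to fetch the ASCII name of this function's
 * HBA port; falls back to the port number as an ASCII digit when the
 * command fails.
 */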
4212*4882a593Smuzhiyun int be_cmd_query_port_name(struct be_adapter *adapter)
4213*4882a593Smuzhiyun {
4214*4882a593Smuzhiyun 	struct be_cmd_req_get_port_name *req;
4215*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4216*4882a593Smuzhiyun 	int status;
4217*4882a593Smuzhiyun 
4218*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4219*4882a593Smuzhiyun 		return -1;
4220*4882a593Smuzhiyun 
4221*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
4222*4882a593Smuzhiyun 	req = embedded_payload(wrb);
4223*4882a593Smuzhiyun 
4224*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4225*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
4226*4882a593Smuzhiyun 			       NULL);
4227*4882a593Smuzhiyun 	if (!BEx_chip(adapter))
4228*4882a593Smuzhiyun 		req->hdr.version = 1;
4229*4882a593Smuzhiyun 
4230*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
4231*4882a593Smuzhiyun 	if (!status) {
4232*4882a593Smuzhiyun 		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
4233*4882a593Smuzhiyun 
4234*4882a593Smuzhiyun 		adapter->port_name = resp->port_name[adapter->hba_port_num];
4235*4882a593Smuzhiyun 	} else {
4236*4882a593Smuzhiyun 		adapter->port_name = adapter->hba_port_num + '0';
4237*4882a593Smuzhiyun 	}
4238*4882a593Smuzhiyun 
4239*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
4240*4882a593Smuzhiyun 	return status;
4241*4882a593Smuzhiyun }
4242*4882a593Smuzhiyun 
4243*4882a593Smuzhiyun /* When more than 1 NIC descriptor is present in the descriptor list,
4244*4882a593Smuzhiyun  * the caller must specify the pf_num to obtain the NIC descriptor
4245*4882a593Smuzhiyun  * corresponding to its pci function.
4246*4882a593Smuzhiyun  * get_vft must be true when the caller wants the VF-template desc of the
4247*4882a593Smuzhiyun  * PF-pool.
4248*4882a593Smuzhiyun  * The pf_num should be set to PF_NUM_IGNORE when the caller knows
4249*4882a593Smuzhiyun  * that only it's NIC descriptor is present in the descriptor list.
4250*4882a593Smuzhiyun  * that only its NIC descriptor is present in the descriptor list.
4251*4882a593Smuzhiyun static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
4252*4882a593Smuzhiyun 					       bool get_vft, u8 pf_num)
4253*4882a593Smuzhiyun {
4254*4882a593Smuzhiyun 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4255*4882a593Smuzhiyun 	struct be_nic_res_desc *nic;
4256*4882a593Smuzhiyun 	int i;
4257*4882a593Smuzhiyun 
4258*4882a593Smuzhiyun 	for (i = 0; i < desc_count; i++) {
4259*4882a593Smuzhiyun 		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
4260*4882a593Smuzhiyun 		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
4261*4882a593Smuzhiyun 			nic = (struct be_nic_res_desc *)hdr;
4262*4882a593Smuzhiyun 
4263*4882a593Smuzhiyun 			if ((pf_num == PF_NUM_IGNORE ||
4264*4882a593Smuzhiyun 			     nic->pf_num == pf_num) &&
4265*4882a593Smuzhiyun 			    (!get_vft || nic->flags & BIT(VFT_SHIFT)))
4266*4882a593Smuzhiyun 				return nic;
4267*4882a593Smuzhiyun 		}
4268*4882a593Smuzhiyun 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4269*4882a593Smuzhiyun 		hdr = (void *)hdr + hdr->desc_len;
4270*4882a593Smuzhiyun 	}
4271*4882a593Smuzhiyun 	return NULL;
4272*4882a593Smuzhiyun }
4273*4882a593Smuzhiyun 
4274*4882a593Smuzhiyun static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
4275*4882a593Smuzhiyun 					       u8 pf_num)
4276*4882a593Smuzhiyun {
4277*4882a593Smuzhiyun 	return be_get_nic_desc(buf, desc_count, true, pf_num);
4278*4882a593Smuzhiyun }
4279*4882a593Smuzhiyun 
4280*4882a593Smuzhiyun static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
4281*4882a593Smuzhiyun 						    u8 pf_num)
4282*4882a593Smuzhiyun {
4283*4882a593Smuzhiyun 	return be_get_nic_desc(buf, desc_count, false, pf_num);
4284*4882a593Smuzhiyun }
4285*4882a593Smuzhiyun 
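/* Walks the resource-descriptor list and returns the PCIe descriptor
 * matching pf_num, or NULL if none is found.
 */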
4286*4882a593Smuzhiyun static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
4287*4882a593Smuzhiyun 						 u8 pf_num)
4288*4882a593Smuzhiyun {
4289*4882a593Smuzhiyun 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4290*4882a593Smuzhiyun 	struct be_pcie_res_desc *pcie;
4291*4882a593Smuzhiyun 	int i;
4292*4882a593Smuzhiyun 
4293*4882a593Smuzhiyun 	for (i = 0; i < desc_count; i++) {
4294*4882a593Smuzhiyun 		if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4295*4882a593Smuzhiyun 		    hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4296*4882a593Smuzhiyun 			pcie = (struct be_pcie_res_desc *)hdr;
4297*4882a593Smuzhiyun 			if (pcie->pf_num == pf_num)
4298*4882a593Smuzhiyun 				return pcie;
4299*4882a593Smuzhiyun 		}
4300*4882a593Smuzhiyun 
4301*4882a593Smuzhiyun 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4302*4882a593Smuzhiyun 		hdr = (void *)hdr + hdr->desc_len;
4303*4882a593Smuzhiyun 	}
4304*4882a593Smuzhiyun 	return NULL;
4305*4882a593Smuzhiyun }
4306*4882a593Smuzhiyun 
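/* Returns the first port resource descriptor (v1) in the list, or NULL. */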
4307*4882a593Smuzhiyun static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
4308*4882a593Smuzhiyun {
4309*4882a593Smuzhiyun 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4310*4882a593Smuzhiyun 	int i;
4311*4882a593Smuzhiyun 
4312*4882a593Smuzhiyun 	for (i = 0; i < desc_count; i++) {
4313*4882a593Smuzhiyun 		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
4314*4882a593Smuzhiyun 			return (struct be_port_res_desc *)hdr;
4315*4882a593Smuzhiyun 
4316*4882a593Smuzhiyun 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4317*4882a593Smuzhiyun 		hdr = (void *)hdr + hdr->desc_len;
4318*4882a593Smuzhiyun 	}
4319*4882a593Smuzhiyun 	return NULL;
4320*4882a593Smuzhiyun }
4321*4882a593Smuzhiyun 
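/* Copies the queue/MAC/VLAN limits from a NIC resource descriptor into
 * the driver's be_resources, keeping only the capability flags the
 * driver cares about.
 */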
4322*4882a593Smuzhiyun static void be_copy_nic_desc(struct be_resources *res,
4323*4882a593Smuzhiyun 			     struct be_nic_res_desc *desc)
4324*4882a593Smuzhiyun {
4325*4882a593Smuzhiyun 	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
4326*4882a593Smuzhiyun 	res->max_vlans = le16_to_cpu(desc->vlan_count);
4327*4882a593Smuzhiyun 	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
4328*4882a593Smuzhiyun 	res->max_tx_qs = le16_to_cpu(desc->txq_count);
4329*4882a593Smuzhiyun 	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
4330*4882a593Smuzhiyun 	res->max_rx_qs = le16_to_cpu(desc->rq_count);
4331*4882a593Smuzhiyun 	res->max_evt_qs = le16_to_cpu(desc->eq_count);
4332*4882a593Smuzhiyun 	res->max_cq_count = le16_to_cpu(desc->cq_count);
4333*4882a593Smuzhiyun 	res->max_iface_count = le16_to_cpu(desc->iface_count);
4334*4882a593Smuzhiyun 	res->max_mcc_count = le16_to_cpu(desc->mcc_count);
4335*4882a593Smuzhiyun 	/* Clear flags that driver is not interested in */
4336*4882a593Smuzhiyun 	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
4337*4882a593Smuzhiyun 				BE_IF_CAP_FLAGS_WANT;
4338*4882a593Smuzhiyun }
4339*4882a593Smuzhiyun 
4340*4882a593Smuzhiyun /* Uses Mbox */
4341*4882a593Smuzhiyun int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
4342*4882a593Smuzhiyun {
4343*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4344*4882a593Smuzhiyun 	struct be_cmd_req_get_func_config *req;
4345*4882a593Smuzhiyun 	int status;
4346*4882a593Smuzhiyun 	struct be_dma_mem cmd;
4347*4882a593Smuzhiyun 
4348*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4349*4882a593Smuzhiyun 		return -1;
4350*4882a593Smuzhiyun 
4351*4882a593Smuzhiyun 	memset(&cmd, 0, sizeof(struct be_dma_mem));
4352*4882a593Smuzhiyun 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
4353*4882a593Smuzhiyun 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4354*4882a593Smuzhiyun 				    GFP_ATOMIC);
4355*4882a593Smuzhiyun 	if (!cmd.va) {
4356*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
4357*4882a593Smuzhiyun 		status = -ENOMEM;
4358*4882a593Smuzhiyun 		goto err;
4359*4882a593Smuzhiyun 	}
4360*4882a593Smuzhiyun 
4361*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
4362*4882a593Smuzhiyun 	if (!wrb) {
4363*4882a593Smuzhiyun 		status = -EBUSY;
4364*4882a593Smuzhiyun 		goto err;
4365*4882a593Smuzhiyun 	}
4366*4882a593Smuzhiyun 
4367*4882a593Smuzhiyun 	req = cmd.va;
4368*4882a593Smuzhiyun 
4369*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4370*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_FUNC_CONFIG,
4371*4882a593Smuzhiyun 			       cmd.size, wrb, &cmd);
4372*4882a593Smuzhiyun 
4373*4882a593Smuzhiyun 	if (skyhawk_chip(adapter))
4374*4882a593Smuzhiyun 		req->hdr.version = 1;
4375*4882a593Smuzhiyun 
4376*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
4377*4882a593Smuzhiyun 	if (!status) {
4378*4882a593Smuzhiyun 		struct be_cmd_resp_get_func_config *resp = cmd.va;
4379*4882a593Smuzhiyun 		u32 desc_count = le32_to_cpu(resp->desc_count);
4380*4882a593Smuzhiyun 		struct be_nic_res_desc *desc;
4381*4882a593Smuzhiyun 
4382*4882a593Smuzhiyun 		/* GET_FUNC_CONFIG returns resource descriptors of the
4383*4882a593Smuzhiyun 		 * current function only. So, pf_num should be set to
4384*4882a593Smuzhiyun 		 * PF_NUM_IGNORE.
4385*4882a593Smuzhiyun 		 */
4386*4882a593Smuzhiyun 		desc = be_get_func_nic_desc(resp->func_param, desc_count,
4387*4882a593Smuzhiyun 					    PF_NUM_IGNORE);
4388*4882a593Smuzhiyun 		if (!desc) {
4389*4882a593Smuzhiyun 			status = -EINVAL;
4390*4882a593Smuzhiyun 			goto err;
4391*4882a593Smuzhiyun 		}
4392*4882a593Smuzhiyun 
4393*4882a593Smuzhiyun 		/* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
4394*4882a593Smuzhiyun 		adapter->pf_num = desc->pf_num;
4395*4882a593Smuzhiyun 		adapter->vf_num = desc->vf_num;
4396*4882a593Smuzhiyun 
4397*4882a593Smuzhiyun 		if (res)
4398*4882a593Smuzhiyun 			be_copy_nic_desc(res, desc);
4399*4882a593Smuzhiyun 	}
4400*4882a593Smuzhiyun err:
4401*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
4402*4882a593Smuzhiyun 	if (cmd.va)
4403*4882a593Smuzhiyun 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4404*4882a593Smuzhiyun 				  cmd.dma);
4405*4882a593Smuzhiyun 	return status;
4406*4882a593Smuzhiyun }
4407*4882a593Smuzhiyun 
4408*4882a593Smuzhiyun /* This routine returns a list of all the NIC PF_nums in the adapter */
4409*4882a593Smuzhiyun static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
4410*4882a593Smuzhiyun {
4411*4882a593Smuzhiyun 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4412*4882a593Smuzhiyun 	struct be_pcie_res_desc *pcie = NULL;
4413*4882a593Smuzhiyun 	int i;
4414*4882a593Smuzhiyun 	u16 nic_pf_count = 0;
4415*4882a593Smuzhiyun 
4416*4882a593Smuzhiyun 	for (i = 0; i < desc_count; i++) {
4417*4882a593Smuzhiyun 		if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4418*4882a593Smuzhiyun 		    hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4419*4882a593Smuzhiyun 			pcie = (struct be_pcie_res_desc *)hdr;
4420*4882a593Smuzhiyun 			if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
4421*4882a593Smuzhiyun 					       pcie->pf_type == MISSION_RDMA)) {
4422*4882a593Smuzhiyun 				nic_pf_nums[nic_pf_count++] = pcie->pf_num;
4423*4882a593Smuzhiyun 			}
4424*4882a593Smuzhiyun 		}
4425*4882a593Smuzhiyun 
4426*4882a593Smuzhiyun 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4427*4882a593Smuzhiyun 		hdr = (void *)hdr + hdr->desc_len;
4428*4882a593Smuzhiyun 	}
4429*4882a593Smuzhiyun 	return nic_pf_count;
4430*4882a593Smuzhiyun }
4431*4882a593Smuzhiyun 
4432*4882a593Smuzhiyun /* Will use MBOX only if MCCQ has not been created */
4433*4882a593Smuzhiyun int be_cmd_get_profile_config(struct be_adapter *adapter,
4434*4882a593Smuzhiyun 			      struct be_resources *res,
4435*4882a593Smuzhiyun 			      struct be_port_resources *port_res,
4436*4882a593Smuzhiyun 			      u8 profile_type, u8 query, u8 domain)
4437*4882a593Smuzhiyun {
4438*4882a593Smuzhiyun 	struct be_cmd_resp_get_profile_config *resp;
4439*4882a593Smuzhiyun 	struct be_cmd_req_get_profile_config *req;
4440*4882a593Smuzhiyun 	struct be_nic_res_desc *vf_res;
4441*4882a593Smuzhiyun 	struct be_pcie_res_desc *pcie;
4442*4882a593Smuzhiyun 	struct be_port_res_desc *port;
4443*4882a593Smuzhiyun 	struct be_nic_res_desc *nic;
4444*4882a593Smuzhiyun 	struct be_mcc_wrb wrb = {0};
4445*4882a593Smuzhiyun 	struct be_dma_mem cmd;
4446*4882a593Smuzhiyun 	u16 desc_count;
4447*4882a593Smuzhiyun 	int status;
4448*4882a593Smuzhiyun 
4449*4882a593Smuzhiyun 	memset(&cmd, 0, sizeof(struct be_dma_mem));
4450*4882a593Smuzhiyun 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
4451*4882a593Smuzhiyun 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4452*4882a593Smuzhiyun 				    GFP_ATOMIC);
4453*4882a593Smuzhiyun 	if (!cmd.va)
4454*4882a593Smuzhiyun 		return -ENOMEM;
4455*4882a593Smuzhiyun 
4456*4882a593Smuzhiyun 	req = cmd.va;
4457*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4458*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
4459*4882a593Smuzhiyun 			       cmd.size, &wrb, &cmd);
4460*4882a593Smuzhiyun 
4461*4882a593Smuzhiyun 	if (!lancer_chip(adapter))
4462*4882a593Smuzhiyun 		req->hdr.version = 1;
4463*4882a593Smuzhiyun 	req->type = profile_type;
4464*4882a593Smuzhiyun 	req->hdr.domain = domain;
4465*4882a593Smuzhiyun 
4466*4882a593Smuzhiyun 	/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
4467*4882a593Smuzhiyun 	 * descriptors with all bits set to "1" for the fields which can be
4468*4882a593Smuzhiyun 	 * modified using SET_PROFILE_CONFIG cmd.
4469*4882a593Smuzhiyun 	 */
4470*4882a593Smuzhiyun 	if (query == RESOURCE_MODIFIABLE)
4471*4882a593Smuzhiyun 		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
4472*4882a593Smuzhiyun 
4473*4882a593Smuzhiyun 	status = be_cmd_notify_wait(adapter, &wrb);
4474*4882a593Smuzhiyun 	if (status)
4475*4882a593Smuzhiyun 		goto err;
4476*4882a593Smuzhiyun 
4477*4882a593Smuzhiyun 	resp = cmd.va;
4478*4882a593Smuzhiyun 	desc_count = le16_to_cpu(resp->desc_count);
4479*4882a593Smuzhiyun 
4480*4882a593Smuzhiyun 	if (port_res) {
4481*4882a593Smuzhiyun 		u16 nic_pf_cnt = 0, i;
4482*4882a593Smuzhiyun 		u16 nic_pf_num_list[MAX_NIC_FUNCS];
4483*4882a593Smuzhiyun 
4484*4882a593Smuzhiyun 		nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
4485*4882a593Smuzhiyun 						    desc_count,
4486*4882a593Smuzhiyun 						    nic_pf_num_list);
4487*4882a593Smuzhiyun 
4488*4882a593Smuzhiyun 		for (i = 0; i < nic_pf_cnt; i++) {
4489*4882a593Smuzhiyun 			nic = be_get_func_nic_desc(resp->func_param, desc_count,
4490*4882a593Smuzhiyun 						   nic_pf_num_list[i]);
4491*4882a593Smuzhiyun 			if (nic->link_param == adapter->port_num) {
4492*4882a593Smuzhiyun 				port_res->nic_pfs++;
4493*4882a593Smuzhiyun 				pcie = be_get_pcie_desc(resp->func_param,
4494*4882a593Smuzhiyun 							desc_count,
4495*4882a593Smuzhiyun 							nic_pf_num_list[i]);
4496*4882a593Smuzhiyun 				port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
4497*4882a593Smuzhiyun 			}
4498*4882a593Smuzhiyun 		}
4499*4882a593Smuzhiyun 		goto err;
4500*4882a593Smuzhiyun 	}
4501*4882a593Smuzhiyun 
4502*4882a593Smuzhiyun 	pcie = be_get_pcie_desc(resp->func_param, desc_count,
4503*4882a593Smuzhiyun 				adapter->pf_num);
4504*4882a593Smuzhiyun 	if (pcie)
4505*4882a593Smuzhiyun 		res->max_vfs = le16_to_cpu(pcie->num_vfs);
4506*4882a593Smuzhiyun 
4507*4882a593Smuzhiyun 	port = be_get_port_desc(resp->func_param, desc_count);
4508*4882a593Smuzhiyun 	if (port)
4509*4882a593Smuzhiyun 		adapter->mc_type = port->mc_type;
4510*4882a593Smuzhiyun 
4511*4882a593Smuzhiyun 	nic = be_get_func_nic_desc(resp->func_param, desc_count,
4512*4882a593Smuzhiyun 				   adapter->pf_num);
4513*4882a593Smuzhiyun 	if (nic)
4514*4882a593Smuzhiyun 		be_copy_nic_desc(res, nic);
4515*4882a593Smuzhiyun 
4516*4882a593Smuzhiyun 	vf_res = be_get_vft_desc(resp->func_param, desc_count,
4517*4882a593Smuzhiyun 				 adapter->pf_num);
4518*4882a593Smuzhiyun 	if (vf_res)
4519*4882a593Smuzhiyun 		res->vf_if_cap_flags = vf_res->cap_flags;
4520*4882a593Smuzhiyun err:
4521*4882a593Smuzhiyun 	if (cmd.va)
4522*4882a593Smuzhiyun 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4523*4882a593Smuzhiyun 				  cmd.dma);
4524*4882a593Smuzhiyun 	return status;
4525*4882a593Smuzhiyun }
4526*4882a593Smuzhiyun 
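/* Summary of the GET_PROFILE_CONFIG parsing above: when a port_res pointer
 * is supplied, only the per-port totals are collected (the count of NIC PFs
 * whose link_param matches adapter->port_num and the sum of their PCIe
 * num_vfs) before bailing out early; otherwise the PCIe, port, NIC and
 * VF-template descriptors for adapter->pf_num are copied into *res.
 */
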
4527*4882a593Smuzhiyun /* Will use MBOX only if MCCQ has not been created */
4528*4882a593Smuzhiyun static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4529*4882a593Smuzhiyun 				     int size, int count, u8 version, u8 domain)
4530*4882a593Smuzhiyun {
4531*4882a593Smuzhiyun 	struct be_cmd_req_set_profile_config *req;
4532*4882a593Smuzhiyun 	struct be_mcc_wrb wrb = {0};
4533*4882a593Smuzhiyun 	struct be_dma_mem cmd;
4534*4882a593Smuzhiyun 	int status;
4535*4882a593Smuzhiyun 
4536*4882a593Smuzhiyun 	memset(&cmd, 0, sizeof(struct be_dma_mem));
4537*4882a593Smuzhiyun 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
4538*4882a593Smuzhiyun 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4539*4882a593Smuzhiyun 				    GFP_ATOMIC);
4540*4882a593Smuzhiyun 	if (!cmd.va)
4541*4882a593Smuzhiyun 		return -ENOMEM;
4542*4882a593Smuzhiyun 
4543*4882a593Smuzhiyun 	req = cmd.va;
4544*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4545*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
4546*4882a593Smuzhiyun 			       &wrb, &cmd);
4547*4882a593Smuzhiyun 	req->hdr.version = version;
4548*4882a593Smuzhiyun 	req->hdr.domain = domain;
4549*4882a593Smuzhiyun 	req->desc_count = cpu_to_le32(count);
4550*4882a593Smuzhiyun 	memcpy(req->desc, desc, size);
4551*4882a593Smuzhiyun 
4552*4882a593Smuzhiyun 	status = be_cmd_notify_wait(adapter, &wrb);
4553*4882a593Smuzhiyun 
4554*4882a593Smuzhiyun 	if (cmd.va)
4555*4882a593Smuzhiyun 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4556*4882a593Smuzhiyun 				  cmd.dma);
4557*4882a593Smuzhiyun 	return status;
4558*4882a593Smuzhiyun }
4559*4882a593Smuzhiyun 
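/* be_cmd_set_profile_config() ships "count" resource descriptors, packed
 * back to back in "desc" ("size" bytes in total), to the firmware in a
 * single SET_PROFILE_CONFIG request.  The callers below pass either one
 * descriptor (QoS, VxLAN port) or two (PF PCIe + VF NIC template for
 * SR-IOV).
 */
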
4560*4882a593Smuzhiyun /* Mark all fields invalid */
4561*4882a593Smuzhiyun static void be_reset_nic_desc(struct be_nic_res_desc *nic)
4562*4882a593Smuzhiyun {
4563*4882a593Smuzhiyun 	memset(nic, 0, sizeof(*nic));
4564*4882a593Smuzhiyun 	nic->unicast_mac_count = 0xFFFF;
4565*4882a593Smuzhiyun 	nic->mcc_count = 0xFFFF;
4566*4882a593Smuzhiyun 	nic->vlan_count = 0xFFFF;
4567*4882a593Smuzhiyun 	nic->mcast_mac_count = 0xFFFF;
4568*4882a593Smuzhiyun 	nic->txq_count = 0xFFFF;
4569*4882a593Smuzhiyun 	nic->rq_count = 0xFFFF;
4570*4882a593Smuzhiyun 	nic->rssq_count = 0xFFFF;
4571*4882a593Smuzhiyun 	nic->lro_count = 0xFFFF;
4572*4882a593Smuzhiyun 	nic->cq_count = 0xFFFF;
4573*4882a593Smuzhiyun 	nic->toe_conn_count = 0xFFFF;
4574*4882a593Smuzhiyun 	nic->eq_count = 0xFFFF;
4575*4882a593Smuzhiyun 	nic->iface_count = 0xFFFF;
4576*4882a593Smuzhiyun 	nic->link_param = 0xFF;
4577*4882a593Smuzhiyun 	nic->channel_id_param = cpu_to_le16(0xF000);
4578*4882a593Smuzhiyun 	nic->acpi_params = 0xFF;
4579*4882a593Smuzhiyun 	nic->wol_param = 0x0F;
4580*4882a593Smuzhiyun 	nic->tunnel_iface_count = 0xFFFF;
4581*4882a593Smuzhiyun 	nic->direct_tenant_iface_count = 0xFFFF;
4582*4882a593Smuzhiyun 	nic->bw_min = 0xFFFFFFFF;
4583*4882a593Smuzhiyun 	nic->bw_max = 0xFFFFFFFF;
4584*4882a593Smuzhiyun }
4585*4882a593Smuzhiyun 
4586*4882a593Smuzhiyun /* Mark all fields invalid */
4587*4882a593Smuzhiyun static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
4588*4882a593Smuzhiyun {
4589*4882a593Smuzhiyun 	memset(pcie, 0, sizeof(*pcie));
4590*4882a593Smuzhiyun 	pcie->sriov_state = 0xFF;
4591*4882a593Smuzhiyun 	pcie->pf_state = 0xFF;
4592*4882a593Smuzhiyun 	pcie->pf_type = 0xFF;
4593*4882a593Smuzhiyun 	pcie->num_vfs = 0xFFFF;
4594*4882a593Smuzhiyun }
4595*4882a593Smuzhiyun 
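/* The two reset helpers above set every field to an all-ones "invalid"
 * value, so a subsequent SET_PROFILE_CONFIG request only changes the
 * fields a caller explicitly fills in afterwards.
 */
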
4596*4882a593Smuzhiyun int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
4597*4882a593Smuzhiyun 		      u8 domain)
4598*4882a593Smuzhiyun {
4599*4882a593Smuzhiyun 	struct be_nic_res_desc nic_desc;
4600*4882a593Smuzhiyun 	u32 bw_percent;
4601*4882a593Smuzhiyun 	u16 version = 0;
4602*4882a593Smuzhiyun 
4603*4882a593Smuzhiyun 	if (BE3_chip(adapter))
4604*4882a593Smuzhiyun 		return be_cmd_set_qos(adapter, max_rate / 10, domain);
4605*4882a593Smuzhiyun 
4606*4882a593Smuzhiyun 	be_reset_nic_desc(&nic_desc);
4607*4882a593Smuzhiyun 	nic_desc.pf_num = adapter->pf_num;
4608*4882a593Smuzhiyun 	nic_desc.vf_num = domain;
4609*4882a593Smuzhiyun 	nic_desc.bw_min = 0;
4610*4882a593Smuzhiyun 	if (lancer_chip(adapter)) {
4611*4882a593Smuzhiyun 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
4612*4882a593Smuzhiyun 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
4613*4882a593Smuzhiyun 		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
4614*4882a593Smuzhiyun 					(1 << NOSV_SHIFT);
4615*4882a593Smuzhiyun 		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
4616*4882a593Smuzhiyun 	} else {
4617*4882a593Smuzhiyun 		version = 1;
4618*4882a593Smuzhiyun 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4619*4882a593Smuzhiyun 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4620*4882a593Smuzhiyun 		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4621*4882a593Smuzhiyun 		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
4622*4882a593Smuzhiyun 		nic_desc.bw_max = cpu_to_le32(bw_percent);
4623*4882a593Smuzhiyun 	}
4624*4882a593Smuzhiyun 
4625*4882a593Smuzhiyun 	return be_cmd_set_profile_config(adapter, &nic_desc,
4626*4882a593Smuzhiyun 					 nic_desc.hdr.desc_len,
4627*4882a593Smuzhiyun 					 1, version, domain);
4628*4882a593Smuzhiyun }
4629*4882a593Smuzhiyun 
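/* Units differ per chip in be_cmd_config_qos(): BE3 is handed off to
 * be_cmd_set_qos() with max_rate / 10, Lancer programs bw_max directly as
 * max_rate / 10, and the version-1 path programs bw_max as a percentage of
 * link_speed.  For example, max_rate = 5000 Mbps on a 10000 Mbps link
 * yields bw_percent = (5000 * 100) / 10000 = 50.
 */
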
4630*4882a593Smuzhiyun int be_cmd_set_sriov_config(struct be_adapter *adapter,
4631*4882a593Smuzhiyun 			    struct be_resources pool_res, u16 num_vfs,
4632*4882a593Smuzhiyun 			    struct be_resources *vft_res)
4633*4882a593Smuzhiyun {
4634*4882a593Smuzhiyun 	struct {
4635*4882a593Smuzhiyun 		struct be_pcie_res_desc pcie;
4636*4882a593Smuzhiyun 		struct be_nic_res_desc nic_vft;
4637*4882a593Smuzhiyun 	} __packed desc;
4638*4882a593Smuzhiyun 
4639*4882a593Smuzhiyun 	/* PF PCIE descriptor */
4640*4882a593Smuzhiyun 	be_reset_pcie_desc(&desc.pcie);
4641*4882a593Smuzhiyun 	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
4642*4882a593Smuzhiyun 	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4643*4882a593Smuzhiyun 	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
4644*4882a593Smuzhiyun 	desc.pcie.pf_num = adapter->pdev->devfn;
4645*4882a593Smuzhiyun 	desc.pcie.sriov_state = num_vfs ? 1 : 0;
4646*4882a593Smuzhiyun 	desc.pcie.num_vfs = cpu_to_le16(num_vfs);
4647*4882a593Smuzhiyun 
4648*4882a593Smuzhiyun 	/* VF NIC Template descriptor */
4649*4882a593Smuzhiyun 	be_reset_nic_desc(&desc.nic_vft);
4650*4882a593Smuzhiyun 	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4651*4882a593Smuzhiyun 	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4652*4882a593Smuzhiyun 	desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
4653*4882a593Smuzhiyun 			     BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
4654*4882a593Smuzhiyun 	desc.nic_vft.pf_num = adapter->pdev->devfn;
4655*4882a593Smuzhiyun 	desc.nic_vft.vf_num = 0;
4656*4882a593Smuzhiyun 	desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
4657*4882a593Smuzhiyun 	desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
4658*4882a593Smuzhiyun 	desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
4659*4882a593Smuzhiyun 	desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
4660*4882a593Smuzhiyun 	desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
4661*4882a593Smuzhiyun 
4662*4882a593Smuzhiyun 	if (vft_res->max_uc_mac)
4663*4882a593Smuzhiyun 		desc.nic_vft.unicast_mac_count =
4664*4882a593Smuzhiyun 					cpu_to_le16(vft_res->max_uc_mac);
4665*4882a593Smuzhiyun 	if (vft_res->max_vlans)
4666*4882a593Smuzhiyun 		desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
4667*4882a593Smuzhiyun 	if (vft_res->max_iface_count)
4668*4882a593Smuzhiyun 		desc.nic_vft.iface_count =
4669*4882a593Smuzhiyun 				cpu_to_le16(vft_res->max_iface_count);
4670*4882a593Smuzhiyun 	if (vft_res->max_mcc_count)
4671*4882a593Smuzhiyun 		desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
4672*4882a593Smuzhiyun 
4673*4882a593Smuzhiyun 	return be_cmd_set_profile_config(adapter, &desc,
4674*4882a593Smuzhiyun 					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
4675*4882a593Smuzhiyun }
4676*4882a593Smuzhiyun 
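/* be_cmd_set_sriov_config() sends two descriptors in one version-1
 * SET_PROFILE_CONFIG call: a PF PCIe descriptor that enables or disables
 * SR-IOV and sets num_vfs, and a VF NIC template (VFT_SHIFT flag) that
 * caps the queue, interface, MAC, VLAN and MCC counts each VF may claim.
 * Fields the caller does not fill in keep the "invalid" values written by
 * the reset helpers above.
 */
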
4677*4882a593Smuzhiyun int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
4678*4882a593Smuzhiyun {
4679*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4680*4882a593Smuzhiyun 	struct be_cmd_req_manage_iface_filters *req;
4681*4882a593Smuzhiyun 	int status;
4682*4882a593Smuzhiyun 
4683*4882a593Smuzhiyun 	if (iface == 0xFFFFFFFF)
4684*4882a593Smuzhiyun 		return -1;
4685*4882a593Smuzhiyun 
4686*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
4687*4882a593Smuzhiyun 
4688*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
4689*4882a593Smuzhiyun 	if (!wrb) {
4690*4882a593Smuzhiyun 		status = -EBUSY;
4691*4882a593Smuzhiyun 		goto err;
4692*4882a593Smuzhiyun 	}
4693*4882a593Smuzhiyun 	req = embedded_payload(wrb);
4694*4882a593Smuzhiyun 
4695*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4696*4882a593Smuzhiyun 			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
4697*4882a593Smuzhiyun 			       wrb, NULL);
4698*4882a593Smuzhiyun 	req->op = op;
4699*4882a593Smuzhiyun 	req->target_iface_id = cpu_to_le32(iface);
4700*4882a593Smuzhiyun 
4701*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
4702*4882a593Smuzhiyun err:
4703*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
4704*4882a593Smuzhiyun 	return status;
4705*4882a593Smuzhiyun }
4706*4882a593Smuzhiyun 
4707*4882a593Smuzhiyun int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
4708*4882a593Smuzhiyun {
4709*4882a593Smuzhiyun 	struct be_port_res_desc port_desc;
4710*4882a593Smuzhiyun 
4711*4882a593Smuzhiyun 	memset(&port_desc, 0, sizeof(port_desc));
4712*4882a593Smuzhiyun 	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
4713*4882a593Smuzhiyun 	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4714*4882a593Smuzhiyun 	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4715*4882a593Smuzhiyun 	port_desc.link_num = adapter->hba_port_num;
4716*4882a593Smuzhiyun 	if (port) {
4717*4882a593Smuzhiyun 		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
4718*4882a593Smuzhiyun 					(1 << RCVID_SHIFT);
4719*4882a593Smuzhiyun 		port_desc.nv_port = swab16(port);
4720*4882a593Smuzhiyun 	} else {
4721*4882a593Smuzhiyun 		port_desc.nv_flags = NV_TYPE_DISABLED;
4722*4882a593Smuzhiyun 		port_desc.nv_port = 0;
4723*4882a593Smuzhiyun 	}
4724*4882a593Smuzhiyun 
4725*4882a593Smuzhiyun 	return be_cmd_set_profile_config(adapter, &port_desc,
4726*4882a593Smuzhiyun 					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
4727*4882a593Smuzhiyun }
4728*4882a593Smuzhiyun 
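/* be_cmd_set_vxlan_port() reprograms the VxLAN UDP destination port on the
 * HBA port: a non-zero "port" (already in network byte order) enables
 * NV_TYPE_VXLAN with the byte-swapped value in nv_port, while port 0
 * writes NV_TYPE_DISABLED to turn the offload off.  A caller would pass,
 * for example, cpu_to_be16(4789) for the IANA-assigned VxLAN port.
 */
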
4729*4882a593Smuzhiyun int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
4730*4882a593Smuzhiyun 		     int vf_num)
4731*4882a593Smuzhiyun {
4732*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4733*4882a593Smuzhiyun 	struct be_cmd_req_get_iface_list *req;
4734*4882a593Smuzhiyun 	struct be_cmd_resp_get_iface_list *resp;
4735*4882a593Smuzhiyun 	int status;
4736*4882a593Smuzhiyun 
4737*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
4738*4882a593Smuzhiyun 
4739*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
4740*4882a593Smuzhiyun 	if (!wrb) {
4741*4882a593Smuzhiyun 		status = -EBUSY;
4742*4882a593Smuzhiyun 		goto err;
4743*4882a593Smuzhiyun 	}
4744*4882a593Smuzhiyun 	req = embedded_payload(wrb);
4745*4882a593Smuzhiyun 
4746*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4747*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
4748*4882a593Smuzhiyun 			       wrb, NULL);
4749*4882a593Smuzhiyun 	req->hdr.domain = vf_num + 1;
4750*4882a593Smuzhiyun 
4751*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
4752*4882a593Smuzhiyun 	if (!status) {
4753*4882a593Smuzhiyun 		resp = (struct be_cmd_resp_get_iface_list *)req;
4754*4882a593Smuzhiyun 		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
4755*4882a593Smuzhiyun 	}
4756*4882a593Smuzhiyun 
4757*4882a593Smuzhiyun err:
4758*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
4759*4882a593Smuzhiyun 	return status;
4760*4882a593Smuzhiyun }
4761*4882a593Smuzhiyun 
4762*4882a593Smuzhiyun static int lancer_wait_idle(struct be_adapter *adapter)
4763*4882a593Smuzhiyun {
4764*4882a593Smuzhiyun #define SLIPORT_IDLE_TIMEOUT 30
4765*4882a593Smuzhiyun 	u32 reg_val;
4766*4882a593Smuzhiyun 	int status = 0, i;
4767*4882a593Smuzhiyun 
4768*4882a593Smuzhiyun 	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4769*4882a593Smuzhiyun 		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4770*4882a593Smuzhiyun 		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4771*4882a593Smuzhiyun 			break;
4772*4882a593Smuzhiyun 
4773*4882a593Smuzhiyun 		ssleep(1);
4774*4882a593Smuzhiyun 	}
4775*4882a593Smuzhiyun 
4776*4882a593Smuzhiyun 	if (i == SLIPORT_IDLE_TIMEOUT)
4777*4882a593Smuzhiyun 		status = -1;
4778*4882a593Smuzhiyun 
4779*4882a593Smuzhiyun 	return status;
4780*4882a593Smuzhiyun }
4781*4882a593Smuzhiyun 
4782*4882a593Smuzhiyun int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4783*4882a593Smuzhiyun {
4784*4882a593Smuzhiyun 	int status = 0;
4785*4882a593Smuzhiyun 
4786*4882a593Smuzhiyun 	status = lancer_wait_idle(adapter);
4787*4882a593Smuzhiyun 	if (status)
4788*4882a593Smuzhiyun 		return status;
4789*4882a593Smuzhiyun 
4790*4882a593Smuzhiyun 	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4791*4882a593Smuzhiyun 
4792*4882a593Smuzhiyun 	return status;
4793*4882a593Smuzhiyun }
4794*4882a593Smuzhiyun 
4795*4882a593Smuzhiyun /* Routine to check whether dump image is present or not */
4796*4882a593Smuzhiyun bool dump_present(struct be_adapter *adapter)
4797*4882a593Smuzhiyun {
4798*4882a593Smuzhiyun 	u32 sliport_status = 0;
4799*4882a593Smuzhiyun 
4800*4882a593Smuzhiyun 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4801*4882a593Smuzhiyun 	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4802*4882a593Smuzhiyun }
4803*4882a593Smuzhiyun 
4804*4882a593Smuzhiyun int lancer_initiate_dump(struct be_adapter *adapter)
4805*4882a593Smuzhiyun {
4806*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
4807*4882a593Smuzhiyun 	int status;
4808*4882a593Smuzhiyun 
4809*4882a593Smuzhiyun 	if (dump_present(adapter)) {
4810*4882a593Smuzhiyun 		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4811*4882a593Smuzhiyun 		return -EEXIST;
4812*4882a593Smuzhiyun 	}
4813*4882a593Smuzhiyun 
4814*4882a593Smuzhiyun 	/* give firmware reset and diagnostic dump */
4815*4882a593Smuzhiyun 	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4816*4882a593Smuzhiyun 				     PHYSDEV_CONTROL_DD_MASK);
4817*4882a593Smuzhiyun 	if (status < 0) {
4818*4882a593Smuzhiyun 		dev_err(dev, "FW reset failed\n");
4819*4882a593Smuzhiyun 		return status;
4820*4882a593Smuzhiyun 	}
4821*4882a593Smuzhiyun 
4822*4882a593Smuzhiyun 	status = lancer_wait_idle(adapter);
4823*4882a593Smuzhiyun 	if (status)
4824*4882a593Smuzhiyun 		return status;
4825*4882a593Smuzhiyun 
4826*4882a593Smuzhiyun 	if (!dump_present(adapter)) {
4827*4882a593Smuzhiyun 		dev_err(dev, "FW dump not generated\n");
4828*4882a593Smuzhiyun 		return -EIO;
4829*4882a593Smuzhiyun 	}
4830*4882a593Smuzhiyun 
4831*4882a593Smuzhiyun 	return 0;
4832*4882a593Smuzhiyun }
4833*4882a593Smuzhiyun 
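/* Dump flow on Lancer: lancer_initiate_dump() refuses to run while a
 * previous dump is still present, then writes FW_RESET | DD through
 * lancer_physdev_ctrl() (which itself waits for the port to go idle
 * first), waits for idle again, and finally checks the DIP bit via
 * dump_present() to confirm that a dump image was produced.
 */
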
4834*4882a593Smuzhiyun int lancer_delete_dump(struct be_adapter *adapter)
4835*4882a593Smuzhiyun {
4836*4882a593Smuzhiyun 	int status;
4837*4882a593Smuzhiyun 
4838*4882a593Smuzhiyun 	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4839*4882a593Smuzhiyun 	return be_cmd_status(status);
4840*4882a593Smuzhiyun }
4841*4882a593Smuzhiyun 
4842*4882a593Smuzhiyun /* Uses sync mcc */
4843*4882a593Smuzhiyun int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4844*4882a593Smuzhiyun {
4845*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4846*4882a593Smuzhiyun 	struct be_cmd_enable_disable_vf *req;
4847*4882a593Smuzhiyun 	int status;
4848*4882a593Smuzhiyun 
4849*4882a593Smuzhiyun 	if (BEx_chip(adapter))
4850*4882a593Smuzhiyun 		return 0;
4851*4882a593Smuzhiyun 
4852*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
4853*4882a593Smuzhiyun 
4854*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
4855*4882a593Smuzhiyun 	if (!wrb) {
4856*4882a593Smuzhiyun 		status = -EBUSY;
4857*4882a593Smuzhiyun 		goto err;
4858*4882a593Smuzhiyun 	}
4859*4882a593Smuzhiyun 
4860*4882a593Smuzhiyun 	req = embedded_payload(wrb);
4861*4882a593Smuzhiyun 
4862*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4863*4882a593Smuzhiyun 			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4864*4882a593Smuzhiyun 			       wrb, NULL);
4865*4882a593Smuzhiyun 
4866*4882a593Smuzhiyun 	req->hdr.domain = domain;
4867*4882a593Smuzhiyun 	req->enable = 1;
4868*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
4869*4882a593Smuzhiyun err:
4870*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
4871*4882a593Smuzhiyun 	return status;
4872*4882a593Smuzhiyun }
4873*4882a593Smuzhiyun 
4874*4882a593Smuzhiyun int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4875*4882a593Smuzhiyun {
4876*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4877*4882a593Smuzhiyun 	struct be_cmd_req_intr_set *req;
4878*4882a593Smuzhiyun 	int status;
4879*4882a593Smuzhiyun 
4880*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4881*4882a593Smuzhiyun 		return -1;
4882*4882a593Smuzhiyun 
4883*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
4884*4882a593Smuzhiyun 
4885*4882a593Smuzhiyun 	req = embedded_payload(wrb);
4886*4882a593Smuzhiyun 
4887*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4888*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4889*4882a593Smuzhiyun 			       wrb, NULL);
4890*4882a593Smuzhiyun 
4891*4882a593Smuzhiyun 	req->intr_enabled = intr_enable;
4892*4882a593Smuzhiyun 
4893*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
4894*4882a593Smuzhiyun 
4895*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
4896*4882a593Smuzhiyun 	return status;
4897*4882a593Smuzhiyun }
4898*4882a593Smuzhiyun 
4899*4882a593Smuzhiyun /* Uses MBOX */
4900*4882a593Smuzhiyun int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4901*4882a593Smuzhiyun {
4902*4882a593Smuzhiyun 	struct be_cmd_req_get_active_profile *req;
4903*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4904*4882a593Smuzhiyun 	int status;
4905*4882a593Smuzhiyun 
4906*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4907*4882a593Smuzhiyun 		return -1;
4908*4882a593Smuzhiyun 
4909*4882a593Smuzhiyun 	wrb = wrb_from_mbox(adapter);
4910*4882a593Smuzhiyun 	if (!wrb) {
4911*4882a593Smuzhiyun 		status = -EBUSY;
4912*4882a593Smuzhiyun 		goto err;
4913*4882a593Smuzhiyun 	}
4914*4882a593Smuzhiyun 
4915*4882a593Smuzhiyun 	req = embedded_payload(wrb);
4916*4882a593Smuzhiyun 
4917*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4918*4882a593Smuzhiyun 			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4919*4882a593Smuzhiyun 			       wrb, NULL);
4920*4882a593Smuzhiyun 
4921*4882a593Smuzhiyun 	status = be_mbox_notify_wait(adapter);
4922*4882a593Smuzhiyun 	if (!status) {
4923*4882a593Smuzhiyun 		struct be_cmd_resp_get_active_profile *resp =
4924*4882a593Smuzhiyun 							embedded_payload(wrb);
4925*4882a593Smuzhiyun 
4926*4882a593Smuzhiyun 		*profile_id = le16_to_cpu(resp->active_profile_id);
4927*4882a593Smuzhiyun 	}
4928*4882a593Smuzhiyun 
4929*4882a593Smuzhiyun err:
4930*4882a593Smuzhiyun 	mutex_unlock(&adapter->mbox_lock);
4931*4882a593Smuzhiyun 	return status;
4932*4882a593Smuzhiyun }
4933*4882a593Smuzhiyun 
4934*4882a593Smuzhiyun static int
4935*4882a593Smuzhiyun __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4936*4882a593Smuzhiyun 				 int link_state, int version, u8 domain)
4937*4882a593Smuzhiyun {
4938*4882a593Smuzhiyun 	struct be_cmd_req_set_ll_link *req;
4939*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
4940*4882a593Smuzhiyun 	u32 link_config = 0;
4941*4882a593Smuzhiyun 	int status;
4942*4882a593Smuzhiyun 
4943*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
4944*4882a593Smuzhiyun 
4945*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
4946*4882a593Smuzhiyun 	if (!wrb) {
4947*4882a593Smuzhiyun 		status = -EBUSY;
4948*4882a593Smuzhiyun 		goto err;
4949*4882a593Smuzhiyun 	}
4950*4882a593Smuzhiyun 
4951*4882a593Smuzhiyun 	req = embedded_payload(wrb);
4952*4882a593Smuzhiyun 
4953*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4954*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4955*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
4956*4882a593Smuzhiyun 
4957*4882a593Smuzhiyun 	req->hdr.version = version;
4958*4882a593Smuzhiyun 	req->hdr.domain = domain;
4959*4882a593Smuzhiyun 
4960*4882a593Smuzhiyun 	if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4961*4882a593Smuzhiyun 	    link_state == IFLA_VF_LINK_STATE_AUTO)
4962*4882a593Smuzhiyun 		link_config |= PLINK_ENABLE;
4963*4882a593Smuzhiyun 
4964*4882a593Smuzhiyun 	if (link_state == IFLA_VF_LINK_STATE_AUTO)
4965*4882a593Smuzhiyun 		link_config |= PLINK_TRACK;
4966*4882a593Smuzhiyun 
4967*4882a593Smuzhiyun 	req->link_config = cpu_to_le32(link_config);
4968*4882a593Smuzhiyun 
4969*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
4970*4882a593Smuzhiyun err:
4971*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
4972*4882a593Smuzhiyun 	return status;
4973*4882a593Smuzhiyun }
4974*4882a593Smuzhiyun 
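/* Mapping used above: IFLA_VF_LINK_STATE_ENABLE sets PLINK_ENABLE,
 * IFLA_VF_LINK_STATE_AUTO sets PLINK_ENABLE | PLINK_TRACK, and
 * IFLA_VF_LINK_STATE_DISABLE leaves link_config at 0.  The wrapper below
 * first issues version 2 of the command and retries with version 1 when
 * older firmware rejects it as an illegal request.
 */
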
4975*4882a593Smuzhiyun int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4976*4882a593Smuzhiyun 				   int link_state, u8 domain)
4977*4882a593Smuzhiyun {
4978*4882a593Smuzhiyun 	int status;
4979*4882a593Smuzhiyun 
4980*4882a593Smuzhiyun 	if (BE2_chip(adapter))
4981*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4982*4882a593Smuzhiyun 
4983*4882a593Smuzhiyun 	status = __be_cmd_set_logical_link_config(adapter, link_state,
4984*4882a593Smuzhiyun 						  2, domain);
4985*4882a593Smuzhiyun 
4986*4882a593Smuzhiyun 	/* Version 2 of the command will not be recognized by older FW.
4987*4882a593Smuzhiyun 	 * On such a failure issue version 1 of the command.
4988*4882a593Smuzhiyun 	 */
4989*4882a593Smuzhiyun 	if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
4990*4882a593Smuzhiyun 		status = __be_cmd_set_logical_link_config(adapter, link_state,
4991*4882a593Smuzhiyun 							  1, domain);
4992*4882a593Smuzhiyun 	return status;
4993*4882a593Smuzhiyun }
4994*4882a593Smuzhiyun 
4995*4882a593Smuzhiyun int be_cmd_set_features(struct be_adapter *adapter)
4996*4882a593Smuzhiyun {
4997*4882a593Smuzhiyun 	struct be_cmd_resp_set_features *resp;
4998*4882a593Smuzhiyun 	struct be_cmd_req_set_features *req;
4999*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
5000*4882a593Smuzhiyun 	int status;
5001*4882a593Smuzhiyun 
5002*4882a593Smuzhiyun 	if (mutex_lock_interruptible(&adapter->mcc_lock))
5003*4882a593Smuzhiyun 		return -1;
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
5006*4882a593Smuzhiyun 	if (!wrb) {
5007*4882a593Smuzhiyun 		status = -EBUSY;
5008*4882a593Smuzhiyun 		goto err;
5009*4882a593Smuzhiyun 	}
5010*4882a593Smuzhiyun 
5011*4882a593Smuzhiyun 	req = embedded_payload(wrb);
5012*4882a593Smuzhiyun 
5013*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
5014*4882a593Smuzhiyun 			       OPCODE_COMMON_SET_FEATURES,
5015*4882a593Smuzhiyun 			       sizeof(*req), wrb, NULL);
5016*4882a593Smuzhiyun 
5017*4882a593Smuzhiyun 	req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY);
5018*4882a593Smuzhiyun 	req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery));
5019*4882a593Smuzhiyun 	req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK);
5020*4882a593Smuzhiyun 
5021*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
5022*4882a593Smuzhiyun 	if (status)
5023*4882a593Smuzhiyun 		goto err;
5024*4882a593Smuzhiyun 
5025*4882a593Smuzhiyun 	resp = embedded_payload(wrb);
5026*4882a593Smuzhiyun 
5027*4882a593Smuzhiyun 	adapter->error_recovery.ue_to_poll_time =
5028*4882a593Smuzhiyun 		le16_to_cpu(resp->parameter.resp.ue2rp);
5029*4882a593Smuzhiyun 	adapter->error_recovery.ue_to_reset_time =
5030*4882a593Smuzhiyun 		le16_to_cpu(resp->parameter.resp.ue2sr);
5031*4882a593Smuzhiyun 	adapter->error_recovery.recovery_supported = true;
5032*4882a593Smuzhiyun err:
5033*4882a593Smuzhiyun 	/* Checking "MCC_STATUS_INVALID_LENGTH" for SKH as FW
5034*4882a593Smuzhiyun 	 * returns this error in older firmware versions
5035*4882a593Smuzhiyun 	 */
5036*4882a593Smuzhiyun 	if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
5037*4882a593Smuzhiyun 	    base_status(status) == MCC_STATUS_INVALID_LENGTH)
5038*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev,
5039*4882a593Smuzhiyun 			 "Adapter does not support HW error recovery\n");
5040*4882a593Smuzhiyun 
5041*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
5042*4882a593Smuzhiyun 	return status;
5043*4882a593Smuzhiyun }
5044*4882a593Smuzhiyun 
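/* be_cmd_set_features() advertises BE_FEATURE_UE_RECOVERY and, on success,
 * caches the UE-to-poll and UE-to-reset times returned by firmware and
 * marks HW error recovery as supported.  Older firmware answers with
 * ILLEGAL_REQUEST or INVALID_LENGTH; that case is logged as "no HW error
 * recovery support" and the status is returned to the caller unchanged.
 */
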
5045*4882a593Smuzhiyun int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
5046*4882a593Smuzhiyun 		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
5047*4882a593Smuzhiyun {
5048*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev_handle);
5049*4882a593Smuzhiyun 	struct be_mcc_wrb *wrb;
5050*4882a593Smuzhiyun 	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
5051*4882a593Smuzhiyun 	struct be_cmd_req_hdr *req;
5052*4882a593Smuzhiyun 	struct be_cmd_resp_hdr *resp;
5053*4882a593Smuzhiyun 	int status;
5054*4882a593Smuzhiyun 
5055*4882a593Smuzhiyun 	mutex_lock(&adapter->mcc_lock);
5056*4882a593Smuzhiyun 
5057*4882a593Smuzhiyun 	wrb = wrb_from_mccq(adapter);
5058*4882a593Smuzhiyun 	if (!wrb) {
5059*4882a593Smuzhiyun 		status = -EBUSY;
5060*4882a593Smuzhiyun 		goto err;
5061*4882a593Smuzhiyun 	}
5062*4882a593Smuzhiyun 	req = embedded_payload(wrb);
5063*4882a593Smuzhiyun 	resp = embedded_payload(wrb);
5064*4882a593Smuzhiyun 
5065*4882a593Smuzhiyun 	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
5066*4882a593Smuzhiyun 			       hdr->opcode, wrb_payload_size, wrb, NULL);
5067*4882a593Smuzhiyun 	memcpy(req, wrb_payload, wrb_payload_size);
5068*4882a593Smuzhiyun 	be_dws_cpu_to_le(req, wrb_payload_size);
5069*4882a593Smuzhiyun 
5070*4882a593Smuzhiyun 	status = be_mcc_notify_wait(adapter);
5071*4882a593Smuzhiyun 	if (cmd_status)
5072*4882a593Smuzhiyun 		*cmd_status = (status & 0xffff);
5073*4882a593Smuzhiyun 	if (ext_status)
5074*4882a593Smuzhiyun 		*ext_status = 0;
5075*4882a593Smuzhiyun 	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
5076*4882a593Smuzhiyun 	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
5077*4882a593Smuzhiyun err:
5078*4882a593Smuzhiyun 	mutex_unlock(&adapter->mcc_lock);
5079*4882a593Smuzhiyun 	return status;
5080*4882a593Smuzhiyun }
5081*4882a593Smuzhiyun EXPORT_SYMBOL(be_roce_mcc_cmd);
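/* be_roce_mcc_cmd() is the exported pass-through for the RoCE driver: the
 * caller hands in a fully formed request (wrb_payload) whose header supplies
 * the subsystem and opcode; it is copied into an embedded WRB, converted to
 * little-endian, executed synchronously, and the response (header plus
 * response_length bytes) is copied back into the same buffer.
 */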
5082