xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* bnx2x_sp.h: Qlogic Everest network driver.
2*4882a593Smuzhiyun  *
3*4882a593Smuzhiyun  * Copyright 2011-2013 Broadcom Corporation
4*4882a593Smuzhiyun  * Copyright (c) 2014 QLogic Corporation
5*4882a593Smuzhiyun  * All rights reserved
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Unless you and Qlogic execute a separate written software license
8*4882a593Smuzhiyun  * agreement governing use of this software, this software is licensed to you
9*4882a593Smuzhiyun  * under the terms of the GNU General Public License version 2, available
10*4882a593Smuzhiyun  * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  * Notwithstanding the above, under no circumstances may you combine this
13*4882a593Smuzhiyun  * software in any way with any other Qlogic software provided under a
14*4882a593Smuzhiyun  * license other than the GPL, without Qlogic's express prior written
15*4882a593Smuzhiyun  * consent.
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
18*4882a593Smuzhiyun  * Written by: Vladislav Zolotarov
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  */
#ifndef BNX2X_SP_VERBS
#define BNX2X_SP_VERBS

/* Forward declarations -- full definitions are provided elsewhere in the
 * driver; this header only uses pointers to them.
 */
struct bnx2x;
struct eth_context;
26*4882a593Smuzhiyun 
/* Bits representing general command's configuration.
 * Each enumerator is a bit position within an unsigned long
 * "ramrod_flags" word, not a mask value.
 */
enum {
	RAMROD_TX,
	RAMROD_RX,
	/* Wait until all pending commands complete */
	RAMROD_COMP_WAIT,
	/* Don't send a ramrod, only update a registry */
	RAMROD_DRV_CLR_ONLY,
	/* Configure HW according to the current object state */
	RAMROD_RESTORE,
	/* Execute the next command now */
	RAMROD_EXEC,
	/* Don't add a new command and continue execution of postponed
	 * commands. If not set a new command will be added to the
	 * pending commands list.
	 */
	RAMROD_CONT,
	/* If there is another pending ramrod, wait until it finishes and
	 * re-try to submit this one. This flag can be set only in sleepable
	 * context, and should not be set from the context that completes the
	 * ramrods as deadlock will occur.
	 */
	RAMROD_RETRY,
};
51*4882a593Smuzhiyun 
/* Direction a configuration object applies to: the Rx path, the Tx path,
 * or both (stored in bnx2x_raw_obj::obj_type).
 */
typedef enum {
	BNX2X_OBJ_TYPE_RX,
	BNX2X_OBJ_TYPE_TX,
	BNX2X_OBJ_TYPE_RX_TX,
} bnx2x_obj_type;
57*4882a593Smuzhiyun 
/* Public slow path states.
 * Bit positions set in a shared state word (see bnx2x_raw_obj::pstate)
 * while the corresponding command is pending or scheduled.
 */
enum {
	BNX2X_FILTER_MAC_PENDING,
	BNX2X_FILTER_VLAN_PENDING,
	BNX2X_FILTER_VLAN_MAC_PENDING,
	BNX2X_FILTER_RX_MODE_PENDING,
	BNX2X_FILTER_RX_MODE_SCHED,
	BNX2X_FILTER_ISCSI_ETH_START_SCHED,
	BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
	BNX2X_FILTER_FCOE_ETH_START_SCHED,
	BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
	BNX2X_FILTER_MCAST_PENDING,
	BNX2X_FILTER_MCAST_SCHED,
	BNX2X_FILTER_RSS_CONF_PENDING,
	BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
	BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
};
75*4882a593Smuzhiyun 
/* Common part of every slow-path configuration object: identifies the
 * function/client/connection a ramrod is sent for, the DMA-able buffer the
 * ramrod data is built in, and the "pending" state-tracking hooks.
 */
struct bnx2x_raw_obj {
	u8		func_id;

	/* Queue params */
	u8		cl_id;
	u32		cid;

	/* Ramrod data buffer params */
	void		*rdata;
	dma_addr_t	rdata_mapping;

	/* Ramrod state params */
	int		state;   /* "ramrod is pending" state bit */
	unsigned long	*pstate; /* pointer to state buffer */

	/* Rx, Tx or combined Rx+Tx object */
	bnx2x_obj_type	obj_type;

	/* Wait for the pending ramrod to complete */
	int (*wait_comp)(struct bnx2x *bp,
			 struct bnx2x_raw_obj *o);

	/* Test / clear / set the "state" bit in *pstate */
	bool (*check_pending)(struct bnx2x_raw_obj *o);
	void (*clear_pending)(struct bnx2x_raw_obj *o);
	void (*set_pending)(struct bnx2x_raw_obj *o);
};
100*4882a593Smuzhiyun 
/************************* VLAN-MAC commands related parameters ***************/

/* Payload of a MAC classification rule */
struct bnx2x_mac_ramrod_data {
	u8 mac[ETH_ALEN];
	u8 is_inner_mac;
};

/* Payload of a VLAN classification rule */
struct bnx2x_vlan_ramrod_data {
	u16 vlan;
};

/* Payload of a paired VLAN+MAC classification rule */
struct bnx2x_vlan_mac_ramrod_data {
	u8 mac[ETH_ALEN];
	u8 is_inner_mac;
	u16 vlan;
};

/* One of the three payloads above, selected by the owning object's kind */
union bnx2x_classification_ramrod_data {
	struct bnx2x_mac_ramrod_data mac;
	struct bnx2x_vlan_ramrod_data vlan;
	struct bnx2x_vlan_mac_ramrod_data vlan_mac;
};
122*4882a593Smuzhiyun 
/* VLAN_MAC commands */
enum bnx2x_vlan_mac_cmd {
	BNX2X_VLAN_MAC_ADD,
	BNX2X_VLAN_MAC_DEL,
	/* Move an entry from one classification object to another */
	BNX2X_VLAN_MAC_MOVE,
};

/* A single queued VLAN/MAC command together with its payload */
struct bnx2x_vlan_mac_data {
	/* Requested command: BNX2X_VLAN_MAC_XX */
	enum bnx2x_vlan_mac_cmd cmd;
	/* used to contain the data related vlan_mac_flags bits from
	 * ramrod parameters.
	 */
	unsigned long vlan_mac_flags;

	/* Needed for MOVE command: destination object */
	struct bnx2x_vlan_mac_obj *target_obj;

	/* Classification rule payload (MAC, VLAN or VLAN+MAC) */
	union bnx2x_classification_ramrod_data u;
};
143*4882a593Smuzhiyun 
/*************************** Exe Queue obj ************************************/

/* Command payload carried by an execution-queue element */
union bnx2x_exe_queue_cmd_data {
	struct bnx2x_vlan_mac_data vlan_mac;

	struct {
		/* TODO */
	} mcast;
};

/* One entry on an execution queue (exe_queue / pending_comp lists) */
struct bnx2x_exeq_elem {
	struct list_head		link;

	/* Length of this element in the exe_chunk. */
	int				cmd_len;

	union bnx2x_exe_queue_cmd_data	cmd_data;
};
161*4882a593Smuzhiyun 
/* Opaque handle of the object that owns an execution queue */
union bnx2x_qable_obj;

/* Completion element delivered for an executed command */
union bnx2x_exeq_comp_elem {
	union event_ring_elem *elem;
};

struct bnx2x_exe_queue_obj;

/* Virtual-function types for struct bnx2x_exe_queue_obj. */

/* Check that a new element may be queued/executed */
typedef int (*exe_q_validate)(struct bnx2x *bp,
			      union bnx2x_qable_obj *o,
			      struct bnx2x_exeq_elem *elem);

/* Release resources taken for a removed pending element */
typedef int (*exe_q_remove)(struct bnx2x *bp,
			    union bnx2x_qable_obj *o,
			    struct bnx2x_exeq_elem *elem);

/* Return positive if entry was optimized, 0 - if not, negative
 * in case of an error.
 */
typedef int (*exe_q_optimize)(struct bnx2x *bp,
			      union bnx2x_qable_obj *o,
			      struct bnx2x_exeq_elem *elem);

/* Execute one chunk of pending commands */
typedef int (*exe_q_execute)(struct bnx2x *bp,
			     union bnx2x_qable_obj *o,
			     struct list_head *exe_chunk,
			     unsigned long *ramrod_flags);

/* Look up the queued element matching "elem", or NULL */
typedef struct bnx2x_exeq_elem *
			(*exe_q_get)(struct bnx2x_exe_queue_obj *o,
				     struct bnx2x_exeq_elem *elem);
191*4882a593Smuzhiyun 
/* Generic queue of slow-path commands: commands are first queued, then
 * executed in chunks of up to exe_chunk_len, and finally completed.
 */
struct bnx2x_exe_queue_obj {
	/* Commands pending for an execution. */
	struct list_head	exe_queue;

	/* Commands pending for a completion. */
	struct list_head	pending_comp;

	/* Protects both lists and the owner's head-list bookkeeping */
	spinlock_t		lock;

	/* Maximum length of commands' list for one execution */
	int			exe_chunk_len;

	/* The object this queue belongs to (vlan_mac/mcast/...) */
	union bnx2x_qable_obj	*owner;

	/****** Virtual functions ******/
	/**
	 * Called before commands execution for commands that are really
	 * going to be executed (after 'optimize').
	 *
	 * Must run under exe_queue->lock
	 */
	exe_q_validate		validate;

	/**
	 * Called before removing pending commands, cleaning allocated
	 * resources (e.g., credits from validate)
	 */
	 exe_q_remove		remove;

	/**
	 * This will try to cancel the current pending commands list
	 * considering the new command.
	 *
	 * Returns the number of optimized commands or a negative error code
	 *
	 * Must run under exe_queue->lock
	 */
	exe_q_optimize		optimize;

	/**
	 * Run the next commands chunk (owner specific).
	 */
	exe_q_execute		execute;

	/**
	 * Return the exe_queue element containing the specific command
	 * if any. Otherwise return NULL.
	 */
	exe_q_get		get;
};
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/*
 * Element in the VLAN_MAC registry list having all currently configured
 * rules.
 */
struct bnx2x_vlan_mac_registry_elem {
	struct list_head	link;

	/* Used to store the cam offset used for the mac/vlan/vlan-mac.
	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
	 * same CAM for these chips.
	 */
	int			cam_offset;

	/* Needed for DEL and RESTORE flows */
	unsigned long		vlan_mac_flags;

	/* The configured classification rule itself */
	union bnx2x_classification_ramrod_data u;
};
261*4882a593Smuzhiyun 
/* Bits representing VLAN_MAC commands specific flags.
 * Bit positions for the vlan_mac_flags word; the first five classify the
 * owner of an entry, the last two control CAM credit accounting.
 */
enum {
	BNX2X_UC_LIST_MAC,
	BNX2X_ETH_MAC,
	BNX2X_ISCSI_ETH_MAC,
	BNX2X_NETQ_ETH_MAC,
	BNX2X_VLAN,
	BNX2X_DONT_CONSUME_CAM_CREDIT,
	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
/* When looking for matching filters, some flags are not interesting.
 * The mask keeps only the entry-classification bits (who owns the entry)
 * and drops the credit-accounting bits.
 */
#define BNX2X_VLAN_MAC_CMP_MASK	((1 << BNX2X_UC_LIST_MAC) | \
				 (1 << BNX2X_ETH_MAC) | \
				 (1 << BNX2X_ISCSI_ETH_MAC) | \
				 (1 << BNX2X_NETQ_ETH_MAC) | \
				 (1 << BNX2X_VLAN))
/* Reduce a vlan_mac_flags word to its comparable part */
#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
280*4882a593Smuzhiyun 
/* Per-call parameters for a VLAN/MAC configuration request */
struct bnx2x_vlan_mac_ramrod_params {
	/* Object to run the command from */
	struct bnx2x_vlan_mac_obj *vlan_mac_obj;

	/* General command flags: COMP_WAIT, etc. */
	unsigned long ramrod_flags;

	/* Command specific configuration request */
	struct bnx2x_vlan_mac_data user_req;
};
291*4882a593Smuzhiyun 
/* Classification object handling MAC, VLAN and VLAN+MAC rules: keeps a
 * registry of configured entries, an execution queue of pending commands
 * and a set of chip-specific virtual functions.
 */
struct bnx2x_vlan_mac_obj {
	struct bnx2x_raw_obj raw;

	/* Bookkeeping list: will prevent the addition of already existing
	 * entries.
	 */
	struct list_head		head;
	/* Implement a simple reader/writer lock on the head list.
	 * All these fields should only be accessed under the exe_queue lock.
	 */
	u8		head_reader; /* Num. of readers accessing head list */
	bool		head_exe_request; /* Pending execution request. */
	unsigned long	saved_ramrod_flags; /* Ramrods of pending execution */

	/* TODO: Add its initialization in the init functions */
	struct bnx2x_exe_queue_obj	exe_queue;

	/* MACs credit pool */
	struct bnx2x_credit_pool_obj	*macs_pool;

	/* VLANs credit pool */
	struct bnx2x_credit_pool_obj	*vlans_pool;

	/* RAMROD command to be used */
	int				ramrod_cmd;

	/* copy first n elements onto preallocated buffer
	 *
	 * @param n number of elements to get
	 * @param buf buffer preallocated by caller into which elements
	 *            will be copied. Note elements are 4-byte aligned
	 *            so buffer size must be able to accommodate the
	 *            aligned elements.
	 *
	 * @return number of copied bytes
	 */
	int (*get_n_elements)(struct bnx2x *bp,
			      struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
			      u8 stride, u8 size);

	/**
	 * Checks if ADD-ramrod with the given params may be performed.
	 *
	 * @return zero if the element may be added
	 */

	int (*check_add)(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *o,
			 union bnx2x_classification_ramrod_data *data);

	/**
	 * Checks if DEL-ramrod with the given params may be performed.
	 *
	 * @return the matching registry element when the command may be
	 *         performed
	 */
	struct bnx2x_vlan_mac_registry_elem *
		(*check_del)(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data);

	/**
	 * Checks if a MOVE-ramrod from src_o to dst_o with the given
	 * params may be performed.
	 *
	 * @return true if the element may be moved
	 */
	bool (*check_move)(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_obj *src_o,
			   struct bnx2x_vlan_mac_obj *dst_o,
			   union bnx2x_classification_ramrod_data *data);

	/**
	 *  Update the relevant credit object(s) (consume/return
	 *  correspondingly).
	 */
	bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
	bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
	bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
	bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);

	/**
	 * Configures one rule in the ramrod data buffer.
	 */
	void (*set_one_rule)(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *o,
			     struct bnx2x_exeq_elem *elem, int rule_idx,
			     int cam_offset);

	/**
	 * Delete all configured elements having the given
	 * vlan_mac_flags specification. Assumes no pending for
	 * execution commands. Will schedule all currently
	 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
	 * specification for deletion and will use the given
	 * ramrod_flags for the last DEL operation.
	 *
	 * @param bp
	 * @param o
	 * @param ramrod_flags RAMROD_XX flags
	 *
	 * @return 0 if the last operation has completed successfully
	 *         and there are no more elements left, positive value
	 *         if there are pending for completion commands,
	 *         negative value in case of failure.
	 */
	int (*delete_all)(struct bnx2x *bp,
			  struct bnx2x_vlan_mac_obj *o,
			  unsigned long *vlan_mac_flags,
			  unsigned long *ramrod_flags);

	/**
	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
	 * configured elements list.
	 *
	 * @param bp
	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
	 *          ramrod_flags is only taken into an account)
	 * @param ppos a pointer to the cookie that should be given back in the
	 *        next call to make function handle the next element. If
	 *        *ppos is set to NULL it will restart the iterator.
	 *        If returned *ppos == NULL this means that the last
	 *        element has been handled.
	 *
	 * @return int
	 */
	int (*restore)(struct bnx2x *bp,
		       struct bnx2x_vlan_mac_ramrod_params *p,
		       struct bnx2x_vlan_mac_registry_elem **ppos);

	/**
	 * Should be called on a completion arrival.
	 *
	 * @param bp
	 * @param o
	 * @param cqe Completion element we are handling
	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
	 *		       pending commands will be executed.
	 *		       RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
	 *		       may also be set if needed.
	 *
	 * @return 0 if there are neither pending nor waiting for
	 *         completion commands. Positive value if there are
	 *         pending for execution or for completion commands.
	 *         Negative value in case of an error (including an
	 *         error in the cqe).
	 */
	int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
			union event_ring_elem *cqe,
			unsigned long *ramrod_flags);

	/**
	 * Wait for completion of all commands. Don't schedule new ones,
	 * just wait. It assumes that the completion code will schedule
	 * for new commands.
	 */
	int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
};
448*4882a593Smuzhiyun 
/* NIG LLH CAM line assignment; MAX_PF_LINE is derived from the HW register
 * NIG_REG_LLH1_FUNC_MEM_SIZE (half of the function memory).
 */
enum {
	BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0,
	BNX2X_LLH_CAM_ETH_LINE,
	BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};
454*4882a593Smuzhiyun 
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */

/* RX_MODE ramrod special flags: set in rx_mode_flags field in
 * a bnx2x_rx_mode_ramrod_params.
 */
enum {
	BNX2X_RX_MODE_FCOE_ETH,
	BNX2X_RX_MODE_ISCSI_ETH,
};

/* Bit positions for the rx_accept_flags / tx_accept_flags words in
 * a bnx2x_rx_mode_ramrod_params.
 */
enum {
	BNX2X_ACCEPT_UNICAST,
	BNX2X_ACCEPT_MULTICAST,
	BNX2X_ACCEPT_ALL_UNICAST,
	BNX2X_ACCEPT_ALL_MULTICAST,
	BNX2X_ACCEPT_BROADCAST,
	BNX2X_ACCEPT_UNMATCHED,
	BNX2X_ACCEPT_ANY_VLAN
};
474*4882a593Smuzhiyun 
/* Per-call parameters for an RX_MODE configuration request */
struct bnx2x_rx_mode_ramrod_params {
	struct bnx2x_rx_mode_obj *rx_mode_obj;
	unsigned long *pstate;		/* state word tracking the pending bit */
	int state;			/* pending-state bit number in *pstate */
	u8 cl_id;
	u32 cid;
	u8 func_id;
	unsigned long ramrod_flags;	/* RAMROD_XX general flags */
	unsigned long rx_mode_flags;	/* BNX2X_RX_MODE_XX special flags */

	/* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
	 * a tstorm_eth_mac_filter_config (e1x).
	 */
	void *rdata;
	dma_addr_t rdata_mapping;

	/* Rx mode settings */
	unsigned long rx_accept_flags;

	/* internal switching settings */
	unsigned long tx_accept_flags;
};
497*4882a593Smuzhiyun 
/* RX_MODE configuration object: chip-specific config and wait hooks */
struct bnx2x_rx_mode_obj {
	/* Send/apply the rx_mode configuration described by p */
	int (*config_rx_mode)(struct bnx2x *bp,
			      struct bnx2x_rx_mode_ramrod_params *p);

	/* Wait until the rx_mode command completes */
	int (*wait_comp)(struct bnx2x *bp,
			 struct bnx2x_rx_mode_ramrod_params *p);
};
505*4882a593Smuzhiyun 
/********************** Set multicast group ***********************************/

/* One multicast MAC address on a caller-provided mcast_list */
struct bnx2x_mcast_list_elem {
	struct list_head link;
	u8 *mac;
};

/* Input to set_one_rule(): either a MAC address or a bin number */
union bnx2x_mcast_config_data {
	u8 *mac;
	u8 bin; /* used in a RESTORE flow */
};
517*4882a593Smuzhiyun 
/* Per-call parameters for a multicast configuration request */
struct bnx2x_mcast_ramrod_params {
	struct bnx2x_mcast_obj *mcast_obj;

	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
	unsigned long ramrod_flags;

	struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
	/** TODO:
	 *      - rename it to macs_num.
	 *      - Add a new command type for handling pending commands
	 *        (remove "zero semantics").
	 *
	 *  Length of mcast_list. If zero and ADD_CONT command - post
	 *  pending commands.
	 */
	int mcast_list_len;
};
535*4882a593Smuzhiyun 
/* Multicast configuration commands */
enum bnx2x_mcast_cmd {
	BNX2X_MCAST_CMD_ADD,
	BNX2X_MCAST_CMD_CONT,	/* continue posting pending commands */
	BNX2X_MCAST_CMD_DEL,
	BNX2X_MCAST_CMD_RESTORE,

	/* Following this, multicast configuration should equal to approx
	 * the set of MACs provided [i.e., remove all else].
	 * The two sub-commands are used internally to decide whether a given
	 * bin is to be added or removed
	 */
	BNX2X_MCAST_CMD_SET,
	BNX2X_MCAST_CMD_SET_ADD,
	BNX2X_MCAST_CMD_SET_DEL,
};
551*4882a593Smuzhiyun 
/* Multicast configuration object: keeps a registry of the current multicast
 * configuration (either an approximate bin vector or an exact MAC list),
 * a list of pending commands, and chip-specific virtual functions.
 */
struct bnx2x_mcast_obj {
	struct bnx2x_raw_obj raw;

	union {
		/* Approximate-match registry: a 256-bit bin vector */
		struct {
		#define BNX2X_MCAST_BINS_NUM	256
		#define BNX2X_MCAST_VEC_SZ	(BNX2X_MCAST_BINS_NUM / 64)
			u64 vec[BNX2X_MCAST_VEC_SZ];

			/** Number of BINs to clear. Should be updated
			 *  immediately when a command arrives in order to
			 *  properly create DEL commands.
			 */
			int num_bins_set;
		} aprox_match;

		/* Exact-match registry: a list of configured MACs */
		struct {
			struct list_head macs;
			int num_macs_set;
		} exact_match;
	} registry;

	/* Pending commands */
	struct list_head pending_cmds_head;

	/* A state that is set in raw.pstate, when there are pending commands */
	int sched_state;

	/* Maximal number of mcast MACs configured in one command */
	int max_cmd_len;

	/* Total number of currently pending MACs to configure: both
	 * in the pending commands list and in the current command.
	 */
	int total_pending_num;

	u8 engine_id;

	/**
	 * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
	 */
	int (*config_mcast)(struct bnx2x *bp,
			    struct bnx2x_mcast_ramrod_params *p,
			    enum bnx2x_mcast_cmd cmd);

	/**
	 * Fills the ramrod data during the RESTORE flow.
	 *
	 * @param bp
	 * @param o
	 * @param start_idx Registry index to start from
	 * @param rdata_idx Index in the ramrod data to start from
	 *
	 * @return -1 if we handled the whole registry or index of the last
	 *         handled registry element.
	 */
	int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
			   int start_bin, int *rdata_idx);

	/* Queue the command described by p on pending_cmds_head */
	int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
			   struct bnx2x_mcast_ramrod_params *p,
			   enum bnx2x_mcast_cmd cmd);

	/* Configure a single rule (at index idx) in the ramrod data buffer */
	void (*set_one_rule)(struct bnx2x *bp,
			     struct bnx2x_mcast_obj *o, int idx,
			     union bnx2x_mcast_config_data *cfg_data,
			     enum bnx2x_mcast_cmd cmd);

	/** Checks if there are more mcast MACs to be set or a previous
	 *  command is still pending.
	 */
	bool (*check_pending)(struct bnx2x_mcast_obj *o);

	/**
	 * Set/Clear/Check SCHEDULED state of the object
	 */
	void (*set_sched)(struct bnx2x_mcast_obj *o);
	void (*clear_sched)(struct bnx2x_mcast_obj *o);
	bool (*check_sched)(struct bnx2x_mcast_obj *o);

	/* Wait until all pending commands complete */
	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);

	/**
	 * Handle the internal object counters needed for proper
	 * commands handling. Checks that the provided parameters are
	 * feasible.
	 */
	int (*validate)(struct bnx2x *bp,
			struct bnx2x_mcast_ramrod_params *p,
			enum bnx2x_mcast_cmd cmd);

	/**
	 * Restore the values of internal counters in case of a failure.
	 */
	void (*revert)(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       int old_num_bins,
		       enum bnx2x_mcast_cmd cmd);

	/* Read / write the registry size (bins or MACs, per registry kind) */
	int (*get_registry_size)(struct bnx2x_mcast_obj *o);
	void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
};
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun /*************************** Credit handling **********************************/
657*4882a593Smuzhiyun struct bnx2x_credit_pool_obj {
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	/* Current amount of credit in the pool */
660*4882a593Smuzhiyun 	atomic_t	credit;
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	/* Maximum allowed credit. put() will check against it. */
663*4882a593Smuzhiyun 	int		pool_sz;
664*4882a593Smuzhiyun 
665*4882a593Smuzhiyun 	/* Allocate a pool table statically.
666*4882a593Smuzhiyun 	 *
667*4882a593Smuzhiyun 	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
668*4882a593Smuzhiyun 	 *
669*4882a593Smuzhiyun 	 * The set bit in the table will mean that the entry is available.
670*4882a593Smuzhiyun 	 */
671*4882a593Smuzhiyun #define BNX2X_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
672*4882a593Smuzhiyun 	u64		pool_mirror[BNX2X_POOL_VEC_SIZE];
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 	/* Base pool offset (initialized differently */
675*4882a593Smuzhiyun 	int		base_pool_offset;
676*4882a593Smuzhiyun 
677*4882a593Smuzhiyun 	/**
678*4882a593Smuzhiyun 	 * Get the next free pool entry.
679*4882a593Smuzhiyun 	 *
680*4882a593Smuzhiyun 	 * @return true if there was a free entry in the pool
681*4882a593Smuzhiyun 	 */
682*4882a593Smuzhiyun 	bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	/**
685*4882a593Smuzhiyun 	 * Return the entry back to the pool.
686*4882a593Smuzhiyun 	 *
687*4882a593Smuzhiyun 	 * @return true if entry is legal and has been successfully
688*4882a593Smuzhiyun 	 *         returned to the pool.
689*4882a593Smuzhiyun 	 */
690*4882a593Smuzhiyun 	bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	/**
693*4882a593Smuzhiyun 	 * Get the requested amount of credit from the pool.
694*4882a593Smuzhiyun 	 *
695*4882a593Smuzhiyun 	 * @param cnt Amount of requested credit
696*4882a593Smuzhiyun 	 * @return true if the operation is successful
697*4882a593Smuzhiyun 	 */
698*4882a593Smuzhiyun 	bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	/**
701*4882a593Smuzhiyun 	 * Returns the credit to the pool.
702*4882a593Smuzhiyun 	 *
703*4882a593Smuzhiyun 	 * @param cnt Amount of credit to return
704*4882a593Smuzhiyun 	 * @return true if the operation is successful
705*4882a593Smuzhiyun 	 */
706*4882a593Smuzhiyun 	bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	/**
709*4882a593Smuzhiyun 	 * Reads the current amount of credit.
710*4882a593Smuzhiyun 	 */
711*4882a593Smuzhiyun 	int (*check)(struct bnx2x_credit_pool_obj *o);
712*4882a593Smuzhiyun };
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun /*************************** RSS configuration ********************************/
715*4882a593Smuzhiyun enum {
716*4882a593Smuzhiyun 	/* RSS_MODE bits are mutually exclusive */
717*4882a593Smuzhiyun 	BNX2X_RSS_MODE_DISABLED,
718*4882a593Smuzhiyun 	BNX2X_RSS_MODE_REGULAR,
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	BNX2X_RSS_IPV4,
723*4882a593Smuzhiyun 	BNX2X_RSS_IPV4_TCP,
724*4882a593Smuzhiyun 	BNX2X_RSS_IPV4_UDP,
725*4882a593Smuzhiyun 	BNX2X_RSS_IPV6,
726*4882a593Smuzhiyun 	BNX2X_RSS_IPV6_TCP,
727*4882a593Smuzhiyun 	BNX2X_RSS_IPV6_UDP,
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	BNX2X_RSS_IPV4_VXLAN,
730*4882a593Smuzhiyun 	BNX2X_RSS_IPV6_VXLAN,
731*4882a593Smuzhiyun 	BNX2X_RSS_TUNN_INNER_HDRS,
732*4882a593Smuzhiyun };
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun struct bnx2x_config_rss_params {
735*4882a593Smuzhiyun 	struct bnx2x_rss_config_obj *rss_obj;
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun 	/* may have RAMROD_COMP_WAIT set only */
738*4882a593Smuzhiyun 	unsigned long	ramrod_flags;
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	/* BNX2X_RSS_X bits */
741*4882a593Smuzhiyun 	unsigned long	rss_flags;
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	/* Number hash bits to take into an account */
744*4882a593Smuzhiyun 	u8		rss_result_mask;
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 	/* Indirection table */
747*4882a593Smuzhiyun 	u8		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	/* RSS hash values */
750*4882a593Smuzhiyun 	u32		rss_key[10];
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 	/* valid only iff BNX2X_RSS_UPDATE_TOE is set */
753*4882a593Smuzhiyun 	u16		toe_rss_bitmap;
754*4882a593Smuzhiyun };
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun struct bnx2x_rss_config_obj {
757*4882a593Smuzhiyun 	struct bnx2x_raw_obj	raw;
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	/* RSS engine to use */
760*4882a593Smuzhiyun 	u8			engine_id;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	/* Last configured indirection table */
763*4882a593Smuzhiyun 	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	/* flags for enabling 4-tupple hash on UDP */
766*4882a593Smuzhiyun 	u8			udp_rss_v4;
767*4882a593Smuzhiyun 	u8			udp_rss_v6;
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	int (*config_rss)(struct bnx2x *bp,
770*4882a593Smuzhiyun 			  struct bnx2x_config_rss_params *p);
771*4882a593Smuzhiyun };
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun /*********************** Queue state update ***********************************/
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun /* UPDATE command options */
776*4882a593Smuzhiyun enum {
777*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_IN_VLAN_REM,
778*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
779*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_OUT_VLAN_REM,
780*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
781*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_ANTI_SPOOF,
782*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
783*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_ACTIVATE,
784*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
785*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_DEF_VLAN_EN,
786*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
787*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
788*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_SILENT_VLAN_REM,
789*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
790*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_TX_SWITCHING,
791*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
792*4882a593Smuzhiyun 	BNX2X_Q_UPDATE_PTP_PKTS,
793*4882a593Smuzhiyun };
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun /* Allowed Queue states */
796*4882a593Smuzhiyun enum bnx2x_q_state {
797*4882a593Smuzhiyun 	BNX2X_Q_STATE_RESET,
798*4882a593Smuzhiyun 	BNX2X_Q_STATE_INITIALIZED,
799*4882a593Smuzhiyun 	BNX2X_Q_STATE_ACTIVE,
800*4882a593Smuzhiyun 	BNX2X_Q_STATE_MULTI_COS,
801*4882a593Smuzhiyun 	BNX2X_Q_STATE_MCOS_TERMINATED,
802*4882a593Smuzhiyun 	BNX2X_Q_STATE_INACTIVE,
803*4882a593Smuzhiyun 	BNX2X_Q_STATE_STOPPED,
804*4882a593Smuzhiyun 	BNX2X_Q_STATE_TERMINATED,
805*4882a593Smuzhiyun 	BNX2X_Q_STATE_FLRED,
806*4882a593Smuzhiyun 	BNX2X_Q_STATE_MAX,
807*4882a593Smuzhiyun };
808*4882a593Smuzhiyun 
809*4882a593Smuzhiyun /* Allowed Queue states */
810*4882a593Smuzhiyun enum bnx2x_q_logical_state {
811*4882a593Smuzhiyun 	BNX2X_Q_LOGICAL_STATE_ACTIVE,
812*4882a593Smuzhiyun 	BNX2X_Q_LOGICAL_STATE_STOPPED,
813*4882a593Smuzhiyun };
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun /* Allowed commands */
816*4882a593Smuzhiyun enum bnx2x_queue_cmd {
817*4882a593Smuzhiyun 	BNX2X_Q_CMD_INIT,
818*4882a593Smuzhiyun 	BNX2X_Q_CMD_SETUP,
819*4882a593Smuzhiyun 	BNX2X_Q_CMD_SETUP_TX_ONLY,
820*4882a593Smuzhiyun 	BNX2X_Q_CMD_DEACTIVATE,
821*4882a593Smuzhiyun 	BNX2X_Q_CMD_ACTIVATE,
822*4882a593Smuzhiyun 	BNX2X_Q_CMD_UPDATE,
823*4882a593Smuzhiyun 	BNX2X_Q_CMD_UPDATE_TPA,
824*4882a593Smuzhiyun 	BNX2X_Q_CMD_HALT,
825*4882a593Smuzhiyun 	BNX2X_Q_CMD_CFC_DEL,
826*4882a593Smuzhiyun 	BNX2X_Q_CMD_TERMINATE,
827*4882a593Smuzhiyun 	BNX2X_Q_CMD_EMPTY,
828*4882a593Smuzhiyun 	BNX2X_Q_CMD_MAX,
829*4882a593Smuzhiyun };
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun /* queue SETUP + INIT flags */
832*4882a593Smuzhiyun enum {
833*4882a593Smuzhiyun 	BNX2X_Q_FLG_TPA,
834*4882a593Smuzhiyun 	BNX2X_Q_FLG_TPA_IPV6,
835*4882a593Smuzhiyun 	BNX2X_Q_FLG_TPA_GRO,
836*4882a593Smuzhiyun 	BNX2X_Q_FLG_STATS,
837*4882a593Smuzhiyun 	BNX2X_Q_FLG_ZERO_STATS,
838*4882a593Smuzhiyun 	BNX2X_Q_FLG_ACTIVE,
839*4882a593Smuzhiyun 	BNX2X_Q_FLG_OV,
840*4882a593Smuzhiyun 	BNX2X_Q_FLG_VLAN,
841*4882a593Smuzhiyun 	BNX2X_Q_FLG_COS,
842*4882a593Smuzhiyun 	BNX2X_Q_FLG_HC,
843*4882a593Smuzhiyun 	BNX2X_Q_FLG_HC_EN,
844*4882a593Smuzhiyun 	BNX2X_Q_FLG_DHC,
845*4882a593Smuzhiyun 	BNX2X_Q_FLG_FCOE,
846*4882a593Smuzhiyun 	BNX2X_Q_FLG_LEADING_RSS,
847*4882a593Smuzhiyun 	BNX2X_Q_FLG_MCAST,
848*4882a593Smuzhiyun 	BNX2X_Q_FLG_DEF_VLAN,
849*4882a593Smuzhiyun 	BNX2X_Q_FLG_TX_SWITCH,
850*4882a593Smuzhiyun 	BNX2X_Q_FLG_TX_SEC,
851*4882a593Smuzhiyun 	BNX2X_Q_FLG_ANTI_SPOOF,
852*4882a593Smuzhiyun 	BNX2X_Q_FLG_SILENT_VLAN_REM,
853*4882a593Smuzhiyun 	BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
854*4882a593Smuzhiyun 	BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
855*4882a593Smuzhiyun 	BNX2X_Q_FLG_PCSUM_ON_PKT,
856*4882a593Smuzhiyun 	BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
857*4882a593Smuzhiyun };
858*4882a593Smuzhiyun 
859*4882a593Smuzhiyun /* Queue type options: queue type may be a combination of below. */
860*4882a593Smuzhiyun enum bnx2x_q_type {
861*4882a593Smuzhiyun 	/** TODO: Consider moving both these flags into the init()
862*4882a593Smuzhiyun 	 *        ramrod params.
863*4882a593Smuzhiyun 	 */
864*4882a593Smuzhiyun 	BNX2X_Q_TYPE_HAS_RX,
865*4882a593Smuzhiyun 	BNX2X_Q_TYPE_HAS_TX,
866*4882a593Smuzhiyun };
867*4882a593Smuzhiyun 
868*4882a593Smuzhiyun #define BNX2X_PRIMARY_CID_INDEX			0
869*4882a593Smuzhiyun #define BNX2X_MULTI_TX_COS_E1X			3 /* QM only */
870*4882a593Smuzhiyun #define BNX2X_MULTI_TX_COS_E2_E3A0		2
871*4882a593Smuzhiyun #define BNX2X_MULTI_TX_COS_E3B0			3
872*4882a593Smuzhiyun #define BNX2X_MULTI_TX_COS			3 /* Maximum possible */
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun #define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
875*4882a593Smuzhiyun /* DMAE channel to be used by FW for timesync workaroun. A driver that sends
876*4882a593Smuzhiyun  * timesync-related ramrods must not use this DMAE command ID.
877*4882a593Smuzhiyun  */
878*4882a593Smuzhiyun #define FW_DMAE_CMD_ID 6
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun struct bnx2x_queue_init_params {
881*4882a593Smuzhiyun 	struct {
882*4882a593Smuzhiyun 		unsigned long	flags;
883*4882a593Smuzhiyun 		u16		hc_rate;
884*4882a593Smuzhiyun 		u8		fw_sb_id;
885*4882a593Smuzhiyun 		u8		sb_cq_index;
886*4882a593Smuzhiyun 	} tx;
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun 	struct {
889*4882a593Smuzhiyun 		unsigned long	flags;
890*4882a593Smuzhiyun 		u16		hc_rate;
891*4882a593Smuzhiyun 		u8		fw_sb_id;
892*4882a593Smuzhiyun 		u8		sb_cq_index;
893*4882a593Smuzhiyun 	} rx;
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 	/* CID context in the host memory */
896*4882a593Smuzhiyun 	struct eth_context *cxts[BNX2X_MULTI_TX_COS];
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	/* maximum number of cos supported by hardware */
899*4882a593Smuzhiyun 	u8 max_cos;
900*4882a593Smuzhiyun };
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun struct bnx2x_queue_terminate_params {
903*4882a593Smuzhiyun 	/* index within the tx_only cids of this queue object */
904*4882a593Smuzhiyun 	u8 cid_index;
905*4882a593Smuzhiyun };
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun struct bnx2x_queue_cfc_del_params {
908*4882a593Smuzhiyun 	/* index within the tx_only cids of this queue object */
909*4882a593Smuzhiyun 	u8 cid_index;
910*4882a593Smuzhiyun };
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun struct bnx2x_queue_update_params {
913*4882a593Smuzhiyun 	unsigned long	update_flags; /* BNX2X_Q_UPDATE_XX bits */
914*4882a593Smuzhiyun 	u16		def_vlan;
915*4882a593Smuzhiyun 	u16		silent_removal_value;
916*4882a593Smuzhiyun 	u16		silent_removal_mask;
917*4882a593Smuzhiyun /* index within the tx_only cids of this queue object */
918*4882a593Smuzhiyun 	u8		cid_index;
919*4882a593Smuzhiyun };
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun struct bnx2x_queue_update_tpa_params {
922*4882a593Smuzhiyun 	dma_addr_t sge_map;
923*4882a593Smuzhiyun 	u8 update_ipv4;
924*4882a593Smuzhiyun 	u8 update_ipv6;
925*4882a593Smuzhiyun 	u8 max_tpa_queues;
926*4882a593Smuzhiyun 	u8 max_sges_pkt;
927*4882a593Smuzhiyun 	u8 complete_on_both_clients;
928*4882a593Smuzhiyun 	u8 dont_verify_thr;
929*4882a593Smuzhiyun 	u8 tpa_mode;
930*4882a593Smuzhiyun 	u8 _pad;
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	u16 sge_buff_sz;
933*4882a593Smuzhiyun 	u16 max_agg_sz;
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun 	u16 sge_pause_thr_low;
936*4882a593Smuzhiyun 	u16 sge_pause_thr_high;
937*4882a593Smuzhiyun };
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun struct rxq_pause_params {
940*4882a593Smuzhiyun 	u16		bd_th_lo;
941*4882a593Smuzhiyun 	u16		bd_th_hi;
942*4882a593Smuzhiyun 	u16		rcq_th_lo;
943*4882a593Smuzhiyun 	u16		rcq_th_hi;
944*4882a593Smuzhiyun 	u16		sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
945*4882a593Smuzhiyun 	u16		sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
946*4882a593Smuzhiyun 	u16		pri_map;
947*4882a593Smuzhiyun };
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun /* general */
950*4882a593Smuzhiyun struct bnx2x_general_setup_params {
951*4882a593Smuzhiyun 	/* valid iff BNX2X_Q_FLG_STATS */
952*4882a593Smuzhiyun 	u8		stat_id;
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	u8		spcl_id;
955*4882a593Smuzhiyun 	u16		mtu;
956*4882a593Smuzhiyun 	u8		cos;
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	u8		fp_hsi;
959*4882a593Smuzhiyun };
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun struct bnx2x_rxq_setup_params {
962*4882a593Smuzhiyun 	/* dma */
963*4882a593Smuzhiyun 	dma_addr_t	dscr_map;
964*4882a593Smuzhiyun 	dma_addr_t	sge_map;
965*4882a593Smuzhiyun 	dma_addr_t	rcq_map;
966*4882a593Smuzhiyun 	dma_addr_t	rcq_np_map;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	u16		drop_flags;
969*4882a593Smuzhiyun 	u16		buf_sz;
970*4882a593Smuzhiyun 	u8		fw_sb_id;
971*4882a593Smuzhiyun 	u8		cl_qzone_id;
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun 	/* valid iff BNX2X_Q_FLG_TPA */
974*4882a593Smuzhiyun 	u16		tpa_agg_sz;
975*4882a593Smuzhiyun 	u16		sge_buf_sz;
976*4882a593Smuzhiyun 	u8		max_sges_pkt;
977*4882a593Smuzhiyun 	u8		max_tpa_queues;
978*4882a593Smuzhiyun 	u8		rss_engine_id;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	/* valid iff BNX2X_Q_FLG_MCAST */
981*4882a593Smuzhiyun 	u8		mcast_engine_id;
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	u8		cache_line_log;
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	u8		sb_cq_index;
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	/* valid iff BXN2X_Q_FLG_SILENT_VLAN_REM */
988*4882a593Smuzhiyun 	u16 silent_removal_value;
989*4882a593Smuzhiyun 	u16 silent_removal_mask;
990*4882a593Smuzhiyun };
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun struct bnx2x_txq_setup_params {
993*4882a593Smuzhiyun 	/* dma */
994*4882a593Smuzhiyun 	dma_addr_t	dscr_map;
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	u8		fw_sb_id;
997*4882a593Smuzhiyun 	u8		sb_cq_index;
998*4882a593Smuzhiyun 	u8		cos;		/* valid iff BNX2X_Q_FLG_COS */
999*4882a593Smuzhiyun 	u16		traffic_type;
1000*4882a593Smuzhiyun 	/* equals to the leading rss client id, used for TX classification*/
1001*4882a593Smuzhiyun 	u8		tss_leading_cl_id;
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
1004*4882a593Smuzhiyun 	u16		default_vlan;
1005*4882a593Smuzhiyun };
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun struct bnx2x_queue_setup_params {
1008*4882a593Smuzhiyun 	struct bnx2x_general_setup_params gen_params;
1009*4882a593Smuzhiyun 	struct bnx2x_txq_setup_params txq_params;
1010*4882a593Smuzhiyun 	struct bnx2x_rxq_setup_params rxq_params;
1011*4882a593Smuzhiyun 	struct rxq_pause_params pause_params;
1012*4882a593Smuzhiyun 	unsigned long flags;
1013*4882a593Smuzhiyun };
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun struct bnx2x_queue_setup_tx_only_params {
1016*4882a593Smuzhiyun 	struct bnx2x_general_setup_params	gen_params;
1017*4882a593Smuzhiyun 	struct bnx2x_txq_setup_params		txq_params;
1018*4882a593Smuzhiyun 	unsigned long				flags;
1019*4882a593Smuzhiyun 	/* index within the tx_only cids of this queue object */
1020*4882a593Smuzhiyun 	u8					cid_index;
1021*4882a593Smuzhiyun };
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun struct bnx2x_queue_state_params {
1024*4882a593Smuzhiyun 	struct bnx2x_queue_sp_obj *q_obj;
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	/* Current command */
1027*4882a593Smuzhiyun 	enum bnx2x_queue_cmd cmd;
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun 	/* may have RAMROD_COMP_WAIT set only */
1030*4882a593Smuzhiyun 	unsigned long ramrod_flags;
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	/* Params according to the current command */
1033*4882a593Smuzhiyun 	union {
1034*4882a593Smuzhiyun 		struct bnx2x_queue_update_params	update;
1035*4882a593Smuzhiyun 		struct bnx2x_queue_update_tpa_params    update_tpa;
1036*4882a593Smuzhiyun 		struct bnx2x_queue_setup_params		setup;
1037*4882a593Smuzhiyun 		struct bnx2x_queue_init_params		init;
1038*4882a593Smuzhiyun 		struct bnx2x_queue_setup_tx_only_params	tx_only;
1039*4882a593Smuzhiyun 		struct bnx2x_queue_terminate_params	terminate;
1040*4882a593Smuzhiyun 		struct bnx2x_queue_cfc_del_params	cfc_del;
1041*4882a593Smuzhiyun 	} params;
1042*4882a593Smuzhiyun };
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun struct bnx2x_viflist_params {
1045*4882a593Smuzhiyun 	u8 echo_res;
1046*4882a593Smuzhiyun 	u8 func_bit_map_res;
1047*4882a593Smuzhiyun };
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun struct bnx2x_queue_sp_obj {
1050*4882a593Smuzhiyun 	u32		cids[BNX2X_MULTI_TX_COS];
1051*4882a593Smuzhiyun 	u8		cl_id;
1052*4882a593Smuzhiyun 	u8		func_id;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	/* number of traffic classes supported by queue.
1055*4882a593Smuzhiyun 	 * The primary connection of the queue supports the first traffic
1056*4882a593Smuzhiyun 	 * class. Any further traffic class is supported by a tx-only
1057*4882a593Smuzhiyun 	 * connection.
1058*4882a593Smuzhiyun 	 *
1059*4882a593Smuzhiyun 	 * Therefore max_cos is also a number of valid entries in the cids
1060*4882a593Smuzhiyun 	 * array.
1061*4882a593Smuzhiyun 	 */
1062*4882a593Smuzhiyun 	u8 max_cos;
1063*4882a593Smuzhiyun 	u8 num_tx_only, next_tx_only;
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	enum bnx2x_q_state state, next_state;
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	/* bits from enum bnx2x_q_type */
1068*4882a593Smuzhiyun 	unsigned long	type;
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 	/* BNX2X_Q_CMD_XX bits. This object implements "one
1071*4882a593Smuzhiyun 	 * pending" paradigm but for debug and tracing purposes it's
1072*4882a593Smuzhiyun 	 * more convenient to have different bits for different
1073*4882a593Smuzhiyun 	 * commands.
1074*4882a593Smuzhiyun 	 */
1075*4882a593Smuzhiyun 	unsigned long	pending;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	/* Buffer to use as a ramrod data and its mapping */
1078*4882a593Smuzhiyun 	void		*rdata;
1079*4882a593Smuzhiyun 	dma_addr_t	rdata_mapping;
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	/**
1082*4882a593Smuzhiyun 	 * Performs one state change according to the given parameters.
1083*4882a593Smuzhiyun 	 *
1084*4882a593Smuzhiyun 	 * @return 0 in case of success and negative value otherwise.
1085*4882a593Smuzhiyun 	 */
1086*4882a593Smuzhiyun 	int (*send_cmd)(struct bnx2x *bp,
1087*4882a593Smuzhiyun 			struct bnx2x_queue_state_params *params);
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	/**
1090*4882a593Smuzhiyun 	 * Sets the pending bit according to the requested transition.
1091*4882a593Smuzhiyun 	 */
1092*4882a593Smuzhiyun 	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
1093*4882a593Smuzhiyun 			   struct bnx2x_queue_state_params *params);
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	/**
1096*4882a593Smuzhiyun 	 * Checks that the requested state transition is legal.
1097*4882a593Smuzhiyun 	 */
1098*4882a593Smuzhiyun 	int (*check_transition)(struct bnx2x *bp,
1099*4882a593Smuzhiyun 				struct bnx2x_queue_sp_obj *o,
1100*4882a593Smuzhiyun 				struct bnx2x_queue_state_params *params);
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	/**
1103*4882a593Smuzhiyun 	 * Completes the pending command.
1104*4882a593Smuzhiyun 	 */
1105*4882a593Smuzhiyun 	int (*complete_cmd)(struct bnx2x *bp,
1106*4882a593Smuzhiyun 			    struct bnx2x_queue_sp_obj *o,
1107*4882a593Smuzhiyun 			    enum bnx2x_queue_cmd);
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	int (*wait_comp)(struct bnx2x *bp,
1110*4882a593Smuzhiyun 			 struct bnx2x_queue_sp_obj *o,
1111*4882a593Smuzhiyun 			 enum bnx2x_queue_cmd cmd);
1112*4882a593Smuzhiyun };
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun /********************** Function state update *********************************/
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun /* UPDATE command options */
1117*4882a593Smuzhiyun enum {
1118*4882a593Smuzhiyun 	BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
1119*4882a593Smuzhiyun 	BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
1120*4882a593Smuzhiyun 	BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
1121*4882a593Smuzhiyun 	BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
1122*4882a593Smuzhiyun 	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
1123*4882a593Smuzhiyun 	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
1124*4882a593Smuzhiyun 	BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
1125*4882a593Smuzhiyun 	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
1126*4882a593Smuzhiyun 	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
1127*4882a593Smuzhiyun 	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
1128*4882a593Smuzhiyun 	BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
1129*4882a593Smuzhiyun };
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun /* Allowed Function states */
1132*4882a593Smuzhiyun enum bnx2x_func_state {
1133*4882a593Smuzhiyun 	BNX2X_F_STATE_RESET,
1134*4882a593Smuzhiyun 	BNX2X_F_STATE_INITIALIZED,
1135*4882a593Smuzhiyun 	BNX2X_F_STATE_STARTED,
1136*4882a593Smuzhiyun 	BNX2X_F_STATE_TX_STOPPED,
1137*4882a593Smuzhiyun 	BNX2X_F_STATE_MAX,
1138*4882a593Smuzhiyun };
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun /* Allowed Function commands */
1141*4882a593Smuzhiyun enum bnx2x_func_cmd {
1142*4882a593Smuzhiyun 	BNX2X_F_CMD_HW_INIT,
1143*4882a593Smuzhiyun 	BNX2X_F_CMD_START,
1144*4882a593Smuzhiyun 	BNX2X_F_CMD_STOP,
1145*4882a593Smuzhiyun 	BNX2X_F_CMD_HW_RESET,
1146*4882a593Smuzhiyun 	BNX2X_F_CMD_AFEX_UPDATE,
1147*4882a593Smuzhiyun 	BNX2X_F_CMD_AFEX_VIFLISTS,
1148*4882a593Smuzhiyun 	BNX2X_F_CMD_TX_STOP,
1149*4882a593Smuzhiyun 	BNX2X_F_CMD_TX_START,
1150*4882a593Smuzhiyun 	BNX2X_F_CMD_SWITCH_UPDATE,
1151*4882a593Smuzhiyun 	BNX2X_F_CMD_SET_TIMESYNC,
1152*4882a593Smuzhiyun 	BNX2X_F_CMD_MAX,
1153*4882a593Smuzhiyun };
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun struct bnx2x_func_hw_init_params {
1156*4882a593Smuzhiyun 	/* A load phase returned by MCP.
1157*4882a593Smuzhiyun 	 *
1158*4882a593Smuzhiyun 	 * May be:
1159*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
1160*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_COMMON
1161*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_PORT
1162*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
1163*4882a593Smuzhiyun 	 */
1164*4882a593Smuzhiyun 	u32 load_phase;
1165*4882a593Smuzhiyun };
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun struct bnx2x_func_hw_reset_params {
1168*4882a593Smuzhiyun 	/* A load phase returned by MCP.
1169*4882a593Smuzhiyun 	 *
1170*4882a593Smuzhiyun 	 * May be:
1171*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
1172*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_COMMON
1173*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_PORT
1174*4882a593Smuzhiyun 	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
1175*4882a593Smuzhiyun 	 */
1176*4882a593Smuzhiyun 	u32 reset_phase;
1177*4882a593Smuzhiyun };
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun struct bnx2x_func_start_params {
1180*4882a593Smuzhiyun 	/* Multi Function mode:
1181*4882a593Smuzhiyun 	 *	- Single Function
1182*4882a593Smuzhiyun 	 *	- Switch Dependent
1183*4882a593Smuzhiyun 	 *	- Switch Independent
1184*4882a593Smuzhiyun 	 */
1185*4882a593Smuzhiyun 	u16 mf_mode;
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	/* Switch Dependent mode outer VLAN tag */
1188*4882a593Smuzhiyun 	u16 sd_vlan_tag;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	/* Function cos mode */
1191*4882a593Smuzhiyun 	u8 network_cos_mode;
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	/* UDP dest port for VXLAN */
1194*4882a593Smuzhiyun 	u16 vxlan_dst_port;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	/* UDP dest port for Geneve */
1197*4882a593Smuzhiyun 	u16 geneve_dst_port;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	/* Enable inner Rx classifications for L2GRE packets */
1200*4882a593Smuzhiyun 	u8 inner_clss_l2gre;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	/* Enable inner Rx classifications for L2-Geneve packets */
1203*4882a593Smuzhiyun 	u8 inner_clss_l2geneve;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	/* Enable inner Rx classification for vxlan packets */
1206*4882a593Smuzhiyun 	u8 inner_clss_vxlan;
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	/* Enable RSS according to inner header */
1209*4882a593Smuzhiyun 	u8 inner_rss;
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	/* Allows accepting of packets failing MF classification, possibly
1212*4882a593Smuzhiyun 	 * only matching a given ethertype
1213*4882a593Smuzhiyun 	 */
1214*4882a593Smuzhiyun 	u8 class_fail;
1215*4882a593Smuzhiyun 	u16 class_fail_ethtype;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	/* Override priority of output packets */
1218*4882a593Smuzhiyun 	u8 sd_vlan_force_pri;
1219*4882a593Smuzhiyun 	u8 sd_vlan_force_pri_val;
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	/* Replace vlan's ethertype */
1222*4882a593Smuzhiyun 	u16 sd_vlan_eth_type;
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	/* Prevent inner vlans from being added by FW */
1225*4882a593Smuzhiyun 	u8 no_added_tags;
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	/* Inner-to-Outer vlan priority mapping */
1228*4882a593Smuzhiyun 	u8 c2s_pri[MAX_VLAN_PRIORITIES];
1229*4882a593Smuzhiyun 	u8 c2s_pri_default;
1230*4882a593Smuzhiyun 	u8 c2s_pri_valid;
1231*4882a593Smuzhiyun };
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun struct bnx2x_func_switch_update_params {
1234*4882a593Smuzhiyun 	unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
1235*4882a593Smuzhiyun 	u16 vlan;
1236*4882a593Smuzhiyun 	u16 vlan_eth_type;
1237*4882a593Smuzhiyun 	u8 vlan_force_prio;
1238*4882a593Smuzhiyun 	u16 vxlan_dst_port;
1239*4882a593Smuzhiyun 	u16 geneve_dst_port;
1240*4882a593Smuzhiyun };
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun struct bnx2x_func_afex_update_params {
1243*4882a593Smuzhiyun 	u16 vif_id;
1244*4882a593Smuzhiyun 	u16 afex_default_vlan;
1245*4882a593Smuzhiyun 	u8 allowed_priorities;
1246*4882a593Smuzhiyun };
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun struct bnx2x_func_afex_viflists_params {
1249*4882a593Smuzhiyun 	u16 vif_list_index;
1250*4882a593Smuzhiyun 	u8 func_bit_map;
1251*4882a593Smuzhiyun 	u8 afex_vif_list_command;
1252*4882a593Smuzhiyun 	u8 func_to_clear;
1253*4882a593Smuzhiyun };
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun struct bnx2x_func_tx_start_params {
1256*4882a593Smuzhiyun 	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
1257*4882a593Smuzhiyun 	u8 dcb_enabled;
1258*4882a593Smuzhiyun 	u8 dcb_version;
1259*4882a593Smuzhiyun 	u8 dont_add_pri_0_en;
1260*4882a593Smuzhiyun 	u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
1261*4882a593Smuzhiyun };
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun struct bnx2x_func_set_timesync_params {
1264*4882a593Smuzhiyun 	/* Reset, set or keep the current drift value */
1265*4882a593Smuzhiyun 	u8 drift_adjust_cmd;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	/* Dec, inc or keep the current offset */
1268*4882a593Smuzhiyun 	u8 offset_cmd;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	/* Drift value direction */
1271*4882a593Smuzhiyun 	u8 add_sub_drift_adjust_value;
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	/* Drift, period and offset values to be used according to the commands
1274*4882a593Smuzhiyun 	 * above.
1275*4882a593Smuzhiyun 	 */
1276*4882a593Smuzhiyun 	u8 drift_adjust_value;
1277*4882a593Smuzhiyun 	u32 drift_adjust_period;
1278*4882a593Smuzhiyun 	u64 offset_delta;
1279*4882a593Smuzhiyun };
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun struct bnx2x_func_state_params {
1282*4882a593Smuzhiyun 	struct bnx2x_func_sp_obj *f_obj;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	/* Current command */
1285*4882a593Smuzhiyun 	enum bnx2x_func_cmd cmd;
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	/* may have RAMROD_COMP_WAIT set only */
1288*4882a593Smuzhiyun 	unsigned long	ramrod_flags;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	/* Params according to the current command */
1291*4882a593Smuzhiyun 	union {
1292*4882a593Smuzhiyun 		struct bnx2x_func_hw_init_params hw_init;
1293*4882a593Smuzhiyun 		struct bnx2x_func_hw_reset_params hw_reset;
1294*4882a593Smuzhiyun 		struct bnx2x_func_start_params start;
1295*4882a593Smuzhiyun 		struct bnx2x_func_switch_update_params switch_update;
1296*4882a593Smuzhiyun 		struct bnx2x_func_afex_update_params afex_update;
1297*4882a593Smuzhiyun 		struct bnx2x_func_afex_viflists_params afex_viflists;
1298*4882a593Smuzhiyun 		struct bnx2x_func_tx_start_params tx_start;
1299*4882a593Smuzhiyun 		struct bnx2x_func_set_timesync_params set_timesync;
1300*4882a593Smuzhiyun 	} params;
1301*4882a593Smuzhiyun };
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun struct bnx2x_func_sp_drv_ops {
1304*4882a593Smuzhiyun 	/* Init tool + runtime initialization:
1305*4882a593Smuzhiyun 	 *      - Common Chip
1306*4882a593Smuzhiyun 	 *      - Common (per Path)
1307*4882a593Smuzhiyun 	 *      - Port
1308*4882a593Smuzhiyun 	 *      - Function phases
1309*4882a593Smuzhiyun 	 */
1310*4882a593Smuzhiyun 	int (*init_hw_cmn_chip)(struct bnx2x *bp);
1311*4882a593Smuzhiyun 	int (*init_hw_cmn)(struct bnx2x *bp);
1312*4882a593Smuzhiyun 	int (*init_hw_port)(struct bnx2x *bp);
1313*4882a593Smuzhiyun 	int (*init_hw_func)(struct bnx2x *bp);
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	/* Reset Function HW: Common, Port, Function phases. */
1316*4882a593Smuzhiyun 	void (*reset_hw_cmn)(struct bnx2x *bp);
1317*4882a593Smuzhiyun 	void (*reset_hw_port)(struct bnx2x *bp);
1318*4882a593Smuzhiyun 	void (*reset_hw_func)(struct bnx2x *bp);
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	/* Init/Free GUNZIP resources */
1321*4882a593Smuzhiyun 	int (*gunzip_init)(struct bnx2x *bp);
1322*4882a593Smuzhiyun 	void (*gunzip_end)(struct bnx2x *bp);
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	/* Prepare/Release FW resources */
1325*4882a593Smuzhiyun 	int (*init_fw)(struct bnx2x *bp);
1326*4882a593Smuzhiyun 	void (*release_fw)(struct bnx2x *bp);
1327*4882a593Smuzhiyun };
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun struct bnx2x_func_sp_obj {
1330*4882a593Smuzhiyun 	enum bnx2x_func_state	state, next_state;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	/* BNX2X_FUNC_CMD_XX bits. This object implements "one
1333*4882a593Smuzhiyun 	 * pending" paradigm but for debug and tracing purposes it's
1334*4882a593Smuzhiyun 	 * more convenient to have different bits for different
1335*4882a593Smuzhiyun 	 * commands.
1336*4882a593Smuzhiyun 	 */
1337*4882a593Smuzhiyun 	unsigned long		pending;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	/* Buffer to use as a ramrod data and its mapping */
1340*4882a593Smuzhiyun 	void			*rdata;
1341*4882a593Smuzhiyun 	dma_addr_t		rdata_mapping;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	/* Buffer to use as a afex ramrod data and its mapping.
1344*4882a593Smuzhiyun 	 * This can't be same rdata as above because afex ramrod requests
1345*4882a593Smuzhiyun 	 * can arrive to the object in parallel to other ramrod requests.
1346*4882a593Smuzhiyun 	 */
1347*4882a593Smuzhiyun 	void			*afex_rdata;
1348*4882a593Smuzhiyun 	dma_addr_t		afex_rdata_mapping;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	/* this mutex validates that when pending flag is taken, the next
1351*4882a593Smuzhiyun 	 * ramrod to be sent will be the one set the pending bit
1352*4882a593Smuzhiyun 	 */
1353*4882a593Smuzhiyun 	struct mutex		one_pending_mutex;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	/* Driver interface */
1356*4882a593Smuzhiyun 	struct bnx2x_func_sp_drv_ops	*drv;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	/**
1359*4882a593Smuzhiyun 	 * Performs one state change according to the given parameters.
1360*4882a593Smuzhiyun 	 *
1361*4882a593Smuzhiyun 	 * @return 0 in case of success and negative value otherwise.
1362*4882a593Smuzhiyun 	 */
1363*4882a593Smuzhiyun 	int (*send_cmd)(struct bnx2x *bp,
1364*4882a593Smuzhiyun 			struct bnx2x_func_state_params *params);
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 	/**
1367*4882a593Smuzhiyun 	 * Checks that the requested state transition is legal.
1368*4882a593Smuzhiyun 	 */
1369*4882a593Smuzhiyun 	int (*check_transition)(struct bnx2x *bp,
1370*4882a593Smuzhiyun 				struct bnx2x_func_sp_obj *o,
1371*4882a593Smuzhiyun 				struct bnx2x_func_state_params *params);
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 	/**
1374*4882a593Smuzhiyun 	 * Completes the pending command.
1375*4882a593Smuzhiyun 	 */
1376*4882a593Smuzhiyun 	int (*complete_cmd)(struct bnx2x *bp,
1377*4882a593Smuzhiyun 			    struct bnx2x_func_sp_obj *o,
1378*4882a593Smuzhiyun 			    enum bnx2x_func_cmd cmd);
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
1381*4882a593Smuzhiyun 			 enum bnx2x_func_cmd cmd);
1382*4882a593Smuzhiyun };
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun /********************** Interfaces ********************************************/
1385*4882a593Smuzhiyun /* Queueable objects set */
1386*4882a593Smuzhiyun union bnx2x_qable_obj {
1387*4882a593Smuzhiyun 	struct bnx2x_vlan_mac_obj vlan_mac;
1388*4882a593Smuzhiyun };
1389*4882a593Smuzhiyun /************** Function state update *********/
1390*4882a593Smuzhiyun void bnx2x_init_func_obj(struct bnx2x *bp,
1391*4882a593Smuzhiyun 			 struct bnx2x_func_sp_obj *obj,
1392*4882a593Smuzhiyun 			 void *rdata, dma_addr_t rdata_mapping,
1393*4882a593Smuzhiyun 			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
1394*4882a593Smuzhiyun 			 struct bnx2x_func_sp_drv_ops *drv_iface);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun int bnx2x_func_state_change(struct bnx2x *bp,
1397*4882a593Smuzhiyun 			    struct bnx2x_func_state_params *params);
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
1400*4882a593Smuzhiyun 					   struct bnx2x_func_sp_obj *o);
1401*4882a593Smuzhiyun /******************* Queue State **************/
1402*4882a593Smuzhiyun void bnx2x_init_queue_obj(struct bnx2x *bp,
1403*4882a593Smuzhiyun 			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
1404*4882a593Smuzhiyun 			  u8 cid_cnt, u8 func_id, void *rdata,
1405*4882a593Smuzhiyun 			  dma_addr_t rdata_mapping, unsigned long type);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun int bnx2x_queue_state_change(struct bnx2x *bp,
1408*4882a593Smuzhiyun 			     struct bnx2x_queue_state_params *params);
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun int bnx2x_get_q_logical_state(struct bnx2x *bp,
1411*4882a593Smuzhiyun 			       struct bnx2x_queue_sp_obj *obj);
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun /********************* VLAN-MAC ****************/
1414*4882a593Smuzhiyun void bnx2x_init_mac_obj(struct bnx2x *bp,
1415*4882a593Smuzhiyun 			struct bnx2x_vlan_mac_obj *mac_obj,
1416*4882a593Smuzhiyun 			u8 cl_id, u32 cid, u8 func_id, void *rdata,
1417*4882a593Smuzhiyun 			dma_addr_t rdata_mapping, int state,
1418*4882a593Smuzhiyun 			unsigned long *pstate, bnx2x_obj_type type,
1419*4882a593Smuzhiyun 			struct bnx2x_credit_pool_obj *macs_pool);
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun void bnx2x_init_vlan_obj(struct bnx2x *bp,
1422*4882a593Smuzhiyun 			 struct bnx2x_vlan_mac_obj *vlan_obj,
1423*4882a593Smuzhiyun 			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1424*4882a593Smuzhiyun 			 dma_addr_t rdata_mapping, int state,
1425*4882a593Smuzhiyun 			 unsigned long *pstate, bnx2x_obj_type type,
1426*4882a593Smuzhiyun 			 struct bnx2x_credit_pool_obj *vlans_pool);
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1429*4882a593Smuzhiyun 			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1430*4882a593Smuzhiyun 			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
1431*4882a593Smuzhiyun 			     dma_addr_t rdata_mapping, int state,
1432*4882a593Smuzhiyun 			     unsigned long *pstate, bnx2x_obj_type type,
1433*4882a593Smuzhiyun 			     struct bnx2x_credit_pool_obj *macs_pool,
1434*4882a593Smuzhiyun 			     struct bnx2x_credit_pool_obj *vlans_pool);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
1437*4882a593Smuzhiyun 					struct bnx2x_vlan_mac_obj *o);
1438*4882a593Smuzhiyun void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
1439*4882a593Smuzhiyun 				  struct bnx2x_vlan_mac_obj *o);
1440*4882a593Smuzhiyun int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
1441*4882a593Smuzhiyun 				struct bnx2x_vlan_mac_obj *o);
1442*4882a593Smuzhiyun int bnx2x_config_vlan_mac(struct bnx2x *bp,
1443*4882a593Smuzhiyun 			   struct bnx2x_vlan_mac_ramrod_params *p);
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun int bnx2x_vlan_mac_move(struct bnx2x *bp,
1446*4882a593Smuzhiyun 			struct bnx2x_vlan_mac_ramrod_params *p,
1447*4882a593Smuzhiyun 			struct bnx2x_vlan_mac_obj *dest_o);
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun /********************* RX MODE ****************/
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
1452*4882a593Smuzhiyun 			    struct bnx2x_rx_mode_obj *o);
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun /**
1455*4882a593Smuzhiyun  * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
1456*4882a593Smuzhiyun  *
1457*4882a593Smuzhiyun  * @p: Command parameters
1458*4882a593Smuzhiyun  *
1459*4882a593Smuzhiyun  * Return: 0 - if operation was successful and there is no pending completions,
1460*4882a593Smuzhiyun  *         positive number - if there are pending completions,
1461*4882a593Smuzhiyun  *         negative - if there were errors
1462*4882a593Smuzhiyun  */
1463*4882a593Smuzhiyun int bnx2x_config_rx_mode(struct bnx2x *bp,
1464*4882a593Smuzhiyun 			 struct bnx2x_rx_mode_ramrod_params *p);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun /****************** MULTICASTS ****************/
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun void bnx2x_init_mcast_obj(struct bnx2x *bp,
1469*4882a593Smuzhiyun 			  struct bnx2x_mcast_obj *mcast_obj,
1470*4882a593Smuzhiyun 			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
1471*4882a593Smuzhiyun 			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
1472*4882a593Smuzhiyun 			  int state, unsigned long *pstate,
1473*4882a593Smuzhiyun 			  bnx2x_obj_type type);
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun /**
1476*4882a593Smuzhiyun  * bnx2x_config_mcast - Configure multicast MACs list.
1477*4882a593Smuzhiyun  *
1478*4882a593Smuzhiyun  * @cmd: command to execute: BNX2X_MCAST_CMD_X
1479*4882a593Smuzhiyun  *
1480*4882a593Smuzhiyun  * May configure a new list
1481*4882a593Smuzhiyun  * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
1482*4882a593Smuzhiyun  * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
1483*4882a593Smuzhiyun  * configuration, continue to execute the pending commands
1484*4882a593Smuzhiyun  * (BNX2X_MCAST_CMD_CONT).
1485*4882a593Smuzhiyun  *
1486*4882a593Smuzhiyun  * If previous command is still pending or if number of MACs to
1487*4882a593Smuzhiyun  * configure is more that maximum number of MACs in one command,
1488*4882a593Smuzhiyun  * the current command will be enqueued to the tail of the
1489*4882a593Smuzhiyun  * pending commands list.
1490*4882a593Smuzhiyun  *
1491*4882a593Smuzhiyun  * Return: 0 is operation was successful and there are no pending completions,
1492*4882a593Smuzhiyun  *         negative if there were errors, positive if there are pending
1493*4882a593Smuzhiyun  *         completions.
1494*4882a593Smuzhiyun  */
1495*4882a593Smuzhiyun int bnx2x_config_mcast(struct bnx2x *bp,
1496*4882a593Smuzhiyun 		       struct bnx2x_mcast_ramrod_params *p,
1497*4882a593Smuzhiyun 		       enum bnx2x_mcast_cmd cmd);
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun /****************** CREDIT POOL ****************/
1500*4882a593Smuzhiyun void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
1501*4882a593Smuzhiyun 				struct bnx2x_credit_pool_obj *p, u8 func_id,
1502*4882a593Smuzhiyun 				u8 func_num);
1503*4882a593Smuzhiyun void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
1504*4882a593Smuzhiyun 				 struct bnx2x_credit_pool_obj *p, u8 func_id,
1505*4882a593Smuzhiyun 				 u8 func_num);
1506*4882a593Smuzhiyun void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
1507*4882a593Smuzhiyun 			    int base, int credit);
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun /****************** RSS CONFIGURATION ****************/
1510*4882a593Smuzhiyun void bnx2x_init_rss_config_obj(struct bnx2x *bp,
1511*4882a593Smuzhiyun 			       struct bnx2x_rss_config_obj *rss_obj,
1512*4882a593Smuzhiyun 			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
1513*4882a593Smuzhiyun 			       void *rdata, dma_addr_t rdata_mapping,
1514*4882a593Smuzhiyun 			       int state, unsigned long *pstate,
1515*4882a593Smuzhiyun 			       bnx2x_obj_type type);
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun /**
1518*4882a593Smuzhiyun  * bnx2x_config_rss - Updates RSS configuration according to provided parameters
1519*4882a593Smuzhiyun  *
1520*4882a593Smuzhiyun  * Return: 0 in case of success
1521*4882a593Smuzhiyun  */
1522*4882a593Smuzhiyun int bnx2x_config_rss(struct bnx2x *bp,
1523*4882a593Smuzhiyun 		     struct bnx2x_config_rss_params *p);
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun /**
1526*4882a593Smuzhiyun  * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
1527*4882a593Smuzhiyun  *
1528*4882a593Smuzhiyun  * @ind_table: buffer to fill with the current indirection
1529*4882a593Smuzhiyun  *                  table content. Should be at least
1530*4882a593Smuzhiyun  *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
1531*4882a593Smuzhiyun  */
1532*4882a593Smuzhiyun void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
1533*4882a593Smuzhiyun 			     u8 *ind_table);
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun #define PF_MAC_CREDIT_E2(bp, func_num)					\
1536*4882a593Smuzhiyun 	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
1537*4882a593Smuzhiyun 	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun #define BNX2X_VFS_VLAN_CREDIT(bp)	\
1540*4882a593Smuzhiyun 	(GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT)
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun #define PF_VLAN_CREDIT_E2(bp, func_num)					 \
1543*4882a593Smuzhiyun 	((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) /	\
1544*4882a593Smuzhiyun 	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun #endif /* BNX2X_SP_VERBS */
1547