/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_SP_H
#define _QED_SP_H

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"

enum spq_mode {
	QED_SPQ_MODE_BLOCK,     /* Client will poll a designated mem. address */
	QED_SPQ_MODE_CB,        /* Client supplies a callback */
	QED_SPQ_MODE_EBLOCK,    /* QED should block until completion */
};

struct qed_spq_comp_cb {
	void	(*function)(struct qed_hwfn *,
			    void *,
			    union event_ring_data *,
			    u8 fw_return_code);
	void	*cookie;
};
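
/* Illustrative sketch of the QED_SPQ_MODE_CB completion model: the client
 * supplies a qed_spq_comp_cb whose function is invoked once the ramrod's EQ
 * completion arrives, with the cookie passed back verbatim. The handler and
 * context names below are hypothetical, not part of this interface.
 *
 *	static void my_ramrod_done(struct qed_hwfn *p_hwfn, void *cookie,
 *				   union event_ring_data *data,
 *				   u8 fw_return_code)
 *	{
 *		struct my_ctx *ctx = cookie;	(hypothetical client context)
 *
 *		(inspect fw_return_code / data, then resume the client flow)
 *	}
 *
 *	struct qed_spq_comp_cb comp = {
 *		.function	= my_ramrod_done,
 *		.cookie		= ctx,
 *	};
 *
 * The address of such a struct is passed via qed_sp_init_data::p_comp_data
 * together with comp_mode == QED_SPQ_MODE_CB (see below).
 */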

/**
 * @brief qed_eth_cqe_completion - handles the completion of a
 *        ramrod on the CQE ring
 *
 * @param p_hwfn
 * @param cqe
 *
 * @return int
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe);

/**
 *  @file
 *
 *  QED Slow-hwfn queue interface
 */

union ramrod_data {
	struct pf_start_ramrod_data pf_start;
	struct pf_update_ramrod_data pf_update;
	struct rx_queue_start_ramrod_data rx_queue_start;
	struct rx_queue_update_ramrod_data rx_queue_update;
	struct rx_queue_stop_ramrod_data rx_queue_stop;
	struct tx_queue_start_ramrod_data tx_queue_start;
	struct tx_queue_stop_ramrod_data tx_queue_stop;
	struct vport_start_ramrod_data vport_start;
	struct vport_stop_ramrod_data vport_stop;
	struct rx_update_gft_filter_data rx_update_gft;
	struct vport_update_ramrod_data vport_update;
	struct core_rx_start_ramrod_data core_rx_queue_start;
	struct core_rx_stop_ramrod_data core_rx_queue_stop;
	struct core_tx_start_ramrod_data core_tx_queue_start;
	struct core_tx_stop_ramrod_data core_tx_queue_stop;
	struct vport_filter_update_ramrod_data vport_filter_update;

	struct rdma_init_func_ramrod_data rdma_init_func;
	struct rdma_close_func_ramrod_data rdma_close_func;
	struct rdma_register_tid_ramrod_data rdma_register_tid;
	struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
	struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
	struct roce_create_qp_req_ramrod_data roce_create_qp_req;
	struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
	struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
	struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
	struct roce_query_qp_req_ramrod_data roce_query_qp_req;
	struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
	struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
	struct roce_init_func_ramrod_data roce_init_func;
	struct rdma_create_cq_ramrod_data rdma_create_cq;
	struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
	struct rdma_srq_create_ramrod_data rdma_create_srq;
	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
	struct iwarp_create_qp_ramrod_data iwarp_create_qp;
	struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
	struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
	struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
	struct iwarp_init_func_ramrod_data iwarp_init_func;
	struct fcoe_init_ramrod_params fcoe_init;
	struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
	struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
	struct fcoe_stat_ramrod_params fcoe_stat;

	struct iscsi_init_ramrod_params iscsi_init;
	struct iscsi_spe_conn_offload iscsi_conn_offload;
	struct iscsi_conn_update_ramrod_params iscsi_conn_update;
	struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
	struct iscsi_spe_conn_termination iscsi_conn_terminate;

	struct vf_start_ramrod_data vf_start;
	struct vf_stop_ramrod_data vf_stop;
};

#define EQ_MAX_CREDIT   0xffffffff

enum spq_priority {
	QED_SPQ_PRIORITY_NORMAL,
	QED_SPQ_PRIORITY_HIGH,
};

union qed_spq_req_comp {
	struct qed_spq_comp_cb	cb;
	u64			*done_addr;
};

struct qed_spq_comp_done {
	unsigned int	done;
	u8		fw_return_code;
};

struct qed_spq_entry {
	struct list_head		list;

	u8				flags;

	/* HSI slow path element */
	struct slow_path_element	elem;

	union ramrod_data		ramrod;

	enum spq_priority		priority;

	/* pending queue for this entry */
	struct list_head		*queue;

	enum spq_mode			comp_mode;
	struct qed_spq_comp_cb		comp_cb;
	struct qed_spq_comp_done	comp_done; /* SPQ_MODE_EBLOCK */

	/* Posted entry for unlimited list entry in EBLOCK mode */
	struct qed_spq_entry		*post_ent;
};

struct qed_eq {
	struct qed_chain	chain;
	u8			eq_sb_index;    /* index within the SB */
	__le16			*p_fw_cons;     /* ptr to index value */
};

struct qed_consq {
	struct qed_chain chain;
};

typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode,
				     __le16 echo, union event_ring_data *data,
				     u8 fw_return_code);

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb);

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id);
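
/* Illustrative sketch of async-completion registration, assuming a handler
 * name of our own; PROTOCOLID_COMMON merely stands in for the caller's
 * protocol id. The registered callback receives EQ events that the SPQ
 * cannot match to a posted entry.
 *
 *	static int my_async_event(struct qed_hwfn *p_hwfn, u8 opcode,
 *				  __le16 echo, union event_ring_data *data,
 *				  u8 fw_return_code)
 *	{
 *		(demultiplex on opcode and handle the event)
 *		return 0;
 *	}
 *
 *	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, my_async_event);
 *	...
 *	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
 */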

struct qed_spq {
	spinlock_t		lock; /* SPQ lock */

	struct list_head	unlimited_pending;
	struct list_head	pending;
	struct list_head	completion_pending;
	struct list_head	free_pool;

	struct qed_chain	chain;

	/* allocated dma-able memory for spq entries (+ramrod data) */
	dma_addr_t		p_phys;
	struct qed_spq_entry	*p_virt;

#define SPQ_RING_SIZE \
	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))

	/* Bitmap for handling out-of-order completions */
	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
	u8			comp_bitmap_idx;

	/* Statistics */
	u32			unlimited_pending_count;
	u32			normal_count;
	u32			high_count;
	u32			comp_sent_count;
	u32			comp_count;

	u32			cid;
	u32			db_addr_offset;
	struct core_db_data	db_data;
	qed_spq_async_comp_cb	async_comp_cb[MAX_PROTOCOL_TYPE];
};

/**
 * @brief qed_spq_post - Posts a Slow hwfn request to FW, or queues it as
 *        pending if no room is currently available on the ring.
 *
 * @param p_hwfn
 * @param p_ent
 * @param fw_return_code
 *
 * @return int
 */
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code);

/**
 * @brief qed_spq_alloc - Allocates & initializes the SPQ and EQ.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_spq_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_setup - Reset the SPQ to its start state.
 *
 * @param p_hwfn
 */
void qed_spq_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_free - Deallocates the given SPQ struct.
 *
 * @param p_hwfn
 */
void qed_spq_free(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_get_entry - Obtain an entry from the SPQ
 *        free pool list.
 *
 * @param p_hwfn
 * @param pp_ent
 *
 * @return int
 */
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent);

/**
 * @brief qed_spq_return_entry - Return an entry to the SPQ free
 *        pool list.
 *
 * @param p_hwfn
 * @param p_ent
 */
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent);

/**
 * @brief qed_eq_alloc - Allocates & initializes an EQ struct
 *
 * @param p_hwfn
 * @param num_elem number of elements in the eq
 *
 * @return int
 */
int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);

/**
 * @brief qed_eq_setup - Reset the EQ to its start state.
 *
 * @param p_hwfn
 */
void qed_eq_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_eq_free - Deallocates the given EQ struct.
 *
 * @param p_hwfn
 */
void qed_eq_free(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_eq_prod_update - Update the FW with the default EQ producer.
 *
 * @param p_hwfn
 * @param prod
 */
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod);

/**
 * @brief qed_eq_completion - Completes currently pending EQ elements
 *
 * @param p_hwfn
 * @param cookie
 *
 * @return int
 */
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie);

/**
 * @brief qed_spq_completion - Completes a single event
 *
 * @param p_hwfn
 * @param echo - echo value from cookie (used for determining completion)
 * @param fw_return_code
 * @param p_data - data from cookie (used in callback function if applicable)
 *
 * @return int
 */
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data);

/**
 * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
 *
 * @param p_hwfn
 *
 * @return u32 - SPQ CID
 */
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_alloc - Allocates & initializes a ConsQ
 *        struct
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_consq_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_setup - Reset the ConsQ to its start state.
 *
 * @param p_hwfn
 */
void qed_consq_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_free - Deallocates the given ConsQ struct.
 *
 * @param p_hwfn
 */
void qed_consq_free(struct qed_hwfn *p_hwfn);

int qed_spq_pend_post(struct qed_hwfn *p_hwfn);

/**
 * @file
 *
 * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
 */

#define QED_SP_EQ_COMPLETION  0x01
#define QED_SP_CQE_COMPLETION 0x02

struct qed_sp_init_data {
	u32			cid;
	u16			opaque_fid;

	/* Information regarding operation upon sending & completion */
	enum spq_mode		comp_mode;
	struct qed_spq_comp_cb *p_comp_data;
};

/**
 * @brief qed_sp_destroy_request - Returns a SPQ entry to the pool / frees the
 *        entry if allocated. Should be called in error flows after
 *        initializing the SPQ entry and before posting it.
 *
 * @param p_hwfn
 * @param p_ent
 */
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
			    struct qed_spq_entry *p_ent);

int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data);
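
/* A minimal sketch of the canonical ramrod flow built on this interface,
 * mirroring how a simple ramrod such as qed_sp_heartbeat_ramrod is issued;
 * COMMON_RAMROD_EMPTY and PROTOCOLID_COMMON are HSI constants from
 * qed_hsi.h. On failure between qed_sp_init_request() and qed_spq_post(),
 * qed_sp_destroy_request() returns the entry (see above).
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	struct qed_sp_init_data init_data;
 *	int rc;
 *
 *	memset(&init_data, 0, sizeof(init_data));
 *	init_data.cid = qed_spq_get_cid(p_hwfn);
 *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 *
 *	rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY,
 *				 PROTOCOLID_COMMON, &init_data);
 *	if (rc)
 *		return rc;
 *
 *	(fill p_ent->ramrod.<member> here for non-empty ramrods)
 *
 *	return qed_spq_post(p_hwfn, p_ent, NULL);
 */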

/**
 * @brief qed_sp_pf_start - PF Function Start Ramrod
 *
 * This ramrod is sent to initialize a physical function (PF). It will
 * configure the function related parameters and write its completion to the
 * event ring specified in the parameters.
 *
 * Ramrods complete on the common event ring for the PF. This ring is
 * allocated by the driver on host memory and its parameters are written
 * to the internal RAM of the UStorm by the Function Start Ramrod.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_tunn
 * @param allow_npar_tx_switch
 *
 * @return int
 */
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    bool allow_npar_tx_switch);
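
/* Illustrative call, assuming an init-time context that already holds a
 * valid PTT and an (optional) tunnel configuration; the NPAR tx-switching
 * flag here is arbitrary:
 *
 *	rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, true);
 *	if (rc)
 *		(unwind hw-function init)
 */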

/**
 * @brief qed_sp_pf_update - PF Function Update Ramrod
 *
 * This ramrod updates function-related parameters. Every parameter can be
 * updated independently, according to configuration flags.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_sp_pf_stop - PF Function Stop Ramrod
 *
 * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
 * sent and the last completion written to the PF's Event Ring. This ramrod
 * also deletes the context for the Slow hwfn connection on this PF.
 *
 * @note Not required for first packet.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_sp_pf_update_ufp - PF UFP update Ramrod
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);

int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data);

/**
 * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);

#endif