/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/prefetch.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;
	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

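	/*
	 * Writing all-ones to the alarm/status registers below appears to be
	 * a write-to-clear acknowledge (the alarm handler later in this file
	 * uses the same writes to clear alarms), so any stale events are
	 * discarded before the wanted sources are unmasked further down.
	 */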
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_status);

	readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */

	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
		&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;

}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed before
 * tearing down the vpath.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}

void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;
	u64 val64;

	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
		return;

	vp_reg = fifo->vp_reg;
	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		fifo->tim_tti_cfg1_saved = val64;
		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}

void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg1_saved;

	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
	ring->tim_rti_cfg1_saved = val64;
	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}

void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
	u64 val64 = fifo->tim_tti_cfg3_saved;
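	/*
	 * Assumption (not stated in this file): rtimer is configured in
	 * microseconds, and multiplying by 1000 then dividing by 272
	 * presumably converts it into the TIM block's ~272 ns tick units
	 * before it is programmed below.
	 */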
	u64 timer = (fifo->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);

	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	/* tti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}

void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg3_saved;
	u64 timer = (ring->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);

	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	/* rti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id:  MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * Returns: 0
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{

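	/*
	 * The four set/clear_msix_mask_vect registers each cover every fourth
	 * MSI-X vector: msix_id % 4 selects the register and msix_id >> 2
	 * selects the bit inside it (only the upper 32 bits are written by
	 * __vxge_hw_pio_mem_write32_upper()).
	 */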
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id:  MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * Returns: 0
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_clear - Clear (re-arm) the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id:  MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 * if configured in MSIX oneshot mode
 *
 * Returns: 0
 */
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}

/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{

	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}
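
/*
 * Illustrative use (a hedged sketch, not code from this driver; "intr_type"
 * stands in for whatever probe or module parameter the caller uses):
 *
 *	intr_type = vxge_hw_device_set_intr_type(hldev, intr_type);
 *
 * Any unsupported value is silently coerced to VXGE_HW_INTR_MODE_IRQLINE.
 */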

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

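		/*
		 * Writing the Tx/Rx bits to tim_int_status0 acknowledges any
		 * interrupt already pending for those vectors (the same write
		 * vxge_hw_device_clear_tx_rx() uses); writing the complement
		 * to tim_int_mask0 then leaves exactly those vectors unmasked.
		 */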
		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

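	/*
	 * Only legacy INTA (IRQLINE) mode needs the global traffic bit
	 * unmasked here; in the MSI-X modes traffic interrupts are presumably
	 * delivered through the per-vpath MSI-X vectors instead.
	 */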
	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
			      enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks->crit_err)
		hldev->uld_callbacks->crit_err(hldev,
			type, vp_id);
out:

	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link state is already down, there is nothing to do.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks->link_down)
		hldev->uld_callbacks->link_down(hldev);
exit:
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link state is already up, there is nothing to do.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks->link_up)
		hldev->uld_callbacks->link_up(hldev);
exit:
	return VXGE_HW_OK;
}

/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
			      u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

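	/*
	 * An all-ones readback (VXGE_HW_ALL_FOXES) means the register read
	 * itself failed, i.e. the adapter or its PCIe slot has stopped
	 * responding, so report a slot freeze rather than a normal alarm.
	 */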
	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
			     VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
				     ))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
			     (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
				     ))) {

				sw_stats->error_stats.network_sustained_ok++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_VPATH_ERR,
						alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				 & ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
						VXGE_HW_EVENT_ALARM_CLEARED,
						alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions. It first checks whether (in the
 * shared-IRQ case) the interrupt was raised by the device. Next, it
 * masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

		return VXGE_HW_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
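
/*
 * Illustrative interrupt-handler skeleton built on the helpers above.  This
 * is a hedged sketch only: the handler name, the dev_id convention and the
 * ring-processing step are hypothetical and not taken from this file.
 *
 *	static irqreturn_t my_vxge_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *
 *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
 *			return IRQ_NONE;
 *
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		(schedule NAPI or process the rings here)
 *		return IRQ_HANDLED;
 *	}
 */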

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{

	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
				 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
				&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
				&hldev->common_reg->tim_int_status1);
	}
}

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* The idea behind this design is that keeping the free and reserve
	 * arrays separate also separates the irq and non-irq paths, i.e.
	 * no additional locking is needed when a resource is freed. */

	if (channel->length - channel->free_ptr > 0) {
		swap(channel->reserve_arr, channel->free_arr);
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}

/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channelh: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 *
 */
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtr: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
	prefetch(*dtrh);
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtr:  DTR pointer
 *
 * Returns the dtr to free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenishi() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
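
/*
 * Typical descriptor (dtr) life cycle through the channel helpers above
 * (an illustrative summary, not a literal call sequence from this file):
 *
 *	vxge_hw_channel_dtr_alloc()         take a dtr from the reserve array
 *	vxge_hw_channel_dtr_post()          hand it to the work array
 *	vxge_hw_channel_dtr_try_complete()  peek at the next completed dtr
 *	vxge_hw_channel_dtr_complete()      retire it from the work array
 *	vxge_hw_channel_dtr_free()          return it to the free array
 */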

/**
 * vxge_hw_ring_rxd_reserve	- Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve an Rx descriptor for subsequent filling-in by the driver
 * and posting on the ring's channel via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring * ring,void ** rxdh)1121*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1122*4882a593Smuzhiyun 	void **rxdh)
1123*4882a593Smuzhiyun {
1124*4882a593Smuzhiyun 	enum vxge_hw_status status;
1125*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	channel = &ring->channel;
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 	status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	if (status == VXGE_HW_OK) {
1132*4882a593Smuzhiyun 		struct vxge_hw_ring_rxd_1 *rxdp =
1133*4882a593Smuzhiyun 			(struct vxge_hw_ring_rxd_1 *)*rxdh;
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 		rxdp->control_0	= rxdp->control_1 = 0;
1136*4882a593Smuzhiyun 	}
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	return status;
1139*4882a593Smuzhiyun }
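
/*
 * Usage sketch (illustrative only, not part of the driver): a typical
 * replenish loop reserves descriptors until the ring runs out, fills
 * each one with a receive buffer and posts it back to the adapter.
 * vxge_fill_rxd() below is a hypothetical helper standing in for the
 * driver's buffer allocation and DMA-mapping code.
 *
 *	void *rxdh;
 *
 *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		vxge_fill_rxd(ring, rxdh);		(hypothetical helper)
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */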
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun /**
1142*4882a593Smuzhiyun  * vxge_hw_ring_rxd_free - Free descriptor.
1143*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
1144*4882a593Smuzhiyun  * @rxdh: Descriptor handle.
1145*4882a593Smuzhiyun  *
1146*4882a593Smuzhiyun  * Free	the reserved descriptor. This operation is "symmetrical" to
1147*4882a593Smuzhiyun  * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1148*4882a593Smuzhiyun  * lifecycle.
1149*4882a593Smuzhiyun  *
1150*4882a593Smuzhiyun  * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1151*4882a593Smuzhiyun  * be:
1152*4882a593Smuzhiyun  *
1153*4882a593Smuzhiyun  * - reserved (vxge_hw_ring_rxd_reserve);
1154*4882a593Smuzhiyun  *
1155*4882a593Smuzhiyun  * - posted	(vxge_hw_ring_rxd_post);
1156*4882a593Smuzhiyun  *
1157*4882a593Smuzhiyun  * - completed (vxge_hw_ring_rxd_next_completed);
1158*4882a593Smuzhiyun  *
1159*4882a593Smuzhiyun  * - and recycled again	(vxge_hw_ring_rxd_free).
1160*4882a593Smuzhiyun  *
1161*4882a593Smuzhiyun  * For alternative state transitions and more details please refer to
1162*4882a593Smuzhiyun  * the design doc.
1163*4882a593Smuzhiyun  *
1164*4882a593Smuzhiyun  */
1165*4882a593Smuzhiyun void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1166*4882a593Smuzhiyun {
1167*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	channel = &ring->channel;
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	vxge_hw_channel_dtr_free(channel, rxdh);
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun /**
1176*4882a593Smuzhiyun  * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1177*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
1178*4882a593Smuzhiyun  * @rxdh: Descriptor handle.
1179*4882a593Smuzhiyun  *
1180*4882a593Smuzhiyun  * This routine prepares a rxd and posts
1181*4882a593Smuzhiyun  */
1182*4882a593Smuzhiyun void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	channel = &ring->channel;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	vxge_hw_channel_dtr_post(channel, rxdh);
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun /**
1192*4882a593Smuzhiyun  * vxge_hw_ring_rxd_post_post - Process rxd after post.
1193*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
1194*4882a593Smuzhiyun  * @rxdh: Descriptor handle.
1195*4882a593Smuzhiyun  *
1196*4882a593Smuzhiyun  * Processes rxd after post
1197*4882a593Smuzhiyun  */
1198*4882a593Smuzhiyun void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	if (ring->stats->common_stats.usage_cnt > 0)
1205*4882a593Smuzhiyun 		ring->stats->common_stats.usage_cnt--;
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun /**
1209*4882a593Smuzhiyun  * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1210*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
1211*4882a593Smuzhiyun  * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1212*4882a593Smuzhiyun  *
1213*4882a593Smuzhiyun  * Post	descriptor on the ring.
1214*4882a593Smuzhiyun  * Prior to posting the	descriptor should be filled in accordance with
1215*4882a593Smuzhiyun  * Host/Titan interface specification for a given service (LL, etc.).
1216*4882a593Smuzhiyun  *
1217*4882a593Smuzhiyun  */
1218*4882a593Smuzhiyun void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1221*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	channel = &ring->channel;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	wmb();
1226*4882a593Smuzhiyun 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	vxge_hw_channel_dtr_post(channel, rxdh);
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	if (ring->stats->common_stats.usage_cnt > 0)
1231*4882a593Smuzhiyun 		ring->stats->common_stats.usage_cnt--;
1232*4882a593Smuzhiyun }
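
/*
 * Note on the two posting styles (sketch, assuming the split-phase path
 * is wanted): vxge_hw_ring_rxd_post() performs the write barrier, the
 * ownership hand-over and the channel post in one call. Alternatively
 * the driver may split the sequence, for example to finish filling in
 * the descriptor between the two steps:
 *
 *	vxge_hw_ring_rxd_pre_post(ring, rxdh);
 *	(complete descriptor setup here)
 *	vxge_hw_ring_rxd_post_post_wmb(ring, rxdh);
 */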
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun /**
1235*4882a593Smuzhiyun  * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1236*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
1237*4882a593Smuzhiyun  * @rxdh: Descriptor handle.
1238*4882a593Smuzhiyun  *
1239*4882a593Smuzhiyun  * Processes rxd after post with memory barrier.
1240*4882a593Smuzhiyun  */
1241*4882a593Smuzhiyun void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun 	wmb();
1244*4882a593Smuzhiyun 	vxge_hw_ring_rxd_post_post(ring, rxdh);
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun /**
1248*4882a593Smuzhiyun  * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1249*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
1250*4882a593Smuzhiyun  * @rxdh: Descriptor handle. Returned by HW.
1251*4882a593Smuzhiyun  * @t_code:	Transfer code, as per Titan User Guide,
1252*4882a593Smuzhiyun  *	 Receive Descriptor Format. Returned by HW.
1253*4882a593Smuzhiyun  *
1254*4882a593Smuzhiyun  * Retrieve the	_next_ completed descriptor.
1255*4882a593Smuzhiyun  * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
1256*4882a593Smuzhiyun  * driver of new completed descriptors. After that
1257*4882a593Smuzhiyun  * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
1258*4882a593Smuzhiyun  * remaining completions (the very first completion is passed by HW via
1259*4882a593Smuzhiyun  * vxge_hw_ring_callback_f).
1260*4882a593Smuzhiyun  *
1261*4882a593Smuzhiyun  * Implementation-wise, the driver is free to call
1262*4882a593Smuzhiyun  * vxge_hw_ring_rxd_next_completed either immediately from inside the
1263*4882a593Smuzhiyun  * ring callback, or in a deferred fashion and separate (from HW)
1264*4882a593Smuzhiyun  * context.
1265*4882a593Smuzhiyun  *
1266*4882a593Smuzhiyun  * Non-zero @t_code means failure to fill-in receive buffer(s)
1267*4882a593Smuzhiyun  * of the descriptor.
1268*4882a593Smuzhiyun  * For instance, parity	error detected during the data transfer.
1269*4882a593Smuzhiyun  * In this case Titan will complete the descriptor and indicate
1270*4882a593Smuzhiyun  * to the host that the received data is not to be used.
1271*4882a593Smuzhiyun  * For details please refer to Titan User Guide.
1272*4882a593Smuzhiyun  *
1273*4882a593Smuzhiyun  * Returns: VXGE_HW_OK - success.
1274*4882a593Smuzhiyun  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1275*4882a593Smuzhiyun  * are currently available for processing.
1276*4882a593Smuzhiyun  *
1277*4882a593Smuzhiyun  * See also: vxge_hw_ring_callback_f{},
1278*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
1279*4882a593Smuzhiyun  */
1280*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1281*4882a593Smuzhiyun 	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1284*4882a593Smuzhiyun 	struct vxge_hw_ring_rxd_1 *rxdp;
1285*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1286*4882a593Smuzhiyun 	u64 control_0, own;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	channel = &ring->channel;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	vxge_hw_channel_dtr_try_complete(channel, rxdh);
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	rxdp = *rxdh;
1293*4882a593Smuzhiyun 	if (rxdp == NULL) {
1294*4882a593Smuzhiyun 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1295*4882a593Smuzhiyun 		goto exit;
1296*4882a593Smuzhiyun 	}
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	control_0 = rxdp->control_0;
1299*4882a593Smuzhiyun 	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1300*4882a593Smuzhiyun 	*t_code	= (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	/* check whether it is not the end */
1303*4882a593Smuzhiyun 	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 		vxge_assert((rxdp)->host_control !=
1306*4882a593Smuzhiyun 				0);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 		++ring->cmpl_cnt;
1309*4882a593Smuzhiyun 		vxge_hw_channel_dtr_complete(channel);
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 		ring->stats->common_stats.usage_cnt++;
1314*4882a593Smuzhiyun 		if (ring->stats->common_stats.usage_max <
1315*4882a593Smuzhiyun 				ring->stats->common_stats.usage_cnt)
1316*4882a593Smuzhiyun 			ring->stats->common_stats.usage_max =
1317*4882a593Smuzhiyun 				ring->stats->common_stats.usage_cnt;
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 		status = VXGE_HW_OK;
1320*4882a593Smuzhiyun 		goto exit;
1321*4882a593Smuzhiyun 	}
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	/* reset it. since we don't want to return
1324*4882a593Smuzhiyun 	 * garbage to the driver */
1325*4882a593Smuzhiyun 	*rxdh =	NULL;
1326*4882a593Smuzhiyun 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1327*4882a593Smuzhiyun exit:
1328*4882a593Smuzhiyun 	return status;
1329*4882a593Smuzhiyun }
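
/*
 * Usage sketch (illustrative only, not part of the driver): a receive
 * completion handler can drain completions in a loop, check the
 * transfer code via vxge_hw_ring_handle_tcode() and either recycle the
 * descriptor with vxge_hw_ring_rxd_free() or hand the data to the
 * stack and re-post it. vxge_consume_rx_buffer() is a hypothetical
 * helper; a real driver would also replace the buffer before reposting.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
 *		    VXGE_HW_OK) {
 *			vxge_hw_ring_rxd_free(ring, rxdh);
 *			continue;
 *		}
 *		vxge_consume_rx_buffer(ring, rxdh);	(hypothetical helper)
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */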
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun /**
1332*4882a593Smuzhiyun  * vxge_hw_ring_handle_tcode - Handle transfer code.
1333*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
1334*4882a593Smuzhiyun  * @rxdh: Descriptor handle.
1335*4882a593Smuzhiyun  * @t_code: One of the enumerated (and documented in the Titan user guide)
1336*4882a593Smuzhiyun  * "transfer codes".
1337*4882a593Smuzhiyun  *
1338*4882a593Smuzhiyun  * Handle descriptor's transfer code. The latter comes with each completed
1339*4882a593Smuzhiyun  * descriptor.
1340*4882a593Smuzhiyun  *
1341*4882a593Smuzhiyun  * Returns: one of the enum vxge_hw_status{} enumerated types.
1342*4882a593Smuzhiyun  * VXGE_HW_OK			- for success.
1343*4882a593Smuzhiyun  * VXGE_HW_ERR_CRITICAL         - when encounters critical error.
1344*4882a593Smuzhiyun  */
1345*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_ring_handle_tcode(
1346*4882a593Smuzhiyun 	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1347*4882a593Smuzhiyun {
1348*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	/* If the t_code is not supported and is other
1351*4882a593Smuzhiyun 	 * than 0x5 (an unparseable packet, e.g. one with
1352*4882a593Smuzhiyun 	 * an unknown IPv6 header), drop it.
1353*4882a593Smuzhiyun 	 */
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	if (t_code ==  VXGE_HW_RING_T_CODE_OK ||
1356*4882a593Smuzhiyun 		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1357*4882a593Smuzhiyun 		status = VXGE_HW_OK;
1358*4882a593Smuzhiyun 		goto exit;
1359*4882a593Smuzhiyun 	}
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1362*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_TCODE;
1363*4882a593Smuzhiyun 		goto exit;
1364*4882a593Smuzhiyun 	}
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 	ring->stats->rxd_t_code_err_cnt[t_code]++;
1367*4882a593Smuzhiyun exit:
1368*4882a593Smuzhiyun 	return status;
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun /**
1372*4882a593Smuzhiyun  * __vxge_hw_non_offload_db_post - Post non offload doorbell
1373*4882a593Smuzhiyun  *
1374*4882a593Smuzhiyun  * @fifo: fifohandle
1375*4882a593Smuzhiyun  * @txdl_ptr: The starting location of the TxDL in host memory
1376*4882a593Smuzhiyun  * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1377*4882a593Smuzhiyun  * @no_snoop: No snoop flags
1378*4882a593Smuzhiyun  *
1379*4882a593Smuzhiyun  * This function posts a non-offload doorbell to doorbell FIFO
1380*4882a593Smuzhiyun  *
1381*4882a593Smuzhiyun  */
1382*4882a593Smuzhiyun static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1383*4882a593Smuzhiyun 	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1384*4882a593Smuzhiyun {
1385*4882a593Smuzhiyun 	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1386*4882a593Smuzhiyun 		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1387*4882a593Smuzhiyun 		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1388*4882a593Smuzhiyun 		&fifo->nofl_db->control_0);
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun /**
1394*4882a593Smuzhiyun  * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1395*4882a593Smuzhiyun  * the fifo
1396*4882a593Smuzhiyun  * @fifoh: Handle to the fifo object used for non offload send
1397*4882a593Smuzhiyun  */
1398*4882a593Smuzhiyun u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1399*4882a593Smuzhiyun {
1400*4882a593Smuzhiyun 	return vxge_hw_channel_dtr_count(&fifoh->channel);
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun /**
1404*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1405*4882a593Smuzhiyun  * @fifo: Handle to the fifo object used for non offload send
1406*4882a593Smuzhiyun  * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1407*4882a593Smuzhiyun  *        with a valid handle.
1408*4882a593Smuzhiyun  * @txdl_priv: Buffer to return the pointer to per txdl space
1409*4882a593Smuzhiyun  *
1410*4882a593Smuzhiyun  * Reserve a single TxDL (that is, a fifo descriptor)
1411*4882a593Smuzhiyun  * for subsequent filling-in by the driver
1412*4882a593Smuzhiyun  * and posting on the corresponding channel (@fifo)
1413*4882a593Smuzhiyun  * via vxge_hw_fifo_txdl_post().
1414*4882a593Smuzhiyun  *
1415*4882a593Smuzhiyun  * Note: it is the responsibility of the driver to reserve multiple
1416*4882a593Smuzhiyun  * descriptors for lengthy (e.g., LSO) transmit operations. A single fifo
1417*4882a593Smuzhiyun  * descriptor carries up to the configured number (fifo.max_frags) of contiguous buffers.
1418*4882a593Smuzhiyun  *
1419*4882a593Smuzhiyun  * Returns: VXGE_HW_OK - success;
1420*4882a593Smuzhiyun  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1421*4882a593Smuzhiyun  *
1422*4882a593Smuzhiyun  */
1423*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1424*4882a593Smuzhiyun 	struct __vxge_hw_fifo *fifo,
1425*4882a593Smuzhiyun 	void **txdlh, void **txdl_priv)
1426*4882a593Smuzhiyun {
1427*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1428*4882a593Smuzhiyun 	enum vxge_hw_status status;
1429*4882a593Smuzhiyun 	int i;
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	channel = &fifo->channel;
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	if (status == VXGE_HW_OK) {
1436*4882a593Smuzhiyun 		struct vxge_hw_fifo_txd *txdp =
1437*4882a593Smuzhiyun 			(struct vxge_hw_fifo_txd *)*txdlh;
1438*4882a593Smuzhiyun 		struct __vxge_hw_fifo_txdl_priv *priv;
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 		/* reset the TxDL's private */
1443*4882a593Smuzhiyun 		priv->align_dma_offset = 0;
1444*4882a593Smuzhiyun 		priv->align_vaddr_start = priv->align_vaddr;
1445*4882a593Smuzhiyun 		priv->align_used_frags = 0;
1446*4882a593Smuzhiyun 		priv->frags = 0;
1447*4882a593Smuzhiyun 		priv->alloc_frags = fifo->config->max_frags;
1448*4882a593Smuzhiyun 		priv->next_txdl_priv = NULL;
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 		*txdl_priv = (void *)(size_t)txdp->host_control;
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 		for (i = 0; i < fifo->config->max_frags; i++) {
1453*4882a593Smuzhiyun 			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1454*4882a593Smuzhiyun 			txdp->control_0 = txdp->control_1 = 0;
1455*4882a593Smuzhiyun 		}
1456*4882a593Smuzhiyun 	}
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	return status;
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun /**
1462*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1463*4882a593Smuzhiyun  * descriptor.
1464*4882a593Smuzhiyun  * @fifo: Handle to the fifo object used for non offload send
1465*4882a593Smuzhiyun  * @txdlh: Descriptor handle.
1466*4882a593Smuzhiyun  * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1467*4882a593Smuzhiyun  *            (of buffers).
1468*4882a593Smuzhiyun  * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1469*4882a593Smuzhiyun  * @size: Size of the data buffer (in bytes).
1470*4882a593Smuzhiyun  *
1471*4882a593Smuzhiyun  * This API is part of the preparation of the transmit descriptor for posting
1472*4882a593Smuzhiyun  * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1473*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1474*4882a593Smuzhiyun  * All three APIs fill in the fields of the fifo descriptor,
1475*4882a593Smuzhiyun  * in accordance with the Titan specification.
1476*4882a593Smuzhiyun  *
1477*4882a593Smuzhiyun  */
1478*4882a593Smuzhiyun void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1479*4882a593Smuzhiyun 				  void *txdlh, u32 frag_idx,
1480*4882a593Smuzhiyun 				  dma_addr_t dma_pointer, u32 size)
1481*4882a593Smuzhiyun {
1482*4882a593Smuzhiyun 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1483*4882a593Smuzhiyun 	struct vxge_hw_fifo_txd *txdp, *txdp_last;
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1486*4882a593Smuzhiyun 	txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	if (frag_idx != 0)
1489*4882a593Smuzhiyun 		txdp->control_0 = txdp->control_1 = 0;
1490*4882a593Smuzhiyun 	else {
1491*4882a593Smuzhiyun 		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1492*4882a593Smuzhiyun 			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1493*4882a593Smuzhiyun 		txdp->control_1 |= fifo->interrupt_type;
1494*4882a593Smuzhiyun 		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1495*4882a593Smuzhiyun 			fifo->tx_intr_num);
1496*4882a593Smuzhiyun 		if (txdl_priv->frags) {
1497*4882a593Smuzhiyun 			txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
1498*4882a593Smuzhiyun 			(txdl_priv->frags - 1);
1499*4882a593Smuzhiyun 			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1500*4882a593Smuzhiyun 				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1501*4882a593Smuzhiyun 		}
1502*4882a593Smuzhiyun 	}
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	vxge_assert(frag_idx < txdl_priv->alloc_frags);
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	txdp->buffer_pointer = (u64)dma_pointer;
1507*4882a593Smuzhiyun 	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1508*4882a593Smuzhiyun 	fifo->stats->total_buffers++;
1509*4882a593Smuzhiyun 	txdl_priv->frags++;
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun /**
1513*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1514*4882a593Smuzhiyun  * @fifo: Handle to the fifo object used for non offload send
1515*4882a593Smuzhiyun  * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1516*4882a593Smuzhiyun  *
1517*4882a593Smuzhiyun  * Post descriptor on the 'fifo' type channel for transmission.
1518*4882a593Smuzhiyun  * Prior to posting the descriptor should be filled in accordance with
1519*4882a593Smuzhiyun  * Host/Titan interface specification for a given service (LL, etc.).
1520*4882a593Smuzhiyun  *
1521*4882a593Smuzhiyun  */
1522*4882a593Smuzhiyun void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1523*4882a593Smuzhiyun {
1524*4882a593Smuzhiyun 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1525*4882a593Smuzhiyun 	struct vxge_hw_fifo_txd *txdp_last;
1526*4882a593Smuzhiyun 	struct vxge_hw_fifo_txd *txdp_first;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1529*4882a593Smuzhiyun 	txdp_first = txdlh;
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
1532*4882a593Smuzhiyun 	txdp_last->control_0 |=
1533*4882a593Smuzhiyun 	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1534*4882a593Smuzhiyun 	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	__vxge_hw_non_offload_db_post(fifo,
1539*4882a593Smuzhiyun 		(u64)txdl_priv->dma_addr,
1540*4882a593Smuzhiyun 		txdl_priv->frags - 1,
1541*4882a593Smuzhiyun 		fifo->no_snoop_bits);
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	fifo->stats->total_posts++;
1544*4882a593Smuzhiyun 	fifo->stats->common_stats.usage_cnt++;
1545*4882a593Smuzhiyun 	if (fifo->stats->common_stats.usage_max <
1546*4882a593Smuzhiyun 		fifo->stats->common_stats.usage_cnt)
1547*4882a593Smuzhiyun 		fifo->stats->common_stats.usage_max =
1548*4882a593Smuzhiyun 			fifo->stats->common_stats.usage_cnt;
1549*4882a593Smuzhiyun }
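
/*
 * Usage sketch (illustrative only, not part of the driver): transmit of
 * a two-fragment packet. dma0/dma1 and len0/len1 are assumed to be
 * DMA-mapped buffer addresses and lengths the driver already owns
 * (hypothetical names).
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return NETDEV_TX_BUSY;
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma0, len0);
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 1, dma1, len1);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */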
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun /**
1552*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1553*4882a593Smuzhiyun  * @fifo: Handle to the fifo object used for non offload send
1554*4882a593Smuzhiyun  * @txdlh: Descriptor handle. Returned by HW.
1555*4882a593Smuzhiyun  * @t_code: Transfer code, as per Titan User Guide,
1556*4882a593Smuzhiyun  *          Transmit Descriptor Format.
1557*4882a593Smuzhiyun  *          Returned by HW.
1558*4882a593Smuzhiyun  *
1559*4882a593Smuzhiyun  * Retrieve the _next_ completed descriptor.
1560*4882a593Smuzhiyun  * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1561*4882a593Smuzhiyun  * driver of new completed descriptors. After that
1562*4882a593Smuzhiyun  * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
1563*4882a593Smuzhiyun  * remaining completions (the very first completion is passed by HW via
1564*4882a593Smuzhiyun  * vxge_hw_channel_callback_f).
1565*4882a593Smuzhiyun  *
1566*4882a593Smuzhiyun  * Implementation-wise, the driver is free to call
1567*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1568*4882a593Smuzhiyun  * channel callback, or in a deferred fashion and separate (from HW)
1569*4882a593Smuzhiyun  * context.
1570*4882a593Smuzhiyun  *
1571*4882a593Smuzhiyun  * Non-zero @t_code means failure to process the descriptor.
1572*4882a593Smuzhiyun  * The failure could happen, for instance, when the link is
1573*4882a593Smuzhiyun  * down, in which case Titan completes the descriptor because it
1574*4882a593Smuzhiyun  * is not able to send the data out.
1575*4882a593Smuzhiyun  *
1576*4882a593Smuzhiyun  * For details please refer to Titan User Guide.
1577*4882a593Smuzhiyun  *
1578*4882a593Smuzhiyun  * Returns: VXGE_HW_OK - success.
1579*4882a593Smuzhiyun  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1580*4882a593Smuzhiyun  * are currently available for processing.
1581*4882a593Smuzhiyun  *
1582*4882a593Smuzhiyun  */
1583*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1584*4882a593Smuzhiyun 	struct __vxge_hw_fifo *fifo, void **txdlh,
1585*4882a593Smuzhiyun 	enum vxge_hw_fifo_tcode *t_code)
1586*4882a593Smuzhiyun {
1587*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1588*4882a593Smuzhiyun 	struct vxge_hw_fifo_txd *txdp;
1589*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	channel = &fifo->channel;
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	vxge_hw_channel_dtr_try_complete(channel, txdlh);
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	txdp = *txdlh;
1596*4882a593Smuzhiyun 	if (txdp == NULL) {
1597*4882a593Smuzhiyun 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1598*4882a593Smuzhiyun 		goto exit;
1599*4882a593Smuzhiyun 	}
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	/* check whether host owns it */
1602*4882a593Smuzhiyun 	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 		vxge_assert(txdp->host_control != 0);
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 		vxge_hw_channel_dtr_complete(channel);
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 		if (fifo->stats->common_stats.usage_cnt > 0)
1611*4882a593Smuzhiyun 			fifo->stats->common_stats.usage_cnt--;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 		status = VXGE_HW_OK;
1614*4882a593Smuzhiyun 		goto exit;
1615*4882a593Smuzhiyun 	}
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	/* no more completions */
1618*4882a593Smuzhiyun 	*txdlh = NULL;
1619*4882a593Smuzhiyun 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1620*4882a593Smuzhiyun exit:
1621*4882a593Smuzhiyun 	return status;
1622*4882a593Smuzhiyun }
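
/*
 * Usage sketch (illustrative only, not part of the driver): the transmit
 * completion path drains finished TxDLs, checks the transfer code and
 * recycles each descriptor. vxge_unmap_and_free_skb() is a hypothetical
 * helper standing in for the driver's unmap/free logic.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (t_code)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		vxge_unmap_and_free_skb(fifo, txdlh);	(hypothetical helper)
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */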
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun /**
1625*4882a593Smuzhiyun  * vxge_hw_fifo_handle_tcode - Handle transfer code.
1626*4882a593Smuzhiyun  * @fifo: Handle to the fifo object used for non offload send
1627*4882a593Smuzhiyun  * @txdlh: Descriptor handle.
1628*4882a593Smuzhiyun  * @t_code: One of the enumerated (and documented in the Titan user guide)
1629*4882a593Smuzhiyun  *          "transfer codes".
1630*4882a593Smuzhiyun  *
1631*4882a593Smuzhiyun  * Handle descriptor's transfer code. The latter comes with each completed
1632*4882a593Smuzhiyun  * descriptor.
1633*4882a593Smuzhiyun  *
1634*4882a593Smuzhiyun  * Returns: one of the enum vxge_hw_status{} enumerated types.
1635*4882a593Smuzhiyun  * VXGE_HW_OK - for success.
1636*4882a593Smuzhiyun  * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1637*4882a593Smuzhiyun  */
1638*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1639*4882a593Smuzhiyun 					      void *txdlh,
1640*4882a593Smuzhiyun 					      enum vxge_hw_fifo_tcode t_code)
1641*4882a593Smuzhiyun {
1642*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1645*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_TCODE;
1646*4882a593Smuzhiyun 		goto exit;
1647*4882a593Smuzhiyun 	}
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	fifo->stats->txd_t_code_err_cnt[t_code]++;
1650*4882a593Smuzhiyun exit:
1651*4882a593Smuzhiyun 	return status;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun /**
1655*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_free - Free descriptor.
1656*4882a593Smuzhiyun  * @fifo: Handle to the fifo object used for non offload send
1657*4882a593Smuzhiyun  * @txdlh: Descriptor handle.
1658*4882a593Smuzhiyun  *
1659*4882a593Smuzhiyun  * Free the reserved descriptor. This operation is "symmetrical" to
1660*4882a593Smuzhiyun  * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1661*4882a593Smuzhiyun  * lifecycle.
1662*4882a593Smuzhiyun  *
1663*4882a593Smuzhiyun  * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1664*4882a593Smuzhiyun  * be:
1665*4882a593Smuzhiyun  *
1666*4882a593Smuzhiyun  * - reserved (vxge_hw_fifo_txdl_reserve);
1667*4882a593Smuzhiyun  *
1668*4882a593Smuzhiyun  * - posted (vxge_hw_fifo_txdl_post);
1669*4882a593Smuzhiyun  *
1670*4882a593Smuzhiyun  * - completed (vxge_hw_fifo_txdl_next_completed);
1671*4882a593Smuzhiyun  *
1672*4882a593Smuzhiyun  * - and recycled again (vxge_hw_fifo_txdl_free).
1673*4882a593Smuzhiyun  *
1674*4882a593Smuzhiyun  * For alternative state transitions and more details please refer to
1675*4882a593Smuzhiyun  * the design doc.
1676*4882a593Smuzhiyun  *
1677*4882a593Smuzhiyun  */
1678*4882a593Smuzhiyun void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1679*4882a593Smuzhiyun {
1680*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 	channel = &fifo->channel;
1683*4882a593Smuzhiyun 
1684*4882a593Smuzhiyun 	vxge_hw_channel_dtr_free(channel, txdlh);
1685*4882a593Smuzhiyun }
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun /**
1688*4882a593Smuzhiyun  * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath to MAC address table.
1689*4882a593Smuzhiyun  * @vp: Vpath handle.
1690*4882a593Smuzhiyun  * @macaddr: MAC address to be added for this vpath into the list
1691*4882a593Smuzhiyun  * @macaddr_mask: MAC address mask for macaddr
1692*4882a593Smuzhiyun  * @duplicate_mode: Duplicate MAC address add mode. Please see
1693*4882a593Smuzhiyun  *             enum vxge_hw_vpath_mac_addr_add_mode{}
1694*4882a593Smuzhiyun  *
1695*4882a593Smuzhiyun  * Adds the given mac address and mac address mask into the list for this
1696*4882a593Smuzhiyun  * vpath.
1697*4882a593Smuzhiyun  * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1698*4882a593Smuzhiyun  * vxge_hw_vpath_mac_addr_get_next
1699*4882a593Smuzhiyun  *
1700*4882a593Smuzhiyun  */
1701*4882a593Smuzhiyun enum vxge_hw_status
1702*4882a593Smuzhiyun vxge_hw_vpath_mac_addr_add(
1703*4882a593Smuzhiyun 	struct __vxge_hw_vpath_handle *vp,
1704*4882a593Smuzhiyun 	u8 *macaddr,
1705*4882a593Smuzhiyun 	u8 *macaddr_mask,
1706*4882a593Smuzhiyun 	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1707*4882a593Smuzhiyun {
1708*4882a593Smuzhiyun 	u32 i;
1709*4882a593Smuzhiyun 	u64 data1 = 0ULL;
1710*4882a593Smuzhiyun 	u64 data2 = 0ULL;
1711*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	if (vp == NULL) {
1714*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
1715*4882a593Smuzhiyun 		goto exit;
1716*4882a593Smuzhiyun 	}
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	for (i = 0; i < ETH_ALEN; i++) {
1719*4882a593Smuzhiyun 		data1 <<= 8;
1720*4882a593Smuzhiyun 		data1 |= (u8)macaddr[i];
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 		data2 <<= 8;
1723*4882a593Smuzhiyun 		data2 |= (u8)macaddr_mask[i];
1724*4882a593Smuzhiyun 	}
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun 	switch (duplicate_mode) {
1727*4882a593Smuzhiyun 	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1728*4882a593Smuzhiyun 		i = 0;
1729*4882a593Smuzhiyun 		break;
1730*4882a593Smuzhiyun 	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1731*4882a593Smuzhiyun 		i = 1;
1732*4882a593Smuzhiyun 		break;
1733*4882a593Smuzhiyun 	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1734*4882a593Smuzhiyun 		i = 2;
1735*4882a593Smuzhiyun 		break;
1736*4882a593Smuzhiyun 	default:
1737*4882a593Smuzhiyun 		i = 0;
1738*4882a593Smuzhiyun 		break;
1739*4882a593Smuzhiyun 	}
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	status = __vxge_hw_vpath_rts_table_set(vp,
1742*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1743*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1744*4882a593Smuzhiyun 			0,
1745*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1746*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1747*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1748*4882a593Smuzhiyun exit:
1749*4882a593Smuzhiyun 	return status;
1750*4882a593Smuzhiyun }
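
/*
 * Usage sketch (illustrative only, not part of the driver): add one
 * unicast address to the vpath's DA table. The address bytes are made
 * up, and an all-ones mask is assumed here; the exact mask semantics
 * follow the Titan RTS table format described in the user guide.
 *
 *	u8 addr[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, addr, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */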
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun /**
1753*4882a593Smuzhiyun  * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
1754*4882a593Smuzhiyun  * @vp: Vpath handle.
1755*4882a593Smuzhiyun  * @macaddr: First MAC address entry for this vpath in the list
1756*4882a593Smuzhiyun  * @macaddr_mask: MAC address mask for macaddr
1757*4882a593Smuzhiyun  *
1758*4882a593Smuzhiyun  * Get the first mac address entry for this vpath from MAC address table.
1759*4882a593Smuzhiyun  * Return: the first mac address and mac address mask in the list for this
1760*4882a593Smuzhiyun  * vpath.
1761*4882a593Smuzhiyun  * see also: vxge_hw_vpath_mac_addr_get_next
1762*4882a593Smuzhiyun  *
1763*4882a593Smuzhiyun  */
1764*4882a593Smuzhiyun enum vxge_hw_status
1765*4882a593Smuzhiyun vxge_hw_vpath_mac_addr_get(
1766*4882a593Smuzhiyun 	struct __vxge_hw_vpath_handle *vp,
1767*4882a593Smuzhiyun 	u8 *macaddr,
1768*4882a593Smuzhiyun 	u8 *macaddr_mask)
1769*4882a593Smuzhiyun {
1770*4882a593Smuzhiyun 	u32 i;
1771*4882a593Smuzhiyun 	u64 data1 = 0ULL;
1772*4882a593Smuzhiyun 	u64 data2 = 0ULL;
1773*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	if (vp == NULL) {
1776*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
1777*4882a593Smuzhiyun 		goto exit;
1778*4882a593Smuzhiyun 	}
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	status = __vxge_hw_vpath_rts_table_get(vp,
1781*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1782*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1783*4882a593Smuzhiyun 			0, &data1, &data2);
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	if (status != VXGE_HW_OK)
1786*4882a593Smuzhiyun 		goto exit;
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun 	for (i = ETH_ALEN; i > 0; i--) {
1793*4882a593Smuzhiyun 		macaddr[i-1] = (u8)(data1 & 0xFF);
1794*4882a593Smuzhiyun 		data1 >>= 8;
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1797*4882a593Smuzhiyun 		data2 >>= 8;
1798*4882a593Smuzhiyun 	}
1799*4882a593Smuzhiyun exit:
1800*4882a593Smuzhiyun 	return status;
1801*4882a593Smuzhiyun }
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun /**
1804*4882a593Smuzhiyun  * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
1805*4882a593Smuzhiyun  * @vp: Vpath handle.
1806*4882a593Smuzhiyun  * @macaddr: Next MAC address entry for this vpath in the list
1807*4882a593Smuzhiyun  * @macaddr_mask: MAC address mask for macaddr
1808*4882a593Smuzhiyun  *
1809*4882a593Smuzhiyun  * Get the next mac address entry for this vpath from MAC address table.
1810*4882a593Smuzhiyun  * Return: the next mac address and mac address mask in the list for this
1811*4882a593Smuzhiyun  * vpath.
1812*4882a593Smuzhiyun  * see also: vxge_hw_vpath_mac_addr_get
1813*4882a593Smuzhiyun  *
1814*4882a593Smuzhiyun  */
1815*4882a593Smuzhiyun enum vxge_hw_status
1816*4882a593Smuzhiyun vxge_hw_vpath_mac_addr_get_next(
1817*4882a593Smuzhiyun 	struct __vxge_hw_vpath_handle *vp,
1818*4882a593Smuzhiyun 	u8 *macaddr,
1819*4882a593Smuzhiyun 	u8 *macaddr_mask)
1820*4882a593Smuzhiyun {
1821*4882a593Smuzhiyun 	u32 i;
1822*4882a593Smuzhiyun 	u64 data1 = 0ULL;
1823*4882a593Smuzhiyun 	u64 data2 = 0ULL;
1824*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	if (vp == NULL) {
1827*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
1828*4882a593Smuzhiyun 		goto exit;
1829*4882a593Smuzhiyun 	}
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	status = __vxge_hw_vpath_rts_table_get(vp,
1832*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1833*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1834*4882a593Smuzhiyun 			0, &data1, &data2);
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 	if (status != VXGE_HW_OK)
1837*4882a593Smuzhiyun 		goto exit;
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 	for (i = ETH_ALEN; i > 0; i--) {
1844*4882a593Smuzhiyun 		macaddr[i-1] = (u8)(data1 & 0xFF);
1845*4882a593Smuzhiyun 		data1 >>= 8;
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1848*4882a593Smuzhiyun 		data2 >>= 8;
1849*4882a593Smuzhiyun 	}
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun exit:
1852*4882a593Smuzhiyun 	return status;
1853*4882a593Smuzhiyun }
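
/*
 * Usage sketch (illustrative only, not part of the driver): walk the
 * whole DA table for this vpath by fetching the first entry and then
 * iterating with the _get_next variant until the table access returns
 * a non-OK status.
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, addr, mask);
 *	while (status == VXGE_HW_OK) {
 *		(process addr/mask)
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, addr, mask);
 *	}
 */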
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun /**
1856*4882a593Smuzhiyun  * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath from the MAC address table.
1857*4882a593Smuzhiyun  * @vp: Vpath handle.
1858*4882a593Smuzhiyun  * @macaddr: MAC address to be deleted from the list for this vpath
1859*4882a593Smuzhiyun  * @macaddr_mask: MAC address mask for macaddr
1860*4882a593Smuzhiyun  *
1861*4882a593Smuzhiyun  * Deletes the given mac address and mac address mask from the list for
1862*4882a593Smuzhiyun  * this vpath.
1863*4882a593Smuzhiyun  * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1864*4882a593Smuzhiyun  * vxge_hw_vpath_mac_addr_get_next
1865*4882a593Smuzhiyun  *
1866*4882a593Smuzhiyun  */
1867*4882a593Smuzhiyun enum vxge_hw_status
1868*4882a593Smuzhiyun vxge_hw_vpath_mac_addr_delete(
1869*4882a593Smuzhiyun 	struct __vxge_hw_vpath_handle *vp,
1870*4882a593Smuzhiyun 	u8 *macaddr,
1871*4882a593Smuzhiyun 	u8 *macaddr_mask)
1872*4882a593Smuzhiyun {
1873*4882a593Smuzhiyun 	u32 i;
1874*4882a593Smuzhiyun 	u64 data1 = 0ULL;
1875*4882a593Smuzhiyun 	u64 data2 = 0ULL;
1876*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	if (vp == NULL) {
1879*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
1880*4882a593Smuzhiyun 		goto exit;
1881*4882a593Smuzhiyun 	}
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	for (i = 0; i < ETH_ALEN; i++) {
1884*4882a593Smuzhiyun 		data1 <<= 8;
1885*4882a593Smuzhiyun 		data1 |= (u8)macaddr[i];
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 		data2 <<= 8;
1888*4882a593Smuzhiyun 		data2 |= (u8)macaddr_mask[i];
1889*4882a593Smuzhiyun 	}
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	status = __vxge_hw_vpath_rts_table_set(vp,
1892*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1893*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1894*4882a593Smuzhiyun 			0,
1895*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1896*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1897*4882a593Smuzhiyun exit:
1898*4882a593Smuzhiyun 	return status;
1899*4882a593Smuzhiyun }
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun /**
1902*4882a593Smuzhiyun  * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath to vlan id table.
1903*4882a593Smuzhiyun  * @vp: Vpath handle.
1904*4882a593Smuzhiyun  * @vid: vlan id to be added for this vpath into the list
1905*4882a593Smuzhiyun  *
1906*4882a593Smuzhiyun  * Adds the given vlan id into the list for this vpath.
1907*4882a593Smuzhiyun  * see also: vxge_hw_vpath_vid_delete
1908*4882a593Smuzhiyun  *
1909*4882a593Smuzhiyun  */
1910*4882a593Smuzhiyun enum vxge_hw_status
1911*4882a593Smuzhiyun vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1912*4882a593Smuzhiyun {
1913*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	if (vp == NULL) {
1916*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
1917*4882a593Smuzhiyun 		goto exit;
1918*4882a593Smuzhiyun 	}
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 	status = __vxge_hw_vpath_rts_table_set(vp,
1921*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1922*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1923*4882a593Smuzhiyun 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1924*4882a593Smuzhiyun exit:
1925*4882a593Smuzhiyun 	return status;
1926*4882a593Smuzhiyun }
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun /**
1929*4882a593Smuzhiyun  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1930*4882a593Smuzhiyun  *               from the vlan id table.
1931*4882a593Smuzhiyun  * @vp: Vpath handle.
1932*4882a593Smuzhiyun  * @vid: vlan id to be deleted from the list for this vpath
1933*4882a593Smuzhiyun  *
1934*4882a593Smuzhiyun  * Deletes the given vlan id from the list for this vpath.
1935*4882a593Smuzhiyun  * see also: vxge_hw_vpath_vid_add
1936*4882a593Smuzhiyun  *
1937*4882a593Smuzhiyun  */
1938*4882a593Smuzhiyun enum vxge_hw_status
1939*4882a593Smuzhiyun vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1940*4882a593Smuzhiyun {
1941*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun 	if (vp == NULL) {
1944*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
1945*4882a593Smuzhiyun 		goto exit;
1946*4882a593Smuzhiyun 	}
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	status = __vxge_hw_vpath_rts_table_set(vp,
1949*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1950*4882a593Smuzhiyun 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1951*4882a593Smuzhiyun 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1952*4882a593Smuzhiyun exit:
1953*4882a593Smuzhiyun 	return status;
1954*4882a593Smuzhiyun }
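
/*
 * Usage sketch (illustrative only, not part of the driver): the vlan id
 * table is typically kept in step with the net_device VLAN callbacks,
 * adding an id when the stack registers it and deleting it when the
 * stack drops it.
 *
 *	vxge_hw_vpath_vid_add(vp, vid);		(from ndo_vlan_rx_add_vid)
 *	...
 *	vxge_hw_vpath_vid_delete(vp, vid);	(from ndo_vlan_rx_kill_vid)
 */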
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun /**
1957*4882a593Smuzhiyun  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1958*4882a593Smuzhiyun  * @vp: Vpath handle.
1959*4882a593Smuzhiyun  *
1960*4882a593Smuzhiyun  * Enable promiscuous mode of Titan-e operation.
1961*4882a593Smuzhiyun  *
1962*4882a593Smuzhiyun  * See also: vxge_hw_vpath_promisc_disable().
1963*4882a593Smuzhiyun  */
1964*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1965*4882a593Smuzhiyun 			struct __vxge_hw_vpath_handle *vp)
1966*4882a593Smuzhiyun {
1967*4882a593Smuzhiyun 	u64 val64;
1968*4882a593Smuzhiyun 	struct __vxge_hw_virtualpath *vpath;
1969*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1972*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
1973*4882a593Smuzhiyun 		goto exit;
1974*4882a593Smuzhiyun 	}
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	vpath = vp->vpath;
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 	/* Enable promiscuous mode for function 0 only */
1979*4882a593Smuzhiyun 	if (!(vpath->hldev->access_rights &
1980*4882a593Smuzhiyun 		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1981*4882a593Smuzhiyun 		return VXGE_HW_OK;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1988*4882a593Smuzhiyun 			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1989*4882a593Smuzhiyun 			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1990*4882a593Smuzhiyun 			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1993*4882a593Smuzhiyun 	}
1994*4882a593Smuzhiyun exit:
1995*4882a593Smuzhiyun 	return status;
1996*4882a593Smuzhiyun }
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun /**
1999*4882a593Smuzhiyun  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2000*4882a593Smuzhiyun  * @vp: Vpath handle.
2001*4882a593Smuzhiyun  *
2002*4882a593Smuzhiyun  * Disable promiscuous mode of Titan-e operation.
2003*4882a593Smuzhiyun  *
2004*4882a593Smuzhiyun  * See also: vxge_hw_vpath_promisc_enable().
2005*4882a593Smuzhiyun  */
2006*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2007*4882a593Smuzhiyun 			struct __vxge_hw_vpath_handle *vp)
2008*4882a593Smuzhiyun {
2009*4882a593Smuzhiyun 	u64 val64;
2010*4882a593Smuzhiyun 	struct __vxge_hw_virtualpath *vpath;
2011*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2014*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
2015*4882a593Smuzhiyun 		goto exit;
2016*4882a593Smuzhiyun 	}
2017*4882a593Smuzhiyun 
2018*4882a593Smuzhiyun 	vpath = vp->vpath;
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun 	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2025*4882a593Smuzhiyun 			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2026*4882a593Smuzhiyun 			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2029*4882a593Smuzhiyun 	}
2030*4882a593Smuzhiyun exit:
2031*4882a593Smuzhiyun 	return status;
2032*4882a593Smuzhiyun }
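
/*
 * Usage sketch (illustrative only, not part of the driver): a driver's
 * rx-mode handler can toggle promiscuous reception based on
 * IFF_PROMISC. Note that enable is honoured only for a function with
 * MRPCIM access rights, as checked in vxge_hw_vpath_promisc_enable().
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		vxge_hw_vpath_promisc_disable(vp);
 */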
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun /*
2035*4882a593Smuzhiyun  * vxge_hw_vpath_bcast_enable - Enable broadcast
2036*4882a593Smuzhiyun  * @vp: Vpath handle.
2037*4882a593Smuzhiyun  *
2038*4882a593Smuzhiyun  * Enable receiving broadcasts.
2039*4882a593Smuzhiyun  */
2040*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2041*4882a593Smuzhiyun 			struct __vxge_hw_vpath_handle *vp)
2042*4882a593Smuzhiyun {
2043*4882a593Smuzhiyun 	u64 val64;
2044*4882a593Smuzhiyun 	struct __vxge_hw_virtualpath *vpath;
2045*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2048*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
2049*4882a593Smuzhiyun 		goto exit;
2050*4882a593Smuzhiyun 	}
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	vpath = vp->vpath;
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2057*4882a593Smuzhiyun 		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2058*4882a593Smuzhiyun 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2059*4882a593Smuzhiyun 	}
2060*4882a593Smuzhiyun exit:
2061*4882a593Smuzhiyun 	return status;
2062*4882a593Smuzhiyun }
2063*4882a593Smuzhiyun 
2064*4882a593Smuzhiyun /**
2065*4882a593Smuzhiyun  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2066*4882a593Smuzhiyun  * @vp: Vpath handle.
2067*4882a593Smuzhiyun  *
2068*4882a593Smuzhiyun  * Enable Titan-e multicast addresses.
2069*4882a593Smuzhiyun  * Returns: VXGE_HW_OK on success.
2070*4882a593Smuzhiyun  *
2071*4882a593Smuzhiyun  */
2072*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2073*4882a593Smuzhiyun 			struct __vxge_hw_vpath_handle *vp)
2074*4882a593Smuzhiyun {
2075*4882a593Smuzhiyun 	u64 val64;
2076*4882a593Smuzhiyun 	struct __vxge_hw_virtualpath *vpath;
2077*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2080*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
2081*4882a593Smuzhiyun 		goto exit;
2082*4882a593Smuzhiyun 	}
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	vpath = vp->vpath;
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2089*4882a593Smuzhiyun 		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2090*4882a593Smuzhiyun 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2091*4882a593Smuzhiyun 	}
2092*4882a593Smuzhiyun exit:
2093*4882a593Smuzhiyun 	return status;
2094*4882a593Smuzhiyun }
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun /**
2097*4882a593Smuzhiyun  * vxge_hw_vpath_mcast_disable - Disable  multicast addresses.
2098*4882a593Smuzhiyun  * @vp: Vpath handle.
2099*4882a593Smuzhiyun  *
2100*4882a593Smuzhiyun  * Disable Titan-e multicast addresses.
2101*4882a593Smuzhiyun  * Returns: VXGE_HW_OK - success.
2102*4882a593Smuzhiyun  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2103*4882a593Smuzhiyun  *
2104*4882a593Smuzhiyun  */
2105*4882a593Smuzhiyun enum vxge_hw_status
2106*4882a593Smuzhiyun vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2107*4882a593Smuzhiyun {
2108*4882a593Smuzhiyun 	u64 val64;
2109*4882a593Smuzhiyun 	struct __vxge_hw_virtualpath *vpath;
2110*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2113*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
2114*4882a593Smuzhiyun 		goto exit;
2115*4882a593Smuzhiyun 	}
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 	vpath = vp->vpath;
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2122*4882a593Smuzhiyun 		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2123*4882a593Smuzhiyun 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2124*4882a593Smuzhiyun 	}
2125*4882a593Smuzhiyun exit:
2126*4882a593Smuzhiyun 	return status;
2127*4882a593Smuzhiyun }
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun /*
2130*4882a593Smuzhiyun  * vxge_hw_vpath_alarm_process - Process Alarms.
2131*4882a593Smuzhiyun  * @vp: Vpath handle.
2132*4882a593Smuzhiyun  * @skip_alarms: When set, do not clear the alarms
2133*4882a593Smuzhiyun  *
2134*4882a593Smuzhiyun  * Process vpath alarms.
2135*4882a593Smuzhiyun  *
2136*4882a593Smuzhiyun  */
2137*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_vpath_alarm_process(
2138*4882a593Smuzhiyun 			struct __vxge_hw_vpath_handle *vp,
2139*4882a593Smuzhiyun 			u32 skip_alarms)
2140*4882a593Smuzhiyun {
2141*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 	if (vp == NULL) {
2144*4882a593Smuzhiyun 		status = VXGE_HW_ERR_INVALID_HANDLE;
2145*4882a593Smuzhiyun 		goto exit;
2146*4882a593Smuzhiyun 	}
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2149*4882a593Smuzhiyun exit:
2150*4882a593Smuzhiyun 	return status;
2151*4882a593Smuzhiyun }
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun /**
2154*4882a593Smuzhiyun  * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2155*4882a593Smuzhiyun  *                            alarms
2156*4882a593Smuzhiyun  * @vp: Virtual Path handle.
2157*4882a593Smuzhiyun  * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2158*4882a593Smuzhiyun  *             interrupts (can be repeated). If the fifo or ring is not enabled,
2159*4882a593Smuzhiyun  *             the MSIX vector for it should be set to 0
2160*4882a593Smuzhiyun  * @alarm_msix_id: MSIX vector for alarm.
2161*4882a593Smuzhiyun  *
2162*4882a593Smuzhiyun  * This API associates the given MSIX vector numbers with the four TIM
2163*4882a593Smuzhiyun  * interrupts and the alarm interrupt.
2164*4882a593Smuzhiyun  */
2165*4882a593Smuzhiyun void
2166*4882a593Smuzhiyun vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2167*4882a593Smuzhiyun 		       int alarm_msix_id)
2168*4882a593Smuzhiyun {
2169*4882a593Smuzhiyun 	u64 val64;
2170*4882a593Smuzhiyun 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
2171*4882a593Smuzhiyun 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2172*4882a593Smuzhiyun 	u32 vp_id = vp->vpath->vp_id;
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2175*4882a593Smuzhiyun 		  (vp_id * 4) + tim_msix_id[0]) |
2176*4882a593Smuzhiyun 		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2177*4882a593Smuzhiyun 		  (vp_id * 4) + tim_msix_id[1]);
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	writeq(val64, &vp_reg->interrupt_cfg0);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2182*4882a593Smuzhiyun 			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2183*4882a593Smuzhiyun 			&vp_reg->interrupt_cfg2);
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun 	if (vpath->hldev->config.intr_mode ==
2186*4882a593Smuzhiyun 					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2187*4882a593Smuzhiyun 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2188*4882a593Smuzhiyun 				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2189*4882a593Smuzhiyun 				0, 32), &vp_reg->one_shot_vect0_en);
2190*4882a593Smuzhiyun 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2191*4882a593Smuzhiyun 				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2192*4882a593Smuzhiyun 				0, 32), &vp_reg->one_shot_vect1_en);
2193*4882a593Smuzhiyun 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2194*4882a593Smuzhiyun 				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2195*4882a593Smuzhiyun 				0, 32), &vp_reg->one_shot_vect2_en);
2196*4882a593Smuzhiyun 	}
2197*4882a593Smuzhiyun }
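
/*
 * Usage sketch (illustrative only, not part of the driver): a common
 * layout gives each vpath a block of four MSI-X vectors - two for the
 * TIM Tx/Rx interrupts and one for alarms. The vector numbering below
 * is an assumption for the example, not mandated by the hardware.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *	int alarm_vec = 2;			(assumed alarm vector index)
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_vec);
 */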
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun /**
2200*4882a593Smuzhiyun  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2201*4882a593Smuzhiyun  * @vp: Virtual Path handle.
2202*4882a593Smuzhiyun  * @msix_id:  MSIX ID
2203*4882a593Smuzhiyun  *
2204*4882a593Smuzhiyun  * The function masks the msix interrupt for the given msix_id
2205*4882a593Smuzhiyun  *
2206*4882a593Smuzhiyun  * See also: vxge_hw_vpath_msix_unmask()
2210*4882a593Smuzhiyun  */
2211*4882a593Smuzhiyun void
2212*4882a593Smuzhiyun vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2213*4882a593Smuzhiyun {
2214*4882a593Smuzhiyun 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2215*4882a593Smuzhiyun 	__vxge_hw_pio_mem_write32_upper(
2216*4882a593Smuzhiyun 		(u32) vxge_bVALn(vxge_mBIT(msix_id  >> 2), 0, 32),
2217*4882a593Smuzhiyun 		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun /**
2221*4882a593Smuzhiyun  * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2222*4882a593Smuzhiyun  * @vp: Virtual Path handle.
2223*4882a593Smuzhiyun  * @msix_id:  MSIX ID
2224*4882a593Smuzhiyun  *
2225*4882a593Smuzhiyun  * The function clears the MSIX interrupt for the given msix_id.
2226*4882a593Smuzhiyun  *
2227*4882a593Smuzhiyun  * Returns: none (the function has a void return type; no error is
2228*4882a593Smuzhiyun  * reported for an out-of-range msix index).
2229*4882a593Smuzhiyun  *
2230*4882a593Smuzhiyun  * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
2231*4882a593Smuzhiyun  */
2232*4882a593Smuzhiyun void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2233*4882a593Smuzhiyun {
2234*4882a593Smuzhiyun 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2237*4882a593Smuzhiyun 		__vxge_hw_pio_mem_write32_upper(
2238*4882a593Smuzhiyun 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2239*4882a593Smuzhiyun 			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2240*4882a593Smuzhiyun 	else
2241*4882a593Smuzhiyun 		__vxge_hw_pio_mem_write32_upper(
2242*4882a593Smuzhiyun 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2243*4882a593Smuzhiyun 			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2244*4882a593Smuzhiyun }
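/*
 * Illustrative sketch (not part of the driver): a per-vpath alarm handler
 * might mask its vector, deal with the alarm condition, and then use
 * vxge_hw_vpath_msix_clear() to re-arm the vector (one-shot mode) or
 * unmask it (other modes).  The handler name, the driver-private context
 * and the alarm_vector_no field are assumptions for illustration only.
 */
#if 0
struct example_alarm_ctx {			/* hypothetical driver-private state */
	struct __vxge_hw_vpath_handle *vp;
	int alarm_vector_no;			/* MSI-X vector used for alarms */
};

static irqreturn_t example_alarm_msix_handler(int irq, void *dev_id)
{
	struct example_alarm_ctx *ctx = dev_id;

	/* Keep the alarm vector quiet while the condition is handled */
	vxge_hw_vpath_msix_mask(ctx->vp, ctx->alarm_vector_no);

	/* ... process the alarm condition for this vpath here ... */

	/* Re-arm (one-shot mode) or unmask (other modes) the vector */
	vxge_hw_vpath_msix_clear(ctx->vp, ctx->alarm_vector_no);

	return IRQ_HANDLED;
}
#endif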
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun /**
2247*4882a593Smuzhiyun  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2248*4882a593Smuzhiyun  * @vp: Virtual Path handle.
2249*4882a593Smuzhiyun  * @msix_id:  MSIX ID
2250*4882a593Smuzhiyun  *
2251*4882a593Smuzhiyun  * The function unmasks the MSIX interrupt for the given msix_id.
2252*4882a593Smuzhiyun  *
2253*4882a593Smuzhiyun  * Returns: none (the function has a void return type; no error is
2254*4882a593Smuzhiyun  * reported for an out-of-range msix index).
2255*4882a593Smuzhiyun  *
2256*4882a593Smuzhiyun  * See also: vxge_hw_vpath_msix_mask()
2257*4882a593Smuzhiyun  */
2258*4882a593Smuzhiyun void
2259*4882a593Smuzhiyun vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2260*4882a593Smuzhiyun {
2261*4882a593Smuzhiyun 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2262*4882a593Smuzhiyun 	__vxge_hw_pio_mem_write32_upper(
2263*4882a593Smuzhiyun 			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2264*4882a593Smuzhiyun 			&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2265*4882a593Smuzhiyun }
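/*
 * Illustrative sketch (not part of the driver): once the MSI-X handlers
 * are registered, a bring-up path can unmask the per-vpath vectors with
 * vxge_hw_vpath_msix_unmask().  The helper name and the assumption of two
 * data vectors (Tx, Rx) plus an alarm vector per vpath are for
 * illustration only.
 */
#if 0
static void example_vpath_msix_enable(struct __vxge_hw_vpath_handle *vp,
				      int tx_vec, int rx_vec, int alarm_vec)
{
	/* Unmask the data vectors programmed by vxge_hw_vpath_msix_set() */
	vxge_hw_vpath_msix_unmask(vp, tx_vec);
	vxge_hw_vpath_msix_unmask(vp, rx_vec);

	/* The alarm vector is unmasked last */
	vxge_hw_vpath_msix_unmask(vp, alarm_vec);
}
#endif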
2266*4882a593Smuzhiyun 
2267*4882a593Smuzhiyun /**
2268*4882a593Smuzhiyun  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2269*4882a593Smuzhiyun  * @vp: Virtual Path handle.
2270*4882a593Smuzhiyun  *
2271*4882a593Smuzhiyun  * Mask Tx and Rx vpath interrupts.
2272*4882a593Smuzhiyun  *
2273*4882a593Smuzhiyun  * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2274*4882a593Smuzhiyun  */
2275*4882a593Smuzhiyun void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2276*4882a593Smuzhiyun {
2277*4882a593Smuzhiyun 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2278*4882a593Smuzhiyun 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2279*4882a593Smuzhiyun 	u64	val64;
2280*4882a593Smuzhiyun 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2281*4882a593Smuzhiyun 
2282*4882a593Smuzhiyun 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2283*4882a593Smuzhiyun 		tim_int_mask1, vp->vpath->vp_id);
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2288*4882a593Smuzhiyun 		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2289*4882a593Smuzhiyun 		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2290*4882a593Smuzhiyun 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2291*4882a593Smuzhiyun 			&hldev->common_reg->tim_int_mask0);
2292*4882a593Smuzhiyun 	}
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun 	val64 = readl(&hldev->common_reg->tim_int_mask1);
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2297*4882a593Smuzhiyun 		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2298*4882a593Smuzhiyun 		__vxge_hw_pio_mem_write32_upper(
2299*4882a593Smuzhiyun 			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2300*4882a593Smuzhiyun 			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2301*4882a593Smuzhiyun 			&hldev->common_reg->tim_int_mask1);
2302*4882a593Smuzhiyun 	}
2303*4882a593Smuzhiyun }
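/*
 * Illustrative sketch (not part of the driver): in INTA mode an interrupt
 * handler can use vxge_hw_vpath_inta_mask_tx_rx() to quiesce a vpath's Tx
 * and Rx TIM interrupts before handing the work to NAPI.  The handler name
 * and the driver-private context are assumptions for illustration only.
 */
#if 0
struct example_inta_ctx {			/* hypothetical driver-private state */
	struct napi_struct napi;
	struct __vxge_hw_vpath_handle *vp;
};

static irqreturn_t example_inta_isr(int irq, void *dev_id)
{
	struct example_inta_ctx *ctx = dev_id;

	/* Stop further Tx/Rx interrupts from this vpath for now */
	vxge_hw_vpath_inta_mask_tx_rx(ctx->vp);

	/* Process completions from softirq context */
	napi_schedule(&ctx->napi);

	return IRQ_HANDLED;
}
#endif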
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun /**
2306*4882a593Smuzhiyun  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2307*4882a593Smuzhiyun  * @vp: Virtual Path handle.
2308*4882a593Smuzhiyun  *
2309*4882a593Smuzhiyun  * Unmask Tx and Rx vpath interrupts.
2310*4882a593Smuzhiyun  *
2311*4882a593Smuzhiyun  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2312*4882a593Smuzhiyun  */
2313*4882a593Smuzhiyun void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2314*4882a593Smuzhiyun {
2315*4882a593Smuzhiyun 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2316*4882a593Smuzhiyun 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2317*4882a593Smuzhiyun 	u64	val64;
2318*4882a593Smuzhiyun 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2321*4882a593Smuzhiyun 		tim_int_mask1, vp->vpath->vp_id);
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2326*4882a593Smuzhiyun 	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2327*4882a593Smuzhiyun 		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2328*4882a593Smuzhiyun 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2329*4882a593Smuzhiyun 			&hldev->common_reg->tim_int_mask0);
2330*4882a593Smuzhiyun 	}
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2333*4882a593Smuzhiyun 	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2334*4882a593Smuzhiyun 		__vxge_hw_pio_mem_write32_upper(
2335*4882a593Smuzhiyun 			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2336*4882a593Smuzhiyun 			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2337*4882a593Smuzhiyun 			&hldev->common_reg->tim_int_mask1);
2338*4882a593Smuzhiyun 	}
2339*4882a593Smuzhiyun }
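/*
 * Illustrative sketch (not part of the driver): the NAPI poll routine that
 * pairs with the INTA handler sketched above re-enables the vpath's Tx/Rx
 * interrupts with vxge_hw_vpath_inta_unmask_tx_rx() when the budget was
 * not exhausted.  The poll name, the example_inta_ctx layout and the
 * 'done' accounting are assumptions for illustration only.
 */
#if 0
static int example_inta_poll(struct napi_struct *napi, int budget)
{
	struct example_inta_ctx *ctx =
		container_of(napi, struct example_inta_ctx, napi);
	int done;

	/* ... reap Rx/Tx completions here and count them in 'done' ... */
	done = 0;

	if (done < budget) {
		napi_complete_done(napi, done);
		/* Let the vpath raise Tx/Rx interrupts again */
		vxge_hw_vpath_inta_unmask_tx_rx(ctx->vp);
	}

	return done;
}
#endif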
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun /**
2342*4882a593Smuzhiyun  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2343*4882a593Smuzhiyun  * descriptors and process the same.
2344*4882a593Smuzhiyun  * @ring: Handle to the ring object used for receive
2345*4882a593Smuzhiyun  *
2346*4882a593Smuzhiyun  * The function polls the Rx for the completed descriptors and calls
2347*4882a593Smuzhiyun  * the driver via the supplied completion callback.
2348*4882a593Smuzhiyun  *
2349*4882a593Smuzhiyun  * Returns: VXGE_HW_OK, if the polling completed successfully.
2350*4882a593Smuzhiyun  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2351*4882a593Smuzhiyun  * descriptors available which are yet to be processed.
2352*4882a593Smuzhiyun  *
2353*4882a593Smuzhiyun  * See also: vxge_hw_vpath_poll_tx()
2354*4882a593Smuzhiyun  */
2355*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2356*4882a593Smuzhiyun {
2357*4882a593Smuzhiyun 	u8 t_code;
2358*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
2359*4882a593Smuzhiyun 	void *first_rxdh;
2360*4882a593Smuzhiyun 	int new_count = 0;
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun 	ring->cmpl_cnt = 0;
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2365*4882a593Smuzhiyun 	if (status == VXGE_HW_OK)
2366*4882a593Smuzhiyun 		ring->callback(ring, first_rxdh,
2367*4882a593Smuzhiyun 			t_code, ring->channel.userdata);
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun 	if (ring->cmpl_cnt != 0) {
2370*4882a593Smuzhiyun 		ring->doorbell_cnt += ring->cmpl_cnt;
2371*4882a593Smuzhiyun 		if (ring->doorbell_cnt >= ring->rxds_limit) {
2372*4882a593Smuzhiyun 			/*
2373*4882a593Smuzhiyun 			 * Each RxD is of 4 qwords, update the number of
2374*4882a593Smuzhiyun 			 * qwords replenished
2375*4882a593Smuzhiyun 			 */
2376*4882a593Smuzhiyun 			new_count = (ring->doorbell_cnt * 4);
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 			/* For each block add 4 more qwords */
2379*4882a593Smuzhiyun 			ring->total_db_cnt += ring->doorbell_cnt;
2380*4882a593Smuzhiyun 			if (ring->total_db_cnt >= ring->rxds_per_block) {
2381*4882a593Smuzhiyun 				new_count += 4;
2382*4882a593Smuzhiyun 				/* Reset total count */
2383*4882a593Smuzhiyun 				ring->total_db_cnt %= ring->rxds_per_block;
2384*4882a593Smuzhiyun 			}
2385*4882a593Smuzhiyun 			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2386*4882a593Smuzhiyun 				&ring->vp_reg->prc_rxd_doorbell);
2387*4882a593Smuzhiyun 			readl(&ring->common_reg->titan_general_int_status);
2388*4882a593Smuzhiyun 			ring->doorbell_cnt = 0;
2389*4882a593Smuzhiyun 		}
2390*4882a593Smuzhiyun 	}
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	return status;
2393*4882a593Smuzhiyun }
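/*
 * Illustrative sketch (not part of the driver): a NAPI poll routine can
 * drive Rx completion processing with vxge_hw_vpath_poll_rx(); the per-skb
 * work happens in the ring callback that the driver registered when the
 * ring was opened.  The example_rx_ring layout and the pkts_processed
 * counter (assumed to be updated by that callback) are assumptions for
 * illustration only.
 */
#if 0
struct example_rx_ring {			/* hypothetical driver-private state */
	struct napi_struct napi;
	struct __vxge_hw_ring *ring;
	int budget;				/* consumed by the ring callback */
	int pkts_processed;			/* updated by the ring callback */
};

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	struct example_rx_ring *rxr =
		container_of(napi, struct example_rx_ring, napi);

	rxr->budget = budget;
	rxr->pkts_processed = 0;

	/* Walk the completed RxDs; the registered callback handles each one */
	vxge_hw_vpath_poll_rx(rxr->ring);

	if (rxr->pkts_processed < budget) {
		napi_complete_done(napi, rxr->pkts_processed);
		/* ... re-enable Rx interrupts for this vpath here
		 * (mode-specific, e.g. INTA or MSI-X unmask) ... */
	}

	return rxr->pkts_processed;
}
#endif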
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun /**
2396*4882a593Smuzhiyun  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
2397*4882a593Smuzhiyun  * @fifo: Handle to the fifo object used for non offload send
2398*4882a593Smuzhiyun  * @skb_ptr: pointer to skb
2399*4882a593Smuzhiyun  * @nr_skb: number of skbs
2400*4882a593Smuzhiyun  * @more: more is coming
2401*4882a593Smuzhiyun  *
2402*4882a593Smuzhiyun  * The function polls the Tx for the completed descriptors and calls
2403*4882a593Smuzhiyun  * the driver via supplied completion callback.
2404*4882a593Smuzhiyun  *
2405*4882a593Smuzhiyun  * Returns: VXGE_HW_OK, if the polling completed successfully.
2406*4882a593Smuzhiyun  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2407*4882a593Smuzhiyun  * descriptors available which are yet to be processed.
2408*4882a593Smuzhiyun  */
2409*4882a593Smuzhiyun enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2410*4882a593Smuzhiyun 					struct sk_buff ***skb_ptr, int nr_skb,
2411*4882a593Smuzhiyun 					int *more)
2412*4882a593Smuzhiyun {
2413*4882a593Smuzhiyun 	enum vxge_hw_fifo_tcode t_code;
2414*4882a593Smuzhiyun 	void *first_txdlh;
2415*4882a593Smuzhiyun 	enum vxge_hw_status status = VXGE_HW_OK;
2416*4882a593Smuzhiyun 	struct __vxge_hw_channel *channel;
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 	channel = &fifo->channel;
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun 	status = vxge_hw_fifo_txdl_next_completed(fifo,
2421*4882a593Smuzhiyun 				&first_txdlh, &t_code);
2422*4882a593Smuzhiyun 	if (status == VXGE_HW_OK)
2423*4882a593Smuzhiyun 		if (fifo->callback(fifo, first_txdlh, t_code,
2424*4882a593Smuzhiyun 			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2425*4882a593Smuzhiyun 			status = VXGE_HW_COMPLETIONS_REMAIN;
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun 	return status;
2428*4882a593Smuzhiyun }
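/*
 * Illustrative sketch (not part of the driver): Tx completion reaping with
 * vxge_hw_vpath_poll_tx().  It is assumed, for illustration, that the fifo
 * callback stores completed skbs into the supplied array and advances
 * *skb_ptr past them, and that 'more' is set when further completions are
 * pending; the helper name and batch size are likewise assumptions.
 */
#if 0
static void example_reap_tx(struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[16];
	struct sk_buff **skb_ptr, **skb;
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		/* Let the fifo callback collect completed skbs into the array */
		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed),
				      &more);

		/* Free everything the callback handed back */
		for (skb = completed; skb != skb_ptr; skb++)
			dev_kfree_skb_irq(*skb);
	} while (more);
}
#endif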
2429