/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num: This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1 and 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA)
 *     or 2(MSI_X). Default value is '2(MSI_X)'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
 *     Possible values are '1' for enable and '0' for disable. Default is '1'.
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *     Possible values are '1' for enable and '0' for disable.
 *     Default is '2', which means disable in promiscuous mode
 *     and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
 *     Possible values are '1' for enable and '0' for disable. Default is '0'.
 ************************************************************************/
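
/*
 * Illustrative usage (hypothetical values, not taken from this file): the
 * parameters above are ordinary module parameters, so a load line could
 * look like
 *
 *	modprobe s2io rx_ring_num=4 rx_ring_mode=1 intr_type=2 napi=1
 *
 * Consult the notes above for the accepted ranges and defaults.
 */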

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
#include <net/checksum.h>

#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.26.28"

/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
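
/*
 * The two entries are indexed by the receive descriptor mode: index 0 is
 * 1-buffer mode (32-byte RxD1, 127 RxDs per block) and index 1 is 2-buffer
 * mode (48-byte RxD3, 85 RxDs per block), matching the RXD_MODE_1 and
 * RXD_MODE_3B handling in init_shared_mem() below.
 */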

static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
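
/*
 * Example: for an XFRAME_I_DEVICE with subid 0x600C the macro evaluates
 * to 1 (faulty indicator); for any XFRAME_II_DEVICE it evaluates to 0.
 */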

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}

/* Ethtool related variables and Macros. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};

#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
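
/*
 * Example: a mac_addr value of 0x001122334455ULL lands in
 * def_mac_addr[offset].mac_addr[0..5] as the byte sequence
 * 00:11:22:33:44:55.
 */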

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload (LRO) feature. */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit the max IP pkt size (64K).
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
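
/*
 * Illustrative example (hypothetical values): the array parameters take
 * comma-separated lists, one entry per FIFO or ring, e.g.
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=4096,1024 rx_ring_sz=30
 */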

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static const struct pci_device_id s2io_tbl[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)

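/*
 * Example: with lst_per_page = 8 lists fitting in a page, an 8192-entry
 * FIFO needs TXD_MEM_PAGE_CNT(8192, 8) = DIV_ROUND_UP(8192, 8) = 1024
 * pages (values illustrative; lst_per_page depends on PAGE_SIZE and
 * max_txds).
 */
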
/* netqueue manipulation helper functions */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;

	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_wake_all_queues(sp->dev);
}

static inline void s2io_wake_tx_queue(
	struct fifo_info *fifo, int cnt, u8 multiq)
{
	if (multiq) {
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
						   &tmp_p, GFP_KERNEL);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "dma_alloc_coherent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store the virtual address of the page we don't
			 * want, to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = dma_alloc_coherent(&nic->pdev->dev,
							   PAGE_SIZE, &tmp_p,
							   GFP_KERNEL);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "dma_alloc_coherent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
							&tmp_p_addr, GFP_KERNEL);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
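					/*
					 * Round ba_0 up to the next
					 * ALIGN_SIZE boundary: kmalloc
					 * over-allocated by ALIGN_SIZE
					 * above, and the add-then-mask
					 * below assumes ALIGN_SIZE is of
					 * the form 2^n - 1.
					 */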
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		dma_alloc_coherent(&nic->pdev->dev, size,
				   &mac_control->stats_mem_phy, GFP_KERNEL);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  fli->list_virt_addr,
					  fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  mac_control->zerodma_virt_addr,
					  (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
					  tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
				  mac_control->stats_mem,
				  mac_control->stats_mem_phy);
	}
}

/*
 * s2io_verify_pci_mode - Verify the PCI/PCI-X bus mode the adapter
 * reports, returning the mode value or -1 if it is unknown.
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int     mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;      /* Unknown PCI mode */
	return mode;
}

#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	for_each_pci_dev(tdev) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}

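/*
 * Per-mode bus clock estimate in MHz, indexed by the value returned by
 * GET_PCI_MODE(); consumed below to set config->bus_speed, which
 * init_tti() later uses to scale its timer.
 */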
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/*
 * s2io_print_pci_mode - Log the PCI/PCI-X bus width and mode the adapter
 * is operating in, returning the mode value or -1 on failure.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int	mode;
	struct config_param *config = &nic->config;
	const char *pcimode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		pcimode = "33MHz PCI bus";
		break;
	case PCI_MODE_PCI_66:
		pcimode = "66MHz PCI bus";
		break;
	case PCI_MODE_PCIX_M1_66:
		pcimode = "66MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_100:
		pcimode = "100MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_133:
		pcimode = "133MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M2_66:
		pcimode = "133MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_100:
		pcimode = "200MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_133:
		pcimode = "266MHz PCIX(M2) bus";
		break;
	default:
		pcimode = "unsupported bus!";
		mode = -1;
	}

	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);

	return mode;
}
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun /**
1100*4882a593Smuzhiyun  *  init_tti - Initialization transmit traffic interrupt scheme
1101*4882a593Smuzhiyun  *  @nic: device private variable
1102*4882a593Smuzhiyun  *  @link: link status (UP/DOWN) used to enable/disable continuous
1103*4882a593Smuzhiyun  *  transmit interrupts
1104*4882a593Smuzhiyun  *  Description: The function configures transmit traffic interrupts
1105*4882a593Smuzhiyun  *  Return Value:  SUCCESS on success and
1106*4882a593Smuzhiyun  *  '-1' on failure
1107*4882a593Smuzhiyun  */
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun static int init_tti(struct s2io_nic *nic, int link)
1110*4882a593Smuzhiyun {
1111*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1112*4882a593Smuzhiyun 	register u64 val64 = 0;
1113*4882a593Smuzhiyun 	int i;
1114*4882a593Smuzhiyun 	struct config_param *config = &nic->config;
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	for (i = 0; i < config->tx_fifo_num; i++) {
1117*4882a593Smuzhiyun 		/*
1118*4882a593Smuzhiyun 		 * TTI Initialization. Default Tx timer gets us about
1119*4882a593Smuzhiyun 		 * 250 interrupts per sec. Continuous interrupts are enabled
1120*4882a593Smuzhiyun 		 * by default.
1121*4882a593Smuzhiyun 		 */
1122*4882a593Smuzhiyun 		if (nic->device_type == XFRAME_II_DEVICE) {
1123*4882a593Smuzhiyun 			int count = (nic->config.bus_speed * 125)/2;
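			/*
			 * Example: at bus_speed == 266 (266 MHz PCI-X M2),
			 * count = (266 * 125) / 2 = 16625.
			 */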
1124*4882a593Smuzhiyun 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1125*4882a593Smuzhiyun 		} else
1126*4882a593Smuzhiyun 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1129*4882a593Smuzhiyun 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1130*4882a593Smuzhiyun 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1131*4882a593Smuzhiyun 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1132*4882a593Smuzhiyun 		if (i == 0)
1133*4882a593Smuzhiyun 			if (use_continuous_tx_intrs && (link == LINK_UP))
1134*4882a593Smuzhiyun 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1135*4882a593Smuzhiyun 		writeq(val64, &bar0->tti_data1_mem);
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 		if (nic->config.intr_type == MSI_X) {
1138*4882a593Smuzhiyun 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1139*4882a593Smuzhiyun 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1140*4882a593Smuzhiyun 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1141*4882a593Smuzhiyun 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1142*4882a593Smuzhiyun 		} else {
1143*4882a593Smuzhiyun 			if ((nic->config.tx_steering_type ==
1144*4882a593Smuzhiyun 			     TX_DEFAULT_STEERING) &&
1145*4882a593Smuzhiyun 			    (config->tx_fifo_num > 1) &&
1146*4882a593Smuzhiyun 			    (i >= nic->udp_fifo_idx) &&
1147*4882a593Smuzhiyun 			    (i < (nic->udp_fifo_idx +
1148*4882a593Smuzhiyun 				  nic->total_udp_fifos)))
1149*4882a593Smuzhiyun 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1150*4882a593Smuzhiyun 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1151*4882a593Smuzhiyun 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1152*4882a593Smuzhiyun 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1153*4882a593Smuzhiyun 			else
1154*4882a593Smuzhiyun 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1155*4882a593Smuzhiyun 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1156*4882a593Smuzhiyun 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1157*4882a593Smuzhiyun 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1158*4882a593Smuzhiyun 		}
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 		writeq(val64, &bar0->tti_data2_mem);
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 		val64 = TTI_CMD_MEM_WE |
1163*4882a593Smuzhiyun 			TTI_CMD_MEM_STROBE_NEW_CMD |
1164*4882a593Smuzhiyun 			TTI_CMD_MEM_OFFSET(i);
1165*4882a593Smuzhiyun 		writeq(val64, &bar0->tti_command_mem);
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1168*4882a593Smuzhiyun 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1169*4882a593Smuzhiyun 					  S2IO_BIT_RESET) != SUCCESS)
1170*4882a593Smuzhiyun 			return FAILURE;
1171*4882a593Smuzhiyun 	}
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	return SUCCESS;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun /**
1177*4882a593Smuzhiyun  *  init_nic - Initialization of hardware
1178*4882a593Smuzhiyun  *  @nic: device private variable
1179*4882a593Smuzhiyun  *  Description: The function sequentially configures every block
1180*4882a593Smuzhiyun  *  of the H/W from their reset values.
1181*4882a593Smuzhiyun  *  Return Value:  SUCCESS on success and
1182*4882a593Smuzhiyun  *  a negative errno on failure (e.g. endian settings incorrect).
1183*4882a593Smuzhiyun  */
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun static int init_nic(struct s2io_nic *nic)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1188*4882a593Smuzhiyun 	struct net_device *dev = nic->dev;
1189*4882a593Smuzhiyun 	register u64 val64 = 0;
1190*4882a593Smuzhiyun 	void __iomem *add;
1191*4882a593Smuzhiyun 	u32 time;
1192*4882a593Smuzhiyun 	int i, j;
1193*4882a593Smuzhiyun 	int dtx_cnt = 0;
1194*4882a593Smuzhiyun 	unsigned long long mem_share;
1195*4882a593Smuzhiyun 	int mem_size;
1196*4882a593Smuzhiyun 	struct config_param *config = &nic->config;
1197*4882a593Smuzhiyun 	struct mac_info *mac_control = &nic->mac_control;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	/* to set the swapper control on the card */
1200*4882a593Smuzhiyun 	if (s2io_set_swapper(nic)) {
1201*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1202*4882a593Smuzhiyun 		return -EIO;
1203*4882a593Smuzhiyun 	}
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	/*
1206*4882a593Smuzhiyun 	 * Herc requires EOI to be removed from reset before XGXS, so do that first.
1207*4882a593Smuzhiyun 	 */
1208*4882a593Smuzhiyun 	if (nic->device_type & XFRAME_II_DEVICE) {
1209*4882a593Smuzhiyun 		val64 = 0xA500000000ULL;
1210*4882a593Smuzhiyun 		writeq(val64, &bar0->sw_reset);
1211*4882a593Smuzhiyun 		msleep(500);
1212*4882a593Smuzhiyun 		val64 = readq(&bar0->sw_reset);
1213*4882a593Smuzhiyun 	}
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	/* Remove XGXS from reset state */
1216*4882a593Smuzhiyun 	val64 = 0;
1217*4882a593Smuzhiyun 	writeq(val64, &bar0->sw_reset);
1218*4882a593Smuzhiyun 	msleep(500);
1219*4882a593Smuzhiyun 	val64 = readq(&bar0->sw_reset);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	/* Ensure that it's safe to access registers by checking that the
1222*4882a593Smuzhiyun 	 * RIC_RUNNING bit is reset. The check is valid only for Xframe II.
1223*4882a593Smuzhiyun 	 */
1224*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_II_DEVICE) {
1225*4882a593Smuzhiyun 		for (i = 0; i < 50; i++) {
1226*4882a593Smuzhiyun 			val64 = readq(&bar0->adapter_status);
1227*4882a593Smuzhiyun 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1228*4882a593Smuzhiyun 				break;
1229*4882a593Smuzhiyun 			msleep(10);
1230*4882a593Smuzhiyun 		}
1231*4882a593Smuzhiyun 		if (i == 50)
1232*4882a593Smuzhiyun 			return -ENODEV;
1233*4882a593Smuzhiyun 	}
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	/*  Enable Receiving broadcasts */
1236*4882a593Smuzhiyun 	add = &bar0->mac_cfg;
1237*4882a593Smuzhiyun 	val64 = readq(&bar0->mac_cfg);
1238*4882a593Smuzhiyun 	val64 |= MAC_RMAC_BCAST_ENABLE;
1239*4882a593Smuzhiyun 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1240*4882a593Smuzhiyun 	writel((u32)val64, add);
1241*4882a593Smuzhiyun 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242*4882a593Smuzhiyun 	writel((u32) (val64 >> 32), (add + 4));
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	/* Read registers in all blocks */
1245*4882a593Smuzhiyun 	val64 = readq(&bar0->mac_int_mask);
1246*4882a593Smuzhiyun 	val64 = readq(&bar0->mc_int_mask);
1247*4882a593Smuzhiyun 	val64 = readq(&bar0->xgxs_int_mask);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	/*  Set MTU */
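	/* vBIT(val64, 2, 14) places the 14-bit maximum payload length
	 * field within the 64-bit rmac_max_pyld_len register.
	 */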
1250*4882a593Smuzhiyun 	val64 = dev->mtu;
1251*4882a593Smuzhiyun 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	if (nic->device_type & XFRAME_II_DEVICE) {
1254*4882a593Smuzhiyun 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1255*4882a593Smuzhiyun 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1256*4882a593Smuzhiyun 					  &bar0->dtx_control, UF);
1257*4882a593Smuzhiyun 			if (dtx_cnt & 0x1)
1258*4882a593Smuzhiyun 				msleep(1); /* Necessary!! */
1259*4882a593Smuzhiyun 			dtx_cnt++;
1260*4882a593Smuzhiyun 		}
1261*4882a593Smuzhiyun 	} else {
1262*4882a593Smuzhiyun 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1263*4882a593Smuzhiyun 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1264*4882a593Smuzhiyun 					  &bar0->dtx_control, UF);
1265*4882a593Smuzhiyun 			val64 = readq(&bar0->dtx_control);
1266*4882a593Smuzhiyun 			dtx_cnt++;
1267*4882a593Smuzhiyun 		}
1268*4882a593Smuzhiyun 	}
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	/*  Tx DMA Initialization */
1271*4882a593Smuzhiyun 	val64 = 0;
1272*4882a593Smuzhiyun 	writeq(val64, &bar0->tx_fifo_partition_0);
1273*4882a593Smuzhiyun 	writeq(val64, &bar0->tx_fifo_partition_1);
1274*4882a593Smuzhiyun 	writeq(val64, &bar0->tx_fifo_partition_2);
1275*4882a593Smuzhiyun 	writeq(val64, &bar0->tx_fifo_partition_3);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1278*4882a593Smuzhiyun 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1279*4882a593Smuzhiyun 
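		/*
		 * Each tx_fifo_partition register packs two 32-bit FIFO
		 * entries: length in bits 19..31 and priority in bits 5..7
		 * of each half (j selects the half). If the last configured
		 * FIFO lands in the lower half (even i), i is bumped below
		 * so the switch still flushes the partially filled register.
		 */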
1280*4882a593Smuzhiyun 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1281*4882a593Smuzhiyun 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 		if (i == (config->tx_fifo_num - 1)) {
1284*4882a593Smuzhiyun 			if (i % 2 == 0)
1285*4882a593Smuzhiyun 				i++;
1286*4882a593Smuzhiyun 		}
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 		switch (i) {
1289*4882a593Smuzhiyun 		case 1:
1290*4882a593Smuzhiyun 			writeq(val64, &bar0->tx_fifo_partition_0);
1291*4882a593Smuzhiyun 			val64 = 0;
1292*4882a593Smuzhiyun 			j = 0;
1293*4882a593Smuzhiyun 			break;
1294*4882a593Smuzhiyun 		case 3:
1295*4882a593Smuzhiyun 			writeq(val64, &bar0->tx_fifo_partition_1);
1296*4882a593Smuzhiyun 			val64 = 0;
1297*4882a593Smuzhiyun 			j = 0;
1298*4882a593Smuzhiyun 			break;
1299*4882a593Smuzhiyun 		case 5:
1300*4882a593Smuzhiyun 			writeq(val64, &bar0->tx_fifo_partition_2);
1301*4882a593Smuzhiyun 			val64 = 0;
1302*4882a593Smuzhiyun 			j = 0;
1303*4882a593Smuzhiyun 			break;
1304*4882a593Smuzhiyun 		case 7:
1305*4882a593Smuzhiyun 			writeq(val64, &bar0->tx_fifo_partition_3);
1306*4882a593Smuzhiyun 			val64 = 0;
1307*4882a593Smuzhiyun 			j = 0;
1308*4882a593Smuzhiyun 			break;
1309*4882a593Smuzhiyun 		default:
1310*4882a593Smuzhiyun 			j++;
1311*4882a593Smuzhiyun 			break;
1312*4882a593Smuzhiyun 		}
1313*4882a593Smuzhiyun 	}
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	/*
1316*4882a593Smuzhiyun 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1317*4882a593Smuzhiyun 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1318*4882a593Smuzhiyun 	 */
1319*4882a593Smuzhiyun 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1320*4882a593Smuzhiyun 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	val64 = readq(&bar0->tx_fifo_partition_0);
1323*4882a593Smuzhiyun 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1324*4882a593Smuzhiyun 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	/*
1327*4882a593Smuzhiyun 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1328*4882a593Smuzhiyun 	 * integrity checking.
1329*4882a593Smuzhiyun 	 */
1330*4882a593Smuzhiyun 	val64 = readq(&bar0->tx_pa_cfg);
1331*4882a593Smuzhiyun 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1332*4882a593Smuzhiyun 		TX_PA_CFG_IGNORE_SNAP_OUI |
1333*4882a593Smuzhiyun 		TX_PA_CFG_IGNORE_LLC_CTRL |
1334*4882a593Smuzhiyun 		TX_PA_CFG_IGNORE_L2_ERR;
1335*4882a593Smuzhiyun 	writeq(val64, &bar0->tx_pa_cfg);
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	/* Rx DMA initialization. */
1338*4882a593Smuzhiyun 	val64 = 0;
1339*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
1340*4882a593Smuzhiyun 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1343*4882a593Smuzhiyun 	}
1344*4882a593Smuzhiyun 	writeq(val64, &bar0->rx_queue_priority);
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 	/*
1347*4882a593Smuzhiyun 	 * Allocating equal share of memory to all the
1348*4882a593Smuzhiyun 	 * configured Rings.
1349*4882a593Smuzhiyun 	 */
1350*4882a593Smuzhiyun 	val64 = 0;
1351*4882a593Smuzhiyun 	if (nic->device_type & XFRAME_II_DEVICE)
1352*4882a593Smuzhiyun 		mem_size = 32;
1353*4882a593Smuzhiyun 	else
1354*4882a593Smuzhiyun 		mem_size = 64;
1355*4882a593Smuzhiyun 
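	/*
	 * The per-queue shares below always sum to mem_size: every queue
	 * gets mem_size / rx_ring_num, and queue 0 additionally absorbs
	 * the division remainder.
	 */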
1356*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
1357*4882a593Smuzhiyun 		switch (i) {
1358*4882a593Smuzhiyun 		case 0:
1359*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num +
1360*4882a593Smuzhiyun 				     mem_size % config->rx_ring_num);
1361*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1362*4882a593Smuzhiyun 			continue;
1363*4882a593Smuzhiyun 		case 1:
1364*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num);
1365*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1366*4882a593Smuzhiyun 			continue;
1367*4882a593Smuzhiyun 		case 2:
1368*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num);
1369*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1370*4882a593Smuzhiyun 			continue;
1371*4882a593Smuzhiyun 		case 3:
1372*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num);
1373*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1374*4882a593Smuzhiyun 			continue;
1375*4882a593Smuzhiyun 		case 4:
1376*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num);
1377*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1378*4882a593Smuzhiyun 			continue;
1379*4882a593Smuzhiyun 		case 5:
1380*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num);
1381*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1382*4882a593Smuzhiyun 			continue;
1383*4882a593Smuzhiyun 		case 6:
1384*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num);
1385*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1386*4882a593Smuzhiyun 			continue;
1387*4882a593Smuzhiyun 		case 7:
1388*4882a593Smuzhiyun 			mem_share = (mem_size / config->rx_ring_num);
1389*4882a593Smuzhiyun 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1390*4882a593Smuzhiyun 			continue;
1391*4882a593Smuzhiyun 		}
1392*4882a593Smuzhiyun 	}
1393*4882a593Smuzhiyun 	writeq(val64, &bar0->rx_queue_cfg);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	/*
1396*4882a593Smuzhiyun 	 * Filling Tx round robin registers
1397*4882a593Smuzhiyun 	 * as per the number of FIFOs for equal scheduling priority
1398*4882a593Smuzhiyun 	 */
1399*4882a593Smuzhiyun 	switch (config->tx_fifo_num) {
1400*4882a593Smuzhiyun 	case 1:
1401*4882a593Smuzhiyun 		val64 = 0x0;
1402*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1403*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1404*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1405*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1406*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1407*4882a593Smuzhiyun 		break;
1408*4882a593Smuzhiyun 	case 2:
1409*4882a593Smuzhiyun 		val64 = 0x0001000100010001ULL;
1410*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1411*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1412*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1413*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1414*4882a593Smuzhiyun 		val64 = 0x0001000100000000ULL;
1415*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1416*4882a593Smuzhiyun 		break;
1417*4882a593Smuzhiyun 	case 3:
1418*4882a593Smuzhiyun 		val64 = 0x0001020001020001ULL;
1419*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1420*4882a593Smuzhiyun 		val64 = 0x0200010200010200ULL;
1421*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1422*4882a593Smuzhiyun 		val64 = 0x0102000102000102ULL;
1423*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1424*4882a593Smuzhiyun 		val64 = 0x0001020001020001ULL;
1425*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1426*4882a593Smuzhiyun 		val64 = 0x0200010200000000ULL;
1427*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1428*4882a593Smuzhiyun 		break;
1429*4882a593Smuzhiyun 	case 4:
1430*4882a593Smuzhiyun 		val64 = 0x0001020300010203ULL;
1431*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1432*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1433*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1434*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1435*4882a593Smuzhiyun 		val64 = 0x0001020300000000ULL;
1436*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1437*4882a593Smuzhiyun 		break;
1438*4882a593Smuzhiyun 	case 5:
1439*4882a593Smuzhiyun 		val64 = 0x0001020304000102ULL;
1440*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1441*4882a593Smuzhiyun 		val64 = 0x0304000102030400ULL;
1442*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1443*4882a593Smuzhiyun 		val64 = 0x0102030400010203ULL;
1444*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1445*4882a593Smuzhiyun 		val64 = 0x0400010203040001ULL;
1446*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1447*4882a593Smuzhiyun 		val64 = 0x0203040000000000ULL;
1448*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1449*4882a593Smuzhiyun 		break;
1450*4882a593Smuzhiyun 	case 6:
1451*4882a593Smuzhiyun 		val64 = 0x0001020304050001ULL;
1452*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1453*4882a593Smuzhiyun 		val64 = 0x0203040500010203ULL;
1454*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1455*4882a593Smuzhiyun 		val64 = 0x0405000102030405ULL;
1456*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1457*4882a593Smuzhiyun 		val64 = 0x0001020304050001ULL;
1458*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1459*4882a593Smuzhiyun 		val64 = 0x0203040500000000ULL;
1460*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1461*4882a593Smuzhiyun 		break;
1462*4882a593Smuzhiyun 	case 7:
1463*4882a593Smuzhiyun 		val64 = 0x0001020304050600ULL;
1464*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1465*4882a593Smuzhiyun 		val64 = 0x0102030405060001ULL;
1466*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1467*4882a593Smuzhiyun 		val64 = 0x0203040506000102ULL;
1468*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1469*4882a593Smuzhiyun 		val64 = 0x0304050600010203ULL;
1470*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1471*4882a593Smuzhiyun 		val64 = 0x0405060000000000ULL;
1472*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1473*4882a593Smuzhiyun 		break;
1474*4882a593Smuzhiyun 	case 8:
1475*4882a593Smuzhiyun 		val64 = 0x0001020304050607ULL;
1476*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_0);
1477*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_1);
1478*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_2);
1479*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_3);
1480*4882a593Smuzhiyun 		val64 = 0x0001020300000000ULL;
1481*4882a593Smuzhiyun 		writeq(val64, &bar0->tx_w_round_robin_4);
1482*4882a593Smuzhiyun 		break;
1483*4882a593Smuzhiyun 	}
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	/* Enable all configured Tx FIFO partitions */
1486*4882a593Smuzhiyun 	val64 = readq(&bar0->tx_fifo_partition_0);
1487*4882a593Smuzhiyun 	val64 |= (TX_FIFO_PARTITION_EN);
1488*4882a593Smuzhiyun 	writeq(val64, &bar0->tx_fifo_partition_0);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	/* Filling the Rx round robin registers as per the
1491*4882a593Smuzhiyun 	 * number of Rings and steering based on QoS with
1492*4882a593Smuzhiyun 	 * equal priority.
1493*4882a593Smuzhiyun 	 */
1494*4882a593Smuzhiyun 	switch (config->rx_ring_num) {
1495*4882a593Smuzhiyun 	case 1:
1496*4882a593Smuzhiyun 		val64 = 0x0;
1497*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1498*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1499*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1500*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1501*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 		val64 = 0x8080808080808080ULL;
1504*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1505*4882a593Smuzhiyun 		break;
1506*4882a593Smuzhiyun 	case 2:
1507*4882a593Smuzhiyun 		val64 = 0x0001000100010001ULL;
1508*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1509*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1510*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1511*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1512*4882a593Smuzhiyun 		val64 = 0x0001000100000000ULL;
1513*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		val64 = 0x8080808040404040ULL;
1516*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1517*4882a593Smuzhiyun 		break;
1518*4882a593Smuzhiyun 	case 3:
1519*4882a593Smuzhiyun 		val64 = 0x0001020001020001ULL;
1520*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1521*4882a593Smuzhiyun 		val64 = 0x0200010200010200ULL;
1522*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1523*4882a593Smuzhiyun 		val64 = 0x0102000102000102ULL;
1524*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1525*4882a593Smuzhiyun 		val64 = 0x0001020001020001ULL;
1526*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1527*4882a593Smuzhiyun 		val64 = 0x0200010200000000ULL;
1528*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 		val64 = 0x8080804040402020ULL;
1531*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1532*4882a593Smuzhiyun 		break;
1533*4882a593Smuzhiyun 	case 4:
1534*4882a593Smuzhiyun 		val64 = 0x0001020300010203ULL;
1535*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1536*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1537*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1538*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1539*4882a593Smuzhiyun 		val64 = 0x0001020300000000ULL;
1540*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 		val64 = 0x8080404020201010ULL;
1543*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1544*4882a593Smuzhiyun 		break;
1545*4882a593Smuzhiyun 	case 5:
1546*4882a593Smuzhiyun 		val64 = 0x0001020304000102ULL;
1547*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1548*4882a593Smuzhiyun 		val64 = 0x0304000102030400ULL;
1549*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1550*4882a593Smuzhiyun 		val64 = 0x0102030400010203ULL;
1551*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1552*4882a593Smuzhiyun 		val64 = 0x0400010203040001ULL;
1553*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1554*4882a593Smuzhiyun 		val64 = 0x0203040000000000ULL;
1555*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 		val64 = 0x8080404020201008ULL;
1558*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1559*4882a593Smuzhiyun 		break;
1560*4882a593Smuzhiyun 	case 6:
1561*4882a593Smuzhiyun 		val64 = 0x0001020304050001ULL;
1562*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1563*4882a593Smuzhiyun 		val64 = 0x0203040500010203ULL;
1564*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1565*4882a593Smuzhiyun 		val64 = 0x0405000102030405ULL;
1566*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1567*4882a593Smuzhiyun 		val64 = 0x0001020304050001ULL;
1568*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1569*4882a593Smuzhiyun 		val64 = 0x0203040500000000ULL;
1570*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 		val64 = 0x8080404020100804ULL;
1573*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1574*4882a593Smuzhiyun 		break;
1575*4882a593Smuzhiyun 	case 7:
1576*4882a593Smuzhiyun 		val64 = 0x0001020304050600ULL;
1577*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1578*4882a593Smuzhiyun 		val64 = 0x0102030405060001ULL;
1579*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1580*4882a593Smuzhiyun 		val64 = 0x0203040506000102ULL;
1581*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1582*4882a593Smuzhiyun 		val64 = 0x0304050600010203ULL;
1583*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1584*4882a593Smuzhiyun 		val64 = 0x0405060000000000ULL;
1585*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 		val64 = 0x8080402010080402ULL;
1588*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1589*4882a593Smuzhiyun 		break;
1590*4882a593Smuzhiyun 	case 8:
1591*4882a593Smuzhiyun 		val64 = 0x0001020304050607ULL;
1592*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_0);
1593*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_1);
1594*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_2);
1595*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_3);
1596*4882a593Smuzhiyun 		val64 = 0x0001020300000000ULL;
1597*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_w_round_robin_4);
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 		val64 = 0x8040201008040201ULL;
1600*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_qos_steering);
1601*4882a593Smuzhiyun 		break;
1602*4882a593Smuzhiyun 	}
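
	/*
	 * In the rts_qos_steering values above each byte corresponds to one
	 * of the eight QoS priority levels; reading the progression of the
	 * constants, each byte appears to be a bitmask of eligible rings
	 * (0x80 = ring 0, 0x40 = ring 1, ... 0x01 = ring 7).
	 */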
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	/* UDP Fix */
1605*4882a593Smuzhiyun 	val64 = 0;
1606*4882a593Smuzhiyun 	for (i = 0; i < 8; i++)
1607*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_frm_len_n[i]);
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	/* Set the default rts frame length for the rings configured */
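	/* The 22 bytes of overhead are the 14-byte Ethernet header, a
	 * 4-byte VLAN tag and the 4-byte FCS.
	 */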
1610*4882a593Smuzhiyun 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1611*4882a593Smuzhiyun 	for (i = 0 ; i < config->rx_ring_num ; i++)
1612*4882a593Smuzhiyun 		writeq(val64, &bar0->rts_frm_len_n[i]);
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	/* Set the frame length for the configured rings
1615*4882a593Smuzhiyun 	 * desired by the user
1616*4882a593Smuzhiyun 	 */
1617*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
1618*4882a593Smuzhiyun 		/* If rts_frm_len[i] == 0 it is assumed that the user did
1619*4882a593Smuzhiyun 		 * not specify frame-length steering for that ring.
1620*4882a593Smuzhiyun 		 * If the user provides a frame length, program the
1621*4882a593Smuzhiyun 		 * rts_frm_len register with that value; otherwise leave
1622*4882a593Smuzhiyun 		 * the default programmed above as it is.
1623*4882a593Smuzhiyun 		 */
1624*4882a593Smuzhiyun 		if (rts_frm_len[i] != 0) {
1625*4882a593Smuzhiyun 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1626*4882a593Smuzhiyun 			       &bar0->rts_frm_len_n[i]);
1627*4882a593Smuzhiyun 		}
1628*4882a593Smuzhiyun 	}
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	/* Disable differentiated services steering logic */
1631*4882a593Smuzhiyun 	for (i = 0; i < 64; i++) {
1632*4882a593Smuzhiyun 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1633*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG,
1634*4882a593Smuzhiyun 				  "%s: rts_ds_steer failed on codepoint %d\n",
1635*4882a593Smuzhiyun 				  dev->name, i);
1636*4882a593Smuzhiyun 			return -ENODEV;
1637*4882a593Smuzhiyun 		}
1638*4882a593Smuzhiyun 	}
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	/* Program statistics memory */
1641*4882a593Smuzhiyun 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_II_DEVICE) {
1644*4882a593Smuzhiyun 		val64 = STAT_BC(0x320);
1645*4882a593Smuzhiyun 		writeq(val64, &bar0->stat_byte_cnt);
1646*4882a593Smuzhiyun 	}
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	/*
1649*4882a593Smuzhiyun 	 * Initializing the sampling rate for the device to calculate the
1650*4882a593Smuzhiyun 	 * bandwidth utilization.
1651*4882a593Smuzhiyun 	 */
1652*4882a593Smuzhiyun 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1653*4882a593Smuzhiyun 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1654*4882a593Smuzhiyun 	writeq(val64, &bar0->mac_link_util);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	/*
1657*4882a593Smuzhiyun 	 * Initializing the Transmit and Receive Traffic Interrupt
1658*4882a593Smuzhiyun 	 * Scheme.
1659*4882a593Smuzhiyun 	 */
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	/* Initialize TTI */
1662*4882a593Smuzhiyun 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1663*4882a593Smuzhiyun 		return -ENODEV;
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	/* RTI Initialization */
1666*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_II_DEVICE) {
1667*4882a593Smuzhiyun 		/*
1668*4882a593Smuzhiyun 		 * Programmed to generate approximately 500 interrupts
1669*4882a593Smuzhiyun 		 * per second
1670*4882a593Smuzhiyun 		 */
1671*4882a593Smuzhiyun 		int count = (nic->config.bus_speed * 125)/4;
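		/* e.g. bus_speed == 266: count = (266 * 125) / 4 = 8312 */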
1672*4882a593Smuzhiyun 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1673*4882a593Smuzhiyun 	} else
1674*4882a593Smuzhiyun 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1675*4882a593Smuzhiyun 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1676*4882a593Smuzhiyun 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1677*4882a593Smuzhiyun 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1678*4882a593Smuzhiyun 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	writeq(val64, &bar0->rti_data1_mem);
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1683*4882a593Smuzhiyun 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1684*4882a593Smuzhiyun 	if (nic->config.intr_type == MSI_X)
1685*4882a593Smuzhiyun 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1686*4882a593Smuzhiyun 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1687*4882a593Smuzhiyun 	else
1688*4882a593Smuzhiyun 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1689*4882a593Smuzhiyun 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1690*4882a593Smuzhiyun 	writeq(val64, &bar0->rti_data2_mem);
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
1693*4882a593Smuzhiyun 		val64 = RTI_CMD_MEM_WE |
1694*4882a593Smuzhiyun 			RTI_CMD_MEM_STROBE_NEW_CMD |
1695*4882a593Smuzhiyun 			RTI_CMD_MEM_OFFSET(i);
1696*4882a593Smuzhiyun 		writeq(val64, &bar0->rti_command_mem);
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 		/*
1699*4882a593Smuzhiyun 		 * Once the operation completes, the Strobe bit of the
1700*4882a593Smuzhiyun 		 * command register will be reset. We poll for this
1701*4882a593Smuzhiyun 		 * particular condition. We wait for a maximum of 500ms
1702*4882a593Smuzhiyun 		 * for the operation to complete, if it's not complete
1703*4882a593Smuzhiyun 		 * by then we return error.
1704*4882a593Smuzhiyun 		 */
1705*4882a593Smuzhiyun 		time = 0;
1706*4882a593Smuzhiyun 		while (true) {
1707*4882a593Smuzhiyun 			val64 = readq(&bar0->rti_command_mem);
1708*4882a593Smuzhiyun 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1709*4882a593Smuzhiyun 				break;
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 			if (time > 10) {
1712*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1713*4882a593Smuzhiyun 					  dev->name);
1714*4882a593Smuzhiyun 				return -ENODEV;
1715*4882a593Smuzhiyun 			}
1716*4882a593Smuzhiyun 			time++;
1717*4882a593Smuzhiyun 			msleep(50);
1718*4882a593Smuzhiyun 		}
1719*4882a593Smuzhiyun 	}
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	/*
1722*4882a593Smuzhiyun 	 * Initializing proper values as Pause threshold into all
1723*4882a593Smuzhiyun 	 * the 8 Queues on Rx side.
1724*4882a593Smuzhiyun 	 */
1725*4882a593Smuzhiyun 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1726*4882a593Smuzhiyun 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	/* Disable RMAC PAD STRIPPING */
1729*4882a593Smuzhiyun 	add = &bar0->mac_cfg;
1730*4882a593Smuzhiyun 	val64 = readq(&bar0->mac_cfg);
1731*4882a593Smuzhiyun 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1732*4882a593Smuzhiyun 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1733*4882a593Smuzhiyun 	writel((u32) (val64), add);
1734*4882a593Smuzhiyun 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735*4882a593Smuzhiyun 	writel((u32) (val64 >> 32), (add + 4));
1736*4882a593Smuzhiyun 	val64 = readq(&bar0->mac_cfg);
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	/* Enable FCS stripping by adapter */
1739*4882a593Smuzhiyun 	add = &bar0->mac_cfg;
1740*4882a593Smuzhiyun 	val64 = readq(&bar0->mac_cfg);
1741*4882a593Smuzhiyun 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1742*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_II_DEVICE)
1743*4882a593Smuzhiyun 		writeq(val64, &bar0->mac_cfg);
1744*4882a593Smuzhiyun 	else {
1745*4882a593Smuzhiyun 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1746*4882a593Smuzhiyun 		writel((u32) (val64), add);
1747*4882a593Smuzhiyun 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748*4882a593Smuzhiyun 		writel((u32) (val64 >> 32), (add + 4));
1749*4882a593Smuzhiyun 	}
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	/*
1752*4882a593Smuzhiyun 	 * Set the time value to be inserted in the pause frame
1753*4882a593Smuzhiyun 	 * generated by xena.
1754*4882a593Smuzhiyun 	 */
1755*4882a593Smuzhiyun 	val64 = readq(&bar0->rmac_pause_cfg);
1756*4882a593Smuzhiyun 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1757*4882a593Smuzhiyun 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1758*4882a593Smuzhiyun 	writeq(val64, &bar0->rmac_pause_cfg);
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	/*
1761*4882a593Smuzhiyun 	 * Set the threshold limit for generating pause frames.
1762*4882a593Smuzhiyun 	 * If the amount of data in any queue exceeds the ratio
1763*4882a593Smuzhiyun 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1764*4882a593Smuzhiyun 	 * a pause frame is generated.
1765*4882a593Smuzhiyun 	 */
1766*4882a593Smuzhiyun 	val64 = 0;
1767*4882a593Smuzhiyun 	for (i = 0; i < 4; i++) {
1768*4882a593Smuzhiyun 		val64 |= (((u64)0xFF00 |
1769*4882a593Smuzhiyun 			   nic->mac_control.mc_pause_threshold_q0q3)
1770*4882a593Smuzhiyun 			  << (i * 2 * 8));
1771*4882a593Smuzhiyun 	}
1772*4882a593Smuzhiyun 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	val64 = 0;
1775*4882a593Smuzhiyun 	for (i = 0; i < 4; i++) {
1776*4882a593Smuzhiyun 		val64 |= (((u64)0xFF00 |
1777*4882a593Smuzhiyun 			   nic->mac_control.mc_pause_threshold_q4q7)
1778*4882a593Smuzhiyun 			  << (i * 2 * 8));
1779*4882a593Smuzhiyun 	}
1780*4882a593Smuzhiyun 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun 	/*
1783*4882a593Smuzhiyun 	 * TxDMA will stop issuing read requests if the number of read
1784*4882a593Smuzhiyun 	 * splits exceeds the limit set by shared_splits.
1785*4882a593Smuzhiyun 	 */
1786*4882a593Smuzhiyun 	val64 = readq(&bar0->pic_control);
1787*4882a593Smuzhiyun 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1788*4882a593Smuzhiyun 	writeq(val64, &bar0->pic_control);
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	if (nic->config.bus_speed == 266) {
1791*4882a593Smuzhiyun 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1792*4882a593Smuzhiyun 		writeq(0x0, &bar0->read_retry_delay);
1793*4882a593Smuzhiyun 		writeq(0x0, &bar0->write_retry_delay);
1794*4882a593Smuzhiyun 	}
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	/*
1797*4882a593Smuzhiyun 	 * Programming the Herc to split every write transaction
1798*4882a593Smuzhiyun 	 * that does not start on an ADB to reduce disconnects.
1799*4882a593Smuzhiyun 	 */
1800*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_II_DEVICE) {
1801*4882a593Smuzhiyun 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1802*4882a593Smuzhiyun 			MISC_LINK_STABILITY_PRD(3);
1803*4882a593Smuzhiyun 		writeq(val64, &bar0->misc_control);
1804*4882a593Smuzhiyun 		val64 = readq(&bar0->pic_control2);
1805*4882a593Smuzhiyun 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1806*4882a593Smuzhiyun 		writeq(val64, &bar0->pic_control2);
1807*4882a593Smuzhiyun 	}
1808*4882a593Smuzhiyun 	if (strstr(nic->product_name, "CX4")) {
1809*4882a593Smuzhiyun 		val64 = TMAC_AVG_IPG(0x17);
1810*4882a593Smuzhiyun 		writeq(val64, &bar0->tmac_avg_ipg);
1811*4882a593Smuzhiyun 	}
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	return SUCCESS;
1814*4882a593Smuzhiyun }
1815*4882a593Smuzhiyun #define LINK_UP_DOWN_INTERRUPT		1
1816*4882a593Smuzhiyun #define MAC_RMAC_ERR_TIMER		2
1817*4882a593Smuzhiyun 
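/*
 * Xframe II can report link faults through a dedicated interrupt; the
 * older Xframe I cannot, so its link state is instead polled from the
 * RMAC error timer.
 */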
1818*4882a593Smuzhiyun static int s2io_link_fault_indication(struct s2io_nic *nic)
1819*4882a593Smuzhiyun {
1820*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_II_DEVICE)
1821*4882a593Smuzhiyun 		return LINK_UP_DOWN_INTERRUPT;
1822*4882a593Smuzhiyun 	else
1823*4882a593Smuzhiyun 		return MAC_RMAC_ERR_TIMER;
1824*4882a593Smuzhiyun }
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun /**
1827*4882a593Smuzhiyun  *  do_s2io_write_bits -  update alarm bits in alarm register
1828*4882a593Smuzhiyun  *  @value: alarm bits
1829*4882a593Smuzhiyun  *  @flag: interrupt status
1830*4882a593Smuzhiyun  *  @addr: address value
1831*4882a593Smuzhiyun  *  Description: update alarm bits in alarm register
1832*4882a593Smuzhiyun  *  Return Value:
1833*4882a593Smuzhiyun  *  NONE.
1834*4882a593Smuzhiyun  */
1835*4882a593Smuzhiyun static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1836*4882a593Smuzhiyun {
1837*4882a593Smuzhiyun 	u64 temp64;
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	temp64 = readq(addr);
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	if (flag == ENABLE_INTRS)
1842*4882a593Smuzhiyun 		temp64 &= ~((u64)value);
1843*4882a593Smuzhiyun 	else
1844*4882a593Smuzhiyun 		temp64 |= ((u64)value);
1845*4882a593Smuzhiyun 	writeq(temp64, addr);
1846*4882a593Smuzhiyun }
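
/*
 * Example (illustrative): the hardware masks an alarm when its mask bit
 * is set, so enabling an alarm clears the bit and disabling sets it:
 *
 *	do_s2io_write_bits(PFC_ECC_DB_ERR, ENABLE_INTRS,
 *			   &bar0->pfc_err_mask);
 */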
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1849*4882a593Smuzhiyun {
1850*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1851*4882a593Smuzhiyun 	register u64 gen_int_mask = 0;
1852*4882a593Smuzhiyun 	u64 interruptible;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1855*4882a593Smuzhiyun 	if (mask & TX_DMA_INTR) {
1856*4882a593Smuzhiyun 		gen_int_mask |= TXDMA_INT_M;
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1859*4882a593Smuzhiyun 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1860*4882a593Smuzhiyun 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1861*4882a593Smuzhiyun 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1864*4882a593Smuzhiyun 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1865*4882a593Smuzhiyun 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1866*4882a593Smuzhiyun 				   &bar0->pfc_err_mask);
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1869*4882a593Smuzhiyun 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1870*4882a593Smuzhiyun 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1873*4882a593Smuzhiyun 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1874*4882a593Smuzhiyun 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1875*4882a593Smuzhiyun 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1876*4882a593Smuzhiyun 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1877*4882a593Smuzhiyun 				   PCC_TXB_ECC_SG_ERR,
1878*4882a593Smuzhiyun 				   flag, &bar0->pcc_err_mask);
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1881*4882a593Smuzhiyun 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1884*4882a593Smuzhiyun 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1885*4882a593Smuzhiyun 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1886*4882a593Smuzhiyun 				   flag, &bar0->lso_err_mask);
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1889*4882a593Smuzhiyun 				   flag, &bar0->tpa_err_mask);
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1892*4882a593Smuzhiyun 	}
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	if (mask & TX_MAC_INTR) {
1895*4882a593Smuzhiyun 		gen_int_mask |= TXMAC_INT_M;
1896*4882a593Smuzhiyun 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1897*4882a593Smuzhiyun 				   &bar0->mac_int_mask);
1898*4882a593Smuzhiyun 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1899*4882a593Smuzhiyun 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1900*4882a593Smuzhiyun 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1901*4882a593Smuzhiyun 				   flag, &bar0->mac_tmac_err_mask);
1902*4882a593Smuzhiyun 	}
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	if (mask & TX_XGXS_INTR) {
1905*4882a593Smuzhiyun 		gen_int_mask |= TXXGXS_INT_M;
1906*4882a593Smuzhiyun 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1907*4882a593Smuzhiyun 				   &bar0->xgxs_int_mask);
1908*4882a593Smuzhiyun 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1909*4882a593Smuzhiyun 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1910*4882a593Smuzhiyun 				   flag, &bar0->xgxs_txgxs_err_mask);
1911*4882a593Smuzhiyun 	}
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	if (mask & RX_DMA_INTR) {
1914*4882a593Smuzhiyun 		gen_int_mask |= RXDMA_INT_M;
1915*4882a593Smuzhiyun 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1916*4882a593Smuzhiyun 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1917*4882a593Smuzhiyun 				   flag, &bar0->rxdma_int_mask);
1918*4882a593Smuzhiyun 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1919*4882a593Smuzhiyun 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1920*4882a593Smuzhiyun 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1921*4882a593Smuzhiyun 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1922*4882a593Smuzhiyun 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1923*4882a593Smuzhiyun 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1924*4882a593Smuzhiyun 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1925*4882a593Smuzhiyun 				   &bar0->prc_pcix_err_mask);
1926*4882a593Smuzhiyun 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1927*4882a593Smuzhiyun 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1928*4882a593Smuzhiyun 				   &bar0->rpa_err_mask);
1929*4882a593Smuzhiyun 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1930*4882a593Smuzhiyun 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1931*4882a593Smuzhiyun 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1932*4882a593Smuzhiyun 				   RDA_FRM_ECC_SG_ERR |
1933*4882a593Smuzhiyun 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1934*4882a593Smuzhiyun 				   flag, &bar0->rda_err_mask);
1935*4882a593Smuzhiyun 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1936*4882a593Smuzhiyun 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1937*4882a593Smuzhiyun 				   flag, &bar0->rti_err_mask);
1938*4882a593Smuzhiyun 	}
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 	if (mask & RX_MAC_INTR) {
1941*4882a593Smuzhiyun 		gen_int_mask |= RXMAC_INT_M;
1942*4882a593Smuzhiyun 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1943*4882a593Smuzhiyun 				   &bar0->mac_int_mask);
1944*4882a593Smuzhiyun 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1945*4882a593Smuzhiyun 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1946*4882a593Smuzhiyun 				 RMAC_DOUBLE_ECC_ERR);
1947*4882a593Smuzhiyun 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1948*4882a593Smuzhiyun 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1949*4882a593Smuzhiyun 		do_s2io_write_bits(interruptible,
1950*4882a593Smuzhiyun 				   flag, &bar0->mac_rmac_err_mask);
1951*4882a593Smuzhiyun 	}
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	if (mask & RX_XGXS_INTR) {
1954*4882a593Smuzhiyun 		gen_int_mask |= RXXGXS_INT_M;
1955*4882a593Smuzhiyun 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1956*4882a593Smuzhiyun 				   &bar0->xgxs_int_mask);
1957*4882a593Smuzhiyun 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1958*4882a593Smuzhiyun 				   &bar0->xgxs_rxgxs_err_mask);
1959*4882a593Smuzhiyun 	}
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 	if (mask & MC_INTR) {
1962*4882a593Smuzhiyun 		gen_int_mask |= MC_INT_M;
1963*4882a593Smuzhiyun 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1964*4882a593Smuzhiyun 				   flag, &bar0->mc_int_mask);
1965*4882a593Smuzhiyun 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1966*4882a593Smuzhiyun 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1967*4882a593Smuzhiyun 				   &bar0->mc_err_mask);
1968*4882a593Smuzhiyun 	}
1969*4882a593Smuzhiyun 	nic->general_int_mask = gen_int_mask;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	/* Remove this line when alarm interrupts are enabled */
1972*4882a593Smuzhiyun 	nic->general_int_mask = 0;
1973*4882a593Smuzhiyun }
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun /**
1976*4882a593Smuzhiyun  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1977*4882a593Smuzhiyun  *  @nic: device private variable,
1978*4882a593Smuzhiyun  *  @mask: A mask indicating which Intr block must be modified
1979*4882a593Smuzhiyun  *  @flag: A flag indicating whether to enable or disable the Intrs.
1980*4882a593Smuzhiyun  *  Description: This function will either disable or enable the interrupts
1981*4882a593Smuzhiyun  *  depending on the flag argument. The mask argument can be used to
1982*4882a593Smuzhiyun  *  enable/disable any Intr block.
1983*4882a593Smuzhiyun  *  Return Value: NONE.
1984*4882a593Smuzhiyun  */
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1987*4882a593Smuzhiyun {
1988*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1989*4882a593Smuzhiyun 	register u64 temp64 = 0, intr_mask = 0;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	intr_mask = nic->general_int_mask;
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	/*  Top level interrupt classification */
1994*4882a593Smuzhiyun 	/*  PIC Interrupts */
1995*4882a593Smuzhiyun 	if (mask & TX_PIC_INTR) {
1996*4882a593Smuzhiyun 		/*  Enable PIC Intrs in the general intr mask register */
1997*4882a593Smuzhiyun 		intr_mask |= TXPIC_INT_M;
1998*4882a593Smuzhiyun 		if (flag == ENABLE_INTRS) {
1999*4882a593Smuzhiyun 			/*
2000*4882a593Smuzhiyun 			 * On a Hercules adapter enable the GPIO interrupt;
2001*4882a593Smuzhiyun 			 * otherwise disable all PCIX, Flash, MDIO, IIC and
2002*4882a593Smuzhiyun 			 * GPIO interrupts for now.
2003*4882a593Smuzhiyun 			 * TODO
2004*4882a593Smuzhiyun 			 */
2005*4882a593Smuzhiyun 			if (s2io_link_fault_indication(nic) ==
2006*4882a593Smuzhiyun 			    LINK_UP_DOWN_INTERRUPT) {
2007*4882a593Smuzhiyun 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2008*4882a593Smuzhiyun 						   &bar0->pic_int_mask);
2009*4882a593Smuzhiyun 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2010*4882a593Smuzhiyun 						   &bar0->gpio_int_mask);
2011*4882a593Smuzhiyun 			} else
2012*4882a593Smuzhiyun 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2013*4882a593Smuzhiyun 		} else if (flag == DISABLE_INTRS) {
2014*4882a593Smuzhiyun 			/*
2015*4882a593Smuzhiyun 			 * Disable PIC Intrs in the general
2016*4882a593Smuzhiyun 			 * intr mask register
2017*4882a593Smuzhiyun 			 */
2018*4882a593Smuzhiyun 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2019*4882a593Smuzhiyun 		}
2020*4882a593Smuzhiyun 	}
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun 	/*  Tx traffic interrupts */
2023*4882a593Smuzhiyun 	if (mask & TX_TRAFFIC_INTR) {
2024*4882a593Smuzhiyun 		intr_mask |= TXTRAFFIC_INT_M;
2025*4882a593Smuzhiyun 		if (flag == ENABLE_INTRS) {
2026*4882a593Smuzhiyun 			/*
2027*4882a593Smuzhiyun 			 * Enable all the Tx side interrupts:
2028*4882a593Smuzhiyun 			 * writing 0 enables all 64 TX interrupt levels.
2029*4882a593Smuzhiyun 			 */
2030*4882a593Smuzhiyun 			writeq(0x0, &bar0->tx_traffic_mask);
2031*4882a593Smuzhiyun 		} else if (flag == DISABLE_INTRS) {
2032*4882a593Smuzhiyun 			/*
2033*4882a593Smuzhiyun 			 * Disable Tx Traffic Intrs in the general intr mask
2034*4882a593Smuzhiyun 			 * register.
2035*4882a593Smuzhiyun 			 */
2036*4882a593Smuzhiyun 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2037*4882a593Smuzhiyun 		}
2038*4882a593Smuzhiyun 	}
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	/*  Rx traffic interrupts */
2041*4882a593Smuzhiyun 	if (mask & RX_TRAFFIC_INTR) {
2042*4882a593Smuzhiyun 		intr_mask |= RXTRAFFIC_INT_M;
2043*4882a593Smuzhiyun 		if (flag == ENABLE_INTRS) {
2044*4882a593Smuzhiyun 			/* writing 0 Enables all 8 RX interrupt levels */
2045*4882a593Smuzhiyun 			writeq(0x0, &bar0->rx_traffic_mask);
2046*4882a593Smuzhiyun 		} else if (flag == DISABLE_INTRS) {
2047*4882a593Smuzhiyun 			/*
2048*4882a593Smuzhiyun 			 * Disable Rx Traffic Intrs in the general intr mask
2049*4882a593Smuzhiyun 			 * register.
2050*4882a593Smuzhiyun 			 */
2051*4882a593Smuzhiyun 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2052*4882a593Smuzhiyun 		}
2053*4882a593Smuzhiyun 	}
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 	temp64 = readq(&bar0->general_int_mask);
2056*4882a593Smuzhiyun 	if (flag == ENABLE_INTRS)
2057*4882a593Smuzhiyun 		temp64 &= ~((u64)intr_mask);
2058*4882a593Smuzhiyun 	else
2059*4882a593Smuzhiyun 		temp64 = DISABLE_ALL_INTRS;
2060*4882a593Smuzhiyun 	writeq(temp64, &bar0->general_int_mask);
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	nic->general_int_mask = readq(&bar0->general_int_mask);
2063*4882a593Smuzhiyun }
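
/*
 * Example (shape of the calls made elsewhere in the driver):
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      ENABLE_INTRS);
 */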
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun /**
2066*4882a593Smuzhiyun  *  verify_pcc_quiescent- Checks for PCC quiescent state
2067*4882a593Smuzhiyun  *  @sp : private member of the device structure, which is a pointer to the
2068*4882a593Smuzhiyun  *  s2io_nic structure.
2069*4882a593Smuzhiyun  *  @flag: 1 if the adapter-enable bit has been written, 0 otherwise
2070*4882a593Smuzhiyun  *  Return: 1 if the PCC is quiescent
2071*4882a593Smuzhiyun  *          0 if the PCC is not quiescent
2072*4882a593Smuzhiyun  */
2073*4882a593Smuzhiyun static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2074*4882a593Smuzhiyun {
2075*4882a593Smuzhiyun 	int ret = 0, herc;
2076*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2077*4882a593Smuzhiyun 	u64 val64 = readq(&bar0->adapter_status);
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 	herc = (sp->device_type == XFRAME_II_DEVICE);
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun 	if (flag == false) {
2082*4882a593Smuzhiyun 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2083*4882a593Smuzhiyun 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2084*4882a593Smuzhiyun 				ret = 1;
2085*4882a593Smuzhiyun 		} else {
2086*4882a593Smuzhiyun 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2087*4882a593Smuzhiyun 				ret = 1;
2088*4882a593Smuzhiyun 		}
2089*4882a593Smuzhiyun 	} else {
2090*4882a593Smuzhiyun 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2091*4882a593Smuzhiyun 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2092*4882a593Smuzhiyun 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2093*4882a593Smuzhiyun 				ret = 1;
2094*4882a593Smuzhiyun 		} else {
2095*4882a593Smuzhiyun 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2096*4882a593Smuzhiyun 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2097*4882a593Smuzhiyun 				ret = 1;
2098*4882a593Smuzhiyun 		}
2099*4882a593Smuzhiyun 	}
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	return ret;
2102*4882a593Smuzhiyun }
2103*4882a593Smuzhiyun /**
2104*4882a593Smuzhiyun  *  verify_xena_quiescence - Checks whether the H/W is ready
2105*4882a593Smuzhiyun  *  @sp : private member of the device structure, which is a pointer to the
2106*4882a593Smuzhiyun  *  s2io_nic structure.
2107*4882a593Smuzhiyun  *  Description: Returns whether the H/W is ready to go or not. Depending
2108*4882a593Smuzhiyun  *  on whether adapter enable bit was written or not the comparison
2109*4882a593Smuzhiyun  *  differs and the calling function passes the input argument flag to
2110*4882a593Smuzhiyun  *  indicate this.
2111*4882a593Smuzhiyun  *  Return: 1 if Xena is quiescent
2112*4882a593Smuzhiyun  *          0 if Xena is not quiescent
2113*4882a593Smuzhiyun  */
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun static int verify_xena_quiescence(struct s2io_nic *sp)
2116*4882a593Smuzhiyun {
2117*4882a593Smuzhiyun 	int  mode;
2118*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2119*4882a593Smuzhiyun 	u64 val64 = readq(&bar0->adapter_status);
2120*4882a593Smuzhiyun 	mode = s2io_verify_pci_mode(sp);
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2123*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2124*4882a593Smuzhiyun 		return 0;
2125*4882a593Smuzhiyun 	}
2126*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2127*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2128*4882a593Smuzhiyun 		return 0;
2129*4882a593Smuzhiyun 	}
2130*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2131*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2132*4882a593Smuzhiyun 		return 0;
2133*4882a593Smuzhiyun 	}
2134*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2135*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2136*4882a593Smuzhiyun 		return 0;
2137*4882a593Smuzhiyun 	}
2138*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2139*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2140*4882a593Smuzhiyun 		return 0;
2141*4882a593Smuzhiyun 	}
2142*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2143*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2144*4882a593Smuzhiyun 		return 0;
2145*4882a593Smuzhiyun 	}
2146*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2147*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2148*4882a593Smuzhiyun 		return 0;
2149*4882a593Smuzhiyun 	}
2150*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2151*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2152*4882a593Smuzhiyun 		return 0;
2153*4882a593Smuzhiyun 	}
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	/*
2156*4882a593Smuzhiyun 	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2157*4882a593Smuzhiyun 	 * the P_PLL_LOCK bit in the adapter_status register will
2158*4882a593Smuzhiyun 	 * not be asserted.
2159*4882a593Smuzhiyun 	 */
2160*4882a593Smuzhiyun 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2161*4882a593Smuzhiyun 	    sp->device_type == XFRAME_II_DEVICE &&
2162*4882a593Smuzhiyun 	    mode != PCI_MODE_PCI_33) {
2163*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2164*4882a593Smuzhiyun 		return 0;
2165*4882a593Smuzhiyun 	}
2166*4882a593Smuzhiyun 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2167*4882a593Smuzhiyun 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2168*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2169*4882a593Smuzhiyun 		return 0;
2170*4882a593Smuzhiyun 	}
2171*4882a593Smuzhiyun 	return 1;
2172*4882a593Smuzhiyun }
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun /**
2175*4882a593Smuzhiyun  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2176*4882a593Smuzhiyun  * @sp: Pointer to device specific structure
2177*4882a593Smuzhiyun  * Description :
2178*4882a593Smuzhiyun  * New procedure to clear MAC address reading problems on Alpha platforms
2179*4882a593Smuzhiyun  *
2180*4882a593Smuzhiyun  */
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun static void fix_mac_address(struct s2io_nic *sp)
2183*4882a593Smuzhiyun {
2184*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2185*4882a593Smuzhiyun 	int i = 0;
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	while (fix_mac[i] != END_SIGN) {
2188*4882a593Smuzhiyun 		writeq(fix_mac[i++], &bar0->gpio_control);
2189*4882a593Smuzhiyun 		udelay(10);
2190*4882a593Smuzhiyun 		(void) readq(&bar0->gpio_control);
2191*4882a593Smuzhiyun 	}
2192*4882a593Smuzhiyun }
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun /**
2195*4882a593Smuzhiyun  *  start_nic - Turns the device on
2196*4882a593Smuzhiyun  *  @nic : device private variable.
2197*4882a593Smuzhiyun  *  Description:
2198*4882a593Smuzhiyun  *  This function actually turns the device on. Before this function is
2199*4882a593Smuzhiyun  *  called, all registers are configured from their reset states
2200*4882a593Smuzhiyun  *  and shared memory is allocated but the NIC is still quiescent. On
2201*4882a593Smuzhiyun  *  calling this function, the device interrupts are cleared and the NIC is
2202*4882a593Smuzhiyun  *  literally switched on by writing into the adapter control register.
2203*4882a593Smuzhiyun  *  Return Value:
2204*4882a593Smuzhiyun  *  SUCCESS on success and -1 on failure.
2205*4882a593Smuzhiyun  */
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun static int start_nic(struct s2io_nic *nic)
2208*4882a593Smuzhiyun {
2209*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2210*4882a593Smuzhiyun 	struct net_device *dev = nic->dev;
2211*4882a593Smuzhiyun 	register u64 val64 = 0;
2212*4882a593Smuzhiyun 	u16 subid, i;
2213*4882a593Smuzhiyun 	struct config_param *config = &nic->config;
2214*4882a593Smuzhiyun 	struct mac_info *mac_control = &nic->mac_control;
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	/*  PRC Initialization and configuration */
2217*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
2218*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2221*4882a593Smuzhiyun 		       &bar0->prc_rxd0_n[i]);
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 		val64 = readq(&bar0->prc_ctrl_n[i]);
2224*4882a593Smuzhiyun 		if (nic->rxd_mode == RXD_MODE_1)
2225*4882a593Smuzhiyun 			val64 |= PRC_CTRL_RC_ENABLED;
2226*4882a593Smuzhiyun 		else
2227*4882a593Smuzhiyun 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2228*4882a593Smuzhiyun 		if (nic->device_type == XFRAME_II_DEVICE)
2229*4882a593Smuzhiyun 			val64 |= PRC_CTRL_GROUP_READS;
2230*4882a593Smuzhiyun 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2231*4882a593Smuzhiyun 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2232*4882a593Smuzhiyun 		writeq(val64, &bar0->prc_ctrl_n[i]);
2233*4882a593Smuzhiyun 	}
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	if (nic->rxd_mode == RXD_MODE_3B) {
2236*4882a593Smuzhiyun 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2237*4882a593Smuzhiyun 		val64 = readq(&bar0->rx_pa_cfg);
2238*4882a593Smuzhiyun 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2239*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_pa_cfg);
2240*4882a593Smuzhiyun 	}
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun 	if (vlan_tag_strip == 0) {
2243*4882a593Smuzhiyun 		val64 = readq(&bar0->rx_pa_cfg);
2244*4882a593Smuzhiyun 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2245*4882a593Smuzhiyun 		writeq(val64, &bar0->rx_pa_cfg);
2246*4882a593Smuzhiyun 		nic->vlan_strip_flag = 0;
2247*4882a593Smuzhiyun 	}
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	/*
2250*4882a593Smuzhiyun 	 * Enabling MC-RLDRAM. After enabling the device, we wait
2251*4882a593Smuzhiyun 	 * for around 100ms, which is approximately the time required
2252*4882a593Smuzhiyun 	 * for the device to be ready for operation.
2253*4882a593Smuzhiyun 	 */
2254*4882a593Smuzhiyun 	val64 = readq(&bar0->mc_rldram_mrs);
2255*4882a593Smuzhiyun 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2256*4882a593Smuzhiyun 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2257*4882a593Smuzhiyun 	val64 = readq(&bar0->mc_rldram_mrs);
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun 	msleep(100);	/* Delay by around 100 ms. */
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 	/* Enabling ECC Protection. */
2262*4882a593Smuzhiyun 	val64 = readq(&bar0->adapter_control);
2263*4882a593Smuzhiyun 	val64 &= ~ADAPTER_ECC_EN;
2264*4882a593Smuzhiyun 	writeq(val64, &bar0->adapter_control);
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	/*
2267*4882a593Smuzhiyun 	 * Verify if the device is ready to be enabled, if so enable
2268*4882a593Smuzhiyun 	 * it.
2269*4882a593Smuzhiyun 	 */
2270*4882a593Smuzhiyun 	val64 = readq(&bar0->adapter_status);
2271*4882a593Smuzhiyun 	if (!verify_xena_quiescence(nic)) {
2272*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2273*4882a593Smuzhiyun 			  "Adapter status reads: 0x%llx\n",
2274*4882a593Smuzhiyun 			  dev->name, (unsigned long long)val64);
2275*4882a593Smuzhiyun 		return FAILURE;
2276*4882a593Smuzhiyun 	}
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	/*
2279*4882a593Smuzhiyun 	 * With some switches, link might be already up at this point.
2280*4882a593Smuzhiyun 	 * Because of this weird behavior, when we enable laser,
2281*4882a593Smuzhiyun 	 * we may not get link. We need to handle this. We cannot
2282*4882a593Smuzhiyun 	 * figure out which switch is misbehaving. So we are forced to
2283*4882a593Smuzhiyun 	 * make a global change.
2284*4882a593Smuzhiyun 	 */
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 	/* Enabling Laser. */
2287*4882a593Smuzhiyun 	val64 = readq(&bar0->adapter_control);
2288*4882a593Smuzhiyun 	val64 |= ADAPTER_EOI_TX_ON;
2289*4882a593Smuzhiyun 	writeq(val64, &bar0->adapter_control);
2290*4882a593Smuzhiyun 
2291*4882a593Smuzhiyun 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2292*4882a593Smuzhiyun 		/*
2293*4882a593Smuzhiyun 		 * Don't see link state interrupts initially on some switches,
2294*4882a593Smuzhiyun 		 * so directly scheduling the link state task here.
2295*4882a593Smuzhiyun 		 */
2296*4882a593Smuzhiyun 		schedule_work(&nic->set_link_task);
2297*4882a593Smuzhiyun 	}
2298*4882a593Smuzhiyun 	/* SXE-002: Initialize link and activity LED */
2299*4882a593Smuzhiyun 	subid = nic->pdev->subsystem_device;
2300*4882a593Smuzhiyun 	if (((subid & 0xFF) >= 0x07) &&
2301*4882a593Smuzhiyun 	    (nic->device_type == XFRAME_I_DEVICE)) {
2302*4882a593Smuzhiyun 		val64 = readq(&bar0->gpio_control);
2303*4882a593Smuzhiyun 		val64 |= 0x0000800000000000ULL;
2304*4882a593Smuzhiyun 		writeq(val64, &bar0->gpio_control);
2305*4882a593Smuzhiyun 		val64 = 0x0411040400000000ULL;
2306*4882a593Smuzhiyun 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2307*4882a593Smuzhiyun 	}
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	return SUCCESS;
2310*4882a593Smuzhiyun }
2311*4882a593Smuzhiyun /**
2312*4882a593Smuzhiyun  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2313*4882a593Smuzhiyun  * @fifo_data: fifo data pointer
2314*4882a593Smuzhiyun  * @txdlp: descriptor
2315*4882a593Smuzhiyun  * @get_off: unused
2316*4882a593Smuzhiyun  */
2317*4882a593Smuzhiyun static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2318*4882a593Smuzhiyun 					struct TxD *txdlp, int get_off)
2319*4882a593Smuzhiyun {
2320*4882a593Smuzhiyun 	struct s2io_nic *nic = fifo_data->nic;
2321*4882a593Smuzhiyun 	struct sk_buff *skb;
2322*4882a593Smuzhiyun 	struct TxD *txds;
2323*4882a593Smuzhiyun 	u16 j, frg_cnt;
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	txds = txdlp;
2326*4882a593Smuzhiyun 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2327*4882a593Smuzhiyun 		dma_unmap_single(&nic->pdev->dev,
2328*4882a593Smuzhiyun 				 (dma_addr_t)txds->Buffer_Pointer,
2329*4882a593Smuzhiyun 				 sizeof(u64), DMA_TO_DEVICE);
2330*4882a593Smuzhiyun 		txds++;
2331*4882a593Smuzhiyun 	}
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2334*4882a593Smuzhiyun 	if (!skb) {
2335*4882a593Smuzhiyun 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2336*4882a593Smuzhiyun 		return NULL;
2337*4882a593Smuzhiyun 	}
2338*4882a593Smuzhiyun 	dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2339*4882a593Smuzhiyun 			 skb_headlen(skb), DMA_TO_DEVICE);
2340*4882a593Smuzhiyun 	frg_cnt = skb_shinfo(skb)->nr_frags;
2341*4882a593Smuzhiyun 	if (frg_cnt) {
2342*4882a593Smuzhiyun 		txds++;
2343*4882a593Smuzhiyun 		for (j = 0; j < frg_cnt; j++, txds++) {
2344*4882a593Smuzhiyun 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2345*4882a593Smuzhiyun 			if (!txds->Buffer_Pointer)
2346*4882a593Smuzhiyun 				break;
2347*4882a593Smuzhiyun 			dma_unmap_page(&nic->pdev->dev,
2348*4882a593Smuzhiyun 				       (dma_addr_t)txds->Buffer_Pointer,
2349*4882a593Smuzhiyun 				       skb_frag_size(frag), DMA_TO_DEVICE);
2350*4882a593Smuzhiyun 		}
2351*4882a593Smuzhiyun 	}
2352*4882a593Smuzhiyun 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2353*4882a593Smuzhiyun 	return skb;
2354*4882a593Smuzhiyun }
2355*4882a593Smuzhiyun 
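/*
 * Host_Control is a per-descriptor 64-bit scratch field that the
 * hardware carries but never interprets; the driver round-trips the
 * skb pointer through it. A minimal sketch of the store/load pair:
 *
 *	txdp->Host_Control = (unsigned long)skb;	// at xmit time
 *	skb = (struct sk_buff *)(unsigned long)txdp->Host_Control;
 */
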
2356*4882a593Smuzhiyun /**
2357*4882a593Smuzhiyun  *  free_tx_buffers - Free all queued Tx buffers
2358*4882a593Smuzhiyun  *  @nic : device private variable.
2359*4882a593Smuzhiyun  *  Description:
2360*4882a593Smuzhiyun  *  Free all queued Tx buffers.
2361*4882a593Smuzhiyun  *  Return Value: void
2362*4882a593Smuzhiyun  */
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun static void free_tx_buffers(struct s2io_nic *nic)
2365*4882a593Smuzhiyun {
2366*4882a593Smuzhiyun 	struct net_device *dev = nic->dev;
2367*4882a593Smuzhiyun 	struct sk_buff *skb;
2368*4882a593Smuzhiyun 	struct TxD *txdp;
2369*4882a593Smuzhiyun 	int i, j;
2370*4882a593Smuzhiyun 	int cnt = 0;
2371*4882a593Smuzhiyun 	struct config_param *config = &nic->config;
2372*4882a593Smuzhiyun 	struct mac_info *mac_control = &nic->mac_control;
2373*4882a593Smuzhiyun 	struct stat_block *stats = mac_control->stats_info;
2374*4882a593Smuzhiyun 	struct swStat *swstats = &stats->sw_stat;
2375*4882a593Smuzhiyun 
2376*4882a593Smuzhiyun 	for (i = 0; i < config->tx_fifo_num; i++) {
2377*4882a593Smuzhiyun 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2378*4882a593Smuzhiyun 		struct fifo_info *fifo = &mac_control->fifos[i];
2379*4882a593Smuzhiyun 		unsigned long flags;
2380*4882a593Smuzhiyun 
2381*4882a593Smuzhiyun 		spin_lock_irqsave(&fifo->tx_lock, flags);
2382*4882a593Smuzhiyun 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2383*4882a593Smuzhiyun 			txdp = fifo->list_info[j].list_virt_addr;
2384*4882a593Smuzhiyun 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2385*4882a593Smuzhiyun 			if (skb) {
2386*4882a593Smuzhiyun 				swstats->mem_freed += skb->truesize;
2387*4882a593Smuzhiyun 				dev_kfree_skb(skb);
2388*4882a593Smuzhiyun 				cnt++;
2389*4882a593Smuzhiyun 			}
2390*4882a593Smuzhiyun 		}
2391*4882a593Smuzhiyun 		DBG_PRINT(INTR_DBG,
2392*4882a593Smuzhiyun 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2393*4882a593Smuzhiyun 			  dev->name, cnt, i);
2394*4882a593Smuzhiyun 		fifo->tx_curr_get_info.offset = 0;
2395*4882a593Smuzhiyun 		fifo->tx_curr_put_info.offset = 0;
2396*4882a593Smuzhiyun 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2397*4882a593Smuzhiyun 	}
2398*4882a593Smuzhiyun }
2399*4882a593Smuzhiyun 
2400*4882a593Smuzhiyun /**
2401*4882a593Smuzhiyun  *   stop_nic -  To stop the nic
2402*4882a593Smuzhiyun  *   @nic : device private variable.
2403*4882a593Smuzhiyun  *   Description:
2404*4882a593Smuzhiyun  *   This function does exactly the opposite of what the start_nic()
2405*4882a593Smuzhiyun  *   function does. This function is called to stop the device.
2406*4882a593Smuzhiyun  *   Return Value:
2407*4882a593Smuzhiyun  *   void.
2408*4882a593Smuzhiyun  */
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun static void stop_nic(struct s2io_nic *nic)
2411*4882a593Smuzhiyun {
2412*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2413*4882a593Smuzhiyun 	register u64 val64 = 0;
2414*4882a593Smuzhiyun 	u16 interruptible;
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 	/*  Disable all interrupts */
2417*4882a593Smuzhiyun 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2418*4882a593Smuzhiyun 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2419*4882a593Smuzhiyun 	interruptible |= TX_PIC_INTR;
2420*4882a593Smuzhiyun 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2423*4882a593Smuzhiyun 	val64 = readq(&bar0->adapter_control);
2424*4882a593Smuzhiyun 	val64 &= ~(ADAPTER_CNTL_EN);
2425*4882a593Smuzhiyun 	writeq(val64, &bar0->adapter_control);
2426*4882a593Smuzhiyun }
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun /**
2429*4882a593Smuzhiyun  *  fill_rx_buffers - Allocates the Rx side skbs
2430*4882a593Smuzhiyun  *  @nic : device private variable.
2431*4882a593Smuzhiyun  *  @ring: per ring structure
2432*4882a593Smuzhiyun  *  @from_card_up: If this is true, we will map the buffer to get
2433*4882a593Smuzhiyun  *     the dma address for buf0 and buf1 to give it to the card.
2434*4882a593Smuzhiyun  *     Else we will sync the already mapped buffer to give it to the card.
2435*4882a593Smuzhiyun  *  Description:
2436*4882a593Smuzhiyun  *  The function allocates Rx side skbs and puts the physical
2437*4882a593Smuzhiyun  *  address of these buffers into the RxD buffer pointers, so that the NIC
2438*4882a593Smuzhiyun  *  can DMA the received frame into these locations.
2439*4882a593Smuzhiyun  *  The NIC supports 3 receive modes, viz
2440*4882a593Smuzhiyun  *  1. single buffer,
2441*4882a593Smuzhiyun  *  2. three buffer and
2442*4882a593Smuzhiyun  *  3. five buffer modes.
2443*4882a593Smuzhiyun  *  Each mode defines how many fragments the received frame will be split
2444*4882a593Smuzhiyun  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2445*4882a593Smuzhiyun  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2446*4882a593Smuzhiyun  *  is split into 3 fragments. As of now only single buffer mode is
2447*4882a593Smuzhiyun  *  is split into 3 fragments. As of now only the single and two buffer
2448*4882a593Smuzhiyun  *  modes are supported.
2449*4882a593Smuzhiyun  *  SUCCESS on success or an appropriate -ve value on failure.
2450*4882a593Smuzhiyun  */
2451*4882a593Smuzhiyun static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2452*4882a593Smuzhiyun 			   int from_card_up)
2453*4882a593Smuzhiyun {
2454*4882a593Smuzhiyun 	struct sk_buff *skb;
2455*4882a593Smuzhiyun 	struct RxD_t *rxdp;
2456*4882a593Smuzhiyun 	int off, size, block_no, block_no1;
2457*4882a593Smuzhiyun 	u32 alloc_tab = 0;
2458*4882a593Smuzhiyun 	u32 alloc_cnt;
2459*4882a593Smuzhiyun 	u64 tmp;
2460*4882a593Smuzhiyun 	struct buffAdd *ba;
2461*4882a593Smuzhiyun 	struct RxD_t *first_rxdp = NULL;
2462*4882a593Smuzhiyun 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2463*4882a593Smuzhiyun 	struct RxD1 *rxdp1;
2464*4882a593Smuzhiyun 	struct RxD3 *rxdp3;
2465*4882a593Smuzhiyun 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	block_no1 = ring->rx_curr_get_info.block_index;
2470*4882a593Smuzhiyun 	while (alloc_tab < alloc_cnt) {
2471*4882a593Smuzhiyun 		block_no = ring->rx_curr_put_info.block_index;
2472*4882a593Smuzhiyun 
2473*4882a593Smuzhiyun 		off = ring->rx_curr_put_info.offset;
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 		if ((block_no == block_no1) &&
2478*4882a593Smuzhiyun 		    (off == ring->rx_curr_get_info.offset) &&
2479*4882a593Smuzhiyun 		    (rxdp->Host_Control)) {
2480*4882a593Smuzhiyun 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2481*4882a593Smuzhiyun 				  ring->dev->name);
2482*4882a593Smuzhiyun 			goto end;
2483*4882a593Smuzhiyun 		}
2484*4882a593Smuzhiyun 		if (off && (off == ring->rxd_count)) {
2485*4882a593Smuzhiyun 			ring->rx_curr_put_info.block_index++;
2486*4882a593Smuzhiyun 			if (ring->rx_curr_put_info.block_index ==
2487*4882a593Smuzhiyun 			    ring->block_count)
2488*4882a593Smuzhiyun 				ring->rx_curr_put_info.block_index = 0;
2489*4882a593Smuzhiyun 			block_no = ring->rx_curr_put_info.block_index;
2490*4882a593Smuzhiyun 			off = 0;
2491*4882a593Smuzhiyun 			ring->rx_curr_put_info.offset = off;
2492*4882a593Smuzhiyun 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2493*4882a593Smuzhiyun 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2494*4882a593Smuzhiyun 				  ring->dev->name, rxdp);
2495*4882a593Smuzhiyun 
2496*4882a593Smuzhiyun 		}
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2499*4882a593Smuzhiyun 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2500*4882a593Smuzhiyun 		     (rxdp->Control_2 & s2BIT(0)))) {
2501*4882a593Smuzhiyun 			ring->rx_curr_put_info.offset = off;
2502*4882a593Smuzhiyun 			goto end;
2503*4882a593Smuzhiyun 		}
2504*4882a593Smuzhiyun 		/* calculate size of skb based on ring mode */
2505*4882a593Smuzhiyun 		size = ring->mtu +
2506*4882a593Smuzhiyun 			HEADER_ETHERNET_II_802_3_SIZE +
2507*4882a593Smuzhiyun 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2508*4882a593Smuzhiyun 		if (ring->rxd_mode == RXD_MODE_1)
2509*4882a593Smuzhiyun 			size += NET_IP_ALIGN;
2510*4882a593Smuzhiyun 		else
2511*4882a593Smuzhiyun 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 		/* allocate skb */
2514*4882a593Smuzhiyun 		skb = netdev_alloc_skb(nic->dev, size);
2515*4882a593Smuzhiyun 		if (!skb) {
2516*4882a593Smuzhiyun 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2517*4882a593Smuzhiyun 				  ring->dev->name);
2518*4882a593Smuzhiyun 			if (first_rxdp) {
2519*4882a593Smuzhiyun 				dma_wmb();
2520*4882a593Smuzhiyun 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2521*4882a593Smuzhiyun 			}
2522*4882a593Smuzhiyun 			swstats->mem_alloc_fail_cnt++;
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun 			return -ENOMEM ;
2525*4882a593Smuzhiyun 		}
2526*4882a593Smuzhiyun 		swstats->mem_allocated += skb->truesize;
2527*4882a593Smuzhiyun 
2528*4882a593Smuzhiyun 		if (ring->rxd_mode == RXD_MODE_1) {
2529*4882a593Smuzhiyun 			/* 1 buffer mode - normal operation mode */
2530*4882a593Smuzhiyun 			rxdp1 = (struct RxD1 *)rxdp;
2531*4882a593Smuzhiyun 			memset(rxdp, 0, sizeof(struct RxD1));
2532*4882a593Smuzhiyun 			skb_reserve(skb, NET_IP_ALIGN);
2533*4882a593Smuzhiyun 			rxdp1->Buffer0_ptr =
2534*4882a593Smuzhiyun 				dma_map_single(&ring->pdev->dev, skb->data,
2535*4882a593Smuzhiyun 					       size - NET_IP_ALIGN,
2536*4882a593Smuzhiyun 					       DMA_FROM_DEVICE);
2537*4882a593Smuzhiyun 			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
2538*4882a593Smuzhiyun 				goto pci_map_failed;
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 			rxdp->Control_2 =
2541*4882a593Smuzhiyun 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2542*4882a593Smuzhiyun 			rxdp->Host_Control = (unsigned long)skb;
2543*4882a593Smuzhiyun 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2544*4882a593Smuzhiyun 			/*
2545*4882a593Smuzhiyun 			 * 2 buffer mode provides 128-byte
2546*4882a593Smuzhiyun 			 * aligned receive buffers.
2548*4882a593Smuzhiyun 			 */
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 			rxdp3 = (struct RxD3 *)rxdp;
2551*4882a593Smuzhiyun 			/* save buffer pointers to avoid frequent dma mapping */
2552*4882a593Smuzhiyun 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2553*4882a593Smuzhiyun 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2554*4882a593Smuzhiyun 			memset(rxdp, 0, sizeof(struct RxD3));
2555*4882a593Smuzhiyun 			/* restore the buffer pointers for dma sync*/
2556*4882a593Smuzhiyun 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2557*4882a593Smuzhiyun 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2558*4882a593Smuzhiyun 
2559*4882a593Smuzhiyun 			ba = &ring->ba[block_no][off];
2560*4882a593Smuzhiyun 			skb_reserve(skb, BUF0_LEN);
2561*4882a593Smuzhiyun 			tmp = (u64)(unsigned long)skb->data;
2562*4882a593Smuzhiyun 			tmp += ALIGN_SIZE;
2563*4882a593Smuzhiyun 			tmp &= ~ALIGN_SIZE;
2564*4882a593Smuzhiyun 			skb->data = (void *) (unsigned long)tmp;
2565*4882a593Smuzhiyun 			skb_reset_tail_pointer(skb);
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 			if (from_card_up) {
2568*4882a593Smuzhiyun 				rxdp3->Buffer0_ptr =
2569*4882a593Smuzhiyun 					dma_map_single(&ring->pdev->dev,
2570*4882a593Smuzhiyun 						       ba->ba_0, BUF0_LEN,
2571*4882a593Smuzhiyun 						       DMA_FROM_DEVICE);
2572*4882a593Smuzhiyun 				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
2573*4882a593Smuzhiyun 					goto pci_map_failed;
2574*4882a593Smuzhiyun 			} else
2575*4882a593Smuzhiyun 				dma_sync_single_for_device(&ring->pdev->dev,
2576*4882a593Smuzhiyun 							   (dma_addr_t)rxdp3->Buffer0_ptr,
2577*4882a593Smuzhiyun 							   BUF0_LEN,
2578*4882a593Smuzhiyun 							   DMA_FROM_DEVICE);
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2581*4882a593Smuzhiyun 			if (ring->rxd_mode == RXD_MODE_3B) {
2582*4882a593Smuzhiyun 				/* Two buffer mode */
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 				/*
2585*4882a593Smuzhiyun 				 * Buffer2 will have L3/L4 header plus
2586*4882a593Smuzhiyun 				 * L4 payload
2587*4882a593Smuzhiyun 				 */
2588*4882a593Smuzhiyun 				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
2589*4882a593Smuzhiyun 								    skb->data,
2590*4882a593Smuzhiyun 								    ring->mtu + 4,
2591*4882a593Smuzhiyun 								    DMA_FROM_DEVICE);
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
2594*4882a593Smuzhiyun 					goto pci_map_failed;
2595*4882a593Smuzhiyun 
2596*4882a593Smuzhiyun 				if (from_card_up) {
2597*4882a593Smuzhiyun 					rxdp3->Buffer1_ptr =
2598*4882a593Smuzhiyun 						dma_map_single(&ring->pdev->dev,
2599*4882a593Smuzhiyun 							       ba->ba_1,
2600*4882a593Smuzhiyun 							       BUF1_LEN,
2601*4882a593Smuzhiyun 							       DMA_FROM_DEVICE);
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun 					if (dma_mapping_error(&nic->pdev->dev,
2604*4882a593Smuzhiyun 							      rxdp3->Buffer1_ptr)) {
2605*4882a593Smuzhiyun 						dma_unmap_single(&ring->pdev->dev,
2606*4882a593Smuzhiyun 								 (dma_addr_t)(unsigned long)
2607*4882a593Smuzhiyun 								 skb->data,
2608*4882a593Smuzhiyun 								 ring->mtu + 4,
2609*4882a593Smuzhiyun 								 DMA_FROM_DEVICE);
2610*4882a593Smuzhiyun 						goto pci_map_failed;
2611*4882a593Smuzhiyun 					}
2612*4882a593Smuzhiyun 				}
2613*4882a593Smuzhiyun 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2614*4882a593Smuzhiyun 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2615*4882a593Smuzhiyun 					(ring->mtu + 4);
2616*4882a593Smuzhiyun 			}
2617*4882a593Smuzhiyun 			rxdp->Control_2 |= s2BIT(0);
2618*4882a593Smuzhiyun 			rxdp->Host_Control = (unsigned long) (skb);
2619*4882a593Smuzhiyun 		}
2620*4882a593Smuzhiyun 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2621*4882a593Smuzhiyun 			rxdp->Control_1 |= RXD_OWN_XENA;
2622*4882a593Smuzhiyun 		off++;
2623*4882a593Smuzhiyun 		if (off == (ring->rxd_count + 1))
2624*4882a593Smuzhiyun 			off = 0;
2625*4882a593Smuzhiyun 		ring->rx_curr_put_info.offset = off;
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 		rxdp->Control_2 |= SET_RXD_MARKER;
2628*4882a593Smuzhiyun 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2629*4882a593Smuzhiyun 			if (first_rxdp) {
2630*4882a593Smuzhiyun 				dma_wmb();
2631*4882a593Smuzhiyun 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2632*4882a593Smuzhiyun 			}
2633*4882a593Smuzhiyun 			first_rxdp = rxdp;
2634*4882a593Smuzhiyun 		}
2635*4882a593Smuzhiyun 		ring->rx_bufs_left += 1;
2636*4882a593Smuzhiyun 		alloc_tab++;
2637*4882a593Smuzhiyun 	}
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun end:
2640*4882a593Smuzhiyun 	/* Transfer ownership of first descriptor to adapter just before
2641*4882a593Smuzhiyun 	 * exiting. Before that, use memory barrier so that ownership
2642*4882a593Smuzhiyun 	 * and other fields are seen by adapter correctly.
2643*4882a593Smuzhiyun 	 */
2644*4882a593Smuzhiyun 	if (first_rxdp) {
2645*4882a593Smuzhiyun 		dma_wmb();
2646*4882a593Smuzhiyun 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2647*4882a593Smuzhiyun 	}
2648*4882a593Smuzhiyun 
2649*4882a593Smuzhiyun 	return SUCCESS;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun pci_map_failed:
2652*4882a593Smuzhiyun 	swstats->pci_map_fail_cnt++;
2653*4882a593Smuzhiyun 	swstats->mem_freed += skb->truesize;
2654*4882a593Smuzhiyun 	dev_kfree_skb_irq(skb);
2655*4882a593Smuzhiyun 	return -ENOMEM;
2656*4882a593Smuzhiyun }
2657*4882a593Smuzhiyun 
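/*
 * Ownership is the last thing fill_rx_buffers() hands over: all RxD
 * fields of a batch are written first, dma_wmb() orders those stores,
 * and only then is RXD_OWN_XENA flipped on the batch's first
 * descriptor, so the NIC can never observe a half-filled RxD. Sketch:
 *
 *	rxdp->Buffer0_ptr = dma_addr;		// fill every field...
 *	rxdp->Control_2 |= SET_RXD_MARKER;
 *	dma_wmb();				// fields before own bit
 *	first_rxdp->Control_1 |= RXD_OWN_XENA;	// adapter may use it now
 */
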
2658*4882a593Smuzhiyun static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2659*4882a593Smuzhiyun {
2660*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
2661*4882a593Smuzhiyun 	int j;
2662*4882a593Smuzhiyun 	struct sk_buff *skb;
2663*4882a593Smuzhiyun 	struct RxD_t *rxdp;
2664*4882a593Smuzhiyun 	struct RxD1 *rxdp1;
2665*4882a593Smuzhiyun 	struct RxD3 *rxdp3;
2666*4882a593Smuzhiyun 	struct mac_info *mac_control = &sp->mac_control;
2667*4882a593Smuzhiyun 	struct stat_block *stats = mac_control->stats_info;
2668*4882a593Smuzhiyun 	struct swStat *swstats = &stats->sw_stat;
2669*4882a593Smuzhiyun 
2670*4882a593Smuzhiyun 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2671*4882a593Smuzhiyun 		rxdp = mac_control->rings[ring_no].
2672*4882a593Smuzhiyun 			rx_blocks[blk].rxds[j].virt_addr;
2673*4882a593Smuzhiyun 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2674*4882a593Smuzhiyun 		if (!skb)
2675*4882a593Smuzhiyun 			continue;
2676*4882a593Smuzhiyun 		if (sp->rxd_mode == RXD_MODE_1) {
2677*4882a593Smuzhiyun 			rxdp1 = (struct RxD1 *)rxdp;
2678*4882a593Smuzhiyun 			dma_unmap_single(&sp->pdev->dev,
2679*4882a593Smuzhiyun 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2680*4882a593Smuzhiyun 					 dev->mtu +
2681*4882a593Smuzhiyun 					 HEADER_ETHERNET_II_802_3_SIZE +
2682*4882a593Smuzhiyun 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2683*4882a593Smuzhiyun 					 DMA_FROM_DEVICE);
2684*4882a593Smuzhiyun 			memset(rxdp, 0, sizeof(struct RxD1));
2685*4882a593Smuzhiyun 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2686*4882a593Smuzhiyun 			rxdp3 = (struct RxD3 *)rxdp;
2687*4882a593Smuzhiyun 			dma_unmap_single(&sp->pdev->dev,
2688*4882a593Smuzhiyun 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2689*4882a593Smuzhiyun 					 BUF0_LEN, DMA_FROM_DEVICE);
2690*4882a593Smuzhiyun 			dma_unmap_single(&sp->pdev->dev,
2691*4882a593Smuzhiyun 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2692*4882a593Smuzhiyun 					 BUF1_LEN, DMA_FROM_DEVICE);
2693*4882a593Smuzhiyun 			dma_unmap_single(&sp->pdev->dev,
2694*4882a593Smuzhiyun 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2695*4882a593Smuzhiyun 					 dev->mtu + 4, DMA_FROM_DEVICE);
2696*4882a593Smuzhiyun 			memset(rxdp, 0, sizeof(struct RxD3));
2697*4882a593Smuzhiyun 		}
2698*4882a593Smuzhiyun 		swstats->mem_freed += skb->truesize;
2699*4882a593Smuzhiyun 		dev_kfree_skb(skb);
2700*4882a593Smuzhiyun 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2701*4882a593Smuzhiyun 	}
2702*4882a593Smuzhiyun }
2703*4882a593Smuzhiyun 
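/*
 * Teardown mirrors the fill path: every buffer mapped with
 * dma_map_single() is released by a dma_unmap_single() of the same
 * length and direction before its skb is freed. Sketch for 1-buffer
 * mode, where mapped_len stands for the MTU-derived length computed
 * at map time:
 *
 *	dma_unmap_single(&sp->pdev->dev, rxdp1->Buffer0_ptr,
 *			 mapped_len, DMA_FROM_DEVICE);
 *	dev_kfree_skb(skb);	// only after the unmap
 */
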
2704*4882a593Smuzhiyun /**
2705*4882a593Smuzhiyun  *  free_rx_buffers - Frees all Rx buffers
2706*4882a593Smuzhiyun  *  @sp: device private variable.
2707*4882a593Smuzhiyun  *  Description:
2708*4882a593Smuzhiyun  *  This function will free all Rx buffers allocated by host.
2709*4882a593Smuzhiyun  *  Return Value:
2710*4882a593Smuzhiyun  *  NONE.
2711*4882a593Smuzhiyun  */
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun static void free_rx_buffers(struct s2io_nic *sp)
2714*4882a593Smuzhiyun {
2715*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
2716*4882a593Smuzhiyun 	int i, blk = 0, buf_cnt = 0;
2717*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
2718*4882a593Smuzhiyun 	struct mac_info *mac_control = &sp->mac_control;
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
2721*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
2722*4882a593Smuzhiyun 
2723*4882a593Smuzhiyun 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2724*4882a593Smuzhiyun 			free_rxd_blk(sp, i, blk);
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 		ring->rx_curr_put_info.block_index = 0;
2727*4882a593Smuzhiyun 		ring->rx_curr_get_info.block_index = 0;
2728*4882a593Smuzhiyun 		ring->rx_curr_put_info.offset = 0;
2729*4882a593Smuzhiyun 		ring->rx_curr_get_info.offset = 0;
2730*4882a593Smuzhiyun 		ring->rx_bufs_left = 0;
2731*4882a593Smuzhiyun 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2732*4882a593Smuzhiyun 			  dev->name, buf_cnt, i);
2733*4882a593Smuzhiyun 	}
2734*4882a593Smuzhiyun }
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2737*4882a593Smuzhiyun {
2738*4882a593Smuzhiyun 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2739*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2740*4882a593Smuzhiyun 			  ring->dev->name);
2741*4882a593Smuzhiyun 	}
2742*4882a593Smuzhiyun 	return 0;
2743*4882a593Smuzhiyun }
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun /**
2746*4882a593Smuzhiyun  * s2io_poll - Rx interrupt handler for NAPI support
2747*4882a593Smuzhiyun  * s2io_poll_msix - Rx interrupt handler for NAPI support
2748*4882a593Smuzhiyun  * @budget : The number of packets that were budgeted to be processed
2749*4882a593Smuzhiyun  * during one pass through the 'Poll' function.
2750*4882a593Smuzhiyun  * Description:
2751*4882a593Smuzhiyun  * Comes into picture only if NAPI support has been incorporated. It does
2752*4882a593Smuzhiyun  * the same thing that rx_intr_handler does, but not in an interrupt context,
2753*4882a593Smuzhiyun  * and it will process only a given number of packets.
2754*4882a593Smuzhiyun  * Return value:
2755*4882a593Smuzhiyun  * 0 on success and 1 if there are No Rx packets to be processed.
2756*4882a593Smuzhiyun  * The number of packets processed during this poll.
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun static int s2io_poll_msix(struct napi_struct *napi, int budget)
2759*4882a593Smuzhiyun {
2760*4882a593Smuzhiyun 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2761*4882a593Smuzhiyun 	struct net_device *dev = ring->dev;
2762*4882a593Smuzhiyun 	int pkts_processed = 0;
2763*4882a593Smuzhiyun 	u8 __iomem *addr = NULL;
2764*4882a593Smuzhiyun 	u8 val8 = 0;
2765*4882a593Smuzhiyun 	struct s2io_nic *nic = netdev_priv(dev);
2766*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2767*4882a593Smuzhiyun 	int budget_org = budget;
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun 	if (unlikely(!is_s2io_card_up(nic)))
2770*4882a593Smuzhiyun 		return 0;
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun 	pkts_processed = rx_intr_handler(ring, budget);
2773*4882a593Smuzhiyun 	s2io_chk_rx_buffers(nic, ring);
2774*4882a593Smuzhiyun 
2775*4882a593Smuzhiyun 	if (pkts_processed < budget_org) {
2776*4882a593Smuzhiyun 		napi_complete_done(napi, pkts_processed);
2777*4882a593Smuzhiyun 		/*Re Enable MSI-Rx Vector*/
2778*4882a593Smuzhiyun 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2779*4882a593Smuzhiyun 		addr += 7 - ring->ring_no;
2780*4882a593Smuzhiyun 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2781*4882a593Smuzhiyun 		writeb(val8, addr);
2782*4882a593Smuzhiyun 		val8 = readb(addr);
2783*4882a593Smuzhiyun 	}
2784*4882a593Smuzhiyun 	return pkts_processed;
2785*4882a593Smuzhiyun }
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun static int s2io_poll_inta(struct napi_struct *napi, int budget)
2788*4882a593Smuzhiyun {
2789*4882a593Smuzhiyun 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2790*4882a593Smuzhiyun 	int pkts_processed = 0;
2791*4882a593Smuzhiyun 	int ring_pkts_processed, i;
2792*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2793*4882a593Smuzhiyun 	int budget_org = budget;
2794*4882a593Smuzhiyun 	struct config_param *config = &nic->config;
2795*4882a593Smuzhiyun 	struct mac_info *mac_control = &nic->mac_control;
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	if (unlikely(!is_s2io_card_up(nic)))
2798*4882a593Smuzhiyun 		return 0;
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
2801*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
2802*4882a593Smuzhiyun 		ring_pkts_processed = rx_intr_handler(ring, budget);
2803*4882a593Smuzhiyun 		s2io_chk_rx_buffers(nic, ring);
2804*4882a593Smuzhiyun 		pkts_processed += ring_pkts_processed;
2805*4882a593Smuzhiyun 		budget -= ring_pkts_processed;
2806*4882a593Smuzhiyun 		if (budget <= 0)
2807*4882a593Smuzhiyun 			break;
2808*4882a593Smuzhiyun 	}
2809*4882a593Smuzhiyun 	if (pkts_processed < budget_org) {
2810*4882a593Smuzhiyun 		napi_complete_done(napi, pkts_processed);
2811*4882a593Smuzhiyun 		/* Re enable the Rx interrupts for the ring */
2812*4882a593Smuzhiyun 		writeq(0, &bar0->rx_traffic_mask);
2813*4882a593Smuzhiyun 		readl(&bar0->rx_traffic_mask);
2814*4882a593Smuzhiyun 	}
2815*4882a593Smuzhiyun 	return pkts_processed;
2816*4882a593Smuzhiyun }
2817*4882a593Smuzhiyun 
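/*
 * Both poll routines above follow the standard NAPI contract: consume
 * at most `budget` packets, and only when fewer were processed call
 * napi_complete_done() and unmask the interrupt source. A minimal
 * sketch, where process_rx() and reenable_rx_irq() stand in for
 * rx_intr_handler() and the register writes above:
 *
 *	static int poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = process_rx(ring, budget);
 *
 *		if (done < budget) {
 *			napi_complete_done(napi, done);
 *			reenable_rx_irq(ring);	// device-specific unmask
 *		}
 *		return done;
 *	}
 */
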
2818*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
2819*4882a593Smuzhiyun /**
2820*4882a593Smuzhiyun  * s2io_netpoll - netpoll event handler entry point
2821*4882a593Smuzhiyun  * @dev : pointer to the device structure.
2822*4882a593Smuzhiyun  * Description:
2823*4882a593Smuzhiyun  * 	This function will be called by the upper layer to check for events on
2824*4882a593Smuzhiyun  * the interface in situations where interrupts are disabled. It is used for
2825*4882a593Smuzhiyun  * specific in-kernel networking tasks, such as remote consoles and kernel
2826*4882a593Smuzhiyun  * debugging over the network (for example, netdump in Red Hat).
2827*4882a593Smuzhiyun  */
2828*4882a593Smuzhiyun static void s2io_netpoll(struct net_device *dev)
2829*4882a593Smuzhiyun {
2830*4882a593Smuzhiyun 	struct s2io_nic *nic = netdev_priv(dev);
2831*4882a593Smuzhiyun 	const int irq = nic->pdev->irq;
2832*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2833*4882a593Smuzhiyun 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2834*4882a593Smuzhiyun 	int i;
2835*4882a593Smuzhiyun 	struct config_param *config = &nic->config;
2836*4882a593Smuzhiyun 	struct mac_info *mac_control = &nic->mac_control;
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 	if (pci_channel_offline(nic->pdev))
2839*4882a593Smuzhiyun 		return;
2840*4882a593Smuzhiyun 
2841*4882a593Smuzhiyun 	disable_irq(irq);
2842*4882a593Smuzhiyun 
2843*4882a593Smuzhiyun 	writeq(val64, &bar0->rx_traffic_int);
2844*4882a593Smuzhiyun 	writeq(val64, &bar0->tx_traffic_int);
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun 	/* we need to free up the transmitted skbufs or else netpoll will
2847*4882a593Smuzhiyun 	 * run out of skbs and will fail, and eventually a netpoll application such
2848*4882a593Smuzhiyun 	 * as netdump will fail.
2849*4882a593Smuzhiyun 	 */
2850*4882a593Smuzhiyun 	for (i = 0; i < config->tx_fifo_num; i++)
2851*4882a593Smuzhiyun 		tx_intr_handler(&mac_control->fifos[i]);
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 	/* check for received packet and indicate up to network */
2854*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
2855*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun 		rx_intr_handler(ring, 0);
2858*4882a593Smuzhiyun 	}
2859*4882a593Smuzhiyun 
2860*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
2861*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
2862*4882a593Smuzhiyun 
2863*4882a593Smuzhiyun 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2864*4882a593Smuzhiyun 			DBG_PRINT(INFO_DBG,
2865*4882a593Smuzhiyun 				  "%s: Out of memory in Rx Netpoll!!\n",
2866*4882a593Smuzhiyun 				  dev->name);
2867*4882a593Smuzhiyun 			break;
2868*4882a593Smuzhiyun 		}
2869*4882a593Smuzhiyun 	}
2870*4882a593Smuzhiyun 	enable_irq(irq);
2871*4882a593Smuzhiyun }
2872*4882a593Smuzhiyun #endif
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun /**
2875*4882a593Smuzhiyun  *  rx_intr_handler - Rx interrupt handler
2876*4882a593Smuzhiyun  *  @ring_data: per ring structure.
2877*4882a593Smuzhiyun  *  @budget: budget for napi processing.
2878*4882a593Smuzhiyun  *  Description:
2879*4882a593Smuzhiyun  *  If the interrupt is because of a received frame or if the
2880*4882a593Smuzhiyun  *  receive ring contains fresh, as yet unprocessed frames, this function is
2881*4882a593Smuzhiyun  *  called. It picks out the RxD at which place the last Rx processing had
2882*4882a593Smuzhiyun  *  stopped and sends the skb to the OSM's Rx handler and then increments
2883*4882a593Smuzhiyun  *  the offset.
2884*4882a593Smuzhiyun  *  Return Value:
2885*4882a593Smuzhiyun  *  No. of napi packets processed.
2886*4882a593Smuzhiyun  */
2887*4882a593Smuzhiyun static int rx_intr_handler(struct ring_info *ring_data, int budget)
2888*4882a593Smuzhiyun {
2889*4882a593Smuzhiyun 	int get_block, put_block;
2890*4882a593Smuzhiyun 	struct rx_curr_get_info get_info, put_info;
2891*4882a593Smuzhiyun 	struct RxD_t *rxdp;
2892*4882a593Smuzhiyun 	struct sk_buff *skb;
2893*4882a593Smuzhiyun 	int pkt_cnt = 0, napi_pkts = 0;
2894*4882a593Smuzhiyun 	int i;
2895*4882a593Smuzhiyun 	struct RxD1 *rxdp1;
2896*4882a593Smuzhiyun 	struct RxD3 *rxdp3;
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	if (budget <= 0)
2899*4882a593Smuzhiyun 		return napi_pkts;
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun 	get_info = ring_data->rx_curr_get_info;
2902*4882a593Smuzhiyun 	get_block = get_info.block_index;
2903*4882a593Smuzhiyun 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2904*4882a593Smuzhiyun 	put_block = put_info.block_index;
2905*4882a593Smuzhiyun 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2906*4882a593Smuzhiyun 
2907*4882a593Smuzhiyun 	while (RXD_IS_UP2DT(rxdp)) {
2908*4882a593Smuzhiyun 		/*
2909*4882a593Smuzhiyun 		 * If you are next to the put index then it's a
2910*4882a593Smuzhiyun 		 * FIFO full condition
2911*4882a593Smuzhiyun 		 */
2912*4882a593Smuzhiyun 		if ((get_block == put_block) &&
2913*4882a593Smuzhiyun 		    (get_info.offset + 1) == put_info.offset) {
2914*4882a593Smuzhiyun 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2915*4882a593Smuzhiyun 				  ring_data->dev->name);
2916*4882a593Smuzhiyun 			break;
2917*4882a593Smuzhiyun 		}
2918*4882a593Smuzhiyun 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2919*4882a593Smuzhiyun 		if (skb == NULL) {
2920*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2921*4882a593Smuzhiyun 				  ring_data->dev->name);
2922*4882a593Smuzhiyun 			return 0;
2923*4882a593Smuzhiyun 		}
2924*4882a593Smuzhiyun 		if (ring_data->rxd_mode == RXD_MODE_1) {
2925*4882a593Smuzhiyun 			rxdp1 = (struct RxD1 *)rxdp;
2926*4882a593Smuzhiyun 			dma_unmap_single(&ring_data->pdev->dev,
2927*4882a593Smuzhiyun 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2928*4882a593Smuzhiyun 					 ring_data->mtu +
2929*4882a593Smuzhiyun 					 HEADER_ETHERNET_II_802_3_SIZE +
2930*4882a593Smuzhiyun 					 HEADER_802_2_SIZE +
2931*4882a593Smuzhiyun 					 HEADER_SNAP_SIZE,
2932*4882a593Smuzhiyun 					 DMA_FROM_DEVICE);
2933*4882a593Smuzhiyun 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2934*4882a593Smuzhiyun 			rxdp3 = (struct RxD3 *)rxdp;
2935*4882a593Smuzhiyun 			dma_sync_single_for_cpu(&ring_data->pdev->dev,
2936*4882a593Smuzhiyun 						(dma_addr_t)rxdp3->Buffer0_ptr,
2937*4882a593Smuzhiyun 						BUF0_LEN, DMA_FROM_DEVICE);
2938*4882a593Smuzhiyun 			dma_unmap_single(&ring_data->pdev->dev,
2939*4882a593Smuzhiyun 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2940*4882a593Smuzhiyun 					 ring_data->mtu + 4, DMA_FROM_DEVICE);
2941*4882a593Smuzhiyun 		}
2942*4882a593Smuzhiyun 		prefetch(skb->data);
2943*4882a593Smuzhiyun 		rx_osm_handler(ring_data, rxdp);
2944*4882a593Smuzhiyun 		get_info.offset++;
2945*4882a593Smuzhiyun 		ring_data->rx_curr_get_info.offset = get_info.offset;
2946*4882a593Smuzhiyun 		rxdp = ring_data->rx_blocks[get_block].
2947*4882a593Smuzhiyun 			rxds[get_info.offset].virt_addr;
2948*4882a593Smuzhiyun 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2949*4882a593Smuzhiyun 			get_info.offset = 0;
2950*4882a593Smuzhiyun 			ring_data->rx_curr_get_info.offset = get_info.offset;
2951*4882a593Smuzhiyun 			get_block++;
2952*4882a593Smuzhiyun 			if (get_block == ring_data->block_count)
2953*4882a593Smuzhiyun 				get_block = 0;
2954*4882a593Smuzhiyun 			ring_data->rx_curr_get_info.block_index = get_block;
2955*4882a593Smuzhiyun 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2956*4882a593Smuzhiyun 		}
2957*4882a593Smuzhiyun 
2958*4882a593Smuzhiyun 		if (ring_data->nic->config.napi) {
2959*4882a593Smuzhiyun 			budget--;
2960*4882a593Smuzhiyun 			napi_pkts++;
2961*4882a593Smuzhiyun 			if (!budget)
2962*4882a593Smuzhiyun 				break;
2963*4882a593Smuzhiyun 		}
2964*4882a593Smuzhiyun 		pkt_cnt++;
2965*4882a593Smuzhiyun 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2966*4882a593Smuzhiyun 			break;
2967*4882a593Smuzhiyun 	}
2968*4882a593Smuzhiyun 	if (ring_data->lro) {
2969*4882a593Smuzhiyun 		/* Clear all LRO sessions before exiting */
2970*4882a593Smuzhiyun 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2971*4882a593Smuzhiyun 			struct lro *lro = &ring_data->lro0_n[i];
2972*4882a593Smuzhiyun 			if (lro->in_use) {
2973*4882a593Smuzhiyun 				update_L3L4_header(ring_data->nic, lro);
2974*4882a593Smuzhiyun 				queue_rx_frame(lro->parent, lro->vlan_tag);
2975*4882a593Smuzhiyun 				clear_lro_session(lro);
2976*4882a593Smuzhiyun 			}
2977*4882a593Smuzhiyun 		}
2978*4882a593Smuzhiyun 	}
2979*4882a593Smuzhiyun 	return napi_pkts;
2980*4882a593Smuzhiyun }
2981*4882a593Smuzhiyun 
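/*
 * The receive walk is a classic get-chases-put ring: RXD_IS_UP2DT()
 * tests that the adapter has handed the descriptor back to the host,
 * and the get offset wraps across blocks at the ring boundary.
 * A minimal sketch of the consume loop:
 *
 *	while (RXD_IS_UP2DT(rxdp)) {		// host owns this RxD
 *		rx_osm_handler(ring_data, rxdp);
 *		// advance get offset, wrapping block and ring indices
 *		if (napi && !--budget)
 *			break;
 *	}
 */
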
2982*4882a593Smuzhiyun /**
2983*4882a593Smuzhiyun  *  tx_intr_handler - Transmit interrupt handler
2984*4882a593Smuzhiyun  *  @fifo_data : fifo data pointer
2985*4882a593Smuzhiyun  *  Description:
2986*4882a593Smuzhiyun  *  If an interrupt was raised to indicate DMA complete of the
2987*4882a593Smuzhiyun  *  Tx packet, this function is called. It identifies the last TxD
2988*4882a593Smuzhiyun  *  whose buffer was freed and frees all skbs whose data have already been
2989*4882a593Smuzhiyun  *  DMA'ed into the NIC's internal memory.
2990*4882a593Smuzhiyun  *  Return Value:
2991*4882a593Smuzhiyun  *  NONE
2992*4882a593Smuzhiyun  */
2993*4882a593Smuzhiyun 
2994*4882a593Smuzhiyun static void tx_intr_handler(struct fifo_info *fifo_data)
2995*4882a593Smuzhiyun {
2996*4882a593Smuzhiyun 	struct s2io_nic *nic = fifo_data->nic;
2997*4882a593Smuzhiyun 	struct tx_curr_get_info get_info, put_info;
2998*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
2999*4882a593Smuzhiyun 	struct TxD *txdlp;
3000*4882a593Smuzhiyun 	int pkt_cnt = 0;
3001*4882a593Smuzhiyun 	unsigned long flags = 0;
3002*4882a593Smuzhiyun 	u8 err_mask;
3003*4882a593Smuzhiyun 	struct stat_block *stats = nic->mac_control.stats_info;
3004*4882a593Smuzhiyun 	struct swStat *swstats = &stats->sw_stat;
3005*4882a593Smuzhiyun 
3006*4882a593Smuzhiyun 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3007*4882a593Smuzhiyun 		return;
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun 	get_info = fifo_data->tx_curr_get_info;
3010*4882a593Smuzhiyun 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3011*4882a593Smuzhiyun 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3012*4882a593Smuzhiyun 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3013*4882a593Smuzhiyun 	       (get_info.offset != put_info.offset) &&
3014*4882a593Smuzhiyun 	       (txdlp->Host_Control)) {
3015*4882a593Smuzhiyun 		/* Check for TxD errors */
3016*4882a593Smuzhiyun 		if (txdlp->Control_1 & TXD_T_CODE) {
3017*4882a593Smuzhiyun 			unsigned long long err;
3018*4882a593Smuzhiyun 			err = txdlp->Control_1 & TXD_T_CODE;
3019*4882a593Smuzhiyun 			if (err & 0x1) {
3020*4882a593Smuzhiyun 				swstats->parity_err_cnt++;
3021*4882a593Smuzhiyun 			}
3022*4882a593Smuzhiyun 
3023*4882a593Smuzhiyun 			/* update t_code statistics */
3024*4882a593Smuzhiyun 			err_mask = err >> 48;
3025*4882a593Smuzhiyun 			switch (err_mask) {
3026*4882a593Smuzhiyun 			case 2:
3027*4882a593Smuzhiyun 				swstats->tx_buf_abort_cnt++;
3028*4882a593Smuzhiyun 				break;
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun 			case 3:
3031*4882a593Smuzhiyun 				swstats->tx_desc_abort_cnt++;
3032*4882a593Smuzhiyun 				break;
3033*4882a593Smuzhiyun 
3034*4882a593Smuzhiyun 			case 7:
3035*4882a593Smuzhiyun 				swstats->tx_parity_err_cnt++;
3036*4882a593Smuzhiyun 				break;
3037*4882a593Smuzhiyun 
3038*4882a593Smuzhiyun 			case 10:
3039*4882a593Smuzhiyun 				swstats->tx_link_loss_cnt++;
3040*4882a593Smuzhiyun 				break;
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun 			case 15:
3043*4882a593Smuzhiyun 				swstats->tx_list_proc_err_cnt++;
3044*4882a593Smuzhiyun 				break;
3045*4882a593Smuzhiyun 			}
3046*4882a593Smuzhiyun 		}
3047*4882a593Smuzhiyun 
3048*4882a593Smuzhiyun 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3049*4882a593Smuzhiyun 		if (skb == NULL) {
3050*4882a593Smuzhiyun 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3051*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3052*4882a593Smuzhiyun 				  __func__);
3053*4882a593Smuzhiyun 			return;
3054*4882a593Smuzhiyun 		}
3055*4882a593Smuzhiyun 		pkt_cnt++;
3056*4882a593Smuzhiyun 
3057*4882a593Smuzhiyun 		/* Updating the statistics block */
3058*4882a593Smuzhiyun 		swstats->mem_freed += skb->truesize;
3059*4882a593Smuzhiyun 		dev_consume_skb_irq(skb);
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 		get_info.offset++;
3062*4882a593Smuzhiyun 		if (get_info.offset == get_info.fifo_len + 1)
3063*4882a593Smuzhiyun 			get_info.offset = 0;
3064*4882a593Smuzhiyun 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3065*4882a593Smuzhiyun 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3066*4882a593Smuzhiyun 	}
3067*4882a593Smuzhiyun 
3068*4882a593Smuzhiyun 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3069*4882a593Smuzhiyun 
3070*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3071*4882a593Smuzhiyun }
3072*4882a593Smuzhiyun 
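/*
 * Tx completion deliberately uses a trylock: if the xmit path already
 * holds tx_lock, the handler backs off and the finished descriptors
 * are reaped on a later interrupt instead of spinning in irq context.
 * Sketch of the guard:
 *
 *	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
 *		return;		// xmit in progress; reap next time
 */
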
3073*4882a593Smuzhiyun /**
3074*4882a593Smuzhiyun  *  s2io_mdio_write - Function to write into the MDIO registers
3075*4882a593Smuzhiyun  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3076*4882a593Smuzhiyun  *  @addr     : address value
3077*4882a593Smuzhiyun  *  @value    : data value
3078*4882a593Smuzhiyun  *  @dev      : pointer to net_device structure
3079*4882a593Smuzhiyun  *  Description:
3080*4882a593Smuzhiyun  *  This function is used to write values to the MDIO registers
3081*4882a593Smuzhiyun  *  NONE
3082*4882a593Smuzhiyun  */
3083*4882a593Smuzhiyun static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3084*4882a593Smuzhiyun 			    struct net_device *dev)
3085*4882a593Smuzhiyun {
3086*4882a593Smuzhiyun 	u64 val64;
3087*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
3088*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun 	/* address transaction */
3091*4882a593Smuzhiyun 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3092*4882a593Smuzhiyun 		MDIO_MMD_DEV_ADDR(mmd_type) |
3093*4882a593Smuzhiyun 		MDIO_MMS_PRT_ADDR(0x0);
3094*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3095*4882a593Smuzhiyun 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3096*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3097*4882a593Smuzhiyun 	udelay(100);
3098*4882a593Smuzhiyun 
3099*4882a593Smuzhiyun 	/* Data transaction */
3100*4882a593Smuzhiyun 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3101*4882a593Smuzhiyun 		MDIO_MMD_DEV_ADDR(mmd_type) |
3102*4882a593Smuzhiyun 		MDIO_MMS_PRT_ADDR(0x0) |
3103*4882a593Smuzhiyun 		MDIO_MDIO_DATA(value) |
3104*4882a593Smuzhiyun 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3105*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3106*4882a593Smuzhiyun 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3107*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3108*4882a593Smuzhiyun 	udelay(100);
3109*4882a593Smuzhiyun 
3110*4882a593Smuzhiyun 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3111*4882a593Smuzhiyun 		MDIO_MMD_DEV_ADDR(mmd_type) |
3112*4882a593Smuzhiyun 		MDIO_MMS_PRT_ADDR(0x0) |
3113*4882a593Smuzhiyun 		MDIO_OP(MDIO_OP_READ_TRANS);
3114*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3115*4882a593Smuzhiyun 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3116*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3117*4882a593Smuzhiyun 	udelay(100);
3118*4882a593Smuzhiyun }
3119*4882a593Smuzhiyun 
3120*4882a593Smuzhiyun /**
3121*4882a593Smuzhiyun  *  s2io_mdio_read - Function to read from the MDIO registers
3122*4882a593Smuzhiyun  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3123*4882a593Smuzhiyun  *  @addr     : address value
3124*4882a593Smuzhiyun  *  @dev      : pointer to net_device structure
3125*4882a593Smuzhiyun  *  Description:
3126*4882a593Smuzhiyun  *  This function is used to read values from the MDIO registers
3127*4882a593Smuzhiyun  *  NONE
3128*4882a593Smuzhiyun  */
3129*4882a593Smuzhiyun static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3130*4882a593Smuzhiyun {
3131*4882a593Smuzhiyun 	u64 val64 = 0x0;
3132*4882a593Smuzhiyun 	u64 rval64 = 0x0;
3133*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
3134*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3135*4882a593Smuzhiyun 
3136*4882a593Smuzhiyun 	/* address transaction */
3137*4882a593Smuzhiyun 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3138*4882a593Smuzhiyun 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3139*4882a593Smuzhiyun 			 | MDIO_MMS_PRT_ADDR(0x0));
3140*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3141*4882a593Smuzhiyun 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3142*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3143*4882a593Smuzhiyun 	udelay(100);
3144*4882a593Smuzhiyun 
3145*4882a593Smuzhiyun 	/* Data transaction */
3146*4882a593Smuzhiyun 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3147*4882a593Smuzhiyun 		MDIO_MMD_DEV_ADDR(mmd_type) |
3148*4882a593Smuzhiyun 		MDIO_MMS_PRT_ADDR(0x0) |
3149*4882a593Smuzhiyun 		MDIO_OP(MDIO_OP_READ_TRANS);
3150*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3151*4882a593Smuzhiyun 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3152*4882a593Smuzhiyun 	writeq(val64, &bar0->mdio_control);
3153*4882a593Smuzhiyun 	udelay(100);
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun 	/* Read the value from regs */
3156*4882a593Smuzhiyun 	rval64 = readq(&bar0->mdio_control);
3157*4882a593Smuzhiyun 	rval64 = rval64 & 0xFFFF0000;
3158*4882a593Smuzhiyun 	rval64 = rval64 >> 16;
3159*4882a593Smuzhiyun 	return rval64;
3160*4882a593Smuzhiyun }
3161*4882a593Smuzhiyun 
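/*
 * Both MDIO helpers above drive a two-step IEEE 802.3 Clause 45
 * sequence through mdio_control: an address frame first latches the
 * register index in the selected MMD, then a separate data frame
 * performs the actual read or write. Sketch of one frame, using the
 * MDIO_* field macros seen above (no completion bit is polled; a
 * fixed udelay() is used instead):
 *
 *	val64 = MDIO_MMD_INDX_ADDR(addr) | MDIO_MMD_DEV_ADDR(mmd_type) |
 *		MDIO_MMS_PRT_ADDR(0x0);
 *	writeq(val64, &bar0->mdio_control);
 *	writeq(val64 | MDIO_CTRL_START_TRANS(0xE), &bar0->mdio_control);
 *	udelay(100);
 */
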
3162*4882a593Smuzhiyun /**
3163*4882a593Smuzhiyun  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3164*4882a593Smuzhiyun  *  @counter      : counter value to be updated
3165*4882a593Smuzhiyun  *  @regs_stat    : registers status
3166*4882a593Smuzhiyun  *  @index        : index
3167*4882a593Smuzhiyun  *  @flag         : flag to indicate the status
3168*4882a593Smuzhiyun  *  @type         : counter type
3169*4882a593Smuzhiyun  *  Description:
3170*4882a593Smuzhiyun  *  This function checks the status of the XPAK counter values
3171*4882a593Smuzhiyun  *  NONE
3172*4882a593Smuzhiyun  */
3173*4882a593Smuzhiyun 
3174*4882a593Smuzhiyun static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3175*4882a593Smuzhiyun 				  u16 flag, u16 type)
3176*4882a593Smuzhiyun {
3177*4882a593Smuzhiyun 	u64 mask = 0x3;
3178*4882a593Smuzhiyun 	u64 val64;
3179*4882a593Smuzhiyun 	int i;
3180*4882a593Smuzhiyun 	for (i = 0; i < index; i++)
3181*4882a593Smuzhiyun 		mask = mask << 0x2;
3182*4882a593Smuzhiyun 
3183*4882a593Smuzhiyun 	if (flag > 0) {
3184*4882a593Smuzhiyun 		*counter = *counter + 1;
3185*4882a593Smuzhiyun 		val64 = *regs_stat & mask;
3186*4882a593Smuzhiyun 		val64 = val64 >> (index * 0x2);
3187*4882a593Smuzhiyun 		val64 = val64 + 1;
3188*4882a593Smuzhiyun 		if (val64 == 3) {
3189*4882a593Smuzhiyun 			switch (type) {
3190*4882a593Smuzhiyun 			case 1:
3191*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
3192*4882a593Smuzhiyun 					  "Take Xframe NIC out of service.\n");
3193*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
3194*4882a593Smuzhiyun "Excessive temperatures may result in premature transceiver failure.\n");
3195*4882a593Smuzhiyun 				break;
3196*4882a593Smuzhiyun 			case 2:
3197*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
3198*4882a593Smuzhiyun 					  "Take Xframe NIC out of service.\n");
3199*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
3200*4882a593Smuzhiyun "Excessive bias currents may indicate imminent laser diode failure.\n");
3201*4882a593Smuzhiyun 				break;
3202*4882a593Smuzhiyun 			case 3:
3203*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
3204*4882a593Smuzhiyun 					  "Take Xframe NIC out of service.\n");
3205*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
3206*4882a593Smuzhiyun "Excessive laser output power may saturate far-end receiver.\n");
3207*4882a593Smuzhiyun 				break;
3208*4882a593Smuzhiyun 			default:
3209*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
3210*4882a593Smuzhiyun 					  "Incorrect XPAK Alarm type\n");
3211*4882a593Smuzhiyun 			}
3212*4882a593Smuzhiyun 			val64 = 0x0;
3213*4882a593Smuzhiyun 		}
3214*4882a593Smuzhiyun 		val64 = val64 << (index * 0x2);
3215*4882a593Smuzhiyun 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3216*4882a593Smuzhiyun 
3217*4882a593Smuzhiyun 	} else {
3218*4882a593Smuzhiyun 		*regs_stat = *regs_stat & (~mask);
3219*4882a593Smuzhiyun 	}
3220*4882a593Smuzhiyun }
3221*4882a593Smuzhiyun 
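/*
 * xpak_regs_stat packs a 2-bit consecutive-hit counter per alarm at
 * bit offset (index * 2): each poll with the alarm flag set bumps the
 * field, the third consecutive hit emits the warning and clears the
 * field, and a clear flag resets it immediately. Worked example for
 * index = 2 (laser bias current):
 *
 *	mask  = 0x3 << 4;			// bits 5:4 of xpak_regs_stat
 *	field = (*regs_stat & mask) >> 4;	// 0, 1, 2 -> warn on 3rd hit
 */
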
3222*4882a593Smuzhiyun /**
3223*4882a593Smuzhiyun  *  s2io_updt_xpak_counter - Function to update the xpak counters
3224*4882a593Smuzhiyun  *  @dev         : pointer to net_device struct
3225*4882a593Smuzhiyun  *  Description:
3226*4882a593Smuzhiyun  *  This function updates the status of the XPAK counter values
3227*4882a593Smuzhiyun  *  NONE
3228*4882a593Smuzhiyun  */
3229*4882a593Smuzhiyun static void s2io_updt_xpak_counter(struct net_device *dev)
3230*4882a593Smuzhiyun {
3231*4882a593Smuzhiyun 	u16 flag  = 0x0;
3232*4882a593Smuzhiyun 	u16 type  = 0x0;
3233*4882a593Smuzhiyun 	u16 val16 = 0x0;
3234*4882a593Smuzhiyun 	u64 val64 = 0x0;
3235*4882a593Smuzhiyun 	u64 addr  = 0x0;
3236*4882a593Smuzhiyun 
3237*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
3238*4882a593Smuzhiyun 	struct stat_block *stats = sp->mac_control.stats_info;
3239*4882a593Smuzhiyun 	struct xpakStat *xstats = &stats->xpak_stat;
3240*4882a593Smuzhiyun 
3241*4882a593Smuzhiyun 	/* Check the communication with the MDIO slave */
3242*4882a593Smuzhiyun 	addr = MDIO_CTRL1;
3243*4882a593Smuzhiyun 	val64 = 0x0;
3244*4882a593Smuzhiyun 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3245*4882a593Smuzhiyun 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3246*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
3247*4882a593Smuzhiyun 			  "ERR: MDIO slave access failed - Returned %llx\n",
3248*4882a593Smuzhiyun 			  (unsigned long long)val64);
3249*4882a593Smuzhiyun 		return;
3250*4882a593Smuzhiyun 	}
3251*4882a593Smuzhiyun 
3252*4882a593Smuzhiyun 	/* Check for the expected value of control reg 1 */
3253*4882a593Smuzhiyun 	if (val64 != MDIO_CTRL1_SPEED10G) {
3254*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3255*4882a593Smuzhiyun 			  "Returned: %llx- Expected: 0x%x\n",
3256*4882a593Smuzhiyun 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3257*4882a593Smuzhiyun 		return;
3258*4882a593Smuzhiyun 	}
3259*4882a593Smuzhiyun 
3260*4882a593Smuzhiyun 	/* Loading the DOM register to MDIO register */
3261*4882a593Smuzhiyun 	addr = 0xA100;
3262*4882a593Smuzhiyun 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3263*4882a593Smuzhiyun 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun 	/* Reading the Alarm flags */
3266*4882a593Smuzhiyun 	addr = 0xA070;
3267*4882a593Smuzhiyun 	val64 = 0x0;
3268*4882a593Smuzhiyun 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun 	flag = CHECKBIT(val64, 0x7);
3271*4882a593Smuzhiyun 	type = 1;
3272*4882a593Smuzhiyun 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3273*4882a593Smuzhiyun 			      &xstats->xpak_regs_stat,
3274*4882a593Smuzhiyun 			      0x0, flag, type);
3275*4882a593Smuzhiyun 
3276*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x6))
3277*4882a593Smuzhiyun 		xstats->alarm_transceiver_temp_low++;
3278*4882a593Smuzhiyun 
3279*4882a593Smuzhiyun 	flag = CHECKBIT(val64, 0x3);
3280*4882a593Smuzhiyun 	type = 2;
3281*4882a593Smuzhiyun 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3282*4882a593Smuzhiyun 			      &xstats->xpak_regs_stat,
3283*4882a593Smuzhiyun 			      0x2, flag, type);
3284*4882a593Smuzhiyun 
3285*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x2))
3286*4882a593Smuzhiyun 		xstats->alarm_laser_bias_current_low++;
3287*4882a593Smuzhiyun 
3288*4882a593Smuzhiyun 	flag = CHECKBIT(val64, 0x1);
3289*4882a593Smuzhiyun 	type = 3;
3290*4882a593Smuzhiyun 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3291*4882a593Smuzhiyun 			      &xstats->xpak_regs_stat,
3292*4882a593Smuzhiyun 			      0x4, flag, type);
3293*4882a593Smuzhiyun 
3294*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x0))
3295*4882a593Smuzhiyun 		xstats->alarm_laser_output_power_low++;
3296*4882a593Smuzhiyun 
3297*4882a593Smuzhiyun 	/* Reading the Warning flags */
3298*4882a593Smuzhiyun 	addr = 0xA074;
3299*4882a593Smuzhiyun 	val64 = 0x0;
3300*4882a593Smuzhiyun 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3301*4882a593Smuzhiyun 
3302*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x7))
3303*4882a593Smuzhiyun 		xstats->warn_transceiver_temp_high++;
3304*4882a593Smuzhiyun 
3305*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x6))
3306*4882a593Smuzhiyun 		xstats->warn_transceiver_temp_low++;
3307*4882a593Smuzhiyun 
3308*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x3))
3309*4882a593Smuzhiyun 		xstats->warn_laser_bias_current_high++;
3310*4882a593Smuzhiyun 
3311*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x2))
3312*4882a593Smuzhiyun 		xstats->warn_laser_bias_current_low++;
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x1))
3315*4882a593Smuzhiyun 		xstats->warn_laser_output_power_high++;
3316*4882a593Smuzhiyun 
3317*4882a593Smuzhiyun 	if (CHECKBIT(val64, 0x0))
3318*4882a593Smuzhiyun 		xstats->warn_laser_output_power_low++;
3319*4882a593Smuzhiyun }
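
/*
 * Illustrative sketch (not driver code; kept out of the build with
 * "#if 0"): CHECKBIT above is assumed to test one bit of the MDIO
 * register image, so the alarm decode is simply "bit 7 = transceiver
 * temp high, bit 6 = temp low, ... bit 0 = laser output power low".
 */
#if 0
static inline int example_checkbit(u64 value, int bit)
{
	return (value & (1ULL << bit)) != 0;	/* 1 if the flag is set */
}
#endif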
3320*4882a593Smuzhiyun 
3321*4882a593Smuzhiyun /**
3322*4882a593Smuzhiyun  *  wait_for_cmd_complete - waits for a command to complete.
3323*4882a593Smuzhiyun  *  @addr: address
3324*4882a593Smuzhiyun  *  @busy_bit: bit to check for busy
3325*4882a593Smuzhiyun  *  @bit_state: state to check
3326*4882a593Smuzhiyun  *  Description: Waits for a command issued to the RMAC ADDR/DATA
3327*4882a593Smuzhiyun  *  registers to complete and returns either success or error,
3328*4882a593Smuzhiyun  *  depending on whether the command completed before the poll timed out.
3329*4882a593Smuzhiyun  *  Return value:
3330*4882a593Smuzhiyun  *   SUCCESS on success and FAILURE on failure.
3331*4882a593Smuzhiyun  */
3332*4882a593Smuzhiyun 
3333*4882a593Smuzhiyun static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3334*4882a593Smuzhiyun 				 int bit_state)
3335*4882a593Smuzhiyun {
3336*4882a593Smuzhiyun 	int ret = FAILURE, cnt = 0, delay = 1;
3337*4882a593Smuzhiyun 	u64 val64;
3338*4882a593Smuzhiyun 
3339*4882a593Smuzhiyun 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3340*4882a593Smuzhiyun 		return FAILURE;
3341*4882a593Smuzhiyun 
3342*4882a593Smuzhiyun 	do {
3343*4882a593Smuzhiyun 		val64 = readq(addr);
3344*4882a593Smuzhiyun 		if (bit_state == S2IO_BIT_RESET) {
3345*4882a593Smuzhiyun 			if (!(val64 & busy_bit)) {
3346*4882a593Smuzhiyun 				ret = SUCCESS;
3347*4882a593Smuzhiyun 				break;
3348*4882a593Smuzhiyun 			}
3349*4882a593Smuzhiyun 		} else {
3350*4882a593Smuzhiyun 			if (val64 & busy_bit) {
3351*4882a593Smuzhiyun 				ret = SUCCESS;
3352*4882a593Smuzhiyun 				break;
3353*4882a593Smuzhiyun 			}
3354*4882a593Smuzhiyun 		}
3355*4882a593Smuzhiyun 
3356*4882a593Smuzhiyun 		if (in_interrupt())
3357*4882a593Smuzhiyun 			mdelay(delay);
3358*4882a593Smuzhiyun 		else
3359*4882a593Smuzhiyun 			msleep(delay);
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 		if (++cnt >= 10)
3362*4882a593Smuzhiyun 			delay = 50;
3363*4882a593Smuzhiyun 	} while (cnt < 20);
3364*4882a593Smuzhiyun 	return ret;
3365*4882a593Smuzhiyun }
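
/*
 * Usage sketch (hypothetical call site, guarded out of the build): a
 * caller typically kicks a command into the RMAC ADDR/DATA command
 * register and then polls for the strobe bit to clear.  The register
 * and bit names follow this driver's conventions elsewhere in the file
 * but are assumptions here, not a verbatim excerpt.
 */
#if 0
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET) != SUCCESS)
		return FAILURE;	/* strobe never cleared */
#endif
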
3366*4882a593Smuzhiyun /**
3367*4882a593Smuzhiyun  * check_pci_device_id - Checks if the device id is supported
3368*4882a593Smuzhiyun  * @id : device id
3369*4882a593Smuzhiyun  * Description: Function to check if the pci device id is supported by driver.
3370*4882a593Smuzhiyun  * Return value: Actual device id if supported else PCI_ANY_ID
3371*4882a593Smuzhiyun  */
3372*4882a593Smuzhiyun static u16 check_pci_device_id(u16 id)
3373*4882a593Smuzhiyun {
3374*4882a593Smuzhiyun 	switch (id) {
3375*4882a593Smuzhiyun 	case PCI_DEVICE_ID_HERC_WIN:
3376*4882a593Smuzhiyun 	case PCI_DEVICE_ID_HERC_UNI:
3377*4882a593Smuzhiyun 		return XFRAME_II_DEVICE;
3378*4882a593Smuzhiyun 	case PCI_DEVICE_ID_S2IO_UNI:
3379*4882a593Smuzhiyun 	case PCI_DEVICE_ID_S2IO_WIN:
3380*4882a593Smuzhiyun 		return XFRAME_I_DEVICE;
3381*4882a593Smuzhiyun 	default:
3382*4882a593Smuzhiyun 		return PCI_ANY_ID;
3383*4882a593Smuzhiyun 	}
3384*4882a593Smuzhiyun }
3385*4882a593Smuzhiyun 
3386*4882a593Smuzhiyun /**
3387*4882a593Smuzhiyun  *  s2io_reset - Resets the card.
3388*4882a593Smuzhiyun  *  @sp : private member of the device structure.
3389*4882a593Smuzhiyun  *  Description: Function to reset the card. It also restores the
3390*4882a593Smuzhiyun  *  previously saved PCI configuration space registers, since the card
3391*4882a593Smuzhiyun  *  reset also resets the configuration space.
3392*4882a593Smuzhiyun  *  Return value:
3393*4882a593Smuzhiyun  *  void.
3394*4882a593Smuzhiyun  */
3395*4882a593Smuzhiyun 
3396*4882a593Smuzhiyun static void s2io_reset(struct s2io_nic *sp)
3397*4882a593Smuzhiyun {
3398*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3399*4882a593Smuzhiyun 	u64 val64;
3400*4882a593Smuzhiyun 	u16 subid, pci_cmd;
3401*4882a593Smuzhiyun 	int i;
3402*4882a593Smuzhiyun 	u16 val16;
3403*4882a593Smuzhiyun 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3404*4882a593Smuzhiyun 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3405*4882a593Smuzhiyun 	struct stat_block *stats;
3406*4882a593Smuzhiyun 	struct swStat *swstats;
3407*4882a593Smuzhiyun 
3408*4882a593Smuzhiyun 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3409*4882a593Smuzhiyun 		  __func__, pci_name(sp->pdev));
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 	/* Back up the PCI-X CMD reg; we don't want to lose MMRBC, OST settings */
3412*4882a593Smuzhiyun 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3413*4882a593Smuzhiyun 
3414*4882a593Smuzhiyun 	val64 = SW_RESET_ALL;
3415*4882a593Smuzhiyun 	writeq(val64, &bar0->sw_reset);
3416*4882a593Smuzhiyun 	if (strstr(sp->product_name, "CX4"))
3417*4882a593Smuzhiyun 		msleep(750);
3418*4882a593Smuzhiyun 	msleep(250);
3419*4882a593Smuzhiyun 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3420*4882a593Smuzhiyun 
3421*4882a593Smuzhiyun 		/* Restore the PCI state saved during initialization. */
3422*4882a593Smuzhiyun 		pci_restore_state(sp->pdev);
3423*4882a593Smuzhiyun 		pci_save_state(sp->pdev);
3424*4882a593Smuzhiyun 		pci_read_config_word(sp->pdev, 0x2, &val16);
3425*4882a593Smuzhiyun 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3426*4882a593Smuzhiyun 			break;
3427*4882a593Smuzhiyun 		msleep(200);
3428*4882a593Smuzhiyun 	}
3429*4882a593Smuzhiyun 
3430*4882a593Smuzhiyun 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3431*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3432*4882a593Smuzhiyun 
3433*4882a593Smuzhiyun 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3434*4882a593Smuzhiyun 
3435*4882a593Smuzhiyun 	s2io_init_pci(sp);
3436*4882a593Smuzhiyun 
3437*4882a593Smuzhiyun 	/* Set swapper to enable I/O register access */
3438*4882a593Smuzhiyun 	s2io_set_swapper(sp);
3439*4882a593Smuzhiyun 
3440*4882a593Smuzhiyun 	/* restore mac_addr entries */
3441*4882a593Smuzhiyun 	do_s2io_restore_unicast_mc(sp);
3442*4882a593Smuzhiyun 
3443*4882a593Smuzhiyun 	/* Restore the MSIX table entries from local variables */
3444*4882a593Smuzhiyun 	restore_xmsi_data(sp);
3445*4882a593Smuzhiyun 
3446*4882a593Smuzhiyun 	/* Clear certain PCI/PCI-X fields after reset */
3447*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_II_DEVICE) {
3448*4882a593Smuzhiyun 		/* Clear "detected parity error" bit */
3449*4882a593Smuzhiyun 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3450*4882a593Smuzhiyun 
3451*4882a593Smuzhiyun 		/* Clearing PCIX Ecc status register */
3452*4882a593Smuzhiyun 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3453*4882a593Smuzhiyun 
3454*4882a593Smuzhiyun 		/* Clearing PCI_STATUS error reflected here */
3455*4882a593Smuzhiyun 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3456*4882a593Smuzhiyun 	}
3457*4882a593Smuzhiyun 
3458*4882a593Smuzhiyun 	/* Reset device statistics maintained by OS */
3459*4882a593Smuzhiyun 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3460*4882a593Smuzhiyun 
3461*4882a593Smuzhiyun 	stats = sp->mac_control.stats_info;
3462*4882a593Smuzhiyun 	swstats = &stats->sw_stat;
3463*4882a593Smuzhiyun 
3464*4882a593Smuzhiyun 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3465*4882a593Smuzhiyun 	up_cnt = swstats->link_up_cnt;
3466*4882a593Smuzhiyun 	down_cnt = swstats->link_down_cnt;
3467*4882a593Smuzhiyun 	up_time = swstats->link_up_time;
3468*4882a593Smuzhiyun 	down_time = swstats->link_down_time;
3469*4882a593Smuzhiyun 	reset_cnt = swstats->soft_reset_cnt;
3470*4882a593Smuzhiyun 	mem_alloc_cnt = swstats->mem_allocated;
3471*4882a593Smuzhiyun 	mem_free_cnt = swstats->mem_freed;
3472*4882a593Smuzhiyun 	watchdog_cnt = swstats->watchdog_timer_cnt;
3473*4882a593Smuzhiyun 
3474*4882a593Smuzhiyun 	memset(stats, 0, sizeof(struct stat_block));
3475*4882a593Smuzhiyun 
3476*4882a593Smuzhiyun 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3477*4882a593Smuzhiyun 	swstats->link_up_cnt = up_cnt;
3478*4882a593Smuzhiyun 	swstats->link_down_cnt = down_cnt;
3479*4882a593Smuzhiyun 	swstats->link_up_time = up_time;
3480*4882a593Smuzhiyun 	swstats->link_down_time = down_time;
3481*4882a593Smuzhiyun 	swstats->soft_reset_cnt = reset_cnt;
3482*4882a593Smuzhiyun 	swstats->mem_allocated = mem_alloc_cnt;
3483*4882a593Smuzhiyun 	swstats->mem_freed = mem_free_cnt;
3484*4882a593Smuzhiyun 	swstats->watchdog_timer_cnt = watchdog_cnt;
3485*4882a593Smuzhiyun 
3486*4882a593Smuzhiyun 	/* SXE-002: Configure link and activity LED to turn it off */
3487*4882a593Smuzhiyun 	subid = sp->pdev->subsystem_device;
3488*4882a593Smuzhiyun 	if (((subid & 0xFF) >= 0x07) &&
3489*4882a593Smuzhiyun 	    (sp->device_type == XFRAME_I_DEVICE)) {
3490*4882a593Smuzhiyun 		val64 = readq(&bar0->gpio_control);
3491*4882a593Smuzhiyun 		val64 |= 0x0000800000000000ULL;
3492*4882a593Smuzhiyun 		writeq(val64, &bar0->gpio_control);
3493*4882a593Smuzhiyun 		val64 = 0x0411040400000000ULL;
3494*4882a593Smuzhiyun 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3495*4882a593Smuzhiyun 	}
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	/*
3498*4882a593Smuzhiyun 	 * Clear spurious ECC interrupts that would have occurred on
3499*4882a593Smuzhiyun 	 * XFRAME II cards after reset.
3500*4882a593Smuzhiyun 	 */
3501*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_II_DEVICE) {
3502*4882a593Smuzhiyun 		val64 = readq(&bar0->pcc_err_reg);
3503*4882a593Smuzhiyun 		writeq(val64, &bar0->pcc_err_reg);
3504*4882a593Smuzhiyun 	}
3505*4882a593Smuzhiyun 
3506*4882a593Smuzhiyun 	sp->device_enabled_once = false;
3507*4882a593Smuzhiyun }
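
/*
 * Sketch of the post-reset readiness poll above (illustrative only):
 * PCI config space is re-read until the device ID decodes to a
 * supported part, confirming the chip has come out of SW_RESET.
 * Offset 0x2 used in the loop above is PCI_DEVICE_ID.
 */
#if 0
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
		pci_read_config_word(sp->pdev, PCI_DEVICE_ID, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;		/* device is responding again */
		msleep(200);
	}
#endif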
3508*4882a593Smuzhiyun 
3509*4882a593Smuzhiyun /**
3510*4882a593Smuzhiyun  *  s2io_set_swapper - to set the swapper control on the card
3511*4882a593Smuzhiyun  *  @sp : private member of the device structure,
3512*4882a593Smuzhiyun  *  pointer to the s2io_nic structure.
3513*4882a593Smuzhiyun  *  Description: Function to set the swapper control on the card
3514*4882a593Smuzhiyun  *  correctly depending on the 'endianness' of the system.
3515*4882a593Smuzhiyun  *  Return value:
3516*4882a593Smuzhiyun  *  SUCCESS on success and FAILURE on failure.
3517*4882a593Smuzhiyun  */
3518*4882a593Smuzhiyun 
3519*4882a593Smuzhiyun static int s2io_set_swapper(struct s2io_nic *sp)
3520*4882a593Smuzhiyun {
3521*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
3522*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3523*4882a593Smuzhiyun 	u64 val64, valt, valr;
3524*4882a593Smuzhiyun 
3525*4882a593Smuzhiyun 	/*
3526*4882a593Smuzhiyun 	 * Set proper endian settings and verify the same by reading
3527*4882a593Smuzhiyun 	 * the PIF Feed-back register.
3528*4882a593Smuzhiyun 	 */
3529*4882a593Smuzhiyun 
3530*4882a593Smuzhiyun 	val64 = readq(&bar0->pif_rd_swapper_fb);
3531*4882a593Smuzhiyun 	if (val64 != 0x0123456789ABCDEFULL) {
3532*4882a593Smuzhiyun 		int i = 0;
3533*4882a593Smuzhiyun 		static const u64 value[] = {
3534*4882a593Smuzhiyun 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3535*4882a593Smuzhiyun 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3536*4882a593Smuzhiyun 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3537*4882a593Smuzhiyun 			0			/* FE=0, SE=0 */
3538*4882a593Smuzhiyun 		};
3539*4882a593Smuzhiyun 
3540*4882a593Smuzhiyun 		while (i < 4) {
3541*4882a593Smuzhiyun 			writeq(value[i], &bar0->swapper_ctrl);
3542*4882a593Smuzhiyun 			val64 = readq(&bar0->pif_rd_swapper_fb);
3543*4882a593Smuzhiyun 			if (val64 == 0x0123456789ABCDEFULL)
3544*4882a593Smuzhiyun 				break;
3545*4882a593Smuzhiyun 			i++;
3546*4882a593Smuzhiyun 		}
3547*4882a593Smuzhiyun 		if (i == 4) {
3548*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3549*4882a593Smuzhiyun 				  "feedback read %llx\n",
3550*4882a593Smuzhiyun 				  dev->name, (unsigned long long)val64);
3551*4882a593Smuzhiyun 			return FAILURE;
3552*4882a593Smuzhiyun 		}
3553*4882a593Smuzhiyun 		valr = value[i];
3554*4882a593Smuzhiyun 	} else {
3555*4882a593Smuzhiyun 		valr = readq(&bar0->swapper_ctrl);
3556*4882a593Smuzhiyun 	}
3557*4882a593Smuzhiyun 
3558*4882a593Smuzhiyun 	valt = 0x0123456789ABCDEFULL;
3559*4882a593Smuzhiyun 	writeq(valt, &bar0->xmsi_address);
3560*4882a593Smuzhiyun 	val64 = readq(&bar0->xmsi_address);
3561*4882a593Smuzhiyun 
3562*4882a593Smuzhiyun 	if (val64 != valt) {
3563*4882a593Smuzhiyun 		int i = 0;
3564*4882a593Smuzhiyun 		static const u64 value[] = {
3565*4882a593Smuzhiyun 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3566*4882a593Smuzhiyun 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3567*4882a593Smuzhiyun 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3568*4882a593Smuzhiyun 			0			/* FE=0, SE=0 */
3569*4882a593Smuzhiyun 		};
3570*4882a593Smuzhiyun 
3571*4882a593Smuzhiyun 		while (i < 4) {
3572*4882a593Smuzhiyun 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3573*4882a593Smuzhiyun 			writeq(valt, &bar0->xmsi_address);
3574*4882a593Smuzhiyun 			val64 = readq(&bar0->xmsi_address);
3575*4882a593Smuzhiyun 			if (val64 == valt)
3576*4882a593Smuzhiyun 				break;
3577*4882a593Smuzhiyun 			i++;
3578*4882a593Smuzhiyun 		}
3579*4882a593Smuzhiyun 		if (i == 4) {
3580*4882a593Smuzhiyun 			unsigned long long x = val64;
3581*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG,
3582*4882a593Smuzhiyun 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3583*4882a593Smuzhiyun 			return FAILURE;
3584*4882a593Smuzhiyun 		}
3585*4882a593Smuzhiyun 	}
3586*4882a593Smuzhiyun 	val64 = readq(&bar0->swapper_ctrl);
3587*4882a593Smuzhiyun 	val64 &= 0xFFFF000000000000ULL;
3588*4882a593Smuzhiyun 
3589*4882a593Smuzhiyun #ifdef __BIG_ENDIAN
3590*4882a593Smuzhiyun 	/*
3591*4882a593Smuzhiyun 	 * The device is set to big endian format by default, so a
3592*4882a593Smuzhiyun 	 * big endian driver need not set anything.
3593*4882a593Smuzhiyun 	 */
3594*4882a593Smuzhiyun 	val64 |= (SWAPPER_CTRL_TXP_FE |
3595*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXP_SE |
3596*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXD_R_FE |
3597*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXD_W_FE |
3598*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXF_R_FE |
3599*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXD_R_FE |
3600*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXD_W_FE |
3601*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXF_W_FE |
3602*4882a593Smuzhiyun 		  SWAPPER_CTRL_XMSI_FE |
3603*4882a593Smuzhiyun 		  SWAPPER_CTRL_STATS_FE |
3604*4882a593Smuzhiyun 		  SWAPPER_CTRL_STATS_SE);
3605*4882a593Smuzhiyun 	if (sp->config.intr_type == INTA)
3606*4882a593Smuzhiyun 		val64 |= SWAPPER_CTRL_XMSI_SE;
3607*4882a593Smuzhiyun 	writeq(val64, &bar0->swapper_ctrl);
3608*4882a593Smuzhiyun #else
3609*4882a593Smuzhiyun 	/*
3610*4882a593Smuzhiyun 	 * Initially we enable all bits to make it accessible by the
3611*4882a593Smuzhiyun 	 * driver, then we selectively enable only those bits that
3612*4882a593Smuzhiyun 	 * we want to set.
3613*4882a593Smuzhiyun 	 */
3614*4882a593Smuzhiyun 	val64 |= (SWAPPER_CTRL_TXP_FE |
3615*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXP_SE |
3616*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXD_R_FE |
3617*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXD_R_SE |
3618*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXD_W_FE |
3619*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXD_W_SE |
3620*4882a593Smuzhiyun 		  SWAPPER_CTRL_TXF_R_FE |
3621*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXD_R_FE |
3622*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXD_R_SE |
3623*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXD_W_FE |
3624*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXD_W_SE |
3625*4882a593Smuzhiyun 		  SWAPPER_CTRL_RXF_W_FE |
3626*4882a593Smuzhiyun 		  SWAPPER_CTRL_XMSI_FE |
3627*4882a593Smuzhiyun 		  SWAPPER_CTRL_STATS_FE |
3628*4882a593Smuzhiyun 		  SWAPPER_CTRL_STATS_SE);
3629*4882a593Smuzhiyun 	if (sp->config.intr_type == INTA)
3630*4882a593Smuzhiyun 		val64 |= SWAPPER_CTRL_XMSI_SE;
3631*4882a593Smuzhiyun 	writeq(val64, &bar0->swapper_ctrl);
3632*4882a593Smuzhiyun #endif
3633*4882a593Smuzhiyun 	val64 = readq(&bar0->swapper_ctrl);
3634*4882a593Smuzhiyun 
3635*4882a593Smuzhiyun 	/*
3636*4882a593Smuzhiyun 	 * Verifying if endian settings are accurate by reading a
3637*4882a593Smuzhiyun 	 * feedback register.
3638*4882a593Smuzhiyun 	 */
3639*4882a593Smuzhiyun 	val64 = readq(&bar0->pif_rd_swapper_fb);
3640*4882a593Smuzhiyun 	if (val64 != 0x0123456789ABCDEFULL) {
3641*4882a593Smuzhiyun 		/* Endian settings are incorrect, calls for another dekko. */
3642*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
3643*4882a593Smuzhiyun 			  "%s: Endian settings are wrong, feedback read %llx\n",
3644*4882a593Smuzhiyun 			  dev->name, (unsigned long long)val64);
3645*4882a593Smuzhiyun 		return FAILURE;
3646*4882a593Smuzhiyun 	}
3647*4882a593Smuzhiyun 
3648*4882a593Smuzhiyun 	return SUCCESS;
3649*4882a593Smuzhiyun }
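
/*
 * Reduced sketch of the probe strategy above (illustrative, not
 * built): each FE/SE swapper candidate is written and the PIF
 * feedback register is read back until it returns the known pattern
 * 0x0123456789ABCDEF.
 */
#if 0
static int example_probe_swapper(void __iomem *ctrl, void __iomem *fb,
				 const u64 *candidates, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		writeq(candidates[i], ctrl);
		if (readq(fb) == 0x0123456789ABCDEFULL)
			return i;	/* working endian setting */
	}
	return -1;			/* no candidate produced the pattern */
}
#endif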
3650*4882a593Smuzhiyun 
3651*4882a593Smuzhiyun static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3652*4882a593Smuzhiyun {
3653*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3654*4882a593Smuzhiyun 	u64 val64;
3655*4882a593Smuzhiyun 	int ret = 0, cnt = 0;
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 	do {
3658*4882a593Smuzhiyun 		val64 = readq(&bar0->xmsi_access);
3659*4882a593Smuzhiyun 		if (!(val64 & s2BIT(15)))
3660*4882a593Smuzhiyun 			break;
3661*4882a593Smuzhiyun 		mdelay(1);
3662*4882a593Smuzhiyun 		cnt++;
3663*4882a593Smuzhiyun 	} while (cnt < 5);
3664*4882a593Smuzhiyun 	if (cnt == 5) {
3665*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3666*4882a593Smuzhiyun 		ret = 1;
3667*4882a593Smuzhiyun 	}
3668*4882a593Smuzhiyun 
3669*4882a593Smuzhiyun 	return ret;
3670*4882a593Smuzhiyun }
3671*4882a593Smuzhiyun 
3672*4882a593Smuzhiyun static void restore_xmsi_data(struct s2io_nic *nic)
3673*4882a593Smuzhiyun {
3674*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3675*4882a593Smuzhiyun 	u64 val64;
3676*4882a593Smuzhiyun 	int i, msix_index;
3677*4882a593Smuzhiyun 
3678*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_I_DEVICE)
3679*4882a593Smuzhiyun 		return;
3680*4882a593Smuzhiyun 
3681*4882a593Smuzhiyun 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3682*4882a593Smuzhiyun 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3683*4882a593Smuzhiyun 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3684*4882a593Smuzhiyun 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3685*4882a593Smuzhiyun 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3686*4882a593Smuzhiyun 		writeq(val64, &bar0->xmsi_access);
3687*4882a593Smuzhiyun 		if (wait_for_msix_trans(nic, msix_index))
3688*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3689*4882a593Smuzhiyun 				  __func__, msix_index);
3690*4882a593Smuzhiyun 	}
3691*4882a593Smuzhiyun }
3692*4882a593Smuzhiyun 
3693*4882a593Smuzhiyun static void store_xmsi_data(struct s2io_nic *nic)
3694*4882a593Smuzhiyun {
3695*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3696*4882a593Smuzhiyun 	u64 val64, addr, data;
3697*4882a593Smuzhiyun 	int i, msix_index;
3698*4882a593Smuzhiyun 
3699*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_I_DEVICE)
3700*4882a593Smuzhiyun 		return;
3701*4882a593Smuzhiyun 
3702*4882a593Smuzhiyun 	/* Store and display */
3703*4882a593Smuzhiyun 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3704*4882a593Smuzhiyun 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3705*4882a593Smuzhiyun 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3706*4882a593Smuzhiyun 		writeq(val64, &bar0->xmsi_access);
3707*4882a593Smuzhiyun 		if (wait_for_msix_trans(nic, msix_index)) {
3708*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3709*4882a593Smuzhiyun 				  __func__, msix_index);
3710*4882a593Smuzhiyun 			continue;
3711*4882a593Smuzhiyun 		}
3712*4882a593Smuzhiyun 		addr = readq(&bar0->xmsi_address);
3713*4882a593Smuzhiyun 		data = readq(&bar0->xmsi_data);
3714*4882a593Smuzhiyun 		if (addr && data) {
3715*4882a593Smuzhiyun 			nic->msix_info[i].addr = addr;
3716*4882a593Smuzhiyun 			nic->msix_info[i].data = data;
3717*4882a593Smuzhiyun 		}
3718*4882a593Smuzhiyun 	}
3719*4882a593Smuzhiyun }
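
/*
 * Sketch of the MSI-X table index mapping used by the two helpers
 * above (illustrative only): entry 0 is the alarm vector, and entry i
 * (i >= 1) lives at table slot (i - 1) * 8 + 1, i.e. 1, 9, 17, ...
 */
#if 0
static inline int example_msix_index(int i)
{
	return i ? ((i - 1) * 8 + 1) : 0;	/* 0, 1, 9, 17, ... */
}
#endif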
3720*4882a593Smuzhiyun 
3721*4882a593Smuzhiyun static int s2io_enable_msi_x(struct s2io_nic *nic)
3722*4882a593Smuzhiyun {
3723*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3724*4882a593Smuzhiyun 	u64 rx_mat;
3725*4882a593Smuzhiyun 	u16 msi_control; /* Temp variable */
3726*4882a593Smuzhiyun 	int ret, i, j, msix_indx = 1;
3727*4882a593Smuzhiyun 	int size;
3728*4882a593Smuzhiyun 	struct stat_block *stats = nic->mac_control.stats_info;
3729*4882a593Smuzhiyun 	struct swStat *swstats = &stats->sw_stat;
3730*4882a593Smuzhiyun 
3731*4882a593Smuzhiyun 	size = nic->num_entries * sizeof(struct msix_entry);
3732*4882a593Smuzhiyun 	nic->entries = kzalloc(size, GFP_KERNEL);
3733*4882a593Smuzhiyun 	if (!nic->entries) {
3734*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3735*4882a593Smuzhiyun 			  __func__);
3736*4882a593Smuzhiyun 		swstats->mem_alloc_fail_cnt++;
3737*4882a593Smuzhiyun 		return -ENOMEM;
3738*4882a593Smuzhiyun 	}
3739*4882a593Smuzhiyun 	swstats->mem_allocated += size;
3740*4882a593Smuzhiyun 
3741*4882a593Smuzhiyun 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3742*4882a593Smuzhiyun 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3743*4882a593Smuzhiyun 	if (!nic->s2io_entries) {
3744*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3745*4882a593Smuzhiyun 			  __func__);
3746*4882a593Smuzhiyun 		swstats->mem_alloc_fail_cnt++;
3747*4882a593Smuzhiyun 		kfree(nic->entries);
3748*4882a593Smuzhiyun 		swstats->mem_freed
3749*4882a593Smuzhiyun 			+= (nic->num_entries * sizeof(struct msix_entry));
3750*4882a593Smuzhiyun 		return -ENOMEM;
3751*4882a593Smuzhiyun 	}
3752*4882a593Smuzhiyun 	swstats->mem_allocated += size;
3753*4882a593Smuzhiyun 
3754*4882a593Smuzhiyun 	nic->entries[0].entry = 0;
3755*4882a593Smuzhiyun 	nic->s2io_entries[0].entry = 0;
3756*4882a593Smuzhiyun 	nic->s2io_entries[0].in_use = MSIX_FLG;
3757*4882a593Smuzhiyun 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3758*4882a593Smuzhiyun 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun 	for (i = 1; i < nic->num_entries; i++) {
3761*4882a593Smuzhiyun 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3762*4882a593Smuzhiyun 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3763*4882a593Smuzhiyun 		nic->s2io_entries[i].arg = NULL;
3764*4882a593Smuzhiyun 		nic->s2io_entries[i].in_use = 0;
3765*4882a593Smuzhiyun 	}
3766*4882a593Smuzhiyun 
3767*4882a593Smuzhiyun 	rx_mat = readq(&bar0->rx_mat);
3768*4882a593Smuzhiyun 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3769*4882a593Smuzhiyun 		rx_mat |= RX_MAT_SET(j, msix_indx);
3770*4882a593Smuzhiyun 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3771*4882a593Smuzhiyun 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3772*4882a593Smuzhiyun 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3773*4882a593Smuzhiyun 		msix_indx += 8;
3774*4882a593Smuzhiyun 	}
3775*4882a593Smuzhiyun 	writeq(rx_mat, &bar0->rx_mat);
3776*4882a593Smuzhiyun 	readq(&bar0->rx_mat);
3777*4882a593Smuzhiyun 
3778*4882a593Smuzhiyun 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3779*4882a593Smuzhiyun 				    nic->num_entries, nic->num_entries);
3780*4882a593Smuzhiyun 	/* Fail init on error or if we get fewer vectors than the minimum required */
3781*4882a593Smuzhiyun 	if (ret < 0) {
3782*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3783*4882a593Smuzhiyun 		kfree(nic->entries);
3784*4882a593Smuzhiyun 		swstats->mem_freed += nic->num_entries *
3785*4882a593Smuzhiyun 			sizeof(struct msix_entry);
3786*4882a593Smuzhiyun 		kfree(nic->s2io_entries);
3787*4882a593Smuzhiyun 		swstats->mem_freed += nic->num_entries *
3788*4882a593Smuzhiyun 			sizeof(struct s2io_msix_entry);
3789*4882a593Smuzhiyun 		nic->entries = NULL;
3790*4882a593Smuzhiyun 		nic->s2io_entries = NULL;
3791*4882a593Smuzhiyun 		return -ENOMEM;
3792*4882a593Smuzhiyun 	}
3793*4882a593Smuzhiyun 
3794*4882a593Smuzhiyun 	/*
3795*4882a593Smuzhiyun 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3796*4882a593Smuzhiyun 	 * in the herc NIC. (Temp change, needs to be removed later)
3797*4882a593Smuzhiyun 	 */
3798*4882a593Smuzhiyun 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3799*4882a593Smuzhiyun 	msi_control |= 0x1; /* Enable MSI */
3800*4882a593Smuzhiyun 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3801*4882a593Smuzhiyun 
3802*4882a593Smuzhiyun 	return 0;
3803*4882a593Smuzhiyun }
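
/*
 * Hypothetical caller-side sketch (guarded out of the build): probe
 * code would typically try MSI-X first and drop back to INTA when it
 * cannot be enabled, so the device still works with a legacy IRQ.
 */
#if 0
	if (sp->config.intr_type == MSI_X && s2io_enable_msi_x(sp)) {
		DBG_PRINT(ERR_DBG, "MSI-X unavailable, using INTA\n");
		sp->config.intr_type = INTA;
	}
#endif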
3804*4882a593Smuzhiyun 
3805*4882a593Smuzhiyun /* Handle software interrupt used during MSI(X) test */
3806*4882a593Smuzhiyun static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3807*4882a593Smuzhiyun {
3808*4882a593Smuzhiyun 	struct s2io_nic *sp = dev_id;
3809*4882a593Smuzhiyun 
3810*4882a593Smuzhiyun 	sp->msi_detected = 1;
3811*4882a593Smuzhiyun 	wake_up(&sp->msi_wait);
3812*4882a593Smuzhiyun 
3813*4882a593Smuzhiyun 	return IRQ_HANDLED;
3814*4882a593Smuzhiyun }
3815*4882a593Smuzhiyun 
3816*4882a593Smuzhiyun /* Test interrupt path by forcing a software IRQ */
3817*4882a593Smuzhiyun static int s2io_test_msi(struct s2io_nic *sp)
3818*4882a593Smuzhiyun {
3819*4882a593Smuzhiyun 	struct pci_dev *pdev = sp->pdev;
3820*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3821*4882a593Smuzhiyun 	int err;
3822*4882a593Smuzhiyun 	u64 val64, saved64;
3823*4882a593Smuzhiyun 
3824*4882a593Smuzhiyun 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3825*4882a593Smuzhiyun 			  sp->name, sp);
3826*4882a593Smuzhiyun 	if (err) {
3827*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3828*4882a593Smuzhiyun 			  sp->dev->name, pci_name(pdev), pdev->irq);
3829*4882a593Smuzhiyun 		return err;
3830*4882a593Smuzhiyun 	}
3831*4882a593Smuzhiyun 
3832*4882a593Smuzhiyun 	init_waitqueue_head(&sp->msi_wait);
3833*4882a593Smuzhiyun 	sp->msi_detected = 0;
3834*4882a593Smuzhiyun 
3835*4882a593Smuzhiyun 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3836*4882a593Smuzhiyun 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3837*4882a593Smuzhiyun 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3838*4882a593Smuzhiyun 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3839*4882a593Smuzhiyun 	writeq(val64, &bar0->scheduled_int_ctrl);
3840*4882a593Smuzhiyun 
3841*4882a593Smuzhiyun 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3842*4882a593Smuzhiyun 
3843*4882a593Smuzhiyun 	if (!sp->msi_detected) {
3844*4882a593Smuzhiyun 		/* MSI(X) test failed, go back to INTx mode */
3845*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3846*4882a593Smuzhiyun 			  "using MSI(X) during test\n",
3847*4882a593Smuzhiyun 			  sp->dev->name, pci_name(pdev));
3848*4882a593Smuzhiyun 
3849*4882a593Smuzhiyun 		err = -EOPNOTSUPP;
3850*4882a593Smuzhiyun 	}
3851*4882a593Smuzhiyun 
3852*4882a593Smuzhiyun 	free_irq(sp->entries[1].vector, sp);
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun 	writeq(saved64, &bar0->scheduled_int_ctrl);
3855*4882a593Smuzhiyun 
3856*4882a593Smuzhiyun 	return err;
3857*4882a593Smuzhiyun }
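
/*
 * The self-test above is the usual "fire a one-shot interrupt, sleep
 * on a waitqueue, check a flag" pattern.  Reduced to its essence
 * (illustrative only, not built):
 */
#if 0
	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;
	/* ...program scheduled_int_ctrl to raise the test interrupt... */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ / 10);
	if (!sp->msi_detected)
		err = -EOPNOTSUPP;	/* vector never fired */
#endif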
3858*4882a593Smuzhiyun 
3859*4882a593Smuzhiyun static void remove_msix_isr(struct s2io_nic *sp)
3860*4882a593Smuzhiyun {
3861*4882a593Smuzhiyun 	int i;
3862*4882a593Smuzhiyun 	u16 msi_control;
3863*4882a593Smuzhiyun 
3864*4882a593Smuzhiyun 	for (i = 0; i < sp->num_entries; i++) {
3865*4882a593Smuzhiyun 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3866*4882a593Smuzhiyun 			int vector = sp->entries[i].vector;
3867*4882a593Smuzhiyun 			void *arg = sp->s2io_entries[i].arg;
3868*4882a593Smuzhiyun 			free_irq(vector, arg);
3869*4882a593Smuzhiyun 		}
3870*4882a593Smuzhiyun 	}
3871*4882a593Smuzhiyun 
3872*4882a593Smuzhiyun 	kfree(sp->entries);
3873*4882a593Smuzhiyun 	kfree(sp->s2io_entries);
3874*4882a593Smuzhiyun 	sp->entries = NULL;
3875*4882a593Smuzhiyun 	sp->s2io_entries = NULL;
3876*4882a593Smuzhiyun 
3877*4882a593Smuzhiyun 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3878*4882a593Smuzhiyun 	msi_control &= 0xFFFE; /* Disable MSI */
3879*4882a593Smuzhiyun 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3880*4882a593Smuzhiyun 
3881*4882a593Smuzhiyun 	pci_disable_msix(sp->pdev);
3882*4882a593Smuzhiyun }
3883*4882a593Smuzhiyun 
3884*4882a593Smuzhiyun static void remove_inta_isr(struct s2io_nic *sp)
3885*4882a593Smuzhiyun {
3886*4882a593Smuzhiyun 	free_irq(sp->pdev->irq, sp->dev);
3887*4882a593Smuzhiyun }
3888*4882a593Smuzhiyun 
3889*4882a593Smuzhiyun /* ********************************************************* *
3890*4882a593Smuzhiyun  * Functions defined below concern the OS part of the driver *
3891*4882a593Smuzhiyun  * ********************************************************* */
3892*4882a593Smuzhiyun 
3893*4882a593Smuzhiyun /**
3894*4882a593Smuzhiyun  *  s2io_open - open entry point of the driver
3895*4882a593Smuzhiyun  *  @dev : pointer to the device structure.
3896*4882a593Smuzhiyun  *  Description:
3897*4882a593Smuzhiyun  *  This function is the open entry point of the driver. It mainly calls a
3898*4882a593Smuzhiyun  *  function to allocate Rx buffers and inserts them into the buffer
3899*4882a593Smuzhiyun  *  descriptors and then enables the Rx part of the NIC.
3900*4882a593Smuzhiyun  *  Return value:
3901*4882a593Smuzhiyun  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3902*4882a593Smuzhiyun  *   file on failure.
3903*4882a593Smuzhiyun  */
3904*4882a593Smuzhiyun 
3905*4882a593Smuzhiyun static int s2io_open(struct net_device *dev)
3906*4882a593Smuzhiyun {
3907*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
3908*4882a593Smuzhiyun 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3909*4882a593Smuzhiyun 	int err = 0;
3910*4882a593Smuzhiyun 
3911*4882a593Smuzhiyun 	/*
3912*4882a593Smuzhiyun 	 * Make sure the link is off by default every time
3913*4882a593Smuzhiyun 	 * the NIC is initialized
3914*4882a593Smuzhiyun 	 */
3915*4882a593Smuzhiyun 	netif_carrier_off(dev);
3916*4882a593Smuzhiyun 	sp->last_link_state = 0;
3917*4882a593Smuzhiyun 
3918*4882a593Smuzhiyun 	/* Initialize H/W and enable interrupts */
3919*4882a593Smuzhiyun 	err = s2io_card_up(sp);
3920*4882a593Smuzhiyun 	if (err) {
3921*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3922*4882a593Smuzhiyun 			  dev->name);
3923*4882a593Smuzhiyun 		goto hw_init_failed;
3924*4882a593Smuzhiyun 	}
3925*4882a593Smuzhiyun 
3926*4882a593Smuzhiyun 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3927*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3928*4882a593Smuzhiyun 		s2io_card_down(sp);
3929*4882a593Smuzhiyun 		err = -ENODEV;
3930*4882a593Smuzhiyun 		goto hw_init_failed;
3931*4882a593Smuzhiyun 	}
3932*4882a593Smuzhiyun 	s2io_start_all_tx_queue(sp);
3933*4882a593Smuzhiyun 	return 0;
3934*4882a593Smuzhiyun 
3935*4882a593Smuzhiyun hw_init_failed:
3936*4882a593Smuzhiyun 	if (sp->config.intr_type == MSI_X) {
3937*4882a593Smuzhiyun 		if (sp->entries) {
3938*4882a593Smuzhiyun 			kfree(sp->entries);
3939*4882a593Smuzhiyun 			swstats->mem_freed += sp->num_entries *
3940*4882a593Smuzhiyun 				sizeof(struct msix_entry);
3941*4882a593Smuzhiyun 		}
3942*4882a593Smuzhiyun 		if (sp->s2io_entries) {
3943*4882a593Smuzhiyun 			kfree(sp->s2io_entries);
3944*4882a593Smuzhiyun 			swstats->mem_freed += sp->num_entries *
3945*4882a593Smuzhiyun 				sizeof(struct s2io_msix_entry);
3946*4882a593Smuzhiyun 		}
3947*4882a593Smuzhiyun 	}
3948*4882a593Smuzhiyun 	return err;
3949*4882a593Smuzhiyun }
3950*4882a593Smuzhiyun 
3951*4882a593Smuzhiyun /**
3952*4882a593Smuzhiyun  *  s2io_close - close entry point of the driver
3953*4882a593Smuzhiyun  *  @dev : device pointer.
3954*4882a593Smuzhiyun  *  Description:
3955*4882a593Smuzhiyun  *  This is the stop entry point of the driver. It needs to undo exactly
3956*4882a593Smuzhiyun  *  whatever was done by the open entry point, and is thus usually referred
3957*4882a593Smuzhiyun  *  to as the close function. Among other things, this function stops the
3958*4882a593Smuzhiyun  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3959*4882a593Smuzhiyun  *  Return value:
3960*4882a593Smuzhiyun  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3961*4882a593Smuzhiyun  *  file on failure.
3962*4882a593Smuzhiyun  */
3963*4882a593Smuzhiyun 
3964*4882a593Smuzhiyun static int s2io_close(struct net_device *dev)
3965*4882a593Smuzhiyun {
3966*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
3967*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
3968*4882a593Smuzhiyun 	u64 tmp64;
3969*4882a593Smuzhiyun 	int offset;
3970*4882a593Smuzhiyun 
3971*4882a593Smuzhiyun 	/* Return if the device is already closed;
3972*4882a593Smuzhiyun 	 * can happen when s2io_card_up failed in change_mtu.
3973*4882a593Smuzhiyun 	 */
3974*4882a593Smuzhiyun 	if (!is_s2io_card_up(sp))
3975*4882a593Smuzhiyun 		return 0;
3976*4882a593Smuzhiyun 
3977*4882a593Smuzhiyun 	s2io_stop_all_tx_queue(sp);
3978*4882a593Smuzhiyun 	/* delete all populated mac entries */
3979*4882a593Smuzhiyun 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3980*4882a593Smuzhiyun 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3981*4882a593Smuzhiyun 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3982*4882a593Smuzhiyun 			do_s2io_delete_unicast_mc(sp, tmp64);
3983*4882a593Smuzhiyun 	}
3984*4882a593Smuzhiyun 
3985*4882a593Smuzhiyun 	s2io_card_down(sp);
3986*4882a593Smuzhiyun 
3987*4882a593Smuzhiyun 	return 0;
3988*4882a593Smuzhiyun }
3989*4882a593Smuzhiyun 
3990*4882a593Smuzhiyun /**
3991*4882a593Smuzhiyun  *  s2io_xmit - Tx entry point of the driver
3992*4882a593Smuzhiyun  *  @skb : the socket buffer containing the Tx data.
3993*4882a593Smuzhiyun  *  @dev : device pointer.
3994*4882a593Smuzhiyun  *  Description :
3995*4882a593Smuzhiyun  *  This function is the Tx entry point of the driver. S2IO NIC supports
3996*4882a593Smuzhiyun  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3997*4882a593Smuzhiyun  *  NOTE: when the device can't queue the pkt, just the trans_start variable
3998*4882a593Smuzhiyun  *  will not be updated.
3999*4882a593Smuzhiyun  *  Return value:
4000*4882a593Smuzhiyun  *  NETDEV_TX_OK on success; NETDEV_TX_BUSY if the FIFO is temporarily full.
4001*4882a593Smuzhiyun  */
4002*4882a593Smuzhiyun 
4003*4882a593Smuzhiyun static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4004*4882a593Smuzhiyun {
4005*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
4006*4882a593Smuzhiyun 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4007*4882a593Smuzhiyun 	register u64 val64;
4008*4882a593Smuzhiyun 	struct TxD *txdp;
4009*4882a593Smuzhiyun 	struct TxFIFO_element __iomem *tx_fifo;
4010*4882a593Smuzhiyun 	unsigned long flags = 0;
4011*4882a593Smuzhiyun 	u16 vlan_tag = 0;
4012*4882a593Smuzhiyun 	struct fifo_info *fifo = NULL;
4013*4882a593Smuzhiyun 	int offload_type;
4014*4882a593Smuzhiyun 	int enable_per_list_interrupt = 0;
4015*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
4016*4882a593Smuzhiyun 	struct mac_info *mac_control = &sp->mac_control;
4017*4882a593Smuzhiyun 	struct stat_block *stats = mac_control->stats_info;
4018*4882a593Smuzhiyun 	struct swStat *swstats = &stats->sw_stat;
4019*4882a593Smuzhiyun 
4020*4882a593Smuzhiyun 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4021*4882a593Smuzhiyun 
4022*4882a593Smuzhiyun 	if (unlikely(skb->len <= 0)) {
4023*4882a593Smuzhiyun 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4024*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
4025*4882a593Smuzhiyun 		return NETDEV_TX_OK;
4026*4882a593Smuzhiyun 	}
4027*4882a593Smuzhiyun 
4028*4882a593Smuzhiyun 	if (!is_s2io_card_up(sp)) {
4029*4882a593Smuzhiyun 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4030*4882a593Smuzhiyun 			  dev->name);
4031*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
4032*4882a593Smuzhiyun 		return NETDEV_TX_OK;
4033*4882a593Smuzhiyun 	}
4034*4882a593Smuzhiyun 
4035*4882a593Smuzhiyun 	queue = 0;
4036*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb))
4037*4882a593Smuzhiyun 		vlan_tag = skb_vlan_tag_get(skb);
4038*4882a593Smuzhiyun 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4039*4882a593Smuzhiyun 		if (skb->protocol == htons(ETH_P_IP)) {
4040*4882a593Smuzhiyun 			struct iphdr *ip;
4041*4882a593Smuzhiyun 			struct tcphdr *th;
4042*4882a593Smuzhiyun 			ip = ip_hdr(skb);
4043*4882a593Smuzhiyun 
4044*4882a593Smuzhiyun 			if (!ip_is_fragment(ip)) {
4045*4882a593Smuzhiyun 				th = (struct tcphdr *)(((unsigned char *)ip) +
4046*4882a593Smuzhiyun 						       ip->ihl*4);
4047*4882a593Smuzhiyun 
4048*4882a593Smuzhiyun 				if (ip->protocol == IPPROTO_TCP) {
4049*4882a593Smuzhiyun 					queue_len = sp->total_tcp_fifos;
4050*4882a593Smuzhiyun 					queue = (ntohs(th->source) +
4051*4882a593Smuzhiyun 						 ntohs(th->dest)) &
4052*4882a593Smuzhiyun 						sp->fifo_selector[queue_len - 1];
4053*4882a593Smuzhiyun 					if (queue >= queue_len)
4054*4882a593Smuzhiyun 						queue = queue_len - 1;
4055*4882a593Smuzhiyun 				} else if (ip->protocol == IPPROTO_UDP) {
4056*4882a593Smuzhiyun 					queue_len = sp->total_udp_fifos;
4057*4882a593Smuzhiyun 					queue = (ntohs(th->source) +
4058*4882a593Smuzhiyun 						 ntohs(th->dest)) &
4059*4882a593Smuzhiyun 						sp->fifo_selector[queue_len - 1];
4060*4882a593Smuzhiyun 					if (queue >= queue_len)
4061*4882a593Smuzhiyun 						queue = queue_len - 1;
4062*4882a593Smuzhiyun 					queue += sp->udp_fifo_idx;
4063*4882a593Smuzhiyun 					if (skb->len > 1024)
4064*4882a593Smuzhiyun 						enable_per_list_interrupt = 1;
4065*4882a593Smuzhiyun 				}
4066*4882a593Smuzhiyun 			}
4067*4882a593Smuzhiyun 		}
4068*4882a593Smuzhiyun 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4069*4882a593Smuzhiyun 		/* get fifo number based on skb->priority value */
4070*4882a593Smuzhiyun 		queue = config->fifo_mapping
4071*4882a593Smuzhiyun 			[skb->priority & (MAX_TX_FIFOS - 1)];
4072*4882a593Smuzhiyun 	fifo = &mac_control->fifos[queue];
4073*4882a593Smuzhiyun 
4074*4882a593Smuzhiyun 	spin_lock_irqsave(&fifo->tx_lock, flags);
4075*4882a593Smuzhiyun 
4076*4882a593Smuzhiyun 	if (sp->config.multiq) {
4077*4882a593Smuzhiyun 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4078*4882a593Smuzhiyun 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4079*4882a593Smuzhiyun 			return NETDEV_TX_BUSY;
4080*4882a593Smuzhiyun 		}
4081*4882a593Smuzhiyun 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4082*4882a593Smuzhiyun 		if (netif_queue_stopped(dev)) {
4083*4882a593Smuzhiyun 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4084*4882a593Smuzhiyun 			return NETDEV_TX_BUSY;
4085*4882a593Smuzhiyun 		}
4086*4882a593Smuzhiyun 	}
4087*4882a593Smuzhiyun 
4088*4882a593Smuzhiyun 	put_off = (u16)fifo->tx_curr_put_info.offset;
4089*4882a593Smuzhiyun 	get_off = (u16)fifo->tx_curr_get_info.offset;
4090*4882a593Smuzhiyun 	txdp = fifo->list_info[put_off].list_virt_addr;
4091*4882a593Smuzhiyun 
4092*4882a593Smuzhiyun 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4093*4882a593Smuzhiyun 	/* Avoid "put" pointer going beyond "get" pointer */
4094*4882a593Smuzhiyun 	if (txdp->Host_Control ||
4095*4882a593Smuzhiyun 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4096*4882a593Smuzhiyun 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4097*4882a593Smuzhiyun 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4098*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
4099*4882a593Smuzhiyun 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4100*4882a593Smuzhiyun 		return NETDEV_TX_OK;
4101*4882a593Smuzhiyun 	}
4102*4882a593Smuzhiyun 
4103*4882a593Smuzhiyun 	offload_type = s2io_offload_type(skb);
4104*4882a593Smuzhiyun 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4105*4882a593Smuzhiyun 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4106*4882a593Smuzhiyun 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4107*4882a593Smuzhiyun 	}
4108*4882a593Smuzhiyun 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4109*4882a593Smuzhiyun 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4110*4882a593Smuzhiyun 				    TXD_TX_CKO_TCP_EN |
4111*4882a593Smuzhiyun 				    TXD_TX_CKO_UDP_EN);
4112*4882a593Smuzhiyun 	}
4113*4882a593Smuzhiyun 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4114*4882a593Smuzhiyun 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4115*4882a593Smuzhiyun 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4116*4882a593Smuzhiyun 	if (enable_per_list_interrupt)
4117*4882a593Smuzhiyun 		if (put_off & (queue_len >> 5))
4118*4882a593Smuzhiyun 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4119*4882a593Smuzhiyun 	if (vlan_tag) {
4120*4882a593Smuzhiyun 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4121*4882a593Smuzhiyun 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4122*4882a593Smuzhiyun 	}
4123*4882a593Smuzhiyun 
4124*4882a593Smuzhiyun 	frg_len = skb_headlen(skb);
4125*4882a593Smuzhiyun 	txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4126*4882a593Smuzhiyun 					      frg_len, DMA_TO_DEVICE);
4127*4882a593Smuzhiyun 	if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4128*4882a593Smuzhiyun 		goto pci_map_failed;
4129*4882a593Smuzhiyun 
4130*4882a593Smuzhiyun 	txdp->Host_Control = (unsigned long)skb;
4131*4882a593Smuzhiyun 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4132*4882a593Smuzhiyun 
4133*4882a593Smuzhiyun 	frg_cnt = skb_shinfo(skb)->nr_frags;
4134*4882a593Smuzhiyun 	/* For fragmented SKB. */
4135*4882a593Smuzhiyun 	for (i = 0; i < frg_cnt; i++) {
4136*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4137*4882a593Smuzhiyun 		/* A '0' length fragment will be ignored */
4138*4882a593Smuzhiyun 		if (!skb_frag_size(frag))
4139*4882a593Smuzhiyun 			continue;
4140*4882a593Smuzhiyun 		txdp++;
4141*4882a593Smuzhiyun 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4142*4882a593Smuzhiyun 							     frag, 0,
4143*4882a593Smuzhiyun 							     skb_frag_size(frag),
4144*4882a593Smuzhiyun 							     DMA_TO_DEVICE);
4145*4882a593Smuzhiyun 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4146*4882a593Smuzhiyun 	}
4147*4882a593Smuzhiyun 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4148*4882a593Smuzhiyun 
4149*4882a593Smuzhiyun 	tx_fifo = mac_control->tx_FIFO_start[queue];
4150*4882a593Smuzhiyun 	val64 = fifo->list_info[put_off].list_phy_addr;
4151*4882a593Smuzhiyun 	writeq(val64, &tx_fifo->TxDL_Pointer);
4152*4882a593Smuzhiyun 
4153*4882a593Smuzhiyun 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4154*4882a593Smuzhiyun 		 TX_FIFO_LAST_LIST);
4155*4882a593Smuzhiyun 	if (offload_type)
4156*4882a593Smuzhiyun 		val64 |= TX_FIFO_SPECIAL_FUNC;
4157*4882a593Smuzhiyun 
4158*4882a593Smuzhiyun 	writeq(val64, &tx_fifo->List_Control);
4159*4882a593Smuzhiyun 
4160*4882a593Smuzhiyun 	put_off++;
4161*4882a593Smuzhiyun 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4162*4882a593Smuzhiyun 		put_off = 0;
4163*4882a593Smuzhiyun 	fifo->tx_curr_put_info.offset = put_off;
4164*4882a593Smuzhiyun 
4165*4882a593Smuzhiyun 	/* Avoid "put" pointer going beyond "get" pointer */
4166*4882a593Smuzhiyun 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4167*4882a593Smuzhiyun 		swstats->fifo_full_cnt++;
4168*4882a593Smuzhiyun 		DBG_PRINT(TX_DBG,
4169*4882a593Smuzhiyun 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4170*4882a593Smuzhiyun 			  put_off, get_off);
4171*4882a593Smuzhiyun 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4172*4882a593Smuzhiyun 	}
4173*4882a593Smuzhiyun 	swstats->mem_allocated += skb->truesize;
4174*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4175*4882a593Smuzhiyun 
4176*4882a593Smuzhiyun 	if (sp->config.intr_type == MSI_X)
4177*4882a593Smuzhiyun 		tx_intr_handler(fifo);
4178*4882a593Smuzhiyun 
4179*4882a593Smuzhiyun 	return NETDEV_TX_OK;
4180*4882a593Smuzhiyun 
4181*4882a593Smuzhiyun pci_map_failed:
4182*4882a593Smuzhiyun 	swstats->pci_map_fail_cnt++;
4183*4882a593Smuzhiyun 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4184*4882a593Smuzhiyun 	swstats->mem_freed += skb->truesize;
4185*4882a593Smuzhiyun 	dev_kfree_skb_any(skb);
4186*4882a593Smuzhiyun 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4187*4882a593Smuzhiyun 	return NETDEV_TX_OK;
4188*4882a593Smuzhiyun }
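
/*
 * Illustrative sketch of the "no free TXDs" test in s2io_xmit (not
 * built): the descriptor list is a circular queue, and it is treated
 * as full when advancing the put pointer would land on the get
 * pointer, leaving one slot permanently unused.
 */
#if 0
static inline int example_txd_ring_full(u16 put_off, u16 get_off,
					u16 queue_len)
{
	u16 next = (put_off + 1 == queue_len) ? 0 : put_off + 1;

	return next == get_off;		/* queue is considered full */
}
#endif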
4189*4882a593Smuzhiyun 
4190*4882a593Smuzhiyun static void
4191*4882a593Smuzhiyun s2io_alarm_handle(struct timer_list *t)
4192*4882a593Smuzhiyun {
4193*4882a593Smuzhiyun 	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4194*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
4195*4882a593Smuzhiyun 
4196*4882a593Smuzhiyun 	s2io_handle_errors(dev);
4197*4882a593Smuzhiyun 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4198*4882a593Smuzhiyun }
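
/*
 * Init-side counterpart sketch (hypothetical, guarded out of the
 * build): the handler above re-arms itself every half second, so it
 * only needs to be primed once during card bring-up.
 */
#if 0
	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
#endif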
4199*4882a593Smuzhiyun 
4200*4882a593Smuzhiyun static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4201*4882a593Smuzhiyun {
4202*4882a593Smuzhiyun 	struct ring_info *ring = (struct ring_info *)dev_id;
4203*4882a593Smuzhiyun 	struct s2io_nic *sp = ring->nic;
4204*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4205*4882a593Smuzhiyun 
4206*4882a593Smuzhiyun 	if (unlikely(!is_s2io_card_up(sp)))
4207*4882a593Smuzhiyun 		return IRQ_HANDLED;
4208*4882a593Smuzhiyun 
4209*4882a593Smuzhiyun 	if (sp->config.napi) {
4210*4882a593Smuzhiyun 		u8 __iomem *addr = NULL;
4211*4882a593Smuzhiyun 		u8 val8 = 0;
4212*4882a593Smuzhiyun 
4213*4882a593Smuzhiyun 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4214*4882a593Smuzhiyun 		addr += (7 - ring->ring_no);
4215*4882a593Smuzhiyun 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4216*4882a593Smuzhiyun 		writeb(val8, addr);
4217*4882a593Smuzhiyun 		val8 = readb(addr);
4218*4882a593Smuzhiyun 		napi_schedule(&ring->napi);
4219*4882a593Smuzhiyun 	} else {
4220*4882a593Smuzhiyun 		rx_intr_handler(ring, 0);
4221*4882a593Smuzhiyun 		s2io_chk_rx_buffers(sp, ring);
4222*4882a593Smuzhiyun 	}
4223*4882a593Smuzhiyun 
4224*4882a593Smuzhiyun 	return IRQ_HANDLED;
4225*4882a593Smuzhiyun }
4226*4882a593Smuzhiyun 
4227*4882a593Smuzhiyun static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4228*4882a593Smuzhiyun {
4229*4882a593Smuzhiyun 	int i;
4230*4882a593Smuzhiyun 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4231*4882a593Smuzhiyun 	struct s2io_nic *sp = fifos->nic;
4232*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4233*4882a593Smuzhiyun 	struct config_param *config  = &sp->config;
4234*4882a593Smuzhiyun 	u64 reason;
4235*4882a593Smuzhiyun 
4236*4882a593Smuzhiyun 	if (unlikely(!is_s2io_card_up(sp)))
4237*4882a593Smuzhiyun 		return IRQ_NONE;
4238*4882a593Smuzhiyun 
4239*4882a593Smuzhiyun 	reason = readq(&bar0->general_int_status);
4240*4882a593Smuzhiyun 	if (unlikely(reason == S2IO_MINUS_ONE))
4241*4882a593Smuzhiyun 		/* Nothing much can be done. Get out */
4242*4882a593Smuzhiyun 		return IRQ_HANDLED;
4243*4882a593Smuzhiyun 
4244*4882a593Smuzhiyun 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4245*4882a593Smuzhiyun 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4246*4882a593Smuzhiyun 
4247*4882a593Smuzhiyun 		if (reason & GEN_INTR_TXPIC)
4248*4882a593Smuzhiyun 			s2io_txpic_intr_handle(sp);
4249*4882a593Smuzhiyun 
4250*4882a593Smuzhiyun 		if (reason & GEN_INTR_TXTRAFFIC)
4251*4882a593Smuzhiyun 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4252*4882a593Smuzhiyun 
4253*4882a593Smuzhiyun 		for (i = 0; i < config->tx_fifo_num; i++)
4254*4882a593Smuzhiyun 			tx_intr_handler(&fifos[i]);
4255*4882a593Smuzhiyun 
4256*4882a593Smuzhiyun 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4257*4882a593Smuzhiyun 		readl(&bar0->general_int_status);
4258*4882a593Smuzhiyun 		return IRQ_HANDLED;
4259*4882a593Smuzhiyun 	}
4260*4882a593Smuzhiyun 	/* The interrupt was not raised by us */
4261*4882a593Smuzhiyun 	return IRQ_NONE;
4262*4882a593Smuzhiyun }
4263*4882a593Smuzhiyun 
4264*4882a593Smuzhiyun static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4265*4882a593Smuzhiyun {
4266*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4267*4882a593Smuzhiyun 	u64 val64;
4268*4882a593Smuzhiyun 
4269*4882a593Smuzhiyun 	val64 = readq(&bar0->pic_int_status);
4270*4882a593Smuzhiyun 	if (val64 & PIC_INT_GPIO) {
4271*4882a593Smuzhiyun 		val64 = readq(&bar0->gpio_int_reg);
4272*4882a593Smuzhiyun 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4273*4882a593Smuzhiyun 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4274*4882a593Smuzhiyun 			/*
4275*4882a593Smuzhiyun 			 * This is an unstable state, so clear both up/down
4276*4882a593Smuzhiyun 			 * interrupts and let the adapter re-evaluate the link state.
4277*4882a593Smuzhiyun 			 */
4278*4882a593Smuzhiyun 			val64 |= GPIO_INT_REG_LINK_DOWN;
4279*4882a593Smuzhiyun 			val64 |= GPIO_INT_REG_LINK_UP;
4280*4882a593Smuzhiyun 			writeq(val64, &bar0->gpio_int_reg);
4281*4882a593Smuzhiyun 			val64 = readq(&bar0->gpio_int_mask);
4282*4882a593Smuzhiyun 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4283*4882a593Smuzhiyun 				   GPIO_INT_MASK_LINK_DOWN);
4284*4882a593Smuzhiyun 			writeq(val64, &bar0->gpio_int_mask);
4285*4882a593Smuzhiyun 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4286*4882a593Smuzhiyun 			val64 = readq(&bar0->adapter_status);
4287*4882a593Smuzhiyun 			/* Enable Adapter */
4288*4882a593Smuzhiyun 			val64 = readq(&bar0->adapter_control);
4289*4882a593Smuzhiyun 			val64 |= ADAPTER_CNTL_EN;
4290*4882a593Smuzhiyun 			writeq(val64, &bar0->adapter_control);
4291*4882a593Smuzhiyun 			val64 |= ADAPTER_LED_ON;
4292*4882a593Smuzhiyun 			writeq(val64, &bar0->adapter_control);
4293*4882a593Smuzhiyun 			if (!sp->device_enabled_once)
4294*4882a593Smuzhiyun 				sp->device_enabled_once = 1;
4295*4882a593Smuzhiyun 
4296*4882a593Smuzhiyun 			s2io_link(sp, LINK_UP);
4297*4882a593Smuzhiyun 			/*
4298*4882a593Smuzhiyun 			 * unmask link down interrupt and mask link-up
4299*4882a593Smuzhiyun 			 * intr
4300*4882a593Smuzhiyun 			 */
4301*4882a593Smuzhiyun 			val64 = readq(&bar0->gpio_int_mask);
4302*4882a593Smuzhiyun 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4303*4882a593Smuzhiyun 			val64 |= GPIO_INT_MASK_LINK_UP;
4304*4882a593Smuzhiyun 			writeq(val64, &bar0->gpio_int_mask);
4305*4882a593Smuzhiyun 
4306*4882a593Smuzhiyun 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4307*4882a593Smuzhiyun 			val64 = readq(&bar0->adapter_status);
4308*4882a593Smuzhiyun 			s2io_link(sp, LINK_DOWN);
4309*4882a593Smuzhiyun 			/* Link is down, so unmask the link-up interrupt */
4310*4882a593Smuzhiyun 			val64 = readq(&bar0->gpio_int_mask);
4311*4882a593Smuzhiyun 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4312*4882a593Smuzhiyun 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4313*4882a593Smuzhiyun 			writeq(val64, &bar0->gpio_int_mask);
4314*4882a593Smuzhiyun 
4315*4882a593Smuzhiyun 			/* turn off LED */
4316*4882a593Smuzhiyun 			val64 = readq(&bar0->adapter_control);
4317*4882a593Smuzhiyun 			val64 = val64 & (~ADAPTER_LED_ON);
4318*4882a593Smuzhiyun 			writeq(val64, &bar0->adapter_control);
4319*4882a593Smuzhiyun 		}
4320*4882a593Smuzhiyun 	}
4321*4882a593Smuzhiyun 	val64 = readq(&bar0->gpio_int_mask);
4322*4882a593Smuzhiyun }
4323*4882a593Smuzhiyun 
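
/*
 * Sketch of the link-interrupt mask toggling above (illustrative
 * only): after a link-up event the up interrupt is masked and the
 * down interrupt unmasked, and vice versa, so only the next state
 * change fires.
 */
#if 0
static u64 example_toggle_link_masks(u64 mask, int link_up)
{
	if (link_up) {
		mask &= ~GPIO_INT_MASK_LINK_DOWN;	/* watch for down */
		mask |= GPIO_INT_MASK_LINK_UP;		/* ignore more ups */
	} else {
		mask &= ~GPIO_INT_MASK_LINK_UP;
		mask |= GPIO_INT_MASK_LINK_DOWN;
	}
	return mask;
}
#endif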
4324*4882a593Smuzhiyun /**
4325*4882a593Smuzhiyun  *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4326*4882a593Smuzhiyun  *  @value: alarm bits
4327*4882a593Smuzhiyun  *  @addr: address value
4328*4882a593Smuzhiyun  *  @cnt: counter variable
4329*4882a593Smuzhiyun  *  Description: Check for alarm and increment the counter
4330*4882a593Smuzhiyun  *  Return Value:
4331*4882a593Smuzhiyun  *  1 - if alarm bit set
4332*4882a593Smuzhiyun  *  0 - if alarm bit is not set
4333*4882a593Smuzhiyun  */
4334*4882a593Smuzhiyun static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4335*4882a593Smuzhiyun 				 unsigned long long *cnt)
4336*4882a593Smuzhiyun {
4337*4882a593Smuzhiyun 	u64 val64;
4338*4882a593Smuzhiyun 	val64 = readq(addr);
4339*4882a593Smuzhiyun 	if (val64 & value) {
4340*4882a593Smuzhiyun 		writeq(val64, addr);
4341*4882a593Smuzhiyun 		(*cnt)++;
4342*4882a593Smuzhiyun 		return 1;
4343*4882a593Smuzhiyun 	}
4344*4882a593Smuzhiyun 	return 0;
4346*4882a593Smuzhiyun }
4347*4882a593Smuzhiyun 
4348*4882a593Smuzhiyun /**
4349*4882a593Smuzhiyun  *  s2io_handle_errors - Xframe error indication handler
4350*4882a593Smuzhiyun  *  @dev_id: opaque handle to dev
4351*4882a593Smuzhiyun  *  Description: Handle alarms such as loss of link, single or
4352*4882a593Smuzhiyun  *  double ECC errors, critical and serious errors.
4353*4882a593Smuzhiyun  *  Return Value:
4354*4882a593Smuzhiyun  *  NONE
4355*4882a593Smuzhiyun  */
s2io_handle_errors(void * dev_id)4356*4882a593Smuzhiyun static void s2io_handle_errors(void *dev_id)
4357*4882a593Smuzhiyun {
4358*4882a593Smuzhiyun 	struct net_device *dev = (struct net_device *)dev_id;
4359*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
4360*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4361*4882a593Smuzhiyun 	u64 temp64 = 0, val64 = 0;
4362*4882a593Smuzhiyun 	int i = 0;
4363*4882a593Smuzhiyun 
4364*4882a593Smuzhiyun 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4365*4882a593Smuzhiyun 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4366*4882a593Smuzhiyun 
4367*4882a593Smuzhiyun 	if (!is_s2io_card_up(sp))
4368*4882a593Smuzhiyun 		return;
4369*4882a593Smuzhiyun 
4370*4882a593Smuzhiyun 	if (pci_channel_offline(sp->pdev))
4371*4882a593Smuzhiyun 		return;
4372*4882a593Smuzhiyun 
4373*4882a593Smuzhiyun 	memset(&sw_stat->ring_full_cnt, 0,
4374*4882a593Smuzhiyun 	       sizeof(sw_stat->ring_full_cnt));
4375*4882a593Smuzhiyun 
4376*4882a593Smuzhiyun 	/* Handling the XPAK counters update */
4377*4882a593Smuzhiyun 	if (stats->xpak_timer_count < 72000) {
4378*4882a593Smuzhiyun 		/* waiting for an hour */
4379*4882a593Smuzhiyun 		stats->xpak_timer_count++;
4380*4882a593Smuzhiyun 	} else {
4381*4882a593Smuzhiyun 		s2io_updt_xpak_counter(dev);
4382*4882a593Smuzhiyun 		/* reset the count to zero */
4383*4882a593Smuzhiyun 		stats->xpak_timer_count = 0;
4384*4882a593Smuzhiyun 	}
4385*4882a593Smuzhiyun 
4386*4882a593Smuzhiyun 	/* Handling link status change error Intr */
4387*4882a593Smuzhiyun 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4388*4882a593Smuzhiyun 		val64 = readq(&bar0->mac_rmac_err_reg);
4389*4882a593Smuzhiyun 		writeq(val64, &bar0->mac_rmac_err_reg);
4390*4882a593Smuzhiyun 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4391*4882a593Smuzhiyun 			schedule_work(&sp->set_link_task);
4392*4882a593Smuzhiyun 	}
4393*4882a593Smuzhiyun 
4394*4882a593Smuzhiyun 	/* In case of a serious error, the device will be Reset. */
4395*4882a593Smuzhiyun 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4396*4882a593Smuzhiyun 				  &sw_stat->serious_err_cnt))
4397*4882a593Smuzhiyun 		goto reset;
4398*4882a593Smuzhiyun 
4399*4882a593Smuzhiyun 	/* Check for data parity error */
4400*4882a593Smuzhiyun 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4401*4882a593Smuzhiyun 				  &sw_stat->parity_err_cnt))
4402*4882a593Smuzhiyun 		goto reset;
4403*4882a593Smuzhiyun 
4404*4882a593Smuzhiyun 	/* Check for ring full counter */
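	/*
	 * Each 64-bit ring_bump_counter register packs four 16-bit
	 * counters, most significant field first.  vBIT(0xFFFF, i*16, 16)
	 * masks field i and the shift by 64 - ((i + 1) * 16) moves it down
	 * to bits 15:0; e.g. for i = 1 the mask covers bits 47:32 and the
	 * field is shifted right by 32.
	 */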
4405*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_II_DEVICE) {
4406*4882a593Smuzhiyun 		val64 = readq(&bar0->ring_bump_counter1);
4407*4882a593Smuzhiyun 		for (i = 0; i < 4; i++) {
4408*4882a593Smuzhiyun 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4409*4882a593Smuzhiyun 			temp64 >>= 64 - ((i+1)*16);
4410*4882a593Smuzhiyun 			sw_stat->ring_full_cnt[i] += temp64;
4411*4882a593Smuzhiyun 		}
4412*4882a593Smuzhiyun 
4413*4882a593Smuzhiyun 		val64 = readq(&bar0->ring_bump_counter2);
4414*4882a593Smuzhiyun 		for (i = 0; i < 4; i++) {
4415*4882a593Smuzhiyun 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4416*4882a593Smuzhiyun 			temp64 >>= 64 - ((i+1)*16);
4417*4882a593Smuzhiyun 			sw_stat->ring_full_cnt[i+4] += temp64;
4418*4882a593Smuzhiyun 		}
4419*4882a593Smuzhiyun 	}
4420*4882a593Smuzhiyun 
4421*4882a593Smuzhiyun 	val64 = readq(&bar0->txdma_int_status);
4422*4882a593Smuzhiyun 	/* check for pfc_err */
4423*4882a593Smuzhiyun 	if (val64 & TXDMA_PFC_INT) {
4424*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4425*4882a593Smuzhiyun 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4426*4882a593Smuzhiyun 					  PFC_PCIX_ERR,
4427*4882a593Smuzhiyun 					  &bar0->pfc_err_reg,
4428*4882a593Smuzhiyun 					  &sw_stat->pfc_err_cnt))
4429*4882a593Smuzhiyun 			goto reset;
4430*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4431*4882a593Smuzhiyun 				      &bar0->pfc_err_reg,
4432*4882a593Smuzhiyun 				      &sw_stat->pfc_err_cnt);
4433*4882a593Smuzhiyun 	}
4434*4882a593Smuzhiyun 
4435*4882a593Smuzhiyun 	/* check for tda_err */
4436*4882a593Smuzhiyun 	if (val64 & TXDMA_TDA_INT) {
4437*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4438*4882a593Smuzhiyun 					  TDA_SM0_ERR_ALARM |
4439*4882a593Smuzhiyun 					  TDA_SM1_ERR_ALARM,
4440*4882a593Smuzhiyun 					  &bar0->tda_err_reg,
4441*4882a593Smuzhiyun 					  &sw_stat->tda_err_cnt))
4442*4882a593Smuzhiyun 			goto reset;
4443*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4444*4882a593Smuzhiyun 				      &bar0->tda_err_reg,
4445*4882a593Smuzhiyun 				      &sw_stat->tda_err_cnt);
4446*4882a593Smuzhiyun 	}
4447*4882a593Smuzhiyun 	/* check for pcc_err */
4448*4882a593Smuzhiyun 	if (val64 & TXDMA_PCC_INT) {
4449*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4450*4882a593Smuzhiyun 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4451*4882a593Smuzhiyun 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4452*4882a593Smuzhiyun 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4453*4882a593Smuzhiyun 					  PCC_TXB_ECC_DB_ERR,
4454*4882a593Smuzhiyun 					  &bar0->pcc_err_reg,
4455*4882a593Smuzhiyun 					  &sw_stat->pcc_err_cnt))
4456*4882a593Smuzhiyun 			goto reset;
4457*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4458*4882a593Smuzhiyun 				      &bar0->pcc_err_reg,
4459*4882a593Smuzhiyun 				      &sw_stat->pcc_err_cnt);
4460*4882a593Smuzhiyun 	}
4461*4882a593Smuzhiyun 
4462*4882a593Smuzhiyun 	/* check for tti_err */
4463*4882a593Smuzhiyun 	if (val64 & TXDMA_TTI_INT) {
4464*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4465*4882a593Smuzhiyun 					  &bar0->tti_err_reg,
4466*4882a593Smuzhiyun 					  &sw_stat->tti_err_cnt))
4467*4882a593Smuzhiyun 			goto reset;
4468*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4469*4882a593Smuzhiyun 				      &bar0->tti_err_reg,
4470*4882a593Smuzhiyun 				      &sw_stat->tti_err_cnt);
4471*4882a593Smuzhiyun 	}
4472*4882a593Smuzhiyun 
4473*4882a593Smuzhiyun 	/* check for lso_err */
4474*4882a593Smuzhiyun 	if (val64 & TXDMA_LSO_INT) {
4475*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4476*4882a593Smuzhiyun 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4477*4882a593Smuzhiyun 					  &bar0->lso_err_reg,
4478*4882a593Smuzhiyun 					  &sw_stat->lso_err_cnt))
4479*4882a593Smuzhiyun 			goto reset;
4480*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4481*4882a593Smuzhiyun 				      &bar0->lso_err_reg,
4482*4882a593Smuzhiyun 				      &sw_stat->lso_err_cnt);
4483*4882a593Smuzhiyun 	}
4484*4882a593Smuzhiyun 
4485*4882a593Smuzhiyun 	/* check for tpa_err */
4486*4882a593Smuzhiyun 	if (val64 & TXDMA_TPA_INT) {
4487*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4488*4882a593Smuzhiyun 					  &bar0->tpa_err_reg,
4489*4882a593Smuzhiyun 					  &sw_stat->tpa_err_cnt))
4490*4882a593Smuzhiyun 			goto reset;
4491*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4492*4882a593Smuzhiyun 				      &bar0->tpa_err_reg,
4493*4882a593Smuzhiyun 				      &sw_stat->tpa_err_cnt);
4494*4882a593Smuzhiyun 	}
4495*4882a593Smuzhiyun 
4496*4882a593Smuzhiyun 	/* check for sm_err */
4497*4882a593Smuzhiyun 	if (val64 & TXDMA_SM_INT) {
4498*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4499*4882a593Smuzhiyun 					  &bar0->sm_err_reg,
4500*4882a593Smuzhiyun 					  &sw_stat->sm_err_cnt))
4501*4882a593Smuzhiyun 			goto reset;
4502*4882a593Smuzhiyun 	}
4503*4882a593Smuzhiyun 
4504*4882a593Smuzhiyun 	val64 = readq(&bar0->mac_int_status);
4505*4882a593Smuzhiyun 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4506*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4507*4882a593Smuzhiyun 					  &bar0->mac_tmac_err_reg,
4508*4882a593Smuzhiyun 					  &sw_stat->mac_tmac_err_cnt))
4509*4882a593Smuzhiyun 			goto reset;
4510*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4511*4882a593Smuzhiyun 				      TMAC_DESC_ECC_SG_ERR |
4512*4882a593Smuzhiyun 				      TMAC_DESC_ECC_DB_ERR,
4513*4882a593Smuzhiyun 				      &bar0->mac_tmac_err_reg,
4514*4882a593Smuzhiyun 				      &sw_stat->mac_tmac_err_cnt);
4515*4882a593Smuzhiyun 	}
4516*4882a593Smuzhiyun 
4517*4882a593Smuzhiyun 	val64 = readq(&bar0->xgxs_int_status);
4518*4882a593Smuzhiyun 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4519*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4520*4882a593Smuzhiyun 					  &bar0->xgxs_txgxs_err_reg,
4521*4882a593Smuzhiyun 					  &sw_stat->xgxs_txgxs_err_cnt))
4522*4882a593Smuzhiyun 			goto reset;
4523*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4524*4882a593Smuzhiyun 				      &bar0->xgxs_txgxs_err_reg,
4525*4882a593Smuzhiyun 				      &sw_stat->xgxs_txgxs_err_cnt);
4526*4882a593Smuzhiyun 	}
4527*4882a593Smuzhiyun 
4528*4882a593Smuzhiyun 	val64 = readq(&bar0->rxdma_int_status);
4529*4882a593Smuzhiyun 	if (val64 & RXDMA_INT_RC_INT_M) {
4530*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4531*4882a593Smuzhiyun 					  RC_FTC_ECC_DB_ERR |
4532*4882a593Smuzhiyun 					  RC_PRCn_SM_ERR_ALARM |
4533*4882a593Smuzhiyun 					  RC_FTC_SM_ERR_ALARM,
4534*4882a593Smuzhiyun 					  &bar0->rc_err_reg,
4535*4882a593Smuzhiyun 					  &sw_stat->rc_err_cnt))
4536*4882a593Smuzhiyun 			goto reset;
4537*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4538*4882a593Smuzhiyun 				      RC_FTC_ECC_SG_ERR |
4539*4882a593Smuzhiyun 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4540*4882a593Smuzhiyun 				      &sw_stat->rc_err_cnt);
4541*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4542*4882a593Smuzhiyun 					  PRC_PCI_AB_WR_Rn |
4543*4882a593Smuzhiyun 					  PRC_PCI_AB_F_WR_Rn,
4544*4882a593Smuzhiyun 					  &bar0->prc_pcix_err_reg,
4545*4882a593Smuzhiyun 					  &sw_stat->prc_pcix_err_cnt))
4546*4882a593Smuzhiyun 			goto reset;
4547*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4548*4882a593Smuzhiyun 				      PRC_PCI_DP_WR_Rn |
4549*4882a593Smuzhiyun 				      PRC_PCI_DP_F_WR_Rn,
4550*4882a593Smuzhiyun 				      &bar0->prc_pcix_err_reg,
4551*4882a593Smuzhiyun 				      &sw_stat->prc_pcix_err_cnt);
4552*4882a593Smuzhiyun 	}
4553*4882a593Smuzhiyun 
4554*4882a593Smuzhiyun 	if (val64 & RXDMA_INT_RPA_INT_M) {
4555*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4556*4882a593Smuzhiyun 					  &bar0->rpa_err_reg,
4557*4882a593Smuzhiyun 					  &sw_stat->rpa_err_cnt))
4558*4882a593Smuzhiyun 			goto reset;
4559*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4560*4882a593Smuzhiyun 				      &bar0->rpa_err_reg,
4561*4882a593Smuzhiyun 				      &sw_stat->rpa_err_cnt);
4562*4882a593Smuzhiyun 	}
4563*4882a593Smuzhiyun 
4564*4882a593Smuzhiyun 	if (val64 & RXDMA_INT_RDA_INT_M) {
4565*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4566*4882a593Smuzhiyun 					  RDA_FRM_ECC_DB_N_AERR |
4567*4882a593Smuzhiyun 					  RDA_SM1_ERR_ALARM |
4568*4882a593Smuzhiyun 					  RDA_SM0_ERR_ALARM |
4569*4882a593Smuzhiyun 					  RDA_RXD_ECC_DB_SERR,
4570*4882a593Smuzhiyun 					  &bar0->rda_err_reg,
4571*4882a593Smuzhiyun 					  &sw_stat->rda_err_cnt))
4572*4882a593Smuzhiyun 			goto reset;
4573*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4574*4882a593Smuzhiyun 				      RDA_FRM_ECC_SG_ERR |
4575*4882a593Smuzhiyun 				      RDA_MISC_ERR |
4576*4882a593Smuzhiyun 				      RDA_PCIX_ERR,
4577*4882a593Smuzhiyun 				      &bar0->rda_err_reg,
4578*4882a593Smuzhiyun 				      &sw_stat->rda_err_cnt);
4579*4882a593Smuzhiyun 	}
4580*4882a593Smuzhiyun 
4581*4882a593Smuzhiyun 	if (val64 & RXDMA_INT_RTI_INT_M) {
4582*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4583*4882a593Smuzhiyun 					  &bar0->rti_err_reg,
4584*4882a593Smuzhiyun 					  &sw_stat->rti_err_cnt))
4585*4882a593Smuzhiyun 			goto reset;
4586*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4587*4882a593Smuzhiyun 				      &bar0->rti_err_reg,
4588*4882a593Smuzhiyun 				      &sw_stat->rti_err_cnt);
4589*4882a593Smuzhiyun 	}
4590*4882a593Smuzhiyun 
4591*4882a593Smuzhiyun 	val64 = readq(&bar0->mac_int_status);
4592*4882a593Smuzhiyun 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4593*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4594*4882a593Smuzhiyun 					  &bar0->mac_rmac_err_reg,
4595*4882a593Smuzhiyun 					  &sw_stat->mac_rmac_err_cnt))
4596*4882a593Smuzhiyun 			goto reset;
4597*4882a593Smuzhiyun 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4598*4882a593Smuzhiyun 				      RMAC_SINGLE_ECC_ERR |
4599*4882a593Smuzhiyun 				      RMAC_DOUBLE_ECC_ERR,
4600*4882a593Smuzhiyun 				      &bar0->mac_rmac_err_reg,
4601*4882a593Smuzhiyun 				      &sw_stat->mac_rmac_err_cnt);
4602*4882a593Smuzhiyun 	}
4603*4882a593Smuzhiyun 
4604*4882a593Smuzhiyun 	val64 = readq(&bar0->xgxs_int_status);
4605*4882a593Smuzhiyun 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4606*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4607*4882a593Smuzhiyun 					  &bar0->xgxs_rxgxs_err_reg,
4608*4882a593Smuzhiyun 					  &sw_stat->xgxs_rxgxs_err_cnt))
4609*4882a593Smuzhiyun 			goto reset;
4610*4882a593Smuzhiyun 	}
4611*4882a593Smuzhiyun 
4612*4882a593Smuzhiyun 	val64 = readq(&bar0->mc_int_status);
4613*4882a593Smuzhiyun 	if (val64 & MC_INT_STATUS_MC_INT) {
4614*4882a593Smuzhiyun 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4615*4882a593Smuzhiyun 					  &bar0->mc_err_reg,
4616*4882a593Smuzhiyun 					  &sw_stat->mc_err_cnt))
4617*4882a593Smuzhiyun 			goto reset;
4618*4882a593Smuzhiyun 
4619*4882a593Smuzhiyun 		/* Handling ECC errors */
4620*4882a593Smuzhiyun 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4621*4882a593Smuzhiyun 			writeq(val64, &bar0->mc_err_reg);
4622*4882a593Smuzhiyun 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4623*4882a593Smuzhiyun 				sw_stat->double_ecc_errs++;
4624*4882a593Smuzhiyun 				if (sp->device_type != XFRAME_II_DEVICE) {
4625*4882a593Smuzhiyun 					/*
4626*4882a593Smuzhiyun 					 * Reset XframeI only if critical error
4627*4882a593Smuzhiyun 					 */
4628*4882a593Smuzhiyun 					if (val64 &
4629*4882a593Smuzhiyun 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4630*4882a593Smuzhiyun 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4631*4882a593Smuzhiyun 						goto reset;
4632*4882a593Smuzhiyun 				}
4633*4882a593Smuzhiyun 			} else
4634*4882a593Smuzhiyun 				sw_stat->single_ecc_errs++;
4635*4882a593Smuzhiyun 		}
4636*4882a593Smuzhiyun 	}
4637*4882a593Smuzhiyun 	return;
4638*4882a593Smuzhiyun 
4639*4882a593Smuzhiyun reset:
4640*4882a593Smuzhiyun 	s2io_stop_all_tx_queue(sp);
4641*4882a593Smuzhiyun 	schedule_work(&sp->rst_timer_task);
4642*4882a593Smuzhiyun 	sw_stat->soft_reset_cnt++;
4643*4882a593Smuzhiyun }
4644*4882a593Smuzhiyun 
4645*4882a593Smuzhiyun /**
4646*4882a593Smuzhiyun  *  s2io_isr - ISR handler of the device.
4647*4882a593Smuzhiyun  *  @irq: the irq of the device.
4648*4882a593Smuzhiyun  *  @dev_id: a void pointer to the dev structure of the NIC.
4649*4882a593Smuzhiyun  *  Description:  This function is the ISR handler of the device. It
4650*4882a593Smuzhiyun  *  identifies the reason for the interrupt and calls the relevant
4651*4882a593Smuzhiyun  *  service routines. As a contingency measure, this ISR allocates the
4652*4882a593Smuzhiyun  *  recv buffers, if their numbers are below the panic value which is
4653*4882a593Smuzhiyun  *  presently set to 25% of the original number of rcv buffers allocated.
4654*4882a593Smuzhiyun  *  Return value:
4655*4882a593Smuzhiyun  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4656*4882a593Smuzhiyun  *   IRQ_NONE: will be returned if interrupt is not from our device
4657*4882a593Smuzhiyun  */
4658*4882a593Smuzhiyun static irqreturn_t s2io_isr(int irq, void *dev_id)
4659*4882a593Smuzhiyun {
4660*4882a593Smuzhiyun 	struct net_device *dev = (struct net_device *)dev_id;
4661*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
4662*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4663*4882a593Smuzhiyun 	int i;
4664*4882a593Smuzhiyun 	u64 reason = 0;
4665*4882a593Smuzhiyun 	struct mac_info *mac_control;
4666*4882a593Smuzhiyun 	struct config_param *config;
4667*4882a593Smuzhiyun 
4668*4882a593Smuzhiyun 	/* Pretend we handled any irqs from a disconnected card */
4669*4882a593Smuzhiyun 	if (pci_channel_offline(sp->pdev))
4670*4882a593Smuzhiyun 		return IRQ_NONE;
4671*4882a593Smuzhiyun 
4672*4882a593Smuzhiyun 	if (!is_s2io_card_up(sp))
4673*4882a593Smuzhiyun 		return IRQ_NONE;
4674*4882a593Smuzhiyun 
4675*4882a593Smuzhiyun 	config = &sp->config;
4676*4882a593Smuzhiyun 	mac_control = &sp->mac_control;
4677*4882a593Smuzhiyun 
4678*4882a593Smuzhiyun 	/*
4679*4882a593Smuzhiyun 	 * Identify the cause for interrupt and call the appropriate
4680*4882a593Smuzhiyun 	 * interrupt handler. Causes for the interrupt could be;
4681*4882a593Smuzhiyun 	 * 1. Rx of packet.
4682*4882a593Smuzhiyun 	 * 2. Tx complete.
4683*4882a593Smuzhiyun 	 * 3. Link down.
4684*4882a593Smuzhiyun 	 */
4685*4882a593Smuzhiyun 	reason = readq(&bar0->general_int_status);
4686*4882a593Smuzhiyun 
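	/*
	 * A value of all 1's (S2IO_MINUS_ONE) typically means reads to the
	 * device are failing (e.g. the card was surprise-removed), so the
	 * status cannot be decoded bit by bit and we bail out below.
	 */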
4687*4882a593Smuzhiyun 	if (unlikely(reason == S2IO_MINUS_ONE))
4688*4882a593Smuzhiyun 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4689*4882a593Smuzhiyun 
4690*4882a593Smuzhiyun 	if (reason &
4691*4882a593Smuzhiyun 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4692*4882a593Smuzhiyun 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4693*4882a593Smuzhiyun 
4694*4882a593Smuzhiyun 		if (config->napi) {
4695*4882a593Smuzhiyun 			if (reason & GEN_INTR_RXTRAFFIC) {
4696*4882a593Smuzhiyun 				napi_schedule(&sp->napi);
4697*4882a593Smuzhiyun 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4698*4882a593Smuzhiyun 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4699*4882a593Smuzhiyun 				readl(&bar0->rx_traffic_int);
4700*4882a593Smuzhiyun 			}
4701*4882a593Smuzhiyun 		} else {
4702*4882a593Smuzhiyun 			/*
4703*4882a593Smuzhiyun 			 * rx_traffic_int reg is an R1 register, writing all 1's
4704*4882a593Smuzhiyun 			 * will ensure that the actual interrupt-causing bit
4705*4882a593Smuzhiyun 			 * gets cleared and hence a read can be avoided.
4706*4882a593Smuzhiyun 			 */
4707*4882a593Smuzhiyun 			if (reason & GEN_INTR_RXTRAFFIC)
4708*4882a593Smuzhiyun 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4709*4882a593Smuzhiyun 
4710*4882a593Smuzhiyun 			for (i = 0; i < config->rx_ring_num; i++) {
4711*4882a593Smuzhiyun 				struct ring_info *ring = &mac_control->rings[i];
4712*4882a593Smuzhiyun 
4713*4882a593Smuzhiyun 				rx_intr_handler(ring, 0);
4714*4882a593Smuzhiyun 			}
4715*4882a593Smuzhiyun 		}
4716*4882a593Smuzhiyun 
4717*4882a593Smuzhiyun 		/*
4718*4882a593Smuzhiyun 		 * tx_traffic_int reg is an R1 register, writing all 1's
4719*4882a593Smuzhiyun 		 * will ensure that the actual interrupt-causing bit gets
4720*4882a593Smuzhiyun 		 * cleared and hence a read can be avoided.
4721*4882a593Smuzhiyun 		 */
4722*4882a593Smuzhiyun 		if (reason & GEN_INTR_TXTRAFFIC)
4723*4882a593Smuzhiyun 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4724*4882a593Smuzhiyun 
4725*4882a593Smuzhiyun 		for (i = 0; i < config->tx_fifo_num; i++)
4726*4882a593Smuzhiyun 			tx_intr_handler(&mac_control->fifos[i]);
4727*4882a593Smuzhiyun 
4728*4882a593Smuzhiyun 		if (reason & GEN_INTR_TXPIC)
4729*4882a593Smuzhiyun 			s2io_txpic_intr_handle(sp);
4730*4882a593Smuzhiyun 
4731*4882a593Smuzhiyun 		/*
4732*4882a593Smuzhiyun 		 * Reallocate the buffers from the interrupt handler itself.
4733*4882a593Smuzhiyun 		 */
4734*4882a593Smuzhiyun 		if (!config->napi) {
4735*4882a593Smuzhiyun 			for (i = 0; i < config->rx_ring_num; i++) {
4736*4882a593Smuzhiyun 				struct ring_info *ring = &mac_control->rings[i];
4737*4882a593Smuzhiyun 
4738*4882a593Smuzhiyun 				s2io_chk_rx_buffers(sp, ring);
4739*4882a593Smuzhiyun 			}
4740*4882a593Smuzhiyun 		}
4741*4882a593Smuzhiyun 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4742*4882a593Smuzhiyun 		readl(&bar0->general_int_status);
4743*4882a593Smuzhiyun 
4744*4882a593Smuzhiyun 		return IRQ_HANDLED;
4745*4882a593Smuzhiyun 
4746*4882a593Smuzhiyun 	} else if (!reason) {
4747*4882a593Smuzhiyun 		/* The interrupt was not raised by us */
4748*4882a593Smuzhiyun 		return IRQ_NONE;
4749*4882a593Smuzhiyun 	}
4750*4882a593Smuzhiyun 
4751*4882a593Smuzhiyun 	return IRQ_HANDLED;
4752*4882a593Smuzhiyun }
4753*4882a593Smuzhiyun 
4754*4882a593Smuzhiyun /*
4755*4882a593Smuzhiyun  * s2io_updt_stats - trigger a one-shot hardware statistics update
4756*4882a593Smuzhiyun  */
4757*4882a593Smuzhiyun static void s2io_updt_stats(struct s2io_nic *sp)
4758*4882a593Smuzhiyun {
4759*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4760*4882a593Smuzhiyun 	u64 val64;
4761*4882a593Smuzhiyun 	int cnt = 0;
4762*4882a593Smuzhiyun 
4763*4882a593Smuzhiyun 	if (is_s2io_card_up(sp)) {
4764*4882a593Smuzhiyun 		/* Approx 30us on a 133 MHz bus */
4765*4882a593Smuzhiyun 		val64 = SET_UPDT_CLICKS(10) |
4766*4882a593Smuzhiyun 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4767*4882a593Smuzhiyun 		writeq(val64, &bar0->stat_cfg);
4768*4882a593Smuzhiyun 		do {
4769*4882a593Smuzhiyun 			udelay(100);
4770*4882a593Smuzhiyun 			val64 = readq(&bar0->stat_cfg);
4771*4882a593Smuzhiyun 			if (!(val64 & s2BIT(0)))
4772*4882a593Smuzhiyun 				break;
4773*4882a593Smuzhiyun 			cnt++;
4774*4882a593Smuzhiyun 			if (cnt == 5)
4775*4882a593Smuzhiyun 				break; /* Updt failed */
4776*4882a593Smuzhiyun 		} while (1);
4777*4882a593Smuzhiyun 	}
4778*4882a593Smuzhiyun }
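/*
 * Note: s2BIT(0) selects the most significant bit of the 64-bit word (per
 * the driver's bit macros), so the loop above polls the stat_cfg "update
 * in progress" flag for at most 5 * 100us before treating the one-shot
 * update as failed.
 */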
4779*4882a593Smuzhiyun 
4780*4882a593Smuzhiyun /**
4781*4882a593Smuzhiyun  *  s2io_get_stats - Updates the device statistics structure.
4782*4882a593Smuzhiyun  *  @dev : pointer to the device structure.
4783*4882a593Smuzhiyun  *  Description:
4784*4882a593Smuzhiyun  *  This function updates the device statistics structure in the s2io_nic
4785*4882a593Smuzhiyun  *  structure and returns a pointer to the same.
4786*4882a593Smuzhiyun  *  Return value:
4787*4882a593Smuzhiyun  *  pointer to the updated net_device_stats structure.
4788*4882a593Smuzhiyun  */
4789*4882a593Smuzhiyun static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4790*4882a593Smuzhiyun {
4791*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
4792*4882a593Smuzhiyun 	struct mac_info *mac_control = &sp->mac_control;
4793*4882a593Smuzhiyun 	struct stat_block *stats = mac_control->stats_info;
4794*4882a593Smuzhiyun 	u64 delta;
4795*4882a593Smuzhiyun 
4796*4882a593Smuzhiyun 	/* Configure Stats for immediate updt */
4797*4882a593Smuzhiyun 	s2io_updt_stats(sp);
4798*4882a593Smuzhiyun 
4799*4882a593Smuzhiyun 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4800*4882a593Smuzhiyun 	 * This can be done while running by changing the MTU.  To prevent the
4801*4882a593Smuzhiyun 	 * system from having the stats zero'ed, the driver keeps a copy of the
4802*4882a593Smuzhiyun 	 * last update to the system (which is also zero'ed on reset).  This
4803*4882a593Smuzhiyun 	 * enables the driver to accurately know the delta between the last
4804*4882a593Smuzhiyun 	 * update and the current update.
4805*4882a593Smuzhiyun 	 */
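	/*
	 * Each counter is a 32-bit value plus a 32-bit overflow word, so
	 * the full 64-bit count is assembled as, e.g.:
	 *
	 *	count = ((u64)le32_to_cpu(oflow) << 32) | le32_to_cpu(lo);
	 *
	 * and only the delta against the driver's saved copy is folded
	 * into both sp->stats and dev->stats below.
	 */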
4806*4882a593Smuzhiyun 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4807*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4808*4882a593Smuzhiyun 	sp->stats.rx_packets += delta;
4809*4882a593Smuzhiyun 	dev->stats.rx_packets += delta;
4810*4882a593Smuzhiyun 
4811*4882a593Smuzhiyun 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4812*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4813*4882a593Smuzhiyun 	sp->stats.tx_packets += delta;
4814*4882a593Smuzhiyun 	dev->stats.tx_packets += delta;
4815*4882a593Smuzhiyun 
4816*4882a593Smuzhiyun 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4817*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4818*4882a593Smuzhiyun 	sp->stats.rx_bytes += delta;
4819*4882a593Smuzhiyun 	dev->stats.rx_bytes += delta;
4820*4882a593Smuzhiyun 
4821*4882a593Smuzhiyun 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4822*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4823*4882a593Smuzhiyun 	sp->stats.tx_bytes += delta;
4824*4882a593Smuzhiyun 	dev->stats.tx_bytes += delta;
4825*4882a593Smuzhiyun 
4826*4882a593Smuzhiyun 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4827*4882a593Smuzhiyun 	sp->stats.rx_errors += delta;
4828*4882a593Smuzhiyun 	dev->stats.rx_errors += delta;
4829*4882a593Smuzhiyun 
4830*4882a593Smuzhiyun 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4831*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4832*4882a593Smuzhiyun 	sp->stats.tx_errors += delta;
4833*4882a593Smuzhiyun 	dev->stats.tx_errors += delta;
4834*4882a593Smuzhiyun 
4835*4882a593Smuzhiyun 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4836*4882a593Smuzhiyun 	sp->stats.rx_dropped += delta;
4837*4882a593Smuzhiyun 	dev->stats.rx_dropped += delta;
4838*4882a593Smuzhiyun 
4839*4882a593Smuzhiyun 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4840*4882a593Smuzhiyun 	sp->stats.tx_dropped += delta;
4841*4882a593Smuzhiyun 	dev->stats.tx_dropped += delta;
4842*4882a593Smuzhiyun 
4843*4882a593Smuzhiyun 	/* The adapter MAC interprets pause frames as multicast packets, but
4844*4882a593Smuzhiyun 	 * does not pass them up.  This erroneously increases the multicast
4845*4882a593Smuzhiyun 	 * packet count and needs to be deducted when the multicast frame count
4846*4882a593Smuzhiyun 	 * is queried.
4847*4882a593Smuzhiyun 	 */
4848*4882a593Smuzhiyun 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4849*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4850*4882a593Smuzhiyun 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4851*4882a593Smuzhiyun 	delta -= sp->stats.multicast;
4852*4882a593Smuzhiyun 	sp->stats.multicast += delta;
4853*4882a593Smuzhiyun 	dev->stats.multicast += delta;
4854*4882a593Smuzhiyun 
4855*4882a593Smuzhiyun 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4856*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_usized_frms)) +
4857*4882a593Smuzhiyun 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4858*4882a593Smuzhiyun 	sp->stats.rx_length_errors += delta;
4859*4882a593Smuzhiyun 	dev->stats.rx_length_errors += delta;
4860*4882a593Smuzhiyun 
4861*4882a593Smuzhiyun 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4862*4882a593Smuzhiyun 	sp->stats.rx_crc_errors += delta;
4863*4882a593Smuzhiyun 	dev->stats.rx_crc_errors += delta;
4864*4882a593Smuzhiyun 
4865*4882a593Smuzhiyun 	return &dev->stats;
4866*4882a593Smuzhiyun }
4867*4882a593Smuzhiyun 
4868*4882a593Smuzhiyun /**
4869*4882a593Smuzhiyun  *  s2io_set_multicast - entry point for multicast address enable/disable.
4870*4882a593Smuzhiyun  *  @dev : pointer to the device structure
4871*4882a593Smuzhiyun  *  Description:
4872*4882a593Smuzhiyun  *  This function is a driver entry point which gets called by the kernel
4873*4882a593Smuzhiyun  *  whenever multicast addresses must be enabled/disabled. This also gets
4874*4882a593Smuzhiyun  *  called to set/reset promiscuous mode. Depending on the device flags, we
4875*4882a593Smuzhiyun  *  determine whether multicast addresses must be enabled or promiscuous mode
4876*4882a593Smuzhiyun  *  is to be disabled, etc.
4877*4882a593Smuzhiyun  *  Return value:
4878*4882a593Smuzhiyun  *  void.
4879*4882a593Smuzhiyun  */
4880*4882a593Smuzhiyun 
4881*4882a593Smuzhiyun static void s2io_set_multicast(struct net_device *dev)
4882*4882a593Smuzhiyun {
4883*4882a593Smuzhiyun 	int i, j, prev_cnt;
4884*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
4885*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
4886*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4887*4882a593Smuzhiyun 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4888*4882a593Smuzhiyun 		0xfeffffffffffULL;
4889*4882a593Smuzhiyun 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4890*4882a593Smuzhiyun 	void __iomem *add;
4891*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
4892*4882a593Smuzhiyun 
4893*4882a593Smuzhiyun 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4894*4882a593Smuzhiyun 		/*  Enable all Multicast addresses */
4895*4882a593Smuzhiyun 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4896*4882a593Smuzhiyun 		       &bar0->rmac_addr_data0_mem);
4897*4882a593Smuzhiyun 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4898*4882a593Smuzhiyun 		       &bar0->rmac_addr_data1_mem);
4899*4882a593Smuzhiyun 		val64 = RMAC_ADDR_CMD_MEM_WE |
4900*4882a593Smuzhiyun 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4901*4882a593Smuzhiyun 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4902*4882a593Smuzhiyun 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4903*4882a593Smuzhiyun 		/* Wait till command completes */
4904*4882a593Smuzhiyun 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4905*4882a593Smuzhiyun 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4906*4882a593Smuzhiyun 				      S2IO_BIT_RESET);
4907*4882a593Smuzhiyun 
4908*4882a593Smuzhiyun 		sp->m_cast_flg = 1;
4909*4882a593Smuzhiyun 		sp->all_multi_pos = config->max_mc_addr - 1;
4910*4882a593Smuzhiyun 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4911*4882a593Smuzhiyun 		/*  Disable all Multicast addresses */
4912*4882a593Smuzhiyun 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4913*4882a593Smuzhiyun 		       &bar0->rmac_addr_data0_mem);
4914*4882a593Smuzhiyun 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4915*4882a593Smuzhiyun 		       &bar0->rmac_addr_data1_mem);
4916*4882a593Smuzhiyun 		val64 = RMAC_ADDR_CMD_MEM_WE |
4917*4882a593Smuzhiyun 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4918*4882a593Smuzhiyun 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4919*4882a593Smuzhiyun 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4920*4882a593Smuzhiyun 		/* Wait till command completes */
4921*4882a593Smuzhiyun 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4922*4882a593Smuzhiyun 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4923*4882a593Smuzhiyun 				      S2IO_BIT_RESET);
4924*4882a593Smuzhiyun 
4925*4882a593Smuzhiyun 		sp->m_cast_flg = 0;
4926*4882a593Smuzhiyun 		sp->all_multi_pos = 0;
4927*4882a593Smuzhiyun 	}
4928*4882a593Smuzhiyun 
4929*4882a593Smuzhiyun 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4930*4882a593Smuzhiyun 		/*  Put the NIC into promiscuous mode */
4931*4882a593Smuzhiyun 		add = &bar0->mac_cfg;
4932*4882a593Smuzhiyun 		val64 = readq(&bar0->mac_cfg);
4933*4882a593Smuzhiyun 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4934*4882a593Smuzhiyun 
4935*4882a593Smuzhiyun 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4936*4882a593Smuzhiyun 		writel((u32)val64, add);
4937*4882a593Smuzhiyun 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4938*4882a593Smuzhiyun 		writel((u32) (val64 >> 32), (add + 4));
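		/*
		 * mac_cfg is key-protected: each 32-bit half-write has to
		 * be preceded by writing the 0x4C0D key to rmac_cfg_key,
		 * which is why the key is written twice above (and again
		 * in the mirror-image sequence that leaves promiscuous
		 * mode below).
		 */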
4939*4882a593Smuzhiyun 
4940*4882a593Smuzhiyun 		if (vlan_tag_strip != 1) {
4941*4882a593Smuzhiyun 			val64 = readq(&bar0->rx_pa_cfg);
4942*4882a593Smuzhiyun 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4943*4882a593Smuzhiyun 			writeq(val64, &bar0->rx_pa_cfg);
4944*4882a593Smuzhiyun 			sp->vlan_strip_flag = 0;
4945*4882a593Smuzhiyun 		}
4946*4882a593Smuzhiyun 
4947*4882a593Smuzhiyun 		val64 = readq(&bar0->mac_cfg);
4948*4882a593Smuzhiyun 		sp->promisc_flg = 1;
4949*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4950*4882a593Smuzhiyun 			  dev->name);
4951*4882a593Smuzhiyun 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4952*4882a593Smuzhiyun 		/*  Remove the NIC from promiscuous mode */
4953*4882a593Smuzhiyun 		add = &bar0->mac_cfg;
4954*4882a593Smuzhiyun 		val64 = readq(&bar0->mac_cfg);
4955*4882a593Smuzhiyun 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4956*4882a593Smuzhiyun 
4957*4882a593Smuzhiyun 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4958*4882a593Smuzhiyun 		writel((u32)val64, add);
4959*4882a593Smuzhiyun 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4960*4882a593Smuzhiyun 		writel((u32) (val64 >> 32), (add + 4));
4961*4882a593Smuzhiyun 
4962*4882a593Smuzhiyun 		if (vlan_tag_strip != 0) {
4963*4882a593Smuzhiyun 			val64 = readq(&bar0->rx_pa_cfg);
4964*4882a593Smuzhiyun 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4965*4882a593Smuzhiyun 			writeq(val64, &bar0->rx_pa_cfg);
4966*4882a593Smuzhiyun 			sp->vlan_strip_flag = 1;
4967*4882a593Smuzhiyun 		}
4968*4882a593Smuzhiyun 
4969*4882a593Smuzhiyun 		val64 = readq(&bar0->mac_cfg);
4970*4882a593Smuzhiyun 		sp->promisc_flg = 0;
4971*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4972*4882a593Smuzhiyun 	}
4973*4882a593Smuzhiyun 
4974*4882a593Smuzhiyun 	/*  Update individual M_CAST address list */
4975*4882a593Smuzhiyun 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4976*4882a593Smuzhiyun 		if (netdev_mc_count(dev) >
4977*4882a593Smuzhiyun 		    (config->max_mc_addr - config->max_mac_addr)) {
4978*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG,
4979*4882a593Smuzhiyun 				  "%s: No more Rx filters can be added - "
4980*4882a593Smuzhiyun 				  "please enable ALL_MULTI instead\n",
4981*4882a593Smuzhiyun 				  dev->name);
4982*4882a593Smuzhiyun 			return;
4983*4882a593Smuzhiyun 		}
4984*4882a593Smuzhiyun 
4985*4882a593Smuzhiyun 		prev_cnt = sp->mc_addr_count;
4986*4882a593Smuzhiyun 		sp->mc_addr_count = netdev_mc_count(dev);
4987*4882a593Smuzhiyun 
4988*4882a593Smuzhiyun 		/* Clear out the previous list of Mc in the H/W. */
4989*4882a593Smuzhiyun 		for (i = 0; i < prev_cnt; i++) {
4990*4882a593Smuzhiyun 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4991*4882a593Smuzhiyun 			       &bar0->rmac_addr_data0_mem);
4992*4882a593Smuzhiyun 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4993*4882a593Smuzhiyun 			       &bar0->rmac_addr_data1_mem);
4994*4882a593Smuzhiyun 			val64 = RMAC_ADDR_CMD_MEM_WE |
4995*4882a593Smuzhiyun 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4996*4882a593Smuzhiyun 				RMAC_ADDR_CMD_MEM_OFFSET
4997*4882a593Smuzhiyun 				(config->mc_start_offset + i);
4998*4882a593Smuzhiyun 			writeq(val64, &bar0->rmac_addr_cmd_mem);
4999*4882a593Smuzhiyun 
5000*4882a593Smuzhiyun 			/* Wait till command completes */
5001*4882a593Smuzhiyun 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5002*4882a593Smuzhiyun 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5003*4882a593Smuzhiyun 						  S2IO_BIT_RESET)) {
5004*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
5005*4882a593Smuzhiyun 					  "%s: Adding Multicasts failed\n",
5006*4882a593Smuzhiyun 					  dev->name);
5007*4882a593Smuzhiyun 				return;
5008*4882a593Smuzhiyun 			}
5009*4882a593Smuzhiyun 		}
5010*4882a593Smuzhiyun 
5011*4882a593Smuzhiyun 		/* Create the new Rx filter list and update the same in H/W. */
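		/*
		 * The six address bytes are packed big-endian into the low
		 * 48 bits of a u64; e.g. 01:00:5e:00:00:01 becomes
		 * 0x01005e000001 (the trailing ">>= 8" undoes the extra
		 * shift left from the last loop iteration).
		 */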
5012*4882a593Smuzhiyun 		i = 0;
5013*4882a593Smuzhiyun 		netdev_for_each_mc_addr(ha, dev) {
5014*4882a593Smuzhiyun 			mac_addr = 0;
5015*4882a593Smuzhiyun 			for (j = 0; j < ETH_ALEN; j++) {
5016*4882a593Smuzhiyun 				mac_addr |= ha->addr[j];
5017*4882a593Smuzhiyun 				mac_addr <<= 8;
5018*4882a593Smuzhiyun 			}
5019*4882a593Smuzhiyun 			mac_addr >>= 8;
5020*4882a593Smuzhiyun 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5021*4882a593Smuzhiyun 			       &bar0->rmac_addr_data0_mem);
5022*4882a593Smuzhiyun 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5023*4882a593Smuzhiyun 			       &bar0->rmac_addr_data1_mem);
5024*4882a593Smuzhiyun 			val64 = RMAC_ADDR_CMD_MEM_WE |
5025*4882a593Smuzhiyun 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5026*4882a593Smuzhiyun 				RMAC_ADDR_CMD_MEM_OFFSET
5027*4882a593Smuzhiyun 				(i + config->mc_start_offset);
5028*4882a593Smuzhiyun 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5029*4882a593Smuzhiyun 
5030*4882a593Smuzhiyun 			/* Wait till command completes */
5031*4882a593Smuzhiyun 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5032*4882a593Smuzhiyun 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5033*4882a593Smuzhiyun 						  S2IO_BIT_RESET)) {
5034*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
5035*4882a593Smuzhiyun 					  "%s: Adding Multicasts failed\n",
5036*4882a593Smuzhiyun 					  dev->name);
5037*4882a593Smuzhiyun 				return;
5038*4882a593Smuzhiyun 			}
5039*4882a593Smuzhiyun 			i++;
5040*4882a593Smuzhiyun 		}
5041*4882a593Smuzhiyun 	}
5042*4882a593Smuzhiyun }
5043*4882a593Smuzhiyun 
5044*4882a593Smuzhiyun /* read unicast & multicast addresses from CAM and store them in the
5045*4882a593Smuzhiyun  * def_mac_addr structure
5046*4882a593Smuzhiyun  */
5047*4882a593Smuzhiyun static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5048*4882a593Smuzhiyun {
5049*4882a593Smuzhiyun 	int offset;
5050*4882a593Smuzhiyun 	u64 mac_addr = 0x0;
5051*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
5052*4882a593Smuzhiyun 
5053*4882a593Smuzhiyun 	/* store unicast & multicast mac addresses */
5054*4882a593Smuzhiyun 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5055*4882a593Smuzhiyun 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5056*4882a593Smuzhiyun 		/* if read fails disable the entry */
5057*4882a593Smuzhiyun 		if (mac_addr == FAILURE)
5058*4882a593Smuzhiyun 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5059*4882a593Smuzhiyun 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5060*4882a593Smuzhiyun 	}
5061*4882a593Smuzhiyun }
5062*4882a593Smuzhiyun 
5063*4882a593Smuzhiyun /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5064*4882a593Smuzhiyun static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5065*4882a593Smuzhiyun {
5066*4882a593Smuzhiyun 	int offset;
5067*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
5068*4882a593Smuzhiyun 	/* restore unicast mac address */
5069*4882a593Smuzhiyun 	for (offset = 0; offset < config->max_mac_addr; offset++)
5070*4882a593Smuzhiyun 		do_s2io_prog_unicast(sp->dev,
5071*4882a593Smuzhiyun 				     sp->def_mac_addr[offset].mac_addr);
5072*4882a593Smuzhiyun 
5073*4882a593Smuzhiyun 	/* restore multicast mac address */
5074*4882a593Smuzhiyun 	for (offset = config->mc_start_offset;
5075*4882a593Smuzhiyun 	     offset < config->max_mc_addr; offset++)
5076*4882a593Smuzhiyun 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5077*4882a593Smuzhiyun }
5078*4882a593Smuzhiyun 
5079*4882a593Smuzhiyun /* add a multicast MAC address to CAM */
5080*4882a593Smuzhiyun static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5081*4882a593Smuzhiyun {
5082*4882a593Smuzhiyun 	int i;
5083*4882a593Smuzhiyun 	u64 mac_addr = 0;
5084*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
5085*4882a593Smuzhiyun 
5086*4882a593Smuzhiyun 	for (i = 0; i < ETH_ALEN; i++) {
5087*4882a593Smuzhiyun 		mac_addr <<= 8;
5088*4882a593Smuzhiyun 		mac_addr |= addr[i];
5089*4882a593Smuzhiyun 	}
5090*4882a593Smuzhiyun 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5091*4882a593Smuzhiyun 		return SUCCESS;
5092*4882a593Smuzhiyun 
5093*4882a593Smuzhiyun 	/* check if the multicast mac is already present in CAM */
5094*4882a593Smuzhiyun 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5095*4882a593Smuzhiyun 		u64 tmp64;
5096*4882a593Smuzhiyun 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5097*4882a593Smuzhiyun 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5098*4882a593Smuzhiyun 			break;
5099*4882a593Smuzhiyun 
5100*4882a593Smuzhiyun 		if (tmp64 == mac_addr)
5101*4882a593Smuzhiyun 			return SUCCESS;
5102*4882a593Smuzhiyun 	}
5103*4882a593Smuzhiyun 	if (i == config->max_mc_addr) {
5104*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
5105*4882a593Smuzhiyun 			  "CAM full no space left for multicast MAC\n");
5106*4882a593Smuzhiyun 		return FAILURE;
5107*4882a593Smuzhiyun 	}
5108*4882a593Smuzhiyun 	/* Update the internal structure with this new mac address */
5109*4882a593Smuzhiyun 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5110*4882a593Smuzhiyun 
5111*4882a593Smuzhiyun 	return do_s2io_add_mac(sp, mac_addr, i);
5112*4882a593Smuzhiyun }
5113*4882a593Smuzhiyun 
5114*4882a593Smuzhiyun /* add MAC address to CAM */
5115*4882a593Smuzhiyun static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5116*4882a593Smuzhiyun {
5117*4882a593Smuzhiyun 	u64 val64;
5118*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5119*4882a593Smuzhiyun 
5120*4882a593Smuzhiyun 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5121*4882a593Smuzhiyun 	       &bar0->rmac_addr_data0_mem);
5122*4882a593Smuzhiyun 
5123*4882a593Smuzhiyun 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5124*4882a593Smuzhiyun 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5125*4882a593Smuzhiyun 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5126*4882a593Smuzhiyun 
5127*4882a593Smuzhiyun 	/* Wait till command completes */
5128*4882a593Smuzhiyun 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5129*4882a593Smuzhiyun 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5130*4882a593Smuzhiyun 				  S2IO_BIT_RESET)) {
5131*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5132*4882a593Smuzhiyun 		return FAILURE;
5133*4882a593Smuzhiyun 	}
5134*4882a593Smuzhiyun 	return SUCCESS;
5135*4882a593Smuzhiyun }
5136*4882a593Smuzhiyun /* deletes a specified unicast/multicast mac entry from CAM */
5137*4882a593Smuzhiyun static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5138*4882a593Smuzhiyun {
5139*4882a593Smuzhiyun 	int offset;
5140*4882a593Smuzhiyun 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5141*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
5142*4882a593Smuzhiyun 
5143*4882a593Smuzhiyun 	for (offset = 1;
5144*4882a593Smuzhiyun 	     offset < config->max_mc_addr; offset++) {
5145*4882a593Smuzhiyun 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5146*4882a593Smuzhiyun 		if (tmp64 == addr) {
5147*4882a593Smuzhiyun 			/* disable the entry by writing 0xffffffffffffULL */
5148*4882a593Smuzhiyun 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5149*4882a593Smuzhiyun 				return FAILURE;
5150*4882a593Smuzhiyun 			/* store the new mac list from CAM */
5151*4882a593Smuzhiyun 			do_s2io_store_unicast_mc(sp);
5152*4882a593Smuzhiyun 			return SUCCESS;
5153*4882a593Smuzhiyun 		}
5154*4882a593Smuzhiyun 	}
5155*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5156*4882a593Smuzhiyun 		  (unsigned long long)addr);
5157*4882a593Smuzhiyun 	return FAILURE;
5158*4882a593Smuzhiyun }
5159*4882a593Smuzhiyun 
5160*4882a593Smuzhiyun /* read mac entries from CAM */
5161*4882a593Smuzhiyun static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5162*4882a593Smuzhiyun {
5163*4882a593Smuzhiyun 	u64 tmp64, val64;
5164*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5165*4882a593Smuzhiyun 
5166*4882a593Smuzhiyun 	/* read mac addr */
5167*4882a593Smuzhiyun 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5168*4882a593Smuzhiyun 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5169*4882a593Smuzhiyun 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5170*4882a593Smuzhiyun 
5171*4882a593Smuzhiyun 	/* Wait till command completes */
5172*4882a593Smuzhiyun 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5173*4882a593Smuzhiyun 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5174*4882a593Smuzhiyun 				  S2IO_BIT_RESET)) {
5175*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5176*4882a593Smuzhiyun 		return FAILURE;
5177*4882a593Smuzhiyun 	}
5178*4882a593Smuzhiyun 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5179*4882a593Smuzhiyun 
5180*4882a593Smuzhiyun 	return tmp64 >> 16;
5181*4882a593Smuzhiyun }
5182*4882a593Smuzhiyun 
5183*4882a593Smuzhiyun /*
5184*4882a593Smuzhiyun  * s2io_set_mac_addr - driver entry point
5185*4882a593Smuzhiyun  */
5186*4882a593Smuzhiyun 
5187*4882a593Smuzhiyun static int s2io_set_mac_addr(struct net_device *dev, void *p)
5188*4882a593Smuzhiyun {
5189*4882a593Smuzhiyun 	struct sockaddr *addr = p;
5190*4882a593Smuzhiyun 
5191*4882a593Smuzhiyun 	if (!is_valid_ether_addr(addr->sa_data))
5192*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
5193*4882a593Smuzhiyun 
5194*4882a593Smuzhiyun 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5195*4882a593Smuzhiyun 
5196*4882a593Smuzhiyun 	/* store the MAC address in CAM */
5197*4882a593Smuzhiyun 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5198*4882a593Smuzhiyun }
5199*4882a593Smuzhiyun /**
5200*4882a593Smuzhiyun  *  do_s2io_prog_unicast - Programs the Xframe mac address
5201*4882a593Smuzhiyun  *  @dev : pointer to the device structure.
5202*4882a593Smuzhiyun  *  @addr: a uchar pointer to the new mac address which is to be set.
5203*4882a593Smuzhiyun  *  Description: This procedure will program the Xframe to receive
5204*4882a593Smuzhiyun  *  frames with the new MAC address.
5205*4882a593Smuzhiyun  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5206*4882a593Smuzhiyun  *  as defined in errno.h file on failure.
5207*4882a593Smuzhiyun  */
5208*4882a593Smuzhiyun 
5209*4882a593Smuzhiyun static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5210*4882a593Smuzhiyun {
5211*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5212*4882a593Smuzhiyun 	register u64 mac_addr = 0, perm_addr = 0;
5213*4882a593Smuzhiyun 	int i;
5214*4882a593Smuzhiyun 	u64 tmp64;
5215*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
5216*4882a593Smuzhiyun 
5217*4882a593Smuzhiyun 	/*
5218*4882a593Smuzhiyun 	 * Set the new MAC address as the new unicast filter and reflect this
5219*4882a593Smuzhiyun 	 * change on the device address registered with the OS. It will be
5220*4882a593Smuzhiyun 	 * at offset 0.
5221*4882a593Smuzhiyun 	 */
5222*4882a593Smuzhiyun 	for (i = 0; i < ETH_ALEN; i++) {
5223*4882a593Smuzhiyun 		mac_addr <<= 8;
5224*4882a593Smuzhiyun 		mac_addr |= addr[i];
5225*4882a593Smuzhiyun 		perm_addr <<= 8;
5226*4882a593Smuzhiyun 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5227*4882a593Smuzhiyun 	}
5228*4882a593Smuzhiyun 
5229*4882a593Smuzhiyun 	/* check if the dev_addr is different from perm_addr */
5230*4882a593Smuzhiyun 	if (mac_addr == perm_addr)
5231*4882a593Smuzhiyun 		return SUCCESS;
5232*4882a593Smuzhiyun 
5233*4882a593Smuzhiyun 	/* check if the mac is already present in CAM */
5234*4882a593Smuzhiyun 	for (i = 1; i < config->max_mac_addr; i++) {
5235*4882a593Smuzhiyun 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5236*4882a593Smuzhiyun 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5237*4882a593Smuzhiyun 			break;
5238*4882a593Smuzhiyun 
5239*4882a593Smuzhiyun 		if (tmp64 == mac_addr) {
5240*4882a593Smuzhiyun 			DBG_PRINT(INFO_DBG,
5241*4882a593Smuzhiyun 				  "MAC addr:0x%llx already present in CAM\n",
5242*4882a593Smuzhiyun 				  (unsigned long long)mac_addr);
5243*4882a593Smuzhiyun 			return SUCCESS;
5244*4882a593Smuzhiyun 		}
5245*4882a593Smuzhiyun 	}
5246*4882a593Smuzhiyun 	if (i == config->max_mac_addr) {
5247*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5248*4882a593Smuzhiyun 		return FAILURE;
5249*4882a593Smuzhiyun 	}
5250*4882a593Smuzhiyun 	/* Update the internal structure with this new mac address */
5251*4882a593Smuzhiyun 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5252*4882a593Smuzhiyun 
5253*4882a593Smuzhiyun 	return do_s2io_add_mac(sp, mac_addr, i);
5254*4882a593Smuzhiyun }
5255*4882a593Smuzhiyun 
5256*4882a593Smuzhiyun /**
5257*4882a593Smuzhiyun  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5258*4882a593Smuzhiyun  * @dev : pointer to netdev
5259*4882a593Smuzhiyun  * @cmd: pointer to the structure with parameters given by ethtool to set
5260*4882a593Smuzhiyun  * link information.
5261*4882a593Smuzhiyun  * Description:
5262*4882a593Smuzhiyun  * The function sets different link parameters provided by the user onto
5263*4882a593Smuzhiyun  * the NIC.
5264*4882a593Smuzhiyun  * Return value:
5265*4882a593Smuzhiyun  * 0 on success.
5266*4882a593Smuzhiyun  */
5267*4882a593Smuzhiyun 
5268*4882a593Smuzhiyun static int
5269*4882a593Smuzhiyun s2io_ethtool_set_link_ksettings(struct net_device *dev,
5270*4882a593Smuzhiyun 				const struct ethtool_link_ksettings *cmd)
5271*4882a593Smuzhiyun {
5272*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5273*4882a593Smuzhiyun 	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5274*4882a593Smuzhiyun 	    (cmd->base.speed != SPEED_10000) ||
5275*4882a593Smuzhiyun 	    (cmd->base.duplex != DUPLEX_FULL))
5276*4882a593Smuzhiyun 		return -EINVAL;
5277*4882a593Smuzhiyun 	else {
5278*4882a593Smuzhiyun 		s2io_close(sp->dev);
5279*4882a593Smuzhiyun 		s2io_open(sp->dev);
5280*4882a593Smuzhiyun 	}
5281*4882a593Smuzhiyun 
5282*4882a593Smuzhiyun 	return 0;
5283*4882a593Smuzhiyun }
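/*
 * In practice the only accepted setting mirrors the fixed 10G/full-duplex
 * hardware, e.g. (illustrative invocation):
 *
 *	ethtool -s eth0 speed 10000 duplex full autoneg off
 *
 * which simply bounces the interface; any other combination is -EINVAL.
 */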
5284*4882a593Smuzhiyun 
5285*4882a593Smuzhiyun /**
5286*4882a593Smuzhiyun  * s2io_ethtool_get_link_ksettings - Return link specific information.
5287*4882a593Smuzhiyun  * @dev: pointer to netdev
5288*4882a593Smuzhiyun  * @cmd : pointer to the structure with parameters given by ethtool
5289*4882a593Smuzhiyun  * to return link information.
5290*4882a593Smuzhiyun  * Description:
5291*4882a593Smuzhiyun  * Returns link specific information like speed, duplex, etc. to ethtool.
5292*4882a593Smuzhiyun  * Return value :
5293*4882a593Smuzhiyun  * return 0 on success.
5294*4882a593Smuzhiyun  */
5295*4882a593Smuzhiyun 
5296*4882a593Smuzhiyun static int
5297*4882a593Smuzhiyun s2io_ethtool_get_link_ksettings(struct net_device *dev,
5298*4882a593Smuzhiyun 				struct ethtool_link_ksettings *cmd)
5299*4882a593Smuzhiyun {
5300*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5301*4882a593Smuzhiyun 
5302*4882a593Smuzhiyun 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5303*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5304*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5305*4882a593Smuzhiyun 
5306*4882a593Smuzhiyun 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5307*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5308*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5309*4882a593Smuzhiyun 
5310*4882a593Smuzhiyun 	cmd->base.port = PORT_FIBRE;
5311*4882a593Smuzhiyun 
5312*4882a593Smuzhiyun 	if (netif_carrier_ok(sp->dev)) {
5313*4882a593Smuzhiyun 		cmd->base.speed = SPEED_10000;
5314*4882a593Smuzhiyun 		cmd->base.duplex = DUPLEX_FULL;
5315*4882a593Smuzhiyun 	} else {
5316*4882a593Smuzhiyun 		cmd->base.speed = SPEED_UNKNOWN;
5317*4882a593Smuzhiyun 		cmd->base.duplex = DUPLEX_UNKNOWN;
5318*4882a593Smuzhiyun 	}
5319*4882a593Smuzhiyun 
5320*4882a593Smuzhiyun 	cmd->base.autoneg = AUTONEG_DISABLE;
5321*4882a593Smuzhiyun 	return 0;
5322*4882a593Smuzhiyun }
5323*4882a593Smuzhiyun 
5324*4882a593Smuzhiyun /**
5325*4882a593Smuzhiyun  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5326*4882a593Smuzhiyun  * @dev: pointer to netdev
5327*4882a593Smuzhiyun  * @info : pointer to the structure with parameters given by ethtool to
5328*4882a593Smuzhiyun  * return driver information.
5329*4882a593Smuzhiyun  * Description:
5330*4882a593Smuzhiyun  * Returns driver specific information like name, version, etc. to ethtool.
5331*4882a593Smuzhiyun  * Return value:
5332*4882a593Smuzhiyun  *  void
5333*4882a593Smuzhiyun  */
5334*4882a593Smuzhiyun 
5335*4882a593Smuzhiyun static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5336*4882a593Smuzhiyun 				  struct ethtool_drvinfo *info)
5337*4882a593Smuzhiyun {
5338*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5339*4882a593Smuzhiyun 
5340*4882a593Smuzhiyun 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5341*4882a593Smuzhiyun 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5342*4882a593Smuzhiyun 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5343*4882a593Smuzhiyun }
5344*4882a593Smuzhiyun 
5345*4882a593Smuzhiyun /**
5346*4882a593Smuzhiyun  *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
5347*4882a593Smuzhiyun  *  @dev: pointer to netdev
5348*4882a593Smuzhiyun  *  @regs : pointer to the structure with parameters given by ethtool for
5349*4882a593Smuzhiyun  *          dumping the registers.
5350*4882a593Smuzhiyun  *  @space: The buffer into which all the registers are dumped.
5351*4882a593Smuzhiyun  *  Description:
5352*4882a593Smuzhiyun  *  Dumps the entire register space of xFrame NIC into the user given
5353*4882a593Smuzhiyun  *  buffer area.
5354*4882a593Smuzhiyun  * Return value :
5355*4882a593Smuzhiyun  * void.
5356*4882a593Smuzhiyun  */
5357*4882a593Smuzhiyun 
5358*4882a593Smuzhiyun static void s2io_ethtool_gregs(struct net_device *dev,
5359*4882a593Smuzhiyun 			       struct ethtool_regs *regs, void *space)
5360*4882a593Smuzhiyun {
5361*4882a593Smuzhiyun 	int i;
5362*4882a593Smuzhiyun 	u64 reg;
5363*4882a593Smuzhiyun 	u8 *reg_space = (u8 *)space;
5364*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5365*4882a593Smuzhiyun 
5366*4882a593Smuzhiyun 	regs->len = XENA_REG_SPACE;
5367*4882a593Smuzhiyun 	regs->version = sp->pdev->subsystem_device;
5368*4882a593Smuzhiyun 
5369*4882a593Smuzhiyun 	for (i = 0; i < regs->len; i += 8) {
5370*4882a593Smuzhiyun 		reg = readq(sp->bar0 + i);
5371*4882a593Smuzhiyun 		memcpy((reg_space + i), &reg, 8);
5372*4882a593Smuzhiyun 	}
5373*4882a593Smuzhiyun }
5374*4882a593Smuzhiyun 
5375*4882a593Smuzhiyun /*
5376*4882a593Smuzhiyun  *  s2io_set_led - control NIC led
5377*4882a593Smuzhiyun  */
5378*4882a593Smuzhiyun static void s2io_set_led(struct s2io_nic *sp, bool on)
5379*4882a593Smuzhiyun {
5380*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5381*4882a593Smuzhiyun 	u16 subid = sp->pdev->subsystem_device;
5382*4882a593Smuzhiyun 	u64 val64;
5383*4882a593Smuzhiyun 
5384*4882a593Smuzhiyun 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5385*4882a593Smuzhiyun 	    ((subid & 0xFF) >= 0x07)) {
5386*4882a593Smuzhiyun 		val64 = readq(&bar0->gpio_control);
5387*4882a593Smuzhiyun 		if (on)
5388*4882a593Smuzhiyun 			val64 |= GPIO_CTRL_GPIO_0;
5389*4882a593Smuzhiyun 		else
5390*4882a593Smuzhiyun 			val64 &= ~GPIO_CTRL_GPIO_0;
5391*4882a593Smuzhiyun 
5392*4882a593Smuzhiyun 		writeq(val64, &bar0->gpio_control);
5393*4882a593Smuzhiyun 	} else {
5394*4882a593Smuzhiyun 		val64 = readq(&bar0->adapter_control);
5395*4882a593Smuzhiyun 		if (on)
5396*4882a593Smuzhiyun 			val64 |= ADAPTER_LED_ON;
5397*4882a593Smuzhiyun 		else
5398*4882a593Smuzhiyun 			val64 &= ~ADAPTER_LED_ON;
5399*4882a593Smuzhiyun 
5400*4882a593Smuzhiyun 		writeq(val64, &bar0->adapter_control);
5401*4882a593Smuzhiyun 	}
5402*4882a593Smuzhiyun 
5403*4882a593Smuzhiyun }
5404*4882a593Smuzhiyun 
5405*4882a593Smuzhiyun /**
5406*4882a593Smuzhiyun  * s2io_ethtool_set_led - To physically identify the nic on the system.
5407*4882a593Smuzhiyun  * @dev : network device
5408*4882a593Smuzhiyun  * @state: led setting
5409*4882a593Smuzhiyun  *
5410*4882a593Smuzhiyun  * Description: Used to physically identify the NIC on the system.
5411*4882a593Smuzhiyun  * The Link LED will blink for a time specified by the user for
5412*4882a593Smuzhiyun  * identification.
5413*4882a593Smuzhiyun  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5414*4882a593Smuzhiyun  * identification is possible only if its link is up.
5415*4882a593Smuzhiyun  */
5416*4882a593Smuzhiyun 
5417*4882a593Smuzhiyun static int s2io_ethtool_set_led(struct net_device *dev,
5418*4882a593Smuzhiyun 				enum ethtool_phys_id_state state)
5419*4882a593Smuzhiyun {
5420*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5421*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5422*4882a593Smuzhiyun 	u16 subid = sp->pdev->subsystem_device;
5423*4882a593Smuzhiyun 
5424*4882a593Smuzhiyun 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5425*4882a593Smuzhiyun 		u64 val64 = readq(&bar0->adapter_control);
5426*4882a593Smuzhiyun 		if (!(val64 & ADAPTER_CNTL_EN)) {
5427*4882a593Smuzhiyun 			pr_err("Adapter Link down, cannot blink LED\n");
5428*4882a593Smuzhiyun 			return -EAGAIN;
5429*4882a593Smuzhiyun 		}
5430*4882a593Smuzhiyun 	}
5431*4882a593Smuzhiyun 
5432*4882a593Smuzhiyun 	switch (state) {
5433*4882a593Smuzhiyun 	case ETHTOOL_ID_ACTIVE:
5434*4882a593Smuzhiyun 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5435*4882a593Smuzhiyun 		return 1;	/* cycle on/off once per second */
5436*4882a593Smuzhiyun 
5437*4882a593Smuzhiyun 	case ETHTOOL_ID_ON:
5438*4882a593Smuzhiyun 		s2io_set_led(sp, true);
5439*4882a593Smuzhiyun 		break;
5440*4882a593Smuzhiyun 
5441*4882a593Smuzhiyun 	case ETHTOOL_ID_OFF:
5442*4882a593Smuzhiyun 		s2io_set_led(sp, false);
5443*4882a593Smuzhiyun 		break;
5444*4882a593Smuzhiyun 
5445*4882a593Smuzhiyun 	case ETHTOOL_ID_INACTIVE:
5446*4882a593Smuzhiyun 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5447*4882a593Smuzhiyun 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5448*4882a593Smuzhiyun 	}
5449*4882a593Smuzhiyun 
5450*4882a593Smuzhiyun 	return 0;
5451*4882a593Smuzhiyun }
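/*
 * Returning 1 from ETHTOOL_ID_ACTIVE asks the ethtool core to call back
 * with ID_ON/ID_OFF once per second, so an illustrative invocation such
 * as "ethtool -p eth0 10" blinks the LED for ten seconds and then
 * restores the saved gpio_control value via ETHTOOL_ID_INACTIVE.
 */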
5452*4882a593Smuzhiyun 
5453*4882a593Smuzhiyun static void s2io_ethtool_gringparam(struct net_device *dev,
5454*4882a593Smuzhiyun 				    struct ethtool_ringparam *ering)
5455*4882a593Smuzhiyun {
5456*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5457*4882a593Smuzhiyun 	int i, tx_desc_count = 0, rx_desc_count = 0;
5458*4882a593Smuzhiyun 
5459*4882a593Smuzhiyun 	if (sp->rxd_mode == RXD_MODE_1) {
5460*4882a593Smuzhiyun 		ering->rx_max_pending = MAX_RX_DESC_1;
5461*4882a593Smuzhiyun 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5462*4882a593Smuzhiyun 	} else {
5463*4882a593Smuzhiyun 		ering->rx_max_pending = MAX_RX_DESC_2;
5464*4882a593Smuzhiyun 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5465*4882a593Smuzhiyun 	}
5466*4882a593Smuzhiyun 
5467*4882a593Smuzhiyun 	ering->tx_max_pending = MAX_TX_DESC;
5468*4882a593Smuzhiyun 
5469*4882a593Smuzhiyun 	for (i = 0; i < sp->config.rx_ring_num; i++)
5470*4882a593Smuzhiyun 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5471*4882a593Smuzhiyun 	ering->rx_pending = rx_desc_count;
5472*4882a593Smuzhiyun 	ering->rx_jumbo_pending = rx_desc_count;
5473*4882a593Smuzhiyun 
5474*4882a593Smuzhiyun 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5475*4882a593Smuzhiyun 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5476*4882a593Smuzhiyun 	ering->tx_pending = tx_desc_count;
5477*4882a593Smuzhiyun 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5478*4882a593Smuzhiyun }
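/*
 * s2io_ethtool_gringparam() reports to "ethtool -g". A minimal
 * user-space sketch of the same query through the SIOCETHTOOL ioctl
 * ("eth0" is a placeholder; error handling omitted):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	struct ethtool_ringparam er = { .cmd = ETHTOOL_GRINGPARAM };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&er;
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 *
 * after which er.rx_pending and er.tx_pending hold the totals summed
 * in the loops above.
 */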
5479*4882a593Smuzhiyun 
5480*4882a593Smuzhiyun /**
5481*4882a593Smuzhiyun  * s2io_ethtool_getpause_data - Pause frame generation and reception.
5482*4882a593Smuzhiyun  * @dev: pointer to netdev
5483*4882a593Smuzhiyun  * @ep : pointer to the structure with pause parameters given by ethtool.
5484*4882a593Smuzhiyun  * Description:
5485*4882a593Smuzhiyun  * Returns the Pause frame generation and reception capability of the NIC.
5486*4882a593Smuzhiyun  * Return value:
5487*4882a593Smuzhiyun  *  void
5488*4882a593Smuzhiyun  */
5489*4882a593Smuzhiyun static void s2io_ethtool_getpause_data(struct net_device *dev,
5490*4882a593Smuzhiyun 				       struct ethtool_pauseparam *ep)
5491*4882a593Smuzhiyun {
5492*4882a593Smuzhiyun 	u64 val64;
5493*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5494*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5495*4882a593Smuzhiyun 
5496*4882a593Smuzhiyun 	val64 = readq(&bar0->rmac_pause_cfg);
5497*4882a593Smuzhiyun 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5498*4882a593Smuzhiyun 		ep->tx_pause = true;
5499*4882a593Smuzhiyun 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5500*4882a593Smuzhiyun 		ep->rx_pause = true;
5501*4882a593Smuzhiyun 	ep->autoneg = false;
5502*4882a593Smuzhiyun }
5503*4882a593Smuzhiyun 
5504*4882a593Smuzhiyun /**
5505*4882a593Smuzhiyun  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5506*4882a593Smuzhiyun  * @dev: pointer to netdev
5507*4882a593Smuzhiyun  * @ep : pointer to the structure with pause parameters given by ethtool.
5508*4882a593Smuzhiyun  * Description:
5509*4882a593Smuzhiyun  * It can be used to set or reset Pause frame generation or reception
5510*4882a593Smuzhiyun  * support of the NIC.
5511*4882a593Smuzhiyun  * Return value:
5512*4882a593Smuzhiyun  * int, returns 0 on Success
5513*4882a593Smuzhiyun  */
5514*4882a593Smuzhiyun 
5515*4882a593Smuzhiyun static int s2io_ethtool_setpause_data(struct net_device *dev,
5516*4882a593Smuzhiyun 				      struct ethtool_pauseparam *ep)
5517*4882a593Smuzhiyun {
5518*4882a593Smuzhiyun 	u64 val64;
5519*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5520*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5521*4882a593Smuzhiyun 
5522*4882a593Smuzhiyun 	val64 = readq(&bar0->rmac_pause_cfg);
5523*4882a593Smuzhiyun 	if (ep->tx_pause)
5524*4882a593Smuzhiyun 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5525*4882a593Smuzhiyun 	else
5526*4882a593Smuzhiyun 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5527*4882a593Smuzhiyun 	if (ep->rx_pause)
5528*4882a593Smuzhiyun 		val64 |= RMAC_PAUSE_RX_ENABLE;
5529*4882a593Smuzhiyun 	else
5530*4882a593Smuzhiyun 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5531*4882a593Smuzhiyun 	writeq(val64, &bar0->rmac_pause_cfg);
5532*4882a593Smuzhiyun 	return 0;
5533*4882a593Smuzhiyun }
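/*
 * s2io_ethtool_getpause_data() and s2io_ethtool_setpause_data() back
 * "ethtool -a" and "ethtool -A". For example ("eth0" is a placeholder):
 *
 *	ethtool -A eth0 rx on tx off
 *
 * reaches the set handler with ep->rx_pause = 1 and ep->tx_pause = 0,
 * which sets RMAC_PAUSE_RX_ENABLE and clears RMAC_PAUSE_GEN_ENABLE.
 * Pause autonegotiation is always reported off by the get handler and
 * cannot be changed here.
 */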
5534*4882a593Smuzhiyun 
5535*4882a593Smuzhiyun #define S2IO_DEV_ID		5
5536*4882a593Smuzhiyun /**
5537*4882a593Smuzhiyun  * read_eeprom - reads 4 bytes of data from user given offset.
5538*4882a593Smuzhiyun  * @sp : private member of the device structure, which is a pointer to the
5539*4882a593Smuzhiyun  *      s2io_nic structure.
5540*4882a593Smuzhiyun  * @off : offset from which the data is to be read
5541*4882a593Smuzhiyun  * @data : Its an output parameter where the data read at the given
5542*4882a593Smuzhiyun  *	offset is stored.
5543*4882a593Smuzhiyun  * Description:
5544*4882a593Smuzhiyun  * Will read 4 bytes of data from the user given offset and return the
5545*4882a593Smuzhiyun  * read data.
5546*4882a593Smuzhiyun  * NOTE: Only the part of the EEPROM visible through the I2C bus can
5547*4882a593Smuzhiyun  *   be read.
5548*4882a593Smuzhiyun  * Return value:
5549*4882a593Smuzhiyun  *  -1 on failure and 0 on success.
5550*4882a593Smuzhiyun  */
5551*4882a593Smuzhiyun static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5552*4882a593Smuzhiyun {
5553*4882a593Smuzhiyun 	int ret = -1;
5554*4882a593Smuzhiyun 	u32 exit_cnt = 0;
5555*4882a593Smuzhiyun 	u64 val64;
5556*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5557*4882a593Smuzhiyun 
5558*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_I_DEVICE) {
5559*4882a593Smuzhiyun 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5560*4882a593Smuzhiyun 			I2C_CONTROL_ADDR(off) |
5561*4882a593Smuzhiyun 			I2C_CONTROL_BYTE_CNT(0x3) |
5562*4882a593Smuzhiyun 			I2C_CONTROL_READ |
5563*4882a593Smuzhiyun 			I2C_CONTROL_CNTL_START;
5564*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5565*4882a593Smuzhiyun 
5566*4882a593Smuzhiyun 		while (exit_cnt < 5) {
5567*4882a593Smuzhiyun 			val64 = readq(&bar0->i2c_control);
5568*4882a593Smuzhiyun 			if (I2C_CONTROL_CNTL_END(val64)) {
5569*4882a593Smuzhiyun 				*data = I2C_CONTROL_GET_DATA(val64);
5570*4882a593Smuzhiyun 				ret = 0;
5571*4882a593Smuzhiyun 				break;
5572*4882a593Smuzhiyun 			}
5573*4882a593Smuzhiyun 			msleep(50);
5574*4882a593Smuzhiyun 			exit_cnt++;
5575*4882a593Smuzhiyun 		}
5576*4882a593Smuzhiyun 	}
5577*4882a593Smuzhiyun 
5578*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_II_DEVICE) {
5579*4882a593Smuzhiyun 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5580*4882a593Smuzhiyun 			SPI_CONTROL_BYTECNT(0x3) |
5581*4882a593Smuzhiyun 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5582*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5583*4882a593Smuzhiyun 		val64 |= SPI_CONTROL_REQ;
5584*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5585*4882a593Smuzhiyun 		while (exit_cnt < 5) {
5586*4882a593Smuzhiyun 			val64 = readq(&bar0->spi_control);
5587*4882a593Smuzhiyun 			if (val64 & SPI_CONTROL_NACK) {
5588*4882a593Smuzhiyun 				ret = 1;
5589*4882a593Smuzhiyun 				break;
5590*4882a593Smuzhiyun 			} else if (val64 & SPI_CONTROL_DONE) {
5591*4882a593Smuzhiyun 				*data = readq(&bar0->spi_data);
5592*4882a593Smuzhiyun 				*data &= 0xffffff;
5593*4882a593Smuzhiyun 				ret = 0;
5594*4882a593Smuzhiyun 				break;
5595*4882a593Smuzhiyun 			}
5596*4882a593Smuzhiyun 			msleep(50);
5597*4882a593Smuzhiyun 			exit_cnt++;
5598*4882a593Smuzhiyun 		}
5599*4882a593Smuzhiyun 	}
5600*4882a593Smuzhiyun 	return ret;
5601*4882a593Smuzhiyun }
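/*
 * Both branches above follow the same shape: program the serial
 * controller, then poll for completion up to five times with 50 ms
 * sleeps (a ~250 ms budget). A hypothetical helper capturing the idiom
 * might look like this (sketch only; s2io_poll_done() does not exist in
 * this driver, and the Xframe I path would need a variant because
 * I2C_CONTROL_CNTL_END() is a predicate macro rather than a plain
 * mask):
 *
 *	static bool s2io_poll_done(u64 __iomem *reg, u64 done_mask)
 *	{
 *		int tries;
 *
 *		for (tries = 0; tries < 5; tries++) {
 *			if (readq(reg) & done_mask)
 *				return true;
 *			msleep(50);
 *		}
 *		return false;
 *	}
 */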
5602*4882a593Smuzhiyun 
5603*4882a593Smuzhiyun /**
5604*4882a593Smuzhiyun  *  write_eeprom - actually writes the relevant part of the data value.
5605*4882a593Smuzhiyun  *  @sp : private member of the device structure, which is a pointer to the
5606*4882a593Smuzhiyun  *       s2io_nic structure.
5607*4882a593Smuzhiyun  *  @off : offset at which the data must be written
5608*4882a593Smuzhiyun  *  @data : The data that is to be written
5609*4882a593Smuzhiyun  *  @cnt : Number of bytes of the data that are actually to be written into
5610*4882a593Smuzhiyun  *  the Eeprom. (max of 3)
5611*4882a593Smuzhiyun  * Description:
5612*4882a593Smuzhiyun  *  Actually writes the relevant part of the data value into the Eeprom
5613*4882a593Smuzhiyun  *  through the I2C bus.
5614*4882a593Smuzhiyun  * Return value:
5615*4882a593Smuzhiyun  *  0 on success, -1 on failure.
5616*4882a593Smuzhiyun  */
5617*4882a593Smuzhiyun 
5618*4882a593Smuzhiyun static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5619*4882a593Smuzhiyun {
5620*4882a593Smuzhiyun 	int exit_cnt = 0, ret = -1;
5621*4882a593Smuzhiyun 	u64 val64;
5622*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5623*4882a593Smuzhiyun 
5624*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_I_DEVICE) {
5625*4882a593Smuzhiyun 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5626*4882a593Smuzhiyun 			I2C_CONTROL_ADDR(off) |
5627*4882a593Smuzhiyun 			I2C_CONTROL_BYTE_CNT(cnt) |
5628*4882a593Smuzhiyun 			I2C_CONTROL_SET_DATA((u32)data) |
5629*4882a593Smuzhiyun 			I2C_CONTROL_CNTL_START;
5630*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5631*4882a593Smuzhiyun 
5632*4882a593Smuzhiyun 		while (exit_cnt < 5) {
5633*4882a593Smuzhiyun 			val64 = readq(&bar0->i2c_control);
5634*4882a593Smuzhiyun 			if (I2C_CONTROL_CNTL_END(val64)) {
5635*4882a593Smuzhiyun 				if (!(val64 & I2C_CONTROL_NACK))
5636*4882a593Smuzhiyun 					ret = 0;
5637*4882a593Smuzhiyun 				break;
5638*4882a593Smuzhiyun 			}
5639*4882a593Smuzhiyun 			msleep(50);
5640*4882a593Smuzhiyun 			exit_cnt++;
5641*4882a593Smuzhiyun 		}
5642*4882a593Smuzhiyun 	}
5643*4882a593Smuzhiyun 
5644*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_II_DEVICE) {
5645*4882a593Smuzhiyun 		int write_cnt = (cnt == 8) ? 0 : cnt;
5646*4882a593Smuzhiyun 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5647*4882a593Smuzhiyun 
5648*4882a593Smuzhiyun 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5649*4882a593Smuzhiyun 			SPI_CONTROL_BYTECNT(write_cnt) |
5650*4882a593Smuzhiyun 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5651*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5652*4882a593Smuzhiyun 		val64 |= SPI_CONTROL_REQ;
5653*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5654*4882a593Smuzhiyun 		while (exit_cnt < 5) {
5655*4882a593Smuzhiyun 			val64 = readq(&bar0->spi_control);
5656*4882a593Smuzhiyun 			if (val64 & SPI_CONTROL_NACK) {
5657*4882a593Smuzhiyun 				ret = 1;
5658*4882a593Smuzhiyun 				break;
5659*4882a593Smuzhiyun 			} else if (val64 & SPI_CONTROL_DONE) {
5660*4882a593Smuzhiyun 				ret = 0;
5661*4882a593Smuzhiyun 				break;
5662*4882a593Smuzhiyun 			}
5663*4882a593Smuzhiyun 			msleep(50);
5664*4882a593Smuzhiyun 			exit_cnt++;
5665*4882a593Smuzhiyun 		}
5666*4882a593Smuzhiyun 	}
5667*4882a593Smuzhiyun 	return ret;
5668*4882a593Smuzhiyun }
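/*
 * Return convention shared by read_eeprom() and write_eeprom(): 0 on
 * success, -1 if the controller never signals completion, and 1 if the
 * SPI path reports SPI_CONTROL_NACK. Callers only test for non-zero,
 * so both failure codes are handled alike. The "(cnt == 8) ? 0 : cnt"
 * mapping above is presumably because the byte count is a narrow field
 * in which 0 encodes a full 8-byte transfer; that is an inference from
 * the code, not from hardware documentation.
 */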
5669*4882a593Smuzhiyun static void s2io_vpd_read(struct s2io_nic *nic)
5670*4882a593Smuzhiyun {
5671*4882a593Smuzhiyun 	u8 *vpd_data;
5672*4882a593Smuzhiyun 	u8 data;
5673*4882a593Smuzhiyun 	int i = 0, cnt, len, fail = 0;
5674*4882a593Smuzhiyun 	int vpd_addr = 0x80;
5675*4882a593Smuzhiyun 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5676*4882a593Smuzhiyun 
5677*4882a593Smuzhiyun 	if (nic->device_type == XFRAME_II_DEVICE) {
5678*4882a593Smuzhiyun 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5679*4882a593Smuzhiyun 		vpd_addr = 0x80;
5680*4882a593Smuzhiyun 	} else {
5681*4882a593Smuzhiyun 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5682*4882a593Smuzhiyun 		vpd_addr = 0x50;
5683*4882a593Smuzhiyun 	}
5684*4882a593Smuzhiyun 	strcpy(nic->serial_num, "NOT AVAILABLE");
5685*4882a593Smuzhiyun 
5686*4882a593Smuzhiyun 	vpd_data = kmalloc(256, GFP_KERNEL);
5687*4882a593Smuzhiyun 	if (!vpd_data) {
5688*4882a593Smuzhiyun 		swstats->mem_alloc_fail_cnt++;
5689*4882a593Smuzhiyun 		return;
5690*4882a593Smuzhiyun 	}
5691*4882a593Smuzhiyun 	swstats->mem_allocated += 256;
5692*4882a593Smuzhiyun 
5693*4882a593Smuzhiyun 	for (i = 0; i < 256; i += 4) {
5694*4882a593Smuzhiyun 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5695*4882a593Smuzhiyun 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5696*4882a593Smuzhiyun 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5697*4882a593Smuzhiyun 		for (cnt = 0; cnt < 5; cnt++) {
5698*4882a593Smuzhiyun 			msleep(2);
5699*4882a593Smuzhiyun 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5700*4882a593Smuzhiyun 			if (data == 0x80)
5701*4882a593Smuzhiyun 				break;
5702*4882a593Smuzhiyun 		}
5703*4882a593Smuzhiyun 		if (cnt >= 5) {
5704*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5705*4882a593Smuzhiyun 			fail = 1;
5706*4882a593Smuzhiyun 			break;
5707*4882a593Smuzhiyun 		}
5708*4882a593Smuzhiyun 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5709*4882a593Smuzhiyun 				      (u32 *)&vpd_data[i]);
5710*4882a593Smuzhiyun 	}
5711*4882a593Smuzhiyun 
5712*4882a593Smuzhiyun 	if (!fail) {
5713*4882a593Smuzhiyun 		/* read serial number of adapter */
5714*4882a593Smuzhiyun 		for (cnt = 0; cnt < 252; cnt++) {
5715*4882a593Smuzhiyun 			if ((vpd_data[cnt] == 'S') &&
5716*4882a593Smuzhiyun 			    (vpd_data[cnt+1] == 'N')) {
5717*4882a593Smuzhiyun 				len = vpd_data[cnt+2];
5718*4882a593Smuzhiyun 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5719*4882a593Smuzhiyun 					memcpy(nic->serial_num,
5720*4882a593Smuzhiyun 					       &vpd_data[cnt + 3],
5721*4882a593Smuzhiyun 					       len);
5722*4882a593Smuzhiyun 					memset(nic->serial_num+len,
5723*4882a593Smuzhiyun 					       0,
5724*4882a593Smuzhiyun 					       VPD_STRING_LEN-len);
5725*4882a593Smuzhiyun 					break;
5726*4882a593Smuzhiyun 				}
5727*4882a593Smuzhiyun 			}
5728*4882a593Smuzhiyun 		}
5729*4882a593Smuzhiyun 	}
5730*4882a593Smuzhiyun 
5731*4882a593Smuzhiyun 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5732*4882a593Smuzhiyun 		len = vpd_data[1];
5733*4882a593Smuzhiyun 		memcpy(nic->product_name, &vpd_data[3], len);
5734*4882a593Smuzhiyun 		nic->product_name[len] = 0;
5735*4882a593Smuzhiyun 	}
5736*4882a593Smuzhiyun 	kfree(vpd_data);
5737*4882a593Smuzhiyun 	swstats->mem_freed += 256;
5738*4882a593Smuzhiyun }
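/*
 * The loop above hand-rolls the standard PCI VPD access protocol:
 * write the VPD address to the capability's address register
 * (vpd_addr + 2 / + 3) with the flag bit clear, poll the flag byte
 * until the device sets 0x80 to indicate that four bytes of data are
 * ready, then read them from the data register at vpd_addr + 4. The
 * 0x50/0x80 values are evidently the config-space offsets of the VPD
 * capability on Xframe I and II respectively. Current kernels provide
 * pci_read_vpd(), which performs this sequence generically.
 */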
5739*4882a593Smuzhiyun 
5740*4882a593Smuzhiyun /**
5741*4882a593Smuzhiyun  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5742*4882a593Smuzhiyun  *  @dev: pointer to netdev
5743*4882a593Smuzhiyun  *  @eeprom : pointer to the user level structure provided by ethtool,
5744*4882a593Smuzhiyun  *  containing all relevant information.
5745*4882a593Smuzhiyun  *  @data_buf : user provided buffer in which the EEPROM contents are returned.
5746*4882a593Smuzhiyun  *  Description: Reads the values stored in the Eeprom at given offset
5747*4882a593Smuzhiyun  *  for a given length. Stores these values in the input argument data
5748*4882a593Smuzhiyun  *  buffer 'data_buf' and returns these to the caller (ethtool).
5749*4882a593Smuzhiyun  *  Return value:
5750*4882a593Smuzhiyun  *  int  0 on success
5751*4882a593Smuzhiyun  */
5752*4882a593Smuzhiyun 
5753*4882a593Smuzhiyun static int s2io_ethtool_geeprom(struct net_device *dev,
5754*4882a593Smuzhiyun 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5755*4882a593Smuzhiyun {
5756*4882a593Smuzhiyun 	u32 i, valid;
5757*4882a593Smuzhiyun 	u64 data;
5758*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5759*4882a593Smuzhiyun 
5760*4882a593Smuzhiyun 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5761*4882a593Smuzhiyun 
5762*4882a593Smuzhiyun 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5763*4882a593Smuzhiyun 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5764*4882a593Smuzhiyun 
5765*4882a593Smuzhiyun 	for (i = 0; i < eeprom->len; i += 4) {
5766*4882a593Smuzhiyun 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5767*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5768*4882a593Smuzhiyun 			return -EFAULT;
5769*4882a593Smuzhiyun 		}
5770*4882a593Smuzhiyun 		valid = INV(data);
5771*4882a593Smuzhiyun 		memcpy((data_buf + i), &valid, 4);
5772*4882a593Smuzhiyun 	}
5773*4882a593Smuzhiyun 	return 0;
5774*4882a593Smuzhiyun }
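/*
 * This backs "ethtool -e". Illustrative usage ("eth0" is a
 * placeholder):
 *
 *	ethtool -e eth0 offset 0 length 16
 *
 * The magic value reported with the dump packs the PCI vendor ID into
 * the low 16 bits and the device ID above it, as composed above; the
 * same value must be supplied when writing (see s2io_ethtool_seeprom()
 * below).
 */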
5775*4882a593Smuzhiyun 
5776*4882a593Smuzhiyun /**
5777*4882a593Smuzhiyun  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5778*4882a593Smuzhiyun  *  @dev: pointer to netdev
5779*4882a593Smuzhiyun  *  @eeprom : pointer to the user level structure provided by ethtool,
5780*4882a593Smuzhiyun  *  containing all relevant information.
5781*4882a593Smuzhiyun  *  @data_buf : user defined value to be written into Eeprom.
5782*4882a593Smuzhiyun  *  Description:
5783*4882a593Smuzhiyun  *  Tries to write the user provided value in the Eeprom, at the offset
5784*4882a593Smuzhiyun  *  given by the user.
5785*4882a593Smuzhiyun  *  Return value:
5786*4882a593Smuzhiyun  *  0 on success, -EFAULT on failure.
5787*4882a593Smuzhiyun  */
5788*4882a593Smuzhiyun 
5789*4882a593Smuzhiyun static int s2io_ethtool_seeprom(struct net_device *dev,
5790*4882a593Smuzhiyun 				struct ethtool_eeprom *eeprom,
5791*4882a593Smuzhiyun 				u8 *data_buf)
5792*4882a593Smuzhiyun {
5793*4882a593Smuzhiyun 	int len = eeprom->len, cnt = 0;
5794*4882a593Smuzhiyun 	u64 valid = 0, data;
5795*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
5796*4882a593Smuzhiyun 
5797*4882a593Smuzhiyun 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5798*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
5799*4882a593Smuzhiyun 			  "ETHTOOL_WRITE_EEPROM Err: "
5800*4882a593Smuzhiyun 			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5801*4882a593Smuzhiyun 			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5802*4882a593Smuzhiyun 			  eeprom->magic);
5803*4882a593Smuzhiyun 		return -EFAULT;
5804*4882a593Smuzhiyun 	}
5805*4882a593Smuzhiyun 
5806*4882a593Smuzhiyun 	while (len) {
5807*4882a593Smuzhiyun 		data = (u32)data_buf[cnt] & 0x000000FF;
5808*4882a593Smuzhiyun 		if (data)
5809*4882a593Smuzhiyun 			valid = (u32)(data << 24);
5810*4882a593Smuzhiyun 		else
5811*4882a593Smuzhiyun 			valid = data;
5812*4882a593Smuzhiyun 
5813*4882a593Smuzhiyun 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5814*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG,
5815*4882a593Smuzhiyun 				  "ETHTOOL_WRITE_EEPROM Err: "
5816*4882a593Smuzhiyun 				  "Cannot write into the specified offset\n");
5817*4882a593Smuzhiyun 			return -EFAULT;
5818*4882a593Smuzhiyun 		}
5819*4882a593Smuzhiyun 		cnt++;
5820*4882a593Smuzhiyun 		len--;
5821*4882a593Smuzhiyun 	}
5822*4882a593Smuzhiyun 
5823*4882a593Smuzhiyun 	return 0;
5824*4882a593Smuzhiyun }
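/*
 * The write side ("ethtool -E") demands the magic computed in
 * s2io_ethtool_geeprom(), guarding against writes aimed at the wrong
 * device. Illustrative usage with placeholder values:
 *
 *	ethtool -E eth0 magic 0x<device|vendor> offset 0x10 value 0xab
 *
 * Each byte is then written individually through write_eeprom() with a
 * count of 0.
 */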
5825*4882a593Smuzhiyun 
5826*4882a593Smuzhiyun /**
5827*4882a593Smuzhiyun  * s2io_register_test - reads and writes into all clock domains.
5828*4882a593Smuzhiyun  * @sp : private member of the device structure, which is a pointer to the
5829*4882a593Smuzhiyun  * s2io_nic structure.
5830*4882a593Smuzhiyun  * @data : variable that returns the result of each of the tests conducted
5831*4882a593Smuzhiyun  * by the driver.
5832*4882a593Smuzhiyun  * Description:
5833*4882a593Smuzhiyun  * Read and write into all clock domains. The NIC has 3 clock domains,
5834*4882a593Smuzhiyun  * verifying that registers in all three regions are accessible.
5835*4882a593Smuzhiyun  * Return value:
5836*4882a593Smuzhiyun  * 0 on success.
5837*4882a593Smuzhiyun  */
5838*4882a593Smuzhiyun 
5839*4882a593Smuzhiyun static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5840*4882a593Smuzhiyun {
5841*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5842*4882a593Smuzhiyun 	u64 val64 = 0, exp_val;
5843*4882a593Smuzhiyun 	int fail = 0;
5844*4882a593Smuzhiyun 
5845*4882a593Smuzhiyun 	val64 = readq(&bar0->pif_rd_swapper_fb);
5846*4882a593Smuzhiyun 	if (val64 != 0x123456789abcdefULL) {
5847*4882a593Smuzhiyun 		fail = 1;
5848*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5849*4882a593Smuzhiyun 	}
5850*4882a593Smuzhiyun 
5851*4882a593Smuzhiyun 	val64 = readq(&bar0->rmac_pause_cfg);
5852*4882a593Smuzhiyun 	if (val64 != 0xc000ffff00000000ULL) {
5853*4882a593Smuzhiyun 		fail = 1;
5854*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5855*4882a593Smuzhiyun 	}
5856*4882a593Smuzhiyun 
5857*4882a593Smuzhiyun 	val64 = readq(&bar0->rx_queue_cfg);
5858*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_II_DEVICE)
5859*4882a593Smuzhiyun 		exp_val = 0x0404040404040404ULL;
5860*4882a593Smuzhiyun 	else
5861*4882a593Smuzhiyun 		exp_val = 0x0808080808080808ULL;
5862*4882a593Smuzhiyun 	if (val64 != exp_val) {
5863*4882a593Smuzhiyun 		fail = 1;
5864*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5865*4882a593Smuzhiyun 	}
5866*4882a593Smuzhiyun 
5867*4882a593Smuzhiyun 	val64 = readq(&bar0->xgxs_efifo_cfg);
5868*4882a593Smuzhiyun 	if (val64 != 0x000000001923141EULL) {
5869*4882a593Smuzhiyun 		fail = 1;
5870*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5871*4882a593Smuzhiyun 	}
5872*4882a593Smuzhiyun 
5873*4882a593Smuzhiyun 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5874*4882a593Smuzhiyun 	writeq(val64, &bar0->xmsi_data);
5875*4882a593Smuzhiyun 	val64 = readq(&bar0->xmsi_data);
5876*4882a593Smuzhiyun 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5877*4882a593Smuzhiyun 		fail = 1;
5878*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5879*4882a593Smuzhiyun 	}
5880*4882a593Smuzhiyun 
5881*4882a593Smuzhiyun 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5882*4882a593Smuzhiyun 	writeq(val64, &bar0->xmsi_data);
5883*4882a593Smuzhiyun 	val64 = readq(&bar0->xmsi_data);
5884*4882a593Smuzhiyun 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5885*4882a593Smuzhiyun 		fail = 1;
5886*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5887*4882a593Smuzhiyun 	}
5888*4882a593Smuzhiyun 
5889*4882a593Smuzhiyun 	*data = fail;
5890*4882a593Smuzhiyun 	return fail;
5891*4882a593Smuzhiyun }
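/*
 * The read checks above compare registers that are expected to hold
 * fixed values in each of the chip's clock domains; the write checks
 * use the xmsi_data scratch register with 0x5A5A... and its bitwise
 * complement 0xA5A5..., so every bit position is driven both high and
 * low and a stuck bit fails at least one of the two patterns.
 */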
5892*4882a593Smuzhiyun 
5893*4882a593Smuzhiyun /**
5894*4882a593Smuzhiyun  * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5895*4882a593Smuzhiyun  * @sp : private member of the device structure, which is a pointer to the
5896*4882a593Smuzhiyun  * s2io_nic structure.
5897*4882a593Smuzhiyun  * @data: variable that returns the result of each of the tests conducted by
5898*4882a593Smuzhiyun  * the driver.
5899*4882a593Smuzhiyun  * Description:
5900*4882a593Smuzhiyun  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5901*4882a593Smuzhiyun  * register.
5902*4882a593Smuzhiyun  * Return value:
5903*4882a593Smuzhiyun  * 0 on success.
5904*4882a593Smuzhiyun  */
5905*4882a593Smuzhiyun 
5906*4882a593Smuzhiyun static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5907*4882a593Smuzhiyun {
5908*4882a593Smuzhiyun 	int fail = 0;
5909*4882a593Smuzhiyun 	u64 ret_data, org_4F0, org_7F0;
5910*4882a593Smuzhiyun 	u8 saved_4F0 = 0, saved_7F0 = 0;
5911*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
5912*4882a593Smuzhiyun 
5913*4882a593Smuzhiyun 	/* Test Write Error at offset 0 */
5914*4882a593Smuzhiyun 	/* Note that SPI interface allows write access to all areas
5915*4882a593Smuzhiyun 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5916*4882a593Smuzhiyun 	 */
5917*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_I_DEVICE)
5918*4882a593Smuzhiyun 		if (!write_eeprom(sp, 0, 0, 3))
5919*4882a593Smuzhiyun 			fail = 1;
5920*4882a593Smuzhiyun 
5921*4882a593Smuzhiyun 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5922*4882a593Smuzhiyun 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5923*4882a593Smuzhiyun 		saved_4F0 = 1;
5924*4882a593Smuzhiyun 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5925*4882a593Smuzhiyun 		saved_7F0 = 1;
5926*4882a593Smuzhiyun 
5927*4882a593Smuzhiyun 	/* Test Write at offset 4f0 */
5928*4882a593Smuzhiyun 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5929*4882a593Smuzhiyun 		fail = 1;
5930*4882a593Smuzhiyun 	if (read_eeprom(sp, 0x4F0, &ret_data))
5931*4882a593Smuzhiyun 		fail = 1;
5932*4882a593Smuzhiyun 
5933*4882a593Smuzhiyun 	if (ret_data != 0x012345) {
5934*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5935*4882a593Smuzhiyun 			  "Data written %llx Data read %llx\n",
5936*4882a593Smuzhiyun 			  dev->name, (unsigned long long)0x12345,
5937*4882a593Smuzhiyun 			  (unsigned long long)ret_data);
5938*4882a593Smuzhiyun 		fail = 1;
5939*4882a593Smuzhiyun 	}
5940*4882a593Smuzhiyun 
5941*4882a593Smuzhiyun 	/* Reset the EEPROM data back to 0xFFFFFF */
5942*4882a593Smuzhiyun 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5943*4882a593Smuzhiyun 
5944*4882a593Smuzhiyun 	/* Test Write Request Error at offset 0x7c */
5945*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_I_DEVICE)
5946*4882a593Smuzhiyun 		if (!write_eeprom(sp, 0x07C, 0, 3))
5947*4882a593Smuzhiyun 			fail = 1;
5948*4882a593Smuzhiyun 
5949*4882a593Smuzhiyun 	/* Test Write Request at offset 0x7f0 */
5950*4882a593Smuzhiyun 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5951*4882a593Smuzhiyun 		fail = 1;
5952*4882a593Smuzhiyun 	if (read_eeprom(sp, 0x7F0, &ret_data))
5953*4882a593Smuzhiyun 		fail = 1;
5954*4882a593Smuzhiyun 
5955*4882a593Smuzhiyun 	if (ret_data != 0x012345) {
5956*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5957*4882a593Smuzhiyun 			  "Data written %llx Data read %llx\n",
5958*4882a593Smuzhiyun 			  dev->name, (unsigned long long)0x12345,
5959*4882a593Smuzhiyun 			  (unsigned long long)ret_data);
5960*4882a593Smuzhiyun 		fail = 1;
5961*4882a593Smuzhiyun 	}
5962*4882a593Smuzhiyun 
5963*4882a593Smuzhiyun 	/* Reset the EEPROM data back to 0xFFFFFF */
5964*4882a593Smuzhiyun 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5965*4882a593Smuzhiyun 
5966*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_I_DEVICE) {
5967*4882a593Smuzhiyun 		/* Test Write Error at offset 0x80 */
5968*4882a593Smuzhiyun 		if (!write_eeprom(sp, 0x080, 0, 3))
5969*4882a593Smuzhiyun 			fail = 1;
5970*4882a593Smuzhiyun 
5971*4882a593Smuzhiyun 		/* Test Write Error at offset 0xfc */
5972*4882a593Smuzhiyun 		if (!write_eeprom(sp, 0x0FC, 0, 3))
5973*4882a593Smuzhiyun 			fail = 1;
5974*4882a593Smuzhiyun 
5975*4882a593Smuzhiyun 		/* Test Write Error at offset 0x100 */
5976*4882a593Smuzhiyun 		if (!write_eeprom(sp, 0x100, 0, 3))
5977*4882a593Smuzhiyun 			fail = 1;
5978*4882a593Smuzhiyun 
5979*4882a593Smuzhiyun 		/* Test Write Error at offset 4ec */
5980*4882a593Smuzhiyun 		if (!write_eeprom(sp, 0x4EC, 0, 3))
5981*4882a593Smuzhiyun 			fail = 1;
5982*4882a593Smuzhiyun 	}
5983*4882a593Smuzhiyun 
5984*4882a593Smuzhiyun 	/* Restore values at offsets 0x4F0 and 0x7F0 */
5985*4882a593Smuzhiyun 	if (saved_4F0)
5986*4882a593Smuzhiyun 		write_eeprom(sp, 0x4F0, org_4F0, 3);
5987*4882a593Smuzhiyun 	if (saved_7F0)
5988*4882a593Smuzhiyun 		write_eeprom(sp, 0x7F0, org_7F0, 3);
5989*4882a593Smuzhiyun 
5990*4882a593Smuzhiyun 	*data = fail;
5991*4882a593Smuzhiyun 	return fail;
5992*4882a593Smuzhiyun }
5993*4882a593Smuzhiyun 
5994*4882a593Smuzhiyun /**
5995*4882a593Smuzhiyun  * s2io_bist_test - invokes the MemBist test of the card.
5996*4882a593Smuzhiyun  * @sp : private member of the device structure, which is a pointer to the
5997*4882a593Smuzhiyun  * s2io_nic structure.
5998*4882a593Smuzhiyun  * @data: variable that returns the result of each of the tests conducted by
5999*4882a593Smuzhiyun  * the driver.
6000*4882a593Smuzhiyun  * Description:
6001*4882a593Smuzhiyun  * This invokes the MemBist test of the card. We allow the test around
6002*4882a593Smuzhiyun  * 2 seconds to complete. If it is still not complete
6003*4882a593Smuzhiyun  * within this period, we consider that the test failed.
6004*4882a593Smuzhiyun  * Return value:
6005*4882a593Smuzhiyun  * 0 on success and -1 on failure.
6006*4882a593Smuzhiyun  */
6007*4882a593Smuzhiyun 
6008*4882a593Smuzhiyun static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6009*4882a593Smuzhiyun {
6010*4882a593Smuzhiyun 	u8 bist = 0;
6011*4882a593Smuzhiyun 	int cnt = 0, ret = -1;
6012*4882a593Smuzhiyun 
6013*4882a593Smuzhiyun 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6014*4882a593Smuzhiyun 	bist |= PCI_BIST_START;
6015*4882a593Smuzhiyun 	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6016*4882a593Smuzhiyun 
6017*4882a593Smuzhiyun 	while (cnt < 20) {
6018*4882a593Smuzhiyun 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6019*4882a593Smuzhiyun 		if (!(bist & PCI_BIST_START)) {
6020*4882a593Smuzhiyun 			*data = (bist & PCI_BIST_CODE_MASK);
6021*4882a593Smuzhiyun 			ret = 0;
6022*4882a593Smuzhiyun 			break;
6023*4882a593Smuzhiyun 		}
6024*4882a593Smuzhiyun 		msleep(100);
6025*4882a593Smuzhiyun 		cnt++;
6026*4882a593Smuzhiyun 	}
6027*4882a593Smuzhiyun 
6028*4882a593Smuzhiyun 	return ret;
6029*4882a593Smuzhiyun }
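/*
 * This drives Built-In Self Test as defined by the PCI specification:
 * setting PCI_BIST_START (bit 6 of the BIST config register) starts
 * the test, the device clears that bit on completion, and the low four
 * bits (PCI_BIST_CODE_MASK) carry the completion code, 0 meaning pass.
 * The loop polls for up to 20 x 100 ms, matching the ~2 second budget
 * mentioned in the comment above.
 */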
6030*4882a593Smuzhiyun 
6031*4882a593Smuzhiyun /**
6032*4882a593Smuzhiyun  * s2io_link_test - verifies the link state of the nic
6033*4882a593Smuzhiyun  * @sp: private member of the device structure, which is a pointer to the
6034*4882a593Smuzhiyun  * s2io_nic structure.
6035*4882a593Smuzhiyun  * @data: variable that returns the result of each of the tests conducted by
6036*4882a593Smuzhiyun  * the driver.
6037*4882a593Smuzhiyun  * Description:
6038*4882a593Smuzhiyun  * The function verifies the link state of the NIC and updates the input
6039*4882a593Smuzhiyun  * argument 'data' appropriately.
6040*4882a593Smuzhiyun  * Return value:
6041*4882a593Smuzhiyun  * 0 on success.
6042*4882a593Smuzhiyun  */
6043*4882a593Smuzhiyun 
6044*4882a593Smuzhiyun static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6045*4882a593Smuzhiyun {
6046*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6047*4882a593Smuzhiyun 	u64 val64;
6048*4882a593Smuzhiyun 
6049*4882a593Smuzhiyun 	val64 = readq(&bar0->adapter_status);
6050*4882a593Smuzhiyun 	if (!(LINK_IS_UP(val64)))
6051*4882a593Smuzhiyun 		*data = 1;
6052*4882a593Smuzhiyun 	else
6053*4882a593Smuzhiyun 		*data = 0;
6054*4882a593Smuzhiyun 
6055*4882a593Smuzhiyun 	return *data;
6056*4882a593Smuzhiyun }
6057*4882a593Smuzhiyun 
6058*4882a593Smuzhiyun /**
6059*4882a593Smuzhiyun  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6060*4882a593Smuzhiyun  * @sp: private member of the device structure, which is a pointer to the
6061*4882a593Smuzhiyun  * s2io_nic structure.
6062*4882a593Smuzhiyun  * @data: variable that returns the result of each of the tests
6063*4882a593Smuzhiyun  * conducted by the driver.
6064*4882a593Smuzhiyun  * Description:
6065*4882a593Smuzhiyun  *  This is one of the offline tests; it checks the read and write
6066*4882a593Smuzhiyun  *  access to the RldRam chip on the NIC.
6067*4882a593Smuzhiyun  * Return value:
6068*4882a593Smuzhiyun  *  0 on success.
6069*4882a593Smuzhiyun  */
6070*4882a593Smuzhiyun 
6071*4882a593Smuzhiyun static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6072*4882a593Smuzhiyun {
6073*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6074*4882a593Smuzhiyun 	u64 val64;
6075*4882a593Smuzhiyun 	int cnt, iteration = 0, test_fail = 0;
6076*4882a593Smuzhiyun 
6077*4882a593Smuzhiyun 	val64 = readq(&bar0->adapter_control);
6078*4882a593Smuzhiyun 	val64 &= ~ADAPTER_ECC_EN;
6079*4882a593Smuzhiyun 	writeq(val64, &bar0->adapter_control);
6080*4882a593Smuzhiyun 
6081*4882a593Smuzhiyun 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6082*4882a593Smuzhiyun 	val64 |= MC_RLDRAM_TEST_MODE;
6083*4882a593Smuzhiyun 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6084*4882a593Smuzhiyun 
6085*4882a593Smuzhiyun 	val64 = readq(&bar0->mc_rldram_mrs);
6086*4882a593Smuzhiyun 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6087*4882a593Smuzhiyun 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6088*4882a593Smuzhiyun 
6089*4882a593Smuzhiyun 	val64 |= MC_RLDRAM_MRS_ENABLE;
6090*4882a593Smuzhiyun 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6091*4882a593Smuzhiyun 
6092*4882a593Smuzhiyun 	while (iteration < 2) {
6093*4882a593Smuzhiyun 		val64 = 0x55555555aaaa0000ULL;
6094*4882a593Smuzhiyun 		if (iteration == 1)
6095*4882a593Smuzhiyun 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6096*4882a593Smuzhiyun 		writeq(val64, &bar0->mc_rldram_test_d0);
6097*4882a593Smuzhiyun 
6098*4882a593Smuzhiyun 		val64 = 0xaaaa5a5555550000ULL;
6099*4882a593Smuzhiyun 		if (iteration == 1)
6100*4882a593Smuzhiyun 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6101*4882a593Smuzhiyun 		writeq(val64, &bar0->mc_rldram_test_d1);
6102*4882a593Smuzhiyun 
6103*4882a593Smuzhiyun 		val64 = 0x55aaaaaaaa5a0000ULL;
6104*4882a593Smuzhiyun 		if (iteration == 1)
6105*4882a593Smuzhiyun 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6106*4882a593Smuzhiyun 		writeq(val64, &bar0->mc_rldram_test_d2);
6107*4882a593Smuzhiyun 
6108*4882a593Smuzhiyun 		val64 = (u64) (0x0000003ffffe0100ULL);
6109*4882a593Smuzhiyun 		writeq(val64, &bar0->mc_rldram_test_add);
6110*4882a593Smuzhiyun 
6111*4882a593Smuzhiyun 		val64 = MC_RLDRAM_TEST_MODE |
6112*4882a593Smuzhiyun 			MC_RLDRAM_TEST_WRITE |
6113*4882a593Smuzhiyun 			MC_RLDRAM_TEST_GO;
6114*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6115*4882a593Smuzhiyun 
6116*4882a593Smuzhiyun 		for (cnt = 0; cnt < 5; cnt++) {
6117*4882a593Smuzhiyun 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6118*4882a593Smuzhiyun 			if (val64 & MC_RLDRAM_TEST_DONE)
6119*4882a593Smuzhiyun 				break;
6120*4882a593Smuzhiyun 			msleep(200);
6121*4882a593Smuzhiyun 		}
6122*4882a593Smuzhiyun 
6123*4882a593Smuzhiyun 		if (cnt == 5)
6124*4882a593Smuzhiyun 			break;
6125*4882a593Smuzhiyun 
6126*4882a593Smuzhiyun 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6127*4882a593Smuzhiyun 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6128*4882a593Smuzhiyun 
6129*4882a593Smuzhiyun 		for (cnt = 0; cnt < 5; cnt++) {
6130*4882a593Smuzhiyun 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6131*4882a593Smuzhiyun 			if (val64 & MC_RLDRAM_TEST_DONE)
6132*4882a593Smuzhiyun 				break;
6133*4882a593Smuzhiyun 			msleep(500);
6134*4882a593Smuzhiyun 		}
6135*4882a593Smuzhiyun 
6136*4882a593Smuzhiyun 		if (cnt == 5)
6137*4882a593Smuzhiyun 			break;
6138*4882a593Smuzhiyun 
6139*4882a593Smuzhiyun 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6140*4882a593Smuzhiyun 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6141*4882a593Smuzhiyun 			test_fail = 1;
6142*4882a593Smuzhiyun 
6143*4882a593Smuzhiyun 		iteration++;
6144*4882a593Smuzhiyun 	}
6145*4882a593Smuzhiyun 
6146*4882a593Smuzhiyun 	*data = test_fail;
6147*4882a593Smuzhiyun 
6148*4882a593Smuzhiyun 	/* Bring the adapter out of test mode */
6149*4882a593Smuzhiyun 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6150*4882a593Smuzhiyun 
6151*4882a593Smuzhiyun 	return test_fail;
6152*4882a593Smuzhiyun }
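/*
 * The RLDRAM test disables ECC so the patterns are stored verbatim,
 * puts the memory controller into test mode, and runs two passes; the
 * second pass XORs the upper 48 bits of each data pattern, so every
 * tested bit is exercised with both polarities. Each pass performs a
 * TEST_WRITE, then a read-back (TEST_GO without TEST_WRITE), and
 * finally checks MC_RLDRAM_TEST_PASS.
 */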
6153*4882a593Smuzhiyun 
6154*4882a593Smuzhiyun /**
6155*4882a593Smuzhiyun  *  s2io_ethtool_test - conducts five tests to determine the health of the card.
6156*4882a593Smuzhiyun  *  @dev: pointer to netdev
6157*4882a593Smuzhiyun  *  @ethtest : pointer to a ethtool command specific structure that will be
6158*4882a593Smuzhiyun  *  returned to the user.
6159*4882a593Smuzhiyun  *  @data : variable that returns the result of each of the tests
6160*4882a593Smuzhiyun  * conducted by the driver.
6161*4882a593Smuzhiyun  * Description:
6162*4882a593Smuzhiyun  *  This function conducts five tests (four offline and one online) to determine
6163*4882a593Smuzhiyun  *  the health of the card.
6164*4882a593Smuzhiyun  * Return value:
6165*4882a593Smuzhiyun  *  void
6166*4882a593Smuzhiyun  */
6167*4882a593Smuzhiyun 
6168*4882a593Smuzhiyun static void s2io_ethtool_test(struct net_device *dev,
6169*4882a593Smuzhiyun 			      struct ethtool_test *ethtest,
6170*4882a593Smuzhiyun 			      uint64_t *data)
6171*4882a593Smuzhiyun {
6172*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
6173*4882a593Smuzhiyun 	int orig_state = netif_running(sp->dev);
6174*4882a593Smuzhiyun 
6175*4882a593Smuzhiyun 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6176*4882a593Smuzhiyun 		/* Offline Tests. */
6177*4882a593Smuzhiyun 		if (orig_state)
6178*4882a593Smuzhiyun 			s2io_close(sp->dev);
6179*4882a593Smuzhiyun 
6180*4882a593Smuzhiyun 		if (s2io_register_test(sp, &data[0]))
6181*4882a593Smuzhiyun 			ethtest->flags |= ETH_TEST_FL_FAILED;
6182*4882a593Smuzhiyun 
6183*4882a593Smuzhiyun 		s2io_reset(sp);
6184*4882a593Smuzhiyun 
6185*4882a593Smuzhiyun 		if (s2io_rldram_test(sp, &data[3]))
6186*4882a593Smuzhiyun 			ethtest->flags |= ETH_TEST_FL_FAILED;
6187*4882a593Smuzhiyun 
6188*4882a593Smuzhiyun 		s2io_reset(sp);
6189*4882a593Smuzhiyun 
6190*4882a593Smuzhiyun 		if (s2io_eeprom_test(sp, &data[1]))
6191*4882a593Smuzhiyun 			ethtest->flags |= ETH_TEST_FL_FAILED;
6192*4882a593Smuzhiyun 
6193*4882a593Smuzhiyun 		if (s2io_bist_test(sp, &data[4]))
6194*4882a593Smuzhiyun 			ethtest->flags |= ETH_TEST_FL_FAILED;
6195*4882a593Smuzhiyun 
6196*4882a593Smuzhiyun 		if (orig_state)
6197*4882a593Smuzhiyun 			s2io_open(sp->dev);
6198*4882a593Smuzhiyun 
6199*4882a593Smuzhiyun 		data[2] = 0;
6200*4882a593Smuzhiyun 	} else {
6201*4882a593Smuzhiyun 		/* Online Tests. */
6202*4882a593Smuzhiyun 		if (!orig_state) {
6203*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6204*4882a593Smuzhiyun 				  dev->name);
6205*4882a593Smuzhiyun 			data[0] = -1;
6206*4882a593Smuzhiyun 			data[1] = -1;
6207*4882a593Smuzhiyun 			data[2] = -1;
6208*4882a593Smuzhiyun 			data[3] = -1;
6209*4882a593Smuzhiyun 			data[4] = -1;
6210*4882a593Smuzhiyun 		}
6211*4882a593Smuzhiyun 
6212*4882a593Smuzhiyun 		if (s2io_link_test(sp, &data[2]))
6213*4882a593Smuzhiyun 			ethtest->flags |= ETH_TEST_FL_FAILED;
6214*4882a593Smuzhiyun 
6215*4882a593Smuzhiyun 		data[0] = 0;
6216*4882a593Smuzhiyun 		data[1] = 0;
6217*4882a593Smuzhiyun 		data[3] = 0;
6218*4882a593Smuzhiyun 		data[4] = 0;
6219*4882a593Smuzhiyun 	}
6220*4882a593Smuzhiyun }
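/*
 * Entry point for "ethtool -t eth0 [offline]". The result slots are,
 * in order: data[0] register test, data[1] EEPROM test, data[2] link
 * test, data[3] RLDRAM test, data[4] BIST. Offline testing closes a
 * running interface first and reopens it afterwards; the online path
 * pre-sets all slots to -1 when the interface is down but still
 * attempts the link test before zeroing the offline slots.
 */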
6221*4882a593Smuzhiyun 
6222*4882a593Smuzhiyun static void s2io_get_ethtool_stats(struct net_device *dev,
6223*4882a593Smuzhiyun 				   struct ethtool_stats *estats,
6224*4882a593Smuzhiyun 				   u64 *tmp_stats)
6225*4882a593Smuzhiyun {
6226*4882a593Smuzhiyun 	int i = 0, k;
6227*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
6228*4882a593Smuzhiyun 	struct stat_block *stats = sp->mac_control.stats_info;
6229*4882a593Smuzhiyun 	struct swStat *swstats = &stats->sw_stat;
6230*4882a593Smuzhiyun 	struct xpakStat *xstats = &stats->xpak_stat;
6231*4882a593Smuzhiyun 
6232*4882a593Smuzhiyun 	s2io_updt_stats(sp);
6233*4882a593Smuzhiyun 	tmp_stats[i++] =
6234*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6235*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_frms);
6236*4882a593Smuzhiyun 	tmp_stats[i++] =
6237*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6238*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_data_octets);
6239*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6240*4882a593Smuzhiyun 	tmp_stats[i++] =
6241*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6242*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_mcst_frms);
6243*4882a593Smuzhiyun 	tmp_stats[i++] =
6244*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6245*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_bcst_frms);
6246*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6247*4882a593Smuzhiyun 	tmp_stats[i++] =
6248*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6249*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_ttl_octets);
6250*4882a593Smuzhiyun 	tmp_stats[i++] =
6251*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6252*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_ucst_frms);
6253*4882a593Smuzhiyun 	tmp_stats[i++] =
6254*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6255*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_nucst_frms);
6256*4882a593Smuzhiyun 	tmp_stats[i++] =
6257*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6258*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_any_err_frms);
6259*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6260*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6261*4882a593Smuzhiyun 	tmp_stats[i++] =
6262*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6263*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_vld_ip);
6264*4882a593Smuzhiyun 	tmp_stats[i++] =
6265*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6266*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_drop_ip);
6267*4882a593Smuzhiyun 	tmp_stats[i++] =
6268*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6269*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_icmp);
6270*4882a593Smuzhiyun 	tmp_stats[i++] =
6271*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6272*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_rst_tcp);
6273*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6274*4882a593Smuzhiyun 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6275*4882a593Smuzhiyun 		le32_to_cpu(stats->tmac_udp);
6276*4882a593Smuzhiyun 	tmp_stats[i++] =
6277*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6278*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_vld_frms);
6279*4882a593Smuzhiyun 	tmp_stats[i++] =
6280*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6281*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_data_octets);
6282*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6283*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6284*4882a593Smuzhiyun 	tmp_stats[i++] =
6285*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6286*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6287*4882a593Smuzhiyun 	tmp_stats[i++] =
6288*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6289*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6290*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6291*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6292*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6293*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6294*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6295*4882a593Smuzhiyun 	tmp_stats[i++] =
6296*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6297*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_ttl_octets);
6298*4882a593Smuzhiyun 	tmp_stats[i++] =
6299*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6300*4882a593Smuzhiyun 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6301*4882a593Smuzhiyun 	tmp_stats[i++] =
6302*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6303*4882a593Smuzhiyun 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6304*4882a593Smuzhiyun 	tmp_stats[i++] =
6305*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6306*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_discarded_frms);
6307*4882a593Smuzhiyun 	tmp_stats[i++] =
6308*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6309*4882a593Smuzhiyun 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6310*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6311*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6312*4882a593Smuzhiyun 	tmp_stats[i++] =
6313*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6314*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_usized_frms);
6315*4882a593Smuzhiyun 	tmp_stats[i++] =
6316*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6317*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_osized_frms);
6318*4882a593Smuzhiyun 	tmp_stats[i++] =
6319*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6320*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_frag_frms);
6321*4882a593Smuzhiyun 	tmp_stats[i++] =
6322*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6323*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_jabber_frms);
6324*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6325*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6326*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6327*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6328*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6329*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6330*4882a593Smuzhiyun 	tmp_stats[i++] =
6331*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6332*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_ip);
6333*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6334*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6335*4882a593Smuzhiyun 	tmp_stats[i++] =
6336*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6337*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_drop_ip);
6338*4882a593Smuzhiyun 	tmp_stats[i++] =
6339*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6340*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_icmp);
6341*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6342*4882a593Smuzhiyun 	tmp_stats[i++] =
6343*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6344*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_udp);
6345*4882a593Smuzhiyun 	tmp_stats[i++] =
6346*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6347*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_err_drp_udp);
6348*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6349*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6350*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6351*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6352*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6353*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6354*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6355*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6356*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6357*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6358*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6359*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6360*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6361*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6362*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6363*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6364*4882a593Smuzhiyun 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6365*4882a593Smuzhiyun 	tmp_stats[i++] =
6366*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6367*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_pause_cnt);
6368*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6369*4882a593Smuzhiyun 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6370*4882a593Smuzhiyun 	tmp_stats[i++] =
6371*4882a593Smuzhiyun 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6372*4882a593Smuzhiyun 		le32_to_cpu(stats->rmac_accepted_ip);
6373*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6374*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6375*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6376*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6377*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6378*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6379*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6380*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6381*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6382*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6383*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6384*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6385*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6386*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6387*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6388*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6389*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6390*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6391*4882a593Smuzhiyun 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6392*4882a593Smuzhiyun 
6393*4882a593Smuzhiyun 	/* Enhanced statistics exist only for Hercules */
6394*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_II_DEVICE) {
6395*4882a593Smuzhiyun 		tmp_stats[i++] =
6396*4882a593Smuzhiyun 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6397*4882a593Smuzhiyun 		tmp_stats[i++] =
6398*4882a593Smuzhiyun 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6399*4882a593Smuzhiyun 		tmp_stats[i++] =
6400*4882a593Smuzhiyun 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6401*4882a593Smuzhiyun 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6402*4882a593Smuzhiyun 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6403*4882a593Smuzhiyun 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6404*4882a593Smuzhiyun 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6405*4882a593Smuzhiyun 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6406*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6407*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6408*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6409*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6410*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6411*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6412*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6413*4882a593Smuzhiyun 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6414*4882a593Smuzhiyun 	}
6415*4882a593Smuzhiyun 
6416*4882a593Smuzhiyun 	tmp_stats[i++] = 0;
6417*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->single_ecc_errs;
6418*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->double_ecc_errs;
6419*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->parity_err_cnt;
6420*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->serious_err_cnt;
6421*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->soft_reset_cnt;
6422*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->fifo_full_cnt;
6423*4882a593Smuzhiyun 	for (k = 0; k < MAX_RX_RINGS; k++)
6424*4882a593Smuzhiyun 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6425*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6426*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6427*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6428*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6429*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6430*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6431*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6432*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6433*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6434*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6435*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6436*4882a593Smuzhiyun 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6437*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6438*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->sending_both;
6439*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6440*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->flush_max_pkts;
6441*4882a593Smuzhiyun 	if (swstats->num_aggregations) {
6442*4882a593Smuzhiyun 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6443*4882a593Smuzhiyun 		int count = 0;
6444*4882a593Smuzhiyun 		/*
6445*4882a593Smuzhiyun 		 * Since 64-bit divide does not work on all platforms,
6446*4882a593Smuzhiyun 		 * do repeated subtraction.
6447*4882a593Smuzhiyun 		 */
6448*4882a593Smuzhiyun 		while (tmp >= swstats->num_aggregations) {
6449*4882a593Smuzhiyun 			tmp -= swstats->num_aggregations;
6450*4882a593Smuzhiyun 			count++;
6451*4882a593Smuzhiyun 		}
6452*4882a593Smuzhiyun 		tmp_stats[i++] = count;
6453*4882a593Smuzhiyun 	} else
6454*4882a593Smuzhiyun 		tmp_stats[i++] = 0;
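/*
 * The subtraction loop above computes
 * sum_avg_pkts_aggregated / num_aggregations without emitting a 64-bit
 * division, which 32-bit targets cannot do natively. The kernel's
 * usual tool for this is div64_u64() from <linux/math64.h>, e.g.
 * (sketch only; the driver keeps its loop):
 *
 *	u64 avg = div64_u64(swstats->sum_avg_pkts_aggregated,
 *			    swstats->num_aggregations);
 */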
6455*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6456*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6457*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6458*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->mem_allocated;
6459*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->mem_freed;
6460*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->link_up_cnt;
6461*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->link_down_cnt;
6462*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->link_up_time;
6463*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->link_down_time;
6464*4882a593Smuzhiyun 
6465*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6466*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6467*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6468*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6469*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6470*4882a593Smuzhiyun 
6471*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6472*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_abort_cnt;
6473*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6474*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6475*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6476*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6477*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6478*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6479*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6480*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tda_err_cnt;
6481*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->pfc_err_cnt;
6482*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->pcc_err_cnt;
6483*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tti_err_cnt;
6484*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->tpa_err_cnt;
6485*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->sm_err_cnt;
6486*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->lso_err_cnt;
6487*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6488*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6489*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6490*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6491*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rc_err_cnt;
6492*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6493*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rpa_err_cnt;
6494*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rda_err_cnt;
6495*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->rti_err_cnt;
6496*4882a593Smuzhiyun 	tmp_stats[i++] = swstats->mc_err_cnt;
6497*4882a593Smuzhiyun }
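/*
 * s2io_get_ethtool_stats() fills the value array for "ethtool -S", in
 * the same order as the key strings copied out by
 * s2io_ethtool_get_strings(). The recurring
 * "(u64)le32_to_cpu(...oflow) << 32 | le32_to_cpu(...)" expressions
 * rebuild 64-bit counters that the hardware exposes as a 32-bit value
 * register plus a 32-bit overflow register.
 */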
6498*4882a593Smuzhiyun 
6499*4882a593Smuzhiyun static int s2io_ethtool_get_regs_len(struct net_device *dev)
6500*4882a593Smuzhiyun {
6501*4882a593Smuzhiyun 	return XENA_REG_SPACE;
6502*4882a593Smuzhiyun }
6503*4882a593Smuzhiyun 
6504*4882a593Smuzhiyun 
6505*4882a593Smuzhiyun static int s2io_get_eeprom_len(struct net_device *dev)
6506*4882a593Smuzhiyun {
6507*4882a593Smuzhiyun 	return XENA_EEPROM_SPACE;
6508*4882a593Smuzhiyun }
6509*4882a593Smuzhiyun 
6510*4882a593Smuzhiyun static int s2io_get_sset_count(struct net_device *dev, int sset)
6511*4882a593Smuzhiyun {
6512*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
6513*4882a593Smuzhiyun 
6514*4882a593Smuzhiyun 	switch (sset) {
6515*4882a593Smuzhiyun 	case ETH_SS_TEST:
6516*4882a593Smuzhiyun 		return S2IO_TEST_LEN;
6517*4882a593Smuzhiyun 	case ETH_SS_STATS:
6518*4882a593Smuzhiyun 		switch (sp->device_type) {
6519*4882a593Smuzhiyun 		case XFRAME_I_DEVICE:
6520*4882a593Smuzhiyun 			return XFRAME_I_STAT_LEN;
6521*4882a593Smuzhiyun 		case XFRAME_II_DEVICE:
6522*4882a593Smuzhiyun 			return XFRAME_II_STAT_LEN;
6523*4882a593Smuzhiyun 		default:
6524*4882a593Smuzhiyun 			return 0;
6525*4882a593Smuzhiyun 		}
6526*4882a593Smuzhiyun 	default:
6527*4882a593Smuzhiyun 		return -EOPNOTSUPP;
6528*4882a593Smuzhiyun 	}
6529*4882a593Smuzhiyun }
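
/*
 * Illustrative sketch, not driver code: the ethtool core sizes its buffers
 * from get_sset_count() before asking for the matching names and values, so
 * the three callbacks above must agree on the count. "dev", "ops" and
 * "stats" below are placeholders for a hypothetical caller; error handling
 * is elided.
 *
 *	int n = ops->get_sset_count(dev, ETH_SS_STATS);
 *	u8 *names = kcalloc(n, ETH_GSTRING_LEN, GFP_KERNEL);
 *	u64 *vals = kcalloc(n, sizeof(u64), GFP_KERNEL);
 *
 *	ops->get_strings(dev, ETH_SS_STATS, names);
 *	ops->get_ethtool_stats(dev, stats, vals);
 */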
6530*4882a593Smuzhiyun 
6531*4882a593Smuzhiyun static void s2io_ethtool_get_strings(struct net_device *dev,
6532*4882a593Smuzhiyun 				     u32 stringset, u8 *data)
6533*4882a593Smuzhiyun {
6534*4882a593Smuzhiyun 	int stat_size = 0;
6535*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
6536*4882a593Smuzhiyun 
6537*4882a593Smuzhiyun 	switch (stringset) {
6538*4882a593Smuzhiyun 	case ETH_SS_TEST:
6539*4882a593Smuzhiyun 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6540*4882a593Smuzhiyun 		break;
6541*4882a593Smuzhiyun 	case ETH_SS_STATS:
6542*4882a593Smuzhiyun 		stat_size = sizeof(ethtool_xena_stats_keys);
6543*4882a593Smuzhiyun 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6544*4882a593Smuzhiyun 		if (sp->device_type == XFRAME_II_DEVICE) {
6545*4882a593Smuzhiyun 			memcpy(data + stat_size,
6546*4882a593Smuzhiyun 			       &ethtool_enhanced_stats_keys,
6547*4882a593Smuzhiyun 			       sizeof(ethtool_enhanced_stats_keys));
6548*4882a593Smuzhiyun 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6549*4882a593Smuzhiyun 		}
6550*4882a593Smuzhiyun 
6551*4882a593Smuzhiyun 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6552*4882a593Smuzhiyun 		       sizeof(ethtool_driver_stats_keys));
6553*4882a593Smuzhiyun 	}
6554*4882a593Smuzhiyun }
6555*4882a593Smuzhiyun 
6556*4882a593Smuzhiyun static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6557*4882a593Smuzhiyun {
6558*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
6559*4882a593Smuzhiyun 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6560*4882a593Smuzhiyun 
6561*4882a593Smuzhiyun 	if (changed && netif_running(dev)) {
6562*4882a593Smuzhiyun 		int rc;
6563*4882a593Smuzhiyun 
6564*4882a593Smuzhiyun 		s2io_stop_all_tx_queue(sp);
6565*4882a593Smuzhiyun 		s2io_card_down(sp);
6566*4882a593Smuzhiyun 		dev->features = features;
6567*4882a593Smuzhiyun 		rc = s2io_card_up(sp);
6568*4882a593Smuzhiyun 		if (rc)
6569*4882a593Smuzhiyun 			s2io_reset(sp);
6570*4882a593Smuzhiyun 		else
6571*4882a593Smuzhiyun 			s2io_start_all_tx_queue(sp);
6572*4882a593Smuzhiyun 
6573*4882a593Smuzhiyun 		return rc ? rc : 1;
6574*4882a593Smuzhiyun 	}
6575*4882a593Smuzhiyun 
6576*4882a593Smuzhiyun 	return 0;
6577*4882a593Smuzhiyun }
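
/*
 * Calling-contract sketch (an assumption about how the netdev core treats
 * the ndo_set_features() return value, not something defined in this file):
 * a negative errno rejects the change, 0 asks the core to commit the new
 * features, and a positive value - the "return rc ? rc : 1" above - signals
 * that the driver already wrote dev->features itself during the down/up
 * cycle.
 *
 *	err = ops->ndo_set_features(dev, features);
 *	if (err < 0)
 *		return err;			// change rejected
 *	if (err == 0)
 *		dev->features = features;	// core commits it
 *	// err > 0: driver has already updated dev->features
 */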
6578*4882a593Smuzhiyun 
6579*4882a593Smuzhiyun static const struct ethtool_ops netdev_ethtool_ops = {
6580*4882a593Smuzhiyun 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6581*4882a593Smuzhiyun 	.get_regs_len = s2io_ethtool_get_regs_len,
6582*4882a593Smuzhiyun 	.get_regs = s2io_ethtool_gregs,
6583*4882a593Smuzhiyun 	.get_link = ethtool_op_get_link,
6584*4882a593Smuzhiyun 	.get_eeprom_len = s2io_get_eeprom_len,
6585*4882a593Smuzhiyun 	.get_eeprom = s2io_ethtool_geeprom,
6586*4882a593Smuzhiyun 	.set_eeprom = s2io_ethtool_seeprom,
6587*4882a593Smuzhiyun 	.get_ringparam = s2io_ethtool_gringparam,
6588*4882a593Smuzhiyun 	.get_pauseparam = s2io_ethtool_getpause_data,
6589*4882a593Smuzhiyun 	.set_pauseparam = s2io_ethtool_setpause_data,
6590*4882a593Smuzhiyun 	.self_test = s2io_ethtool_test,
6591*4882a593Smuzhiyun 	.get_strings = s2io_ethtool_get_strings,
6592*4882a593Smuzhiyun 	.set_phys_id = s2io_ethtool_set_led,
6593*4882a593Smuzhiyun 	.get_ethtool_stats = s2io_get_ethtool_stats,
6594*4882a593Smuzhiyun 	.get_sset_count = s2io_get_sset_count,
6595*4882a593Smuzhiyun 	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
6596*4882a593Smuzhiyun 	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
6597*4882a593Smuzhiyun };
6598*4882a593Smuzhiyun 
6599*4882a593Smuzhiyun /**
6600*4882a593Smuzhiyun  *  s2io_ioctl - Entry point for the Ioctl
6601*4882a593Smuzhiyun  *  @dev :  Device pointer.
6602*4882a593Smuzhiyun  *  @rq :  An IOCTL-specific structure that can contain a pointer to
6603*4882a593Smuzhiyun  *  a proprietary structure used to pass information to the driver.
6604*4882a593Smuzhiyun  *  @cmd :  This is used to distinguish between the different commands that
6605*4882a593Smuzhiyun  *  can be passed to the IOCTL functions.
6606*4882a593Smuzhiyun  *  Description:
6607*4882a593Smuzhiyun  *  Currently no special functionality is supported in IOCTL, hence the
6608*4882a593Smuzhiyun  *  function always returns -EOPNOTSUPP.
6609*4882a593Smuzhiyun  */
6610*4882a593Smuzhiyun 
6611*4882a593Smuzhiyun static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6612*4882a593Smuzhiyun {
6613*4882a593Smuzhiyun 	return -EOPNOTSUPP;
6614*4882a593Smuzhiyun }
6615*4882a593Smuzhiyun 
6616*4882a593Smuzhiyun /**
6617*4882a593Smuzhiyun  *  s2io_change_mtu - entry point to change MTU size for the device.
6618*4882a593Smuzhiyun  *   @dev : device pointer.
6619*4882a593Smuzhiyun  *   @new_mtu : the new MTU size for the device.
6620*4882a593Smuzhiyun  *   Description: A driver entry point to change MTU size for the device.
6621*4882a593Smuzhiyun  *   Before changing the MTU the device must be stopped.
6622*4882a593Smuzhiyun  *  Return value:
6623*4882a593Smuzhiyun  *   0 on success and an appropriate negative integer as defined in errno.h
6624*4882a593Smuzhiyun  *   on failure.
6625*4882a593Smuzhiyun  */
6626*4882a593Smuzhiyun 
6627*4882a593Smuzhiyun static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6628*4882a593Smuzhiyun {
6629*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
6630*4882a593Smuzhiyun 	int ret = 0;
6631*4882a593Smuzhiyun 
6632*4882a593Smuzhiyun 	dev->mtu = new_mtu;
6633*4882a593Smuzhiyun 	if (netif_running(dev)) {
6634*4882a593Smuzhiyun 		s2io_stop_all_tx_queue(sp);
6635*4882a593Smuzhiyun 		s2io_card_down(sp);
6636*4882a593Smuzhiyun 		ret = s2io_card_up(sp);
6637*4882a593Smuzhiyun 		if (ret) {
6638*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6639*4882a593Smuzhiyun 				  __func__);
6640*4882a593Smuzhiyun 			return ret;
6641*4882a593Smuzhiyun 		}
6642*4882a593Smuzhiyun 		s2io_wake_all_tx_queue(sp);
6643*4882a593Smuzhiyun 	} else { /* Device is down */
6644*4882a593Smuzhiyun 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6645*4882a593Smuzhiyun 		u64 val64 = new_mtu;
6646*4882a593Smuzhiyun 
6647*4882a593Smuzhiyun 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6648*4882a593Smuzhiyun 	}
6649*4882a593Smuzhiyun 
6650*4882a593Smuzhiyun 	return ret;
6651*4882a593Smuzhiyun }
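
/*
 * Worked example for the register write above, assuming the driver's usual
 * vBIT(val, loc, sz) definition of ((u64)(val)) << (64 - (loc) - (sz))
 * (MSB-relative bit numbering): vBIT(new_mtu, 2, 14) places the 14-bit
 * maximum payload length at MSB bits 2..15 of rmac_max_pyld_len.
 *
 *	new_mtu = 9000;				// 0x2328
 *	vBIT(9000, 2, 14) == (u64)9000 << 48;	// 0x2328000000000000ULL
 */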
6652*4882a593Smuzhiyun 
6653*4882a593Smuzhiyun /**
6654*4882a593Smuzhiyun  * s2io_set_link - Set the link status
6655*4882a593Smuzhiyun  * @work: work struct containing a pointer to the device private structure
6656*4882a593Smuzhiyun  * Description: Sets the link status for the adapter
6657*4882a593Smuzhiyun  */
6658*4882a593Smuzhiyun 
6659*4882a593Smuzhiyun static void s2io_set_link(struct work_struct *work)
6660*4882a593Smuzhiyun {
6661*4882a593Smuzhiyun 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6662*4882a593Smuzhiyun 					    set_link_task);
6663*4882a593Smuzhiyun 	struct net_device *dev = nic->dev;
6664*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6665*4882a593Smuzhiyun 	register u64 val64;
6666*4882a593Smuzhiyun 	u16 subid;
6667*4882a593Smuzhiyun 
6668*4882a593Smuzhiyun 	rtnl_lock();
6669*4882a593Smuzhiyun 
6670*4882a593Smuzhiyun 	if (!netif_running(dev))
6671*4882a593Smuzhiyun 		goto out_unlock;
6672*4882a593Smuzhiyun 
6673*4882a593Smuzhiyun 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6674*4882a593Smuzhiyun 		/* The card is being reset, no point doing anything */
6675*4882a593Smuzhiyun 		goto out_unlock;
6676*4882a593Smuzhiyun 	}
6677*4882a593Smuzhiyun 
6678*4882a593Smuzhiyun 	subid = nic->pdev->subsystem_device;
6679*4882a593Smuzhiyun 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6680*4882a593Smuzhiyun 		/*
6681*4882a593Smuzhiyun 		 * Allow a small delay for the NIC's self-initiated
6682*4882a593Smuzhiyun 		 * cleanup to complete.
6683*4882a593Smuzhiyun 		 */
6684*4882a593Smuzhiyun 		msleep(100);
6685*4882a593Smuzhiyun 	}
6686*4882a593Smuzhiyun 
6687*4882a593Smuzhiyun 	val64 = readq(&bar0->adapter_status);
6688*4882a593Smuzhiyun 	if (LINK_IS_UP(val64)) {
6689*4882a593Smuzhiyun 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6690*4882a593Smuzhiyun 			if (verify_xena_quiescence(nic)) {
6691*4882a593Smuzhiyun 				val64 = readq(&bar0->adapter_control);
6692*4882a593Smuzhiyun 				val64 |= ADAPTER_CNTL_EN;
6693*4882a593Smuzhiyun 				writeq(val64, &bar0->adapter_control);
6694*4882a593Smuzhiyun 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6695*4882a593Smuzhiyun 					    nic->device_type, subid)) {
6696*4882a593Smuzhiyun 					val64 = readq(&bar0->gpio_control);
6697*4882a593Smuzhiyun 					val64 |= GPIO_CTRL_GPIO_0;
6698*4882a593Smuzhiyun 					writeq(val64, &bar0->gpio_control);
6699*4882a593Smuzhiyun 					val64 = readq(&bar0->gpio_control);
6700*4882a593Smuzhiyun 				} else {
6701*4882a593Smuzhiyun 					val64 |= ADAPTER_LED_ON;
6702*4882a593Smuzhiyun 					writeq(val64, &bar0->adapter_control);
6703*4882a593Smuzhiyun 				}
6704*4882a593Smuzhiyun 				nic->device_enabled_once = true;
6705*4882a593Smuzhiyun 			} else {
6706*4882a593Smuzhiyun 				DBG_PRINT(ERR_DBG,
6707*4882a593Smuzhiyun 					  "%s: Error: device is not Quiescent\n",
6708*4882a593Smuzhiyun 					  dev->name);
6709*4882a593Smuzhiyun 				s2io_stop_all_tx_queue(nic);
6710*4882a593Smuzhiyun 			}
6711*4882a593Smuzhiyun 		}
6712*4882a593Smuzhiyun 		val64 = readq(&bar0->adapter_control);
6713*4882a593Smuzhiyun 		val64 |= ADAPTER_LED_ON;
6714*4882a593Smuzhiyun 		writeq(val64, &bar0->adapter_control);
6715*4882a593Smuzhiyun 		s2io_link(nic, LINK_UP);
6716*4882a593Smuzhiyun 	} else {
6717*4882a593Smuzhiyun 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6718*4882a593Smuzhiyun 						      subid)) {
6719*4882a593Smuzhiyun 			val64 = readq(&bar0->gpio_control);
6720*4882a593Smuzhiyun 			val64 &= ~GPIO_CTRL_GPIO_0;
6721*4882a593Smuzhiyun 			writeq(val64, &bar0->gpio_control);
6722*4882a593Smuzhiyun 			val64 = readq(&bar0->gpio_control);
6723*4882a593Smuzhiyun 		}
6724*4882a593Smuzhiyun 		/* turn off LED */
6725*4882a593Smuzhiyun 		val64 = readq(&bar0->adapter_control);
6726*4882a593Smuzhiyun 		val64 = val64 & (~ADAPTER_LED_ON);
6727*4882a593Smuzhiyun 		writeq(val64, &bar0->adapter_control);
6728*4882a593Smuzhiyun 		s2io_link(nic, LINK_DOWN);
6729*4882a593Smuzhiyun 	}
6730*4882a593Smuzhiyun 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6731*4882a593Smuzhiyun 
6732*4882a593Smuzhiyun out_unlock:
6733*4882a593Smuzhiyun 	rtnl_unlock();
6734*4882a593Smuzhiyun }
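
/*
 * Note on the flag handling above (sketch): __S2IO_STATE_LINK_TASK acts as
 * a simple bit lock that serializes this worker against do_s2io_card_down(),
 * which spins on the same bit before tearing the card down.  The pattern is
 * the classic atomic test-and-set lock:
 *
 *	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &nic->state))
 *		return;			// someone else holds the lock
 *	... critical section ...
 *	clear_bit(__S2IO_STATE_LINK_TASK, &nic->state);
 */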
6735*4882a593Smuzhiyun 
6736*4882a593Smuzhiyun static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6737*4882a593Smuzhiyun 				  struct buffAdd *ba,
6738*4882a593Smuzhiyun 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6739*4882a593Smuzhiyun 				  u64 *temp2, int size)
6740*4882a593Smuzhiyun {
6741*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
6742*4882a593Smuzhiyun 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6743*4882a593Smuzhiyun 
6744*4882a593Smuzhiyun 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6745*4882a593Smuzhiyun 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6746*4882a593Smuzhiyun 		/* allocate skb */
6747*4882a593Smuzhiyun 		if (*skb) {
6748*4882a593Smuzhiyun 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6749*4882a593Smuzhiyun 			/*
6750*4882a593Smuzhiyun 			 * As Rx frames are not going to be processed,
6751*4882a593Smuzhiyun 			 * reuse the same mapped address for the RxD
6752*4882a593Smuzhiyun 			 * buffer pointer
6753*4882a593Smuzhiyun 			 */
6754*4882a593Smuzhiyun 			rxdp1->Buffer0_ptr = *temp0;
6755*4882a593Smuzhiyun 		} else {
6756*4882a593Smuzhiyun 			*skb = netdev_alloc_skb(dev, size);
6757*4882a593Smuzhiyun 			if (!(*skb)) {
6758*4882a593Smuzhiyun 				DBG_PRINT(INFO_DBG,
6759*4882a593Smuzhiyun 					  "%s: Out of memory to allocate %s\n",
6760*4882a593Smuzhiyun 					  dev->name, "1 buf mode SKBs");
6761*4882a593Smuzhiyun 				stats->mem_alloc_fail_cnt++;
6762*4882a593Smuzhiyun 				return -ENOMEM;
6763*4882a593Smuzhiyun 			}
6764*4882a593Smuzhiyun 			stats->mem_allocated += (*skb)->truesize;
6765*4882a593Smuzhiyun 			/* Store the mapped addr in a temp variable
6766*4882a593Smuzhiyun 			 * so that it can be used for the next RxD whose
6767*4882a593Smuzhiyun 			 * Host_Control is NULL
6768*4882a593Smuzhiyun 			 */
6769*4882a593Smuzhiyun 			rxdp1->Buffer0_ptr = *temp0 =
6770*4882a593Smuzhiyun 				dma_map_single(&sp->pdev->dev, (*skb)->data,
6771*4882a593Smuzhiyun 					       size - NET_IP_ALIGN,
6772*4882a593Smuzhiyun 					       DMA_FROM_DEVICE);
6773*4882a593Smuzhiyun 			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
6774*4882a593Smuzhiyun 				goto memalloc_failed;
6775*4882a593Smuzhiyun 			rxdp->Host_Control = (unsigned long) (*skb);
6776*4882a593Smuzhiyun 		}
6777*4882a593Smuzhiyun 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6778*4882a593Smuzhiyun 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6779*4882a593Smuzhiyun 		/* Two buffer Mode */
6780*4882a593Smuzhiyun 		if (*skb) {
6781*4882a593Smuzhiyun 			rxdp3->Buffer2_ptr = *temp2;
6782*4882a593Smuzhiyun 			rxdp3->Buffer0_ptr = *temp0;
6783*4882a593Smuzhiyun 			rxdp3->Buffer1_ptr = *temp1;
6784*4882a593Smuzhiyun 		} else {
6785*4882a593Smuzhiyun 			*skb = netdev_alloc_skb(dev, size);
6786*4882a593Smuzhiyun 			if (!(*skb)) {
6787*4882a593Smuzhiyun 				DBG_PRINT(INFO_DBG,
6788*4882a593Smuzhiyun 					  "%s: Out of memory to allocate %s\n",
6789*4882a593Smuzhiyun 					  dev->name,
6790*4882a593Smuzhiyun 					  "2 buf mode SKBs");
6791*4882a593Smuzhiyun 				stats->mem_alloc_fail_cnt++;
6792*4882a593Smuzhiyun 				return -ENOMEM;
6793*4882a593Smuzhiyun 			}
6794*4882a593Smuzhiyun 			stats->mem_allocated += (*skb)->truesize;
6795*4882a593Smuzhiyun 			rxdp3->Buffer2_ptr = *temp2 =
6796*4882a593Smuzhiyun 				dma_map_single(&sp->pdev->dev, (*skb)->data,
6797*4882a593Smuzhiyun 					       dev->mtu + 4, DMA_FROM_DEVICE);
6798*4882a593Smuzhiyun 			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
6799*4882a593Smuzhiyun 				goto memalloc_failed;
6800*4882a593Smuzhiyun 			rxdp3->Buffer0_ptr = *temp0 =
6801*4882a593Smuzhiyun 				dma_map_single(&sp->pdev->dev, ba->ba_0,
6802*4882a593Smuzhiyun 					       BUF0_LEN, DMA_FROM_DEVICE);
6803*4882a593Smuzhiyun 			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
6804*4882a593Smuzhiyun 				dma_unmap_single(&sp->pdev->dev,
6805*4882a593Smuzhiyun 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6806*4882a593Smuzhiyun 						 dev->mtu + 4,
6807*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
6808*4882a593Smuzhiyun 				goto memalloc_failed;
6809*4882a593Smuzhiyun 			}
6810*4882a593Smuzhiyun 			rxdp->Host_Control = (unsigned long) (*skb);
6811*4882a593Smuzhiyun 
6812*4882a593Smuzhiyun 			/* Buffer-1 is a dummy buffer that is not used */
6813*4882a593Smuzhiyun 			rxdp3->Buffer1_ptr = *temp1 =
6814*4882a593Smuzhiyun 				dma_map_single(&sp->pdev->dev, ba->ba_1,
6815*4882a593Smuzhiyun 					       BUF1_LEN, DMA_FROM_DEVICE);
6816*4882a593Smuzhiyun 			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
6817*4882a593Smuzhiyun 				dma_unmap_single(&sp->pdev->dev,
6818*4882a593Smuzhiyun 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6819*4882a593Smuzhiyun 						 BUF0_LEN, DMA_FROM_DEVICE);
6820*4882a593Smuzhiyun 				dma_unmap_single(&sp->pdev->dev,
6821*4882a593Smuzhiyun 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6822*4882a593Smuzhiyun 						 dev->mtu + 4,
6823*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
6824*4882a593Smuzhiyun 				goto memalloc_failed;
6825*4882a593Smuzhiyun 			}
6826*4882a593Smuzhiyun 		}
6827*4882a593Smuzhiyun 	}
6828*4882a593Smuzhiyun 	return 0;
6829*4882a593Smuzhiyun 
6830*4882a593Smuzhiyun memalloc_failed:
6831*4882a593Smuzhiyun 	stats->pci_map_fail_cnt++;
6832*4882a593Smuzhiyun 	stats->mem_freed += (*skb)->truesize;
6833*4882a593Smuzhiyun 	dev_kfree_skb(*skb);
6834*4882a593Smuzhiyun 	return -ENOMEM;
6835*4882a593Smuzhiyun }
6836*4882a593Smuzhiyun 
6837*4882a593Smuzhiyun static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6838*4882a593Smuzhiyun 				int size)
6839*4882a593Smuzhiyun {
6840*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
6841*4882a593Smuzhiyun 	if (sp->rxd_mode == RXD_MODE_1) {
6842*4882a593Smuzhiyun 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6843*4882a593Smuzhiyun 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6844*4882a593Smuzhiyun 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6845*4882a593Smuzhiyun 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6846*4882a593Smuzhiyun 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6847*4882a593Smuzhiyun 	}
6848*4882a593Smuzhiyun }
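
/*
 * Summary of the buffer sizes programmed above (restating the code; sizes
 * in bytes):
 *
 *	RXD_MODE_1:  Buffer0 = size - NET_IP_ALIGN	(whole frame)
 *	RXD_MODE_3B: Buffer0 = BUF0_LEN			(header area)
 *		     Buffer1 = 1			(dummy)
 *		     Buffer2 = dev->mtu + 4		(payload)
 */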
6849*4882a593Smuzhiyun 
6850*4882a593Smuzhiyun static int rxd_owner_bit_reset(struct s2io_nic *sp)
6851*4882a593Smuzhiyun {
6852*4882a593Smuzhiyun 	int i, j, k, blk_cnt = 0, size;
6853*4882a593Smuzhiyun 	struct config_param *config = &sp->config;
6854*4882a593Smuzhiyun 	struct mac_info *mac_control = &sp->mac_control;
6855*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
6856*4882a593Smuzhiyun 	struct RxD_t *rxdp = NULL;
6857*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
6858*4882a593Smuzhiyun 	struct buffAdd *ba = NULL;
6859*4882a593Smuzhiyun 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6860*4882a593Smuzhiyun 
6861*4882a593Smuzhiyun 	/* Calculate the size based on ring mode */
6862*4882a593Smuzhiyun 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6863*4882a593Smuzhiyun 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6864*4882a593Smuzhiyun 	if (sp->rxd_mode == RXD_MODE_1)
6865*4882a593Smuzhiyun 		size += NET_IP_ALIGN;
6866*4882a593Smuzhiyun 	else if (sp->rxd_mode == RXD_MODE_3B)
6867*4882a593Smuzhiyun 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6868*4882a593Smuzhiyun 
6869*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
6870*4882a593Smuzhiyun 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6871*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
6872*4882a593Smuzhiyun 
6873*4882a593Smuzhiyun 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6874*4882a593Smuzhiyun 
6875*4882a593Smuzhiyun 		for (j = 0; j < blk_cnt; j++) {
6876*4882a593Smuzhiyun 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6877*4882a593Smuzhiyun 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6878*4882a593Smuzhiyun 				if (sp->rxd_mode == RXD_MODE_3B)
6879*4882a593Smuzhiyun 					ba = &ring->ba[j][k];
6880*4882a593Smuzhiyun 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6881*4882a593Smuzhiyun 							   &temp0_64,
6882*4882a593Smuzhiyun 							   &temp1_64,
6883*4882a593Smuzhiyun 							   &temp2_64,
6884*4882a593Smuzhiyun 							   size) == -ENOMEM) {
6885*4882a593Smuzhiyun 					return 0;
6886*4882a593Smuzhiyun 				}
6887*4882a593Smuzhiyun 
6888*4882a593Smuzhiyun 				set_rxd_buffer_size(sp, rxdp, size);
6889*4882a593Smuzhiyun 				dma_wmb();
6890*4882a593Smuzhiyun 				/* flip the Ownership bit to Hardware */
6891*4882a593Smuzhiyun 				rxdp->Control_1 |= RXD_OWN_XENA;
6892*4882a593Smuzhiyun 			}
6893*4882a593Smuzhiyun 		}
6894*4882a593Smuzhiyun 	}
6895*4882a593Smuzhiyun 	return 0;
6896*4882a593Smuzhiyun 
6897*4882a593Smuzhiyun }
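
/*
 * Ordering note (sketch): the dma_wmb() in the loop above ensures the
 * device observes the freshly written buffer pointers and sizes before it
 * sees the ownership bit flip, which is what actually hands the descriptor
 * back to hardware:
 *
 *	set_rxd_buffer_size(sp, rxdp, size);	// descriptor fields first
 *	dma_wmb();				// order fields vs. the flip
 *	rxdp->Control_1 |= RXD_OWN_XENA;	// then give the RxD to HW
 */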
6898*4882a593Smuzhiyun 
6899*4882a593Smuzhiyun static int s2io_add_isr(struct s2io_nic *sp)
6900*4882a593Smuzhiyun {
6901*4882a593Smuzhiyun 	int ret = 0;
6902*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
6903*4882a593Smuzhiyun 	int err = 0;
6904*4882a593Smuzhiyun 
6905*4882a593Smuzhiyun 	if (sp->config.intr_type == MSI_X)
6906*4882a593Smuzhiyun 		ret = s2io_enable_msi_x(sp);
6907*4882a593Smuzhiyun 	if (ret) {
6908*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6909*4882a593Smuzhiyun 		sp->config.intr_type = INTA;
6910*4882a593Smuzhiyun 	}
6911*4882a593Smuzhiyun 
6912*4882a593Smuzhiyun 	/*
6913*4882a593Smuzhiyun 	 * Store the values of the MSIX table in
6914*4882a593Smuzhiyun 	 * the struct s2io_nic structure
6915*4882a593Smuzhiyun 	 */
6916*4882a593Smuzhiyun 	store_xmsi_data(sp);
6917*4882a593Smuzhiyun 
6918*4882a593Smuzhiyun 	/* After proper initialization of H/W, register ISR */
6919*4882a593Smuzhiyun 	if (sp->config.intr_type == MSI_X) {
6920*4882a593Smuzhiyun 		int i, msix_rx_cnt = 0;
6921*4882a593Smuzhiyun 
6922*4882a593Smuzhiyun 		for (i = 0; i < sp->num_entries; i++) {
6923*4882a593Smuzhiyun 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6924*4882a593Smuzhiyun 				if (sp->s2io_entries[i].type ==
6925*4882a593Smuzhiyun 				    MSIX_RING_TYPE) {
6926*4882a593Smuzhiyun 					snprintf(sp->desc[i],
6927*4882a593Smuzhiyun 						sizeof(sp->desc[i]),
6928*4882a593Smuzhiyun 						"%s:MSI-X-%d-RX",
6929*4882a593Smuzhiyun 						dev->name, i);
6930*4882a593Smuzhiyun 					err = request_irq(sp->entries[i].vector,
6931*4882a593Smuzhiyun 							  s2io_msix_ring_handle,
6932*4882a593Smuzhiyun 							  0,
6933*4882a593Smuzhiyun 							  sp->desc[i],
6934*4882a593Smuzhiyun 							  sp->s2io_entries[i].arg);
6935*4882a593Smuzhiyun 				} else if (sp->s2io_entries[i].type ==
6936*4882a593Smuzhiyun 					   MSIX_ALARM_TYPE) {
6937*4882a593Smuzhiyun 					snprintf(sp->desc[i],
6938*4882a593Smuzhiyun 						sizeof(sp->desc[i]),
6939*4882a593Smuzhiyun 						"%s:MSI-X-%d-TX",
6940*4882a593Smuzhiyun 						dev->name, i);
6941*4882a593Smuzhiyun 					err = request_irq(sp->entries[i].vector,
6942*4882a593Smuzhiyun 							  s2io_msix_fifo_handle,
6943*4882a593Smuzhiyun 							  0,
6944*4882a593Smuzhiyun 							  sp->desc[i],
6945*4882a593Smuzhiyun 							  sp->s2io_entries[i].arg);
6946*4882a593Smuzhiyun 
6947*4882a593Smuzhiyun 				}
6948*4882a593Smuzhiyun 				/* If either data or addr is zero, print it. */
6949*4882a593Smuzhiyun 				if (!(sp->msix_info[i].addr &&
6950*4882a593Smuzhiyun 				      sp->msix_info[i].data)) {
6951*4882a593Smuzhiyun 					DBG_PRINT(ERR_DBG,
6952*4882a593Smuzhiyun 						  "%s @Addr:0x%llx Data:0x%llx\n",
6953*4882a593Smuzhiyun 						  sp->desc[i],
6954*4882a593Smuzhiyun 						  (unsigned long long)
6955*4882a593Smuzhiyun 						  sp->msix_info[i].addr,
6956*4882a593Smuzhiyun 						  (unsigned long long)
6957*4882a593Smuzhiyun 						  ntohl(sp->msix_info[i].data));
6958*4882a593Smuzhiyun 				} else
6959*4882a593Smuzhiyun 					msix_rx_cnt++;
6960*4882a593Smuzhiyun 				if (err) {
6961*4882a593Smuzhiyun 					remove_msix_isr(sp);
6962*4882a593Smuzhiyun 
6963*4882a593Smuzhiyun 					DBG_PRINT(ERR_DBG,
6964*4882a593Smuzhiyun 						  "%s:MSI-X-%d registration "
6965*4882a593Smuzhiyun 						  "failed\n", dev->name, i);
6966*4882a593Smuzhiyun 
6967*4882a593Smuzhiyun 					DBG_PRINT(ERR_DBG,
6968*4882a593Smuzhiyun 						  "%s: Defaulting to INTA\n",
6969*4882a593Smuzhiyun 						  dev->name);
6970*4882a593Smuzhiyun 					sp->config.intr_type = INTA;
6971*4882a593Smuzhiyun 					break;
6972*4882a593Smuzhiyun 				}
6973*4882a593Smuzhiyun 				sp->s2io_entries[i].in_use =
6974*4882a593Smuzhiyun 					MSIX_REGISTERED_SUCCESS;
6975*4882a593Smuzhiyun 			}
6976*4882a593Smuzhiyun 		}
6977*4882a593Smuzhiyun 		if (!err) {
6978*4882a593Smuzhiyun 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6979*4882a593Smuzhiyun 			DBG_PRINT(INFO_DBG,
6980*4882a593Smuzhiyun 				  "MSI-X-TX entries enabled through alarm vector\n");
6981*4882a593Smuzhiyun 		}
6982*4882a593Smuzhiyun 	}
6983*4882a593Smuzhiyun 	if (sp->config.intr_type == INTA) {
6984*4882a593Smuzhiyun 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
6985*4882a593Smuzhiyun 				  sp->name, dev);
6986*4882a593Smuzhiyun 		if (err) {
6987*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6988*4882a593Smuzhiyun 				  dev->name);
6989*4882a593Smuzhiyun 			return -1;
6990*4882a593Smuzhiyun 		}
6991*4882a593Smuzhiyun 	}
6992*4882a593Smuzhiyun 	return 0;
6993*4882a593Smuzhiyun }
6994*4882a593Smuzhiyun 
6995*4882a593Smuzhiyun static void s2io_rem_isr(struct s2io_nic *sp)
6996*4882a593Smuzhiyun {
6997*4882a593Smuzhiyun 	if (sp->config.intr_type == MSI_X)
6998*4882a593Smuzhiyun 		remove_msix_isr(sp);
6999*4882a593Smuzhiyun 	else
7000*4882a593Smuzhiyun 		remove_inta_isr(sp);
7001*4882a593Smuzhiyun }
7002*4882a593Smuzhiyun 
7003*4882a593Smuzhiyun static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7004*4882a593Smuzhiyun {
7005*4882a593Smuzhiyun 	int cnt = 0;
7006*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7007*4882a593Smuzhiyun 	register u64 val64 = 0;
7008*4882a593Smuzhiyun 	struct config_param *config;
7009*4882a593Smuzhiyun 	config = &sp->config;
7010*4882a593Smuzhiyun 
7011*4882a593Smuzhiyun 	if (!is_s2io_card_up(sp))
7012*4882a593Smuzhiyun 		return;
7013*4882a593Smuzhiyun 
7014*4882a593Smuzhiyun 	del_timer_sync(&sp->alarm_timer);
7015*4882a593Smuzhiyun 	/* If s2io_set_link task is executing, wait till it completes. */
7016*4882a593Smuzhiyun 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7017*4882a593Smuzhiyun 		msleep(50);
7018*4882a593Smuzhiyun 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7019*4882a593Smuzhiyun 
7020*4882a593Smuzhiyun 	/* Disable napi */
7021*4882a593Smuzhiyun 	if (sp->config.napi) {
7022*4882a593Smuzhiyun 		int off = 0;
7023*4882a593Smuzhiyun 		if (config->intr_type == MSI_X) {
7024*4882a593Smuzhiyun 			for (; off < sp->config.rx_ring_num; off++)
7025*4882a593Smuzhiyun 				napi_disable(&sp->mac_control.rings[off].napi);
7026*4882a593Smuzhiyun 		} else
7028*4882a593Smuzhiyun 			napi_disable(&sp->napi);
7029*4882a593Smuzhiyun 	}
7030*4882a593Smuzhiyun 
7031*4882a593Smuzhiyun 	/* disable Tx and Rx traffic on the NIC */
7032*4882a593Smuzhiyun 	if (do_io)
7033*4882a593Smuzhiyun 		stop_nic(sp);
7034*4882a593Smuzhiyun 
7035*4882a593Smuzhiyun 	s2io_rem_isr(sp);
7036*4882a593Smuzhiyun 
7037*4882a593Smuzhiyun 	/* stop the tx queue, indicate link down */
7038*4882a593Smuzhiyun 	s2io_link(sp, LINK_DOWN);
7039*4882a593Smuzhiyun 
7040*4882a593Smuzhiyun 	/* Check if the device is Quiescent and then Reset the NIC */
7041*4882a593Smuzhiyun 	while (do_io) {
7042*4882a593Smuzhiyun 		/* As per the HW requirement we need to replenish the
7043*4882a593Smuzhiyun 		 * receive buffers to avoid a ring bump. Since there is
7044*4882a593Smuzhiyun 		 * no intention of processing the Rx frames at this point,
7045*4882a593Smuzhiyun 		 * we just set the ownership bit of each RxD in every Rx
7046*4882a593Smuzhiyun 		 * ring to HW and set the appropriate buffer size
7047*4882a593Smuzhiyun 		 * based on the ring mode.
7048*4882a593Smuzhiyun 		 */
7049*4882a593Smuzhiyun 		rxd_owner_bit_reset(sp);
7050*4882a593Smuzhiyun 
7051*4882a593Smuzhiyun 		val64 = readq(&bar0->adapter_status);
7052*4882a593Smuzhiyun 		if (verify_xena_quiescence(sp)) {
7053*4882a593Smuzhiyun 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7054*4882a593Smuzhiyun 				break;
7055*4882a593Smuzhiyun 		}
7056*4882a593Smuzhiyun 
7057*4882a593Smuzhiyun 		msleep(50);
7058*4882a593Smuzhiyun 		cnt++;
7059*4882a593Smuzhiyun 		if (cnt == 10) {
7060*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7061*4882a593Smuzhiyun 				  "adapter status reads 0x%llx\n",
7062*4882a593Smuzhiyun 				  (unsigned long long)val64);
7063*4882a593Smuzhiyun 			break;
7064*4882a593Smuzhiyun 		}
7065*4882a593Smuzhiyun 	}
7066*4882a593Smuzhiyun 	if (do_io)
7067*4882a593Smuzhiyun 		s2io_reset(sp);
7068*4882a593Smuzhiyun 
7069*4882a593Smuzhiyun 	/* Free all Tx buffers */
7070*4882a593Smuzhiyun 	free_tx_buffers(sp);
7071*4882a593Smuzhiyun 
7072*4882a593Smuzhiyun 	/* Free all Rx buffers */
7073*4882a593Smuzhiyun 	free_rx_buffers(sp);
7074*4882a593Smuzhiyun 
7075*4882a593Smuzhiyun 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7076*4882a593Smuzhiyun }
7077*4882a593Smuzhiyun 
7078*4882a593Smuzhiyun static void s2io_card_down(struct s2io_nic *sp)
7079*4882a593Smuzhiyun {
7080*4882a593Smuzhiyun 	do_s2io_card_down(sp, 1);
7081*4882a593Smuzhiyun }
7082*4882a593Smuzhiyun 
7083*4882a593Smuzhiyun static int s2io_card_up(struct s2io_nic *sp)
7084*4882a593Smuzhiyun {
7085*4882a593Smuzhiyun 	int i, ret = 0;
7086*4882a593Smuzhiyun 	struct config_param *config;
7087*4882a593Smuzhiyun 	struct mac_info *mac_control;
7088*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
7089*4882a593Smuzhiyun 	u16 interruptible;
7090*4882a593Smuzhiyun 
7091*4882a593Smuzhiyun 	/* Initialize the H/W I/O registers */
7092*4882a593Smuzhiyun 	ret = init_nic(sp);
7093*4882a593Smuzhiyun 	if (ret != 0) {
7094*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7095*4882a593Smuzhiyun 			  dev->name);
7096*4882a593Smuzhiyun 		if (ret != -EIO)
7097*4882a593Smuzhiyun 			s2io_reset(sp);
7098*4882a593Smuzhiyun 		return ret;
7099*4882a593Smuzhiyun 	}
7100*4882a593Smuzhiyun 
7101*4882a593Smuzhiyun 	/*
7102*4882a593Smuzhiyun 	 * Initializing the Rx buffers. For now we are considering only 1
7103*4882a593Smuzhiyun 	 * Rx ring and initializing buffers into 30 Rx blocks
7104*4882a593Smuzhiyun 	 */
7105*4882a593Smuzhiyun 	config = &sp->config;
7106*4882a593Smuzhiyun 	mac_control = &sp->mac_control;
7107*4882a593Smuzhiyun 
7108*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
7109*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
7110*4882a593Smuzhiyun 
7111*4882a593Smuzhiyun 		ring->mtu = dev->mtu;
7112*4882a593Smuzhiyun 		ring->lro = !!(dev->features & NETIF_F_LRO);
7113*4882a593Smuzhiyun 		ret = fill_rx_buffers(sp, ring, 1);
7114*4882a593Smuzhiyun 		if (ret) {
7115*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7116*4882a593Smuzhiyun 				  dev->name);
7117*4882a593Smuzhiyun 			ret = -ENOMEM;
7118*4882a593Smuzhiyun 			goto err_fill_buff;
7119*4882a593Smuzhiyun 		}
7120*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7121*4882a593Smuzhiyun 			  ring->rx_bufs_left);
7122*4882a593Smuzhiyun 	}
7123*4882a593Smuzhiyun 
7124*4882a593Smuzhiyun 	/* Initialise napi */
7125*4882a593Smuzhiyun 	if (config->napi) {
7126*4882a593Smuzhiyun 		if (config->intr_type == MSI_X) {
7127*4882a593Smuzhiyun 			for (i = 0; i < sp->config.rx_ring_num; i++)
7128*4882a593Smuzhiyun 				napi_enable(&sp->mac_control.rings[i].napi);
7129*4882a593Smuzhiyun 		} else {
7130*4882a593Smuzhiyun 			napi_enable(&sp->napi);
7131*4882a593Smuzhiyun 		}
7132*4882a593Smuzhiyun 	}
7133*4882a593Smuzhiyun 
7134*4882a593Smuzhiyun 	/* Maintain the state prior to the open */
7135*4882a593Smuzhiyun 	if (sp->promisc_flg)
7136*4882a593Smuzhiyun 		sp->promisc_flg = 0;
7137*4882a593Smuzhiyun 	if (sp->m_cast_flg) {
7138*4882a593Smuzhiyun 		sp->m_cast_flg = 0;
7139*4882a593Smuzhiyun 		sp->all_multi_pos = 0;
7140*4882a593Smuzhiyun 	}
7141*4882a593Smuzhiyun 
7142*4882a593Smuzhiyun 	/* Setting its receive mode */
7143*4882a593Smuzhiyun 	s2io_set_multicast(dev);
7144*4882a593Smuzhiyun 
7145*4882a593Smuzhiyun 	if (dev->features & NETIF_F_LRO) {
7146*4882a593Smuzhiyun 		/* Initialize max aggregatable pkts per session based on MTU */
7147*4882a593Smuzhiyun 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7148*4882a593Smuzhiyun 		/* Check if we can use (if specified) user provided value */
7149*4882a593Smuzhiyun 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7150*4882a593Smuzhiyun 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7151*4882a593Smuzhiyun 	}
7152*4882a593Smuzhiyun 
7153*4882a593Smuzhiyun 	/* Enable Rx Traffic and interrupts on the NIC */
7154*4882a593Smuzhiyun 	if (start_nic(sp)) {
7155*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7156*4882a593Smuzhiyun 		ret = -ENODEV;
7157*4882a593Smuzhiyun 		goto err_out;
7158*4882a593Smuzhiyun 	}
7159*4882a593Smuzhiyun 
7160*4882a593Smuzhiyun 	/* Add interrupt service routine */
7161*4882a593Smuzhiyun 	if (s2io_add_isr(sp) != 0) {
7162*4882a593Smuzhiyun 		if (sp->config.intr_type == MSI_X)
7163*4882a593Smuzhiyun 			s2io_rem_isr(sp);
7164*4882a593Smuzhiyun 		ret = -ENODEV;
7165*4882a593Smuzhiyun 		goto err_out;
7166*4882a593Smuzhiyun 	}
7167*4882a593Smuzhiyun 
7168*4882a593Smuzhiyun 	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7169*4882a593Smuzhiyun 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7170*4882a593Smuzhiyun 
7171*4882a593Smuzhiyun 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7172*4882a593Smuzhiyun 
7173*4882a593Smuzhiyun 	/*  Enable select interrupts */
7174*4882a593Smuzhiyun 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7175*4882a593Smuzhiyun 	if (sp->config.intr_type != INTA) {
7176*4882a593Smuzhiyun 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7177*4882a593Smuzhiyun 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7178*4882a593Smuzhiyun 	} else {
7179*4882a593Smuzhiyun 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7180*4882a593Smuzhiyun 		interruptible |= TX_PIC_INTR;
7181*4882a593Smuzhiyun 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7182*4882a593Smuzhiyun 	}
7183*4882a593Smuzhiyun 
7184*4882a593Smuzhiyun 	return 0;
7185*4882a593Smuzhiyun 
7186*4882a593Smuzhiyun err_out:
7187*4882a593Smuzhiyun 	if (config->napi) {
7188*4882a593Smuzhiyun 		if (config->intr_type == MSI_X) {
7189*4882a593Smuzhiyun 			for (i = 0; i < sp->config.rx_ring_num; i++)
7190*4882a593Smuzhiyun 				napi_disable(&sp->mac_control.rings[i].napi);
7191*4882a593Smuzhiyun 		} else {
7192*4882a593Smuzhiyun 			napi_disable(&sp->napi);
7193*4882a593Smuzhiyun 		}
7194*4882a593Smuzhiyun 	}
7195*4882a593Smuzhiyun err_fill_buff:
7196*4882a593Smuzhiyun 	s2io_reset(sp);
7197*4882a593Smuzhiyun 	free_rx_buffers(sp);
7198*4882a593Smuzhiyun 	return ret;
7199*4882a593Smuzhiyun }
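
/*
 * Happy-path bring-up order used by s2io_card_up() (a summary of the code
 * above, not an alternative implementation):
 *
 *	init_nic(sp);				// program H/W registers
 *	fill_rx_buffers(sp, ring, 1);		// per ring: pre-post RxDs
 *	napi_enable(...);			// per ring, or one for INTA
 *	s2io_set_multicast(dev);		// restore Rx filter mode
 *	start_nic(sp);				// enable Rx traffic
 *	s2io_add_isr(sp);			// hook MSI-X or INTA handlers
 *	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 *	en_dis_able_nic_intrs(...);		// finally unmask interrupts
 */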
7200*4882a593Smuzhiyun 
7201*4882a593Smuzhiyun /**
7202*4882a593Smuzhiyun  * s2io_restart_nic - Resets the NIC.
7203*4882a593Smuzhiyun  * @work : work struct containing a pointer to the device private structure
7204*4882a593Smuzhiyun  * Description:
7205*4882a593Smuzhiyun  * This function is scheduled to be run by the s2io_tx_watchdog
7206*4882a593Smuzhiyun  * function after 0.5 secs to reset the NIC. The idea is to reduce
7207*4882a593Smuzhiyun  * the run time of the watch dog routine which is run holding a
7208*4882a593Smuzhiyun  * spin lock.
7209*4882a593Smuzhiyun  */
7210*4882a593Smuzhiyun 
7211*4882a593Smuzhiyun static void s2io_restart_nic(struct work_struct *work)
7212*4882a593Smuzhiyun {
7213*4882a593Smuzhiyun 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7214*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
7215*4882a593Smuzhiyun 
7216*4882a593Smuzhiyun 	rtnl_lock();
7217*4882a593Smuzhiyun 
7218*4882a593Smuzhiyun 	if (!netif_running(dev))
7219*4882a593Smuzhiyun 		goto out_unlock;
7220*4882a593Smuzhiyun 
7221*4882a593Smuzhiyun 	s2io_card_down(sp);
7222*4882a593Smuzhiyun 	if (s2io_card_up(sp)) {
7223*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7224*4882a593Smuzhiyun 	}
7225*4882a593Smuzhiyun 	s2io_wake_all_tx_queue(sp);
7226*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7227*4882a593Smuzhiyun out_unlock:
7228*4882a593Smuzhiyun 	rtnl_unlock();
7229*4882a593Smuzhiyun }
7230*4882a593Smuzhiyun 
7231*4882a593Smuzhiyun /**
7232*4882a593Smuzhiyun  *  s2io_tx_watchdog - Watchdog for transmit side.
7233*4882a593Smuzhiyun  *  @dev : Pointer to net device structure
7234*4882a593Smuzhiyun  *  @txqueue: index of the hanging queue
7235*4882a593Smuzhiyun  *  Description:
7236*4882a593Smuzhiyun  *  This function is triggered if the Tx Queue is stopped
7237*4882a593Smuzhiyun  *  for a pre-defined amount of time when the Interface is still up.
7238*4882a593Smuzhiyun  *  If the Interface is jammed in such a situation, the hardware is
7239*4882a593Smuzhiyun  *  reset (by s2io_close) and restarted again (by s2io_open) to
7240*4882a593Smuzhiyun  *  overcome any problem that might have been caused in the hardware.
7241*4882a593Smuzhiyun  *  Return value:
7242*4882a593Smuzhiyun  *  void
7243*4882a593Smuzhiyun  */
7244*4882a593Smuzhiyun 
7245*4882a593Smuzhiyun static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7246*4882a593Smuzhiyun {
7247*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
7248*4882a593Smuzhiyun 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7249*4882a593Smuzhiyun 
7250*4882a593Smuzhiyun 	if (netif_carrier_ok(dev)) {
7251*4882a593Smuzhiyun 		swstats->watchdog_timer_cnt++;
7252*4882a593Smuzhiyun 		schedule_work(&sp->rst_timer_task);
7253*4882a593Smuzhiyun 		swstats->soft_reset_cnt++;
7254*4882a593Smuzhiyun 	}
7255*4882a593Smuzhiyun }
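
/*
 * Context sketch (an assumption about the netdev core, not defined here):
 * the core arms a per-device timer from dev->watchdog_timeo; if a Tx queue
 * stays stopped past that deadline while the interface is up, it invokes
 * ndo_tx_timeout(), i.e. this handler.  The heavy reset work is deferred to
 * process context:
 *
 *	schedule_work(&sp->rst_timer_task);	// runs s2io_restart_nic()
 */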
7256*4882a593Smuzhiyun 
7257*4882a593Smuzhiyun /**
7258*4882a593Smuzhiyun  *   rx_osm_handler - To perform some OS related operations on SKB.
7259*4882a593Smuzhiyun  *   @ring_data : the ring from which this RxD was extracted.
7260*4882a593Smuzhiyun  *   @rxdp: descriptor
7261*4882a593Smuzhiyun  *   Description:
7262*4882a593Smuzhiyun  *   This function is called by the Rx interrupt service routine to perform
7263*4882a593Smuzhiyun  *   some OS related operations on the SKB before passing it to the upper
7264*4882a593Smuzhiyun  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7265*4882a593Smuzhiyun  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7266*4882a593Smuzhiyun  *   to the upper layer. If the checksum is wrong, it increments the Rx
7267*4882a593Smuzhiyun  *   packet error count, frees the SKB and returns error.
7268*4882a593Smuzhiyun  *   Return value:
7269*4882a593Smuzhiyun  *   SUCCESS on success and -1 on failure.
7270*4882a593Smuzhiyun  */
7271*4882a593Smuzhiyun static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t *rxdp)
7272*4882a593Smuzhiyun {
7273*4882a593Smuzhiyun 	struct s2io_nic *sp = ring_data->nic;
7274*4882a593Smuzhiyun 	struct net_device *dev = ring_data->dev;
7275*4882a593Smuzhiyun 	struct sk_buff *skb = (struct sk_buff *)
7276*4882a593Smuzhiyun 		((unsigned long)rxdp->Host_Control);
7277*4882a593Smuzhiyun 	int ring_no = ring_data->ring_no;
7278*4882a593Smuzhiyun 	u16 l3_csum, l4_csum;
7279*4882a593Smuzhiyun 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7280*4882a593Smuzhiyun 	struct lro *lro;
7281*4882a593Smuzhiyun 	u8 err_mask;
7282*4882a593Smuzhiyun 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7283*4882a593Smuzhiyun 
7284*4882a593Smuzhiyun 	skb->dev = dev;
7285*4882a593Smuzhiyun 
7286*4882a593Smuzhiyun 	if (err) {
7287*4882a593Smuzhiyun 		/* Check for parity error */
7288*4882a593Smuzhiyun 		if (err & 0x1)
7289*4882a593Smuzhiyun 			swstats->parity_err_cnt++;
7290*4882a593Smuzhiyun 
7291*4882a593Smuzhiyun 		err_mask = err >> 48;
7292*4882a593Smuzhiyun 		switch (err_mask) {
7293*4882a593Smuzhiyun 		case 1:
7294*4882a593Smuzhiyun 			swstats->rx_parity_err_cnt++;
7295*4882a593Smuzhiyun 			break;
7296*4882a593Smuzhiyun 
7297*4882a593Smuzhiyun 		case 2:
7298*4882a593Smuzhiyun 			swstats->rx_abort_cnt++;
7299*4882a593Smuzhiyun 			break;
7300*4882a593Smuzhiyun 
7301*4882a593Smuzhiyun 		case 3:
7302*4882a593Smuzhiyun 			swstats->rx_parity_abort_cnt++;
7303*4882a593Smuzhiyun 			break;
7304*4882a593Smuzhiyun 
7305*4882a593Smuzhiyun 		case 4:
7306*4882a593Smuzhiyun 			swstats->rx_rda_fail_cnt++;
7307*4882a593Smuzhiyun 			break;
7308*4882a593Smuzhiyun 
7309*4882a593Smuzhiyun 		case 5:
7310*4882a593Smuzhiyun 			swstats->rx_unkn_prot_cnt++;
7311*4882a593Smuzhiyun 			break;
7312*4882a593Smuzhiyun 
7313*4882a593Smuzhiyun 		case 6:
7314*4882a593Smuzhiyun 			swstats->rx_fcs_err_cnt++;
7315*4882a593Smuzhiyun 			break;
7316*4882a593Smuzhiyun 
7317*4882a593Smuzhiyun 		case 7:
7318*4882a593Smuzhiyun 			swstats->rx_buf_size_err_cnt++;
7319*4882a593Smuzhiyun 			break;
7320*4882a593Smuzhiyun 
7321*4882a593Smuzhiyun 		case 8:
7322*4882a593Smuzhiyun 			swstats->rx_rxd_corrupt_cnt++;
7323*4882a593Smuzhiyun 			break;
7324*4882a593Smuzhiyun 
7325*4882a593Smuzhiyun 		case 15:
7326*4882a593Smuzhiyun 			swstats->rx_unkn_err_cnt++;
7327*4882a593Smuzhiyun 			break;
7328*4882a593Smuzhiyun 		}
7329*4882a593Smuzhiyun 		/*
7330*4882a593Smuzhiyun 		 * Drop the packet if the transfer code is bad. The exception is
7331*4882a593Smuzhiyun 		 * 0x5, which could be due to an unsupported IPv6 extension header.
7332*4882a593Smuzhiyun 		 * In this case, we let the stack handle the packet.
7333*4882a593Smuzhiyun 		 * Note that in this case, since the checksum will be incorrect,
7334*4882a593Smuzhiyun 		 * the stack will validate it.
7335*4882a593Smuzhiyun 		 */
7336*4882a593Smuzhiyun 		if (err_mask != 0x5) {
7337*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7338*4882a593Smuzhiyun 				  dev->name, err_mask);
7339*4882a593Smuzhiyun 			dev->stats.rx_crc_errors++;
7340*4882a593Smuzhiyun 			swstats->mem_freed
7341*4882a593Smuzhiyun 				+= skb->truesize;
7342*4882a593Smuzhiyun 			dev_kfree_skb(skb);
7343*4882a593Smuzhiyun 			ring_data->rx_bufs_left -= 1;
7344*4882a593Smuzhiyun 			rxdp->Host_Control = 0;
7345*4882a593Smuzhiyun 			return 0;
7346*4882a593Smuzhiyun 		}
7347*4882a593Smuzhiyun 	}
7348*4882a593Smuzhiyun 
7349*4882a593Smuzhiyun 	rxdp->Host_Control = 0;
7350*4882a593Smuzhiyun 	if (sp->rxd_mode == RXD_MODE_1) {
7351*4882a593Smuzhiyun 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7352*4882a593Smuzhiyun 
7353*4882a593Smuzhiyun 		skb_put(skb, len);
7354*4882a593Smuzhiyun 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7355*4882a593Smuzhiyun 		int get_block = ring_data->rx_curr_get_info.block_index;
7356*4882a593Smuzhiyun 		int get_off = ring_data->rx_curr_get_info.offset;
7357*4882a593Smuzhiyun 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7358*4882a593Smuzhiyun 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7359*4882a593Smuzhiyun 		unsigned char *buff = skb_push(skb, buf0_len);
7360*4882a593Smuzhiyun 
7361*4882a593Smuzhiyun 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7362*4882a593Smuzhiyun 		memcpy(buff, ba->ba_0, buf0_len);
7363*4882a593Smuzhiyun 		skb_put(skb, buf2_len);
7364*4882a593Smuzhiyun 	}
7365*4882a593Smuzhiyun 
7366*4882a593Smuzhiyun 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7367*4882a593Smuzhiyun 	    ((!ring_data->lro) ||
7368*4882a593Smuzhiyun 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7369*4882a593Smuzhiyun 	    (dev->features & NETIF_F_RXCSUM)) {
7370*4882a593Smuzhiyun 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7371*4882a593Smuzhiyun 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7372*4882a593Smuzhiyun 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7373*4882a593Smuzhiyun 			/*
7374*4882a593Smuzhiyun 			 * NIC verifies if the Checksum of the received
7375*4882a593Smuzhiyun 			 * frame is Ok or not and accordingly returns
7376*4882a593Smuzhiyun 			 * a flag in the RxD.
7377*4882a593Smuzhiyun 			 */
7378*4882a593Smuzhiyun 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7379*4882a593Smuzhiyun 			if (ring_data->lro) {
7380*4882a593Smuzhiyun 				u32 tcp_len = 0;
7381*4882a593Smuzhiyun 				u8 *tcp;
7382*4882a593Smuzhiyun 				int ret = 0;
7383*4882a593Smuzhiyun 
7384*4882a593Smuzhiyun 				ret = s2io_club_tcp_session(ring_data,
7385*4882a593Smuzhiyun 							    skb->data, &tcp,
7386*4882a593Smuzhiyun 							    &tcp_len, &lro,
7387*4882a593Smuzhiyun 							    rxdp, sp);
7388*4882a593Smuzhiyun 				switch (ret) {
7389*4882a593Smuzhiyun 				case 3: /* Begin anew */
7390*4882a593Smuzhiyun 					lro->parent = skb;
7391*4882a593Smuzhiyun 					goto aggregate;
7392*4882a593Smuzhiyun 				case 1: /* Aggregate */
7393*4882a593Smuzhiyun 					lro_append_pkt(sp, lro, skb, tcp_len);
7394*4882a593Smuzhiyun 					goto aggregate;
7395*4882a593Smuzhiyun 				case 4: /* Flush session */
7396*4882a593Smuzhiyun 					lro_append_pkt(sp, lro, skb, tcp_len);
7397*4882a593Smuzhiyun 					queue_rx_frame(lro->parent,
7398*4882a593Smuzhiyun 						       lro->vlan_tag);
7399*4882a593Smuzhiyun 					clear_lro_session(lro);
7400*4882a593Smuzhiyun 					swstats->flush_max_pkts++;
7401*4882a593Smuzhiyun 					goto aggregate;
7402*4882a593Smuzhiyun 				case 2: /* Flush both */
7403*4882a593Smuzhiyun 					lro->parent->data_len = lro->frags_len;
7404*4882a593Smuzhiyun 					swstats->sending_both++;
7405*4882a593Smuzhiyun 					queue_rx_frame(lro->parent,
7406*4882a593Smuzhiyun 						       lro->vlan_tag);
7407*4882a593Smuzhiyun 					clear_lro_session(lro);
7408*4882a593Smuzhiyun 					goto send_up;
7409*4882a593Smuzhiyun 				case 0: /* sessions exceeded */
7410*4882a593Smuzhiyun 				case -1: /* non-TCP or not L2 aggregatable */
7411*4882a593Smuzhiyun 				case 5: /*
7412*4882a593Smuzhiyun 					 * First pkt in session not
7413*4882a593Smuzhiyun 					 * L3/L4 aggregatable
7414*4882a593Smuzhiyun 					 */
7415*4882a593Smuzhiyun 					break;
7416*4882a593Smuzhiyun 				default:
7417*4882a593Smuzhiyun 					DBG_PRINT(ERR_DBG,
7418*4882a593Smuzhiyun 						  "%s: Samadhana!!\n",
7419*4882a593Smuzhiyun 						  __func__);
7420*4882a593Smuzhiyun 					BUG();
7421*4882a593Smuzhiyun 				}
7422*4882a593Smuzhiyun 			}
7423*4882a593Smuzhiyun 		} else {
7424*4882a593Smuzhiyun 			/*
7425*4882a593Smuzhiyun 			 * Packet with erroneous checksum, let the
7426*4882a593Smuzhiyun 			 * upper layers deal with it.
7427*4882a593Smuzhiyun 			 */
7428*4882a593Smuzhiyun 			skb_checksum_none_assert(skb);
7429*4882a593Smuzhiyun 		}
7430*4882a593Smuzhiyun 	} else
7431*4882a593Smuzhiyun 		skb_checksum_none_assert(skb);
7432*4882a593Smuzhiyun 
7433*4882a593Smuzhiyun 	swstats->mem_freed += skb->truesize;
7434*4882a593Smuzhiyun send_up:
7435*4882a593Smuzhiyun 	skb_record_rx_queue(skb, ring_no);
7436*4882a593Smuzhiyun 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7437*4882a593Smuzhiyun aggregate:
7438*4882a593Smuzhiyun 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7439*4882a593Smuzhiyun 	return SUCCESS;
7440*4882a593Smuzhiyun }
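
/*
 * Worked example for the transfer-code decoding above (assuming RXD_T_CODE
 * masks bits 48..51 of Control_1): for a frame the adapter aborted,
 *
 *	err      = rxdp->Control_1 & RXD_T_CODE;	// 0x0002000000000000ULL
 *	err_mask = err >> 48;				// 0x2 -> rx_abort_cnt++
 */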
7441*4882a593Smuzhiyun 
7442*4882a593Smuzhiyun /**
7443*4882a593Smuzhiyun  *  s2io_link - stops/starts the Tx queue.
7444*4882a593Smuzhiyun  *  @sp : private member of the device structure, which is a pointer to the
7445*4882a593Smuzhiyun  *  s2io_nic structure.
7446*4882a593Smuzhiyun  *  @link : indicates whether link is UP/DOWN.
7447*4882a593Smuzhiyun  *  Description:
7448*4882a593Smuzhiyun  *  This function stops/starts the Tx queue depending on whether the link
7449*4882a593Smuzhiyun  *  status of the NIC is down or up. This is called by the Alarm
7450*4882a593Smuzhiyun  *  interrupt handler whenever a link change interrupt comes up.
7451*4882a593Smuzhiyun  *  Return value:
7452*4882a593Smuzhiyun  *  void.
7453*4882a593Smuzhiyun  */
7454*4882a593Smuzhiyun 
7455*4882a593Smuzhiyun static void s2io_link(struct s2io_nic *sp, int link)
7456*4882a593Smuzhiyun {
7457*4882a593Smuzhiyun 	struct net_device *dev = sp->dev;
7458*4882a593Smuzhiyun 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7459*4882a593Smuzhiyun 
7460*4882a593Smuzhiyun 	if (link != sp->last_link_state) {
7461*4882a593Smuzhiyun 		init_tti(sp, link);
7462*4882a593Smuzhiyun 		if (link == LINK_DOWN) {
7463*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7464*4882a593Smuzhiyun 			s2io_stop_all_tx_queue(sp);
7465*4882a593Smuzhiyun 			netif_carrier_off(dev);
7466*4882a593Smuzhiyun 			if (swstats->link_up_cnt)
7467*4882a593Smuzhiyun 				swstats->link_up_time =
7468*4882a593Smuzhiyun 					jiffies - sp->start_time;
7469*4882a593Smuzhiyun 			swstats->link_down_cnt++;
7470*4882a593Smuzhiyun 		} else {
7471*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7472*4882a593Smuzhiyun 			if (swstats->link_down_cnt)
7473*4882a593Smuzhiyun 				swstats->link_down_time =
7474*4882a593Smuzhiyun 					jiffies - sp->start_time;
7475*4882a593Smuzhiyun 			swstats->link_up_cnt++;
7476*4882a593Smuzhiyun 			netif_carrier_on(dev);
7477*4882a593Smuzhiyun 			s2io_wake_all_tx_queue(sp);
7478*4882a593Smuzhiyun 		}
7479*4882a593Smuzhiyun 	}
7480*4882a593Smuzhiyun 	sp->last_link_state = link;
7481*4882a593Smuzhiyun 	sp->start_time = jiffies;
7482*4882a593Smuzhiyun }
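
/*
 * Timekeeping example: start_time is refreshed on every transition, so the
 * value stored in link_up_time / link_down_time is how long the previous
 * state lasted, in jiffies.  E.g. with HZ == 250, a stored value of 500
 * means the link spent 500 / HZ == 2 seconds in the prior state.
 */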
7483*4882a593Smuzhiyun 
7484*4882a593Smuzhiyun /**
7485*4882a593Smuzhiyun  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7486*4882a593Smuzhiyun  *  @sp : private member of the device structure, which is a pointer to the
7487*4882a593Smuzhiyun  *  s2io_nic structure.
7488*4882a593Smuzhiyun  *  Description:
7489*4882a593Smuzhiyun  *  This function initializes a few of the PCI and PCI-X configuration registers
7490*4882a593Smuzhiyun  *  with recommended values.
7491*4882a593Smuzhiyun  *  Return value:
7492*4882a593Smuzhiyun  *  void
7493*4882a593Smuzhiyun  */
7494*4882a593Smuzhiyun 
7495*4882a593Smuzhiyun static void s2io_init_pci(struct s2io_nic *sp)
7496*4882a593Smuzhiyun {
7497*4882a593Smuzhiyun 	u16 pci_cmd = 0, pcix_cmd = 0;
7498*4882a593Smuzhiyun 
7499*4882a593Smuzhiyun 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7500*4882a593Smuzhiyun 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7501*4882a593Smuzhiyun 			     &(pcix_cmd));
7502*4882a593Smuzhiyun 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7503*4882a593Smuzhiyun 			      (pcix_cmd | 1));
7504*4882a593Smuzhiyun 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7505*4882a593Smuzhiyun 			     &(pcix_cmd));
7506*4882a593Smuzhiyun 
7507*4882a593Smuzhiyun 	/* Set the PErr Response bit in PCI command register. */
7508*4882a593Smuzhiyun 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7509*4882a593Smuzhiyun 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7510*4882a593Smuzhiyun 			      (pci_cmd | PCI_COMMAND_PARITY));
7511*4882a593Smuzhiyun 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512*4882a593Smuzhiyun }
7513*4882a593Smuzhiyun 
7514*4882a593Smuzhiyun static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7515*4882a593Smuzhiyun 			    u8 *dev_multiq)
7516*4882a593Smuzhiyun {
7517*4882a593Smuzhiyun 	int i;
7518*4882a593Smuzhiyun 
7519*4882a593Smuzhiyun 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7520*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7521*4882a593Smuzhiyun 			  "(%d) not supported\n", tx_fifo_num);
7522*4882a593Smuzhiyun 
7523*4882a593Smuzhiyun 		if (tx_fifo_num < 1)
7524*4882a593Smuzhiyun 			tx_fifo_num = 1;
7525*4882a593Smuzhiyun 		else
7526*4882a593Smuzhiyun 			tx_fifo_num = MAX_TX_FIFOS;
7527*4882a593Smuzhiyun 
7528*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7529*4882a593Smuzhiyun 	}
7530*4882a593Smuzhiyun 
7531*4882a593Smuzhiyun 	if (multiq)
7532*4882a593Smuzhiyun 		*dev_multiq = multiq;
7533*4882a593Smuzhiyun 
7534*4882a593Smuzhiyun 	if (tx_steering_type && (1 == tx_fifo_num)) {
7535*4882a593Smuzhiyun 		if (tx_steering_type != TX_DEFAULT_STEERING)
7536*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG,
7537*4882a593Smuzhiyun 				  "Tx steering is not supported with "
7538*4882a593Smuzhiyun 				  "one fifo. Disabling Tx steering.\n");
7539*4882a593Smuzhiyun 		tx_steering_type = NO_STEERING;
7540*4882a593Smuzhiyun 	}
7541*4882a593Smuzhiyun 
7542*4882a593Smuzhiyun 	if ((tx_steering_type < NO_STEERING) ||
7543*4882a593Smuzhiyun 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7544*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
7545*4882a593Smuzhiyun 			  "Requested transmit steering not supported\n");
7546*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7547*4882a593Smuzhiyun 		tx_steering_type = NO_STEERING;
7548*4882a593Smuzhiyun 	}
7549*4882a593Smuzhiyun 
7550*4882a593Smuzhiyun 	if (rx_ring_num > MAX_RX_RINGS) {
7551*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
7552*4882a593Smuzhiyun 			  "Requested number of rx rings not supported\n");
7553*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7554*4882a593Smuzhiyun 			  MAX_RX_RINGS);
7555*4882a593Smuzhiyun 		rx_ring_num = MAX_RX_RINGS;
7556*4882a593Smuzhiyun 	}
7557*4882a593Smuzhiyun 
7558*4882a593Smuzhiyun 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7559*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7560*4882a593Smuzhiyun 			  "Defaulting to INTA\n");
7561*4882a593Smuzhiyun 		*dev_intr_type = INTA;
7562*4882a593Smuzhiyun 	}
7563*4882a593Smuzhiyun 
7564*4882a593Smuzhiyun 	if ((*dev_intr_type == MSI_X) &&
7565*4882a593Smuzhiyun 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7566*4882a593Smuzhiyun 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7567*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7568*4882a593Smuzhiyun 			  "Defaulting to INTA\n");
7569*4882a593Smuzhiyun 		*dev_intr_type = INTA;
7570*4882a593Smuzhiyun 	}
7571*4882a593Smuzhiyun 
7572*4882a593Smuzhiyun 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7573*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7574*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7575*4882a593Smuzhiyun 		rx_ring_mode = 1;
7576*4882a593Smuzhiyun 	}
7577*4882a593Smuzhiyun 
7578*4882a593Smuzhiyun 	for (i = 0; i < MAX_RX_RINGS; i++)
7579*4882a593Smuzhiyun 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7580*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7581*4882a593Smuzhiyun 				  "supported\nDefaulting to %d\n",
7582*4882a593Smuzhiyun 				  MAX_RX_BLOCKS_PER_RING);
7583*4882a593Smuzhiyun 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7584*4882a593Smuzhiyun 		}
7585*4882a593Smuzhiyun 
7586*4882a593Smuzhiyun 	return SUCCESS;
7587*4882a593Smuzhiyun }
7588*4882a593Smuzhiyun 
7589*4882a593Smuzhiyun /**
7590*4882a593Smuzhiyun  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7591*4882a593Smuzhiyun  * @nic: device private variable
7592*4882a593Smuzhiyun  * @ds_codepoint: data
7593*4882a593Smuzhiyun  * @ring: ring index
7594*4882a593Smuzhiyun  * Description: The function configures the receive steering to
7595*4882a593Smuzhiyun  * desired receive ring.
7596*4882a593Smuzhiyun  * Return Value:  SUCCESS on success and
7597*4882a593Smuzhiyun  * '-1' on failure (endian settings incorrect).
7598*4882a593Smuzhiyun  */
7599*4882a593Smuzhiyun static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7600*4882a593Smuzhiyun {
7601*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7602*4882a593Smuzhiyun 	register u64 val64 = 0;
7603*4882a593Smuzhiyun 
7604*4882a593Smuzhiyun 	if (ds_codepoint > 63)
7605*4882a593Smuzhiyun 		return FAILURE;
7606*4882a593Smuzhiyun 
7607*4882a593Smuzhiyun 	val64 = RTS_DS_MEM_DATA(ring);
7608*4882a593Smuzhiyun 	writeq(val64, &bar0->rts_ds_mem_data);
7609*4882a593Smuzhiyun 
7610*4882a593Smuzhiyun 	val64 = RTS_DS_MEM_CTRL_WE |
7611*4882a593Smuzhiyun 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7612*4882a593Smuzhiyun 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7613*4882a593Smuzhiyun 
7614*4882a593Smuzhiyun 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7615*4882a593Smuzhiyun 
7616*4882a593Smuzhiyun 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7617*4882a593Smuzhiyun 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7618*4882a593Smuzhiyun 				     S2IO_BIT_RESET);
7619*4882a593Smuzhiyun }
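
/*
 * Usage sketch (hypothetical caller, not present in this file): steer
 * DSCP 46 (Expedited Forwarding) to Rx ring 1.  ds_codepoint is the 6-bit
 * DSCP value, hence the 0..63 bound checked above:
 *
 *	if (rts_ds_steer(nic, 46, 1) != SUCCESS)
 *		DBG_PRINT(ERR_DBG, "DS steering update failed\n");
 */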
7620*4882a593Smuzhiyun 
7621*4882a593Smuzhiyun static const struct net_device_ops s2io_netdev_ops = {
7622*4882a593Smuzhiyun 	.ndo_open	        = s2io_open,
7623*4882a593Smuzhiyun 	.ndo_stop	        = s2io_close,
7624*4882a593Smuzhiyun 	.ndo_get_stats	        = s2io_get_stats,
7625*4882a593Smuzhiyun 	.ndo_start_xmit    	= s2io_xmit,
7626*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
7627*4882a593Smuzhiyun 	.ndo_set_rx_mode	= s2io_set_multicast,
7628*4882a593Smuzhiyun 	.ndo_do_ioctl	   	= s2io_ioctl,
7629*4882a593Smuzhiyun 	.ndo_set_mac_address    = s2io_set_mac_addr,
7630*4882a593Smuzhiyun 	.ndo_change_mtu	   	= s2io_change_mtu,
7631*4882a593Smuzhiyun 	.ndo_set_features	= s2io_set_features,
7632*4882a593Smuzhiyun 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7633*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
7634*4882a593Smuzhiyun 	.ndo_poll_controller    = s2io_netpoll,
7635*4882a593Smuzhiyun #endif
7636*4882a593Smuzhiyun };
7637*4882a593Smuzhiyun 
7638*4882a593Smuzhiyun /**
7639*4882a593Smuzhiyun  *  s2io_init_nic - Initialization of the adapter.
7640*4882a593Smuzhiyun  *  @pdev : structure containing the PCI related information of the device.
7641*4882a593Smuzhiyun  *  @pre: the matched entry in s2io_tbl, the list of supported PCI devices.
7642*4882a593Smuzhiyun  *  Description:
7643*4882a593Smuzhiyun  *  The function initializes an adapter identified by the pci_dev structure.
7644*4882a593Smuzhiyun  *  All OS related initialization, including memory and device structure
7645*4882a593Smuzhiyun  *  and initialization of the device private variable, is done. Also the swapper
7646*4882a593Smuzhiyun  *  control register is initialized to enable read and write into the I/O
7647*4882a593Smuzhiyun  *  registers of the device.
7648*4882a593Smuzhiyun  *  Return value:
7649*4882a593Smuzhiyun  *  returns 0 on success and negative on failure.
7650*4882a593Smuzhiyun  */
7651*4882a593Smuzhiyun 
7652*4882a593Smuzhiyun static int
7653*4882a593Smuzhiyun s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7654*4882a593Smuzhiyun {
7655*4882a593Smuzhiyun 	struct s2io_nic *sp;
7656*4882a593Smuzhiyun 	struct net_device *dev;
7657*4882a593Smuzhiyun 	int i, j, ret;
7658*4882a593Smuzhiyun 	int dma_flag = false;
7659*4882a593Smuzhiyun 	u32 mac_up, mac_down;
7660*4882a593Smuzhiyun 	u64 val64 = 0, tmp64 = 0;
7661*4882a593Smuzhiyun 	struct XENA_dev_config __iomem *bar0 = NULL;
7662*4882a593Smuzhiyun 	u16 subid;
7663*4882a593Smuzhiyun 	struct config_param *config;
7664*4882a593Smuzhiyun 	struct mac_info *mac_control;
7665*4882a593Smuzhiyun 	int mode;
7666*4882a593Smuzhiyun 	u8 dev_intr_type = intr_type;
7667*4882a593Smuzhiyun 	u8 dev_multiq = 0;
7668*4882a593Smuzhiyun 
7669*4882a593Smuzhiyun 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7670*4882a593Smuzhiyun 	if (ret)
7671*4882a593Smuzhiyun 		return ret;
7672*4882a593Smuzhiyun 
7673*4882a593Smuzhiyun 	ret = pci_enable_device(pdev);
7674*4882a593Smuzhiyun 	if (ret) {
7675*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
7676*4882a593Smuzhiyun 			  "%s: pci_enable_device failed\n", __func__);
7677*4882a593Smuzhiyun 		return ret;
7678*4882a593Smuzhiyun 	}
7679*4882a593Smuzhiyun 
7680*4882a593Smuzhiyun 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7681*4882a593Smuzhiyun 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7682*4882a593Smuzhiyun 		dma_flag = true;
7683*4882a593Smuzhiyun 		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7684*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG,
7685*4882a593Smuzhiyun 				  "Unable to obtain 64bit DMA for coherent allocations\n");
7686*4882a593Smuzhiyun 			pci_disable_device(pdev);
7687*4882a593Smuzhiyun 			return -ENOMEM;
7688*4882a593Smuzhiyun 		}
7689*4882a593Smuzhiyun 	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7690*4882a593Smuzhiyun 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7691*4882a593Smuzhiyun 	} else {
7692*4882a593Smuzhiyun 		pci_disable_device(pdev);
7693*4882a593Smuzhiyun 		return -ENOMEM;
7694*4882a593Smuzhiyun 	}
7695*4882a593Smuzhiyun 	ret = pci_request_regions(pdev, s2io_driver_name);
7696*4882a593Smuzhiyun 	if (ret) {
7697*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7698*4882a593Smuzhiyun 			  __func__, ret);
7699*4882a593Smuzhiyun 		pci_disable_device(pdev);
7700*4882a593Smuzhiyun 		return -ENODEV;
7701*4882a593Smuzhiyun 	}
7702*4882a593Smuzhiyun 	if (dev_multiq)
7703*4882a593Smuzhiyun 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7704*4882a593Smuzhiyun 	else
7705*4882a593Smuzhiyun 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7706*4882a593Smuzhiyun 	if (dev == NULL) {
7707*4882a593Smuzhiyun 		pci_disable_device(pdev);
7708*4882a593Smuzhiyun 		pci_release_regions(pdev);
7709*4882a593Smuzhiyun 		return -ENODEV;
7710*4882a593Smuzhiyun 	}
7711*4882a593Smuzhiyun 
7712*4882a593Smuzhiyun 	pci_set_master(pdev);
7713*4882a593Smuzhiyun 	pci_set_drvdata(pdev, dev);
7714*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
7715*4882a593Smuzhiyun 
7716*4882a593Smuzhiyun 	/*  Private member variable initialized to s2io NIC structure */
7717*4882a593Smuzhiyun 	sp = netdev_priv(dev);
7718*4882a593Smuzhiyun 	sp->dev = dev;
7719*4882a593Smuzhiyun 	sp->pdev = pdev;
7720*4882a593Smuzhiyun 	sp->high_dma_flag = dma_flag;
7721*4882a593Smuzhiyun 	sp->device_enabled_once = false;
7722*4882a593Smuzhiyun 	if (rx_ring_mode == 1)
7723*4882a593Smuzhiyun 		sp->rxd_mode = RXD_MODE_1;
7724*4882a593Smuzhiyun 	if (rx_ring_mode == 2)
7725*4882a593Smuzhiyun 		sp->rxd_mode = RXD_MODE_3B;
7726*4882a593Smuzhiyun 
7727*4882a593Smuzhiyun 	sp->config.intr_type = dev_intr_type;
7728*4882a593Smuzhiyun 
7729*4882a593Smuzhiyun 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7730*4882a593Smuzhiyun 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7731*4882a593Smuzhiyun 		sp->device_type = XFRAME_II_DEVICE;
7732*4882a593Smuzhiyun 	else
7733*4882a593Smuzhiyun 		sp->device_type = XFRAME_I_DEVICE;
7734*4882a593Smuzhiyun 
7735*4882a593Smuzhiyun 
7736*4882a593Smuzhiyun 	/* Initialize some PCI/PCI-X fields of the NIC. */
7737*4882a593Smuzhiyun 	s2io_init_pci(sp);
7738*4882a593Smuzhiyun 
7739*4882a593Smuzhiyun 	/*
7740*4882a593Smuzhiyun 	 * Setting the device configuration parameters.
7741*4882a593Smuzhiyun 	 * Most of these parameters can be specified by the user during
7742*4882a593Smuzhiyun 	 * module insertion as they are module loadable parameters. If
7743*4882a593Smuzhiyun 	 * these parameters are not specified during load time, they
7744*4882a593Smuzhiyun 	 * are initialized with default values.
7745*4882a593Smuzhiyun 	 */
7746*4882a593Smuzhiyun 	config = &sp->config;
7747*4882a593Smuzhiyun 	mac_control = &sp->mac_control;
7748*4882a593Smuzhiyun 
7749*4882a593Smuzhiyun 	config->napi = napi;
7750*4882a593Smuzhiyun 	config->tx_steering_type = tx_steering_type;
7751*4882a593Smuzhiyun 
7752*4882a593Smuzhiyun 	/* Tx side parameters. */
7753*4882a593Smuzhiyun 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7754*4882a593Smuzhiyun 		config->tx_fifo_num = MAX_TX_FIFOS;
7755*4882a593Smuzhiyun 	else
7756*4882a593Smuzhiyun 		config->tx_fifo_num = tx_fifo_num;
7757*4882a593Smuzhiyun 
7758*4882a593Smuzhiyun 	/* Initialize the fifos used for tx steering */
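	/*
	 * With fewer than five FIFOs, the last FIFO doubles as the UDP
	 * FIFO and the last TCP FIFO also serves "other" traffic; with
	 * five or more, FIFO_UDP_MAX_NUM UDP FIFOs and FIFO_OTHER_MAX_NUM
	 * "other" FIFOs are carved off the end and the rest carry TCP.
	 */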
7759*4882a593Smuzhiyun 	if (config->tx_fifo_num < 5) {
7760*4882a593Smuzhiyun 		if (config->tx_fifo_num  == 1)
7761*4882a593Smuzhiyun 			sp->total_tcp_fifos = 1;
7762*4882a593Smuzhiyun 		else
7763*4882a593Smuzhiyun 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7764*4882a593Smuzhiyun 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7765*4882a593Smuzhiyun 		sp->total_udp_fifos = 1;
7766*4882a593Smuzhiyun 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7767*4882a593Smuzhiyun 	} else {
7768*4882a593Smuzhiyun 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7769*4882a593Smuzhiyun 				       FIFO_OTHER_MAX_NUM);
7770*4882a593Smuzhiyun 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7771*4882a593Smuzhiyun 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7772*4882a593Smuzhiyun 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7773*4882a593Smuzhiyun 	}
7774*4882a593Smuzhiyun 
7775*4882a593Smuzhiyun 	config->multiq = dev_multiq;
7776*4882a593Smuzhiyun 	for (i = 0; i < config->tx_fifo_num; i++) {
7777*4882a593Smuzhiyun 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7778*4882a593Smuzhiyun 
7779*4882a593Smuzhiyun 		tx_cfg->fifo_len = tx_fifo_len[i];
7780*4882a593Smuzhiyun 		tx_cfg->fifo_priority = i;
7781*4882a593Smuzhiyun 	}
7782*4882a593Smuzhiyun 
7783*4882a593Smuzhiyun 	/* mapping the QoS priority to the configured fifos */
7784*4882a593Smuzhiyun 	for (i = 0; i < MAX_TX_FIFOS; i++)
7785*4882a593Smuzhiyun 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7786*4882a593Smuzhiyun 
7787*4882a593Smuzhiyun 	/* map the hashing selector table to the configured fifos */
7788*4882a593Smuzhiyun 	for (i = 0; i < config->tx_fifo_num; i++)
7789*4882a593Smuzhiyun 		sp->fifo_selector[i] = fifo_selector[i];
7790*4882a593Smuzhiyun 
7791*4882a593Smuzhiyun 
7792*4882a593Smuzhiyun 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7793*4882a593Smuzhiyun 	for (i = 0; i < config->tx_fifo_num; i++) {
7794*4882a593Smuzhiyun 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7795*4882a593Smuzhiyun 
7796*4882a593Smuzhiyun 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7797*4882a593Smuzhiyun 		if (tx_cfg->fifo_len < 65) {
7798*4882a593Smuzhiyun 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7799*4882a593Smuzhiyun 			break;
7800*4882a593Smuzhiyun 		}
7801*4882a593Smuzhiyun 	}
7802*4882a593Smuzhiyun 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7803*4882a593Smuzhiyun 	config->max_txds = MAX_SKB_FRAGS + 2;
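	/*
	 * e.g. with a typical MAX_SKB_FRAGS of 17 this allows up to 19
	 * descriptors per packet (assumed value; MAX_SKB_FRAGS depends
	 * on the kernel configuration).
	 */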
7804*4882a593Smuzhiyun 
7805*4882a593Smuzhiyun 	/* Rx side parameters. */
7806*4882a593Smuzhiyun 	config->rx_ring_num = rx_ring_num;
7807*4882a593Smuzhiyun 	for (i = 0; i < config->rx_ring_num; i++) {
7808*4882a593Smuzhiyun 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7809*4882a593Smuzhiyun 		struct ring_info *ring = &mac_control->rings[i];
7810*4882a593Smuzhiyun 
7811*4882a593Smuzhiyun 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7812*4882a593Smuzhiyun 		rx_cfg->ring_priority = i;
7813*4882a593Smuzhiyun 		ring->rx_bufs_left = 0;
7814*4882a593Smuzhiyun 		ring->rxd_mode = sp->rxd_mode;
7815*4882a593Smuzhiyun 		ring->rxd_count = rxd_count[sp->rxd_mode];
7816*4882a593Smuzhiyun 		ring->pdev = sp->pdev;
7817*4882a593Smuzhiyun 		ring->dev = sp->dev;
7818*4882a593Smuzhiyun 	}
7819*4882a593Smuzhiyun 
7820*4882a593Smuzhiyun 	for (i = 0; i < rx_ring_num; i++) {
7821*4882a593Smuzhiyun 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7822*4882a593Smuzhiyun 
7823*4882a593Smuzhiyun 		rx_cfg->ring_org = RING_ORG_BUFF1;
7824*4882a593Smuzhiyun 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7825*4882a593Smuzhiyun 	}
7826*4882a593Smuzhiyun 
7827*4882a593Smuzhiyun 	/*  Setting Mac Control parameters */
7828*4882a593Smuzhiyun 	mac_control->rmac_pause_time = rmac_pause_time;
7829*4882a593Smuzhiyun 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7830*4882a593Smuzhiyun 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7831*4882a593Smuzhiyun 
7832*4882a593Smuzhiyun 
7833*4882a593Smuzhiyun 	/*  initialize the shared memory used by the NIC and the host */
7834*4882a593Smuzhiyun 	if (init_shared_mem(sp)) {
7835*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7836*4882a593Smuzhiyun 		ret = -ENOMEM;
7837*4882a593Smuzhiyun 		goto mem_alloc_failed;
7838*4882a593Smuzhiyun 	}
7839*4882a593Smuzhiyun 
7840*4882a593Smuzhiyun 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7841*4882a593Smuzhiyun 	if (!sp->bar0) {
7842*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7843*4882a593Smuzhiyun 			  dev->name);
7844*4882a593Smuzhiyun 		ret = -ENOMEM;
7845*4882a593Smuzhiyun 		goto bar0_remap_failed;
7846*4882a593Smuzhiyun 	}
7847*4882a593Smuzhiyun 
7848*4882a593Smuzhiyun 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7849*4882a593Smuzhiyun 	if (!sp->bar1) {
7850*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7851*4882a593Smuzhiyun 			  dev->name);
7852*4882a593Smuzhiyun 		ret = -ENOMEM;
7853*4882a593Smuzhiyun 		goto bar1_remap_failed;
7854*4882a593Smuzhiyun 	}
7855*4882a593Smuzhiyun 
7856*4882a593Smuzhiyun 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7857*4882a593Smuzhiyun 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7858*4882a593Smuzhiyun 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7859*4882a593Smuzhiyun 	}
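	/*
	 * Each FIFO thus owns a 128 KiB (0x20000-byte) doorbell window
	 * in BAR1, beginning at tx_FIFO_start[j].
	 */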
7860*4882a593Smuzhiyun 
7861*4882a593Smuzhiyun 	/*  Driver entry points */
7862*4882a593Smuzhiyun 	dev->netdev_ops = &s2io_netdev_ops;
7863*4882a593Smuzhiyun 	dev->ethtool_ops = &netdev_ethtool_ops;
7864*4882a593Smuzhiyun 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7865*4882a593Smuzhiyun 		NETIF_F_TSO | NETIF_F_TSO6 |
7866*4882a593Smuzhiyun 		NETIF_F_RXCSUM | NETIF_F_LRO;
7867*4882a593Smuzhiyun 	dev->features |= dev->hw_features |
7868*4882a593Smuzhiyun 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7869*4882a593Smuzhiyun 	if (sp->high_dma_flag)
7870*4882a593Smuzhiyun 		dev->features |= NETIF_F_HIGHDMA;
7871*4882a593Smuzhiyun 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7872*4882a593Smuzhiyun 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7873*4882a593Smuzhiyun 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7874*4882a593Smuzhiyun 
7875*4882a593Smuzhiyun 	pci_save_state(sp->pdev);
7876*4882a593Smuzhiyun 
7877*4882a593Smuzhiyun 	/* Setting swapper control on the NIC, for proper reset operation */
7878*4882a593Smuzhiyun 	if (s2io_set_swapper(sp)) {
7879*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7880*4882a593Smuzhiyun 			  dev->name);
7881*4882a593Smuzhiyun 		ret = -EAGAIN;
7882*4882a593Smuzhiyun 		goto set_swap_failed;
7883*4882a593Smuzhiyun 	}
7884*4882a593Smuzhiyun 
7885*4882a593Smuzhiyun 	/* Verify whether the Herc works in the slot it's placed into */
7886*4882a593Smuzhiyun 	if (sp->device_type & XFRAME_II_DEVICE) {
7887*4882a593Smuzhiyun 		mode = s2io_verify_pci_mode(sp);
7888*4882a593Smuzhiyun 		if (mode < 0) {
7889*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7890*4882a593Smuzhiyun 				  __func__);
7891*4882a593Smuzhiyun 			ret = -EBADSLT;
7892*4882a593Smuzhiyun 			goto set_swap_failed;
7893*4882a593Smuzhiyun 		}
7894*4882a593Smuzhiyun 	}
7895*4882a593Smuzhiyun 
7896*4882a593Smuzhiyun 	if (sp->config.intr_type == MSI_X) {
7897*4882a593Smuzhiyun 		sp->num_entries = config->rx_ring_num + 1;
7898*4882a593Smuzhiyun 		ret = s2io_enable_msi_x(sp);
7899*4882a593Smuzhiyun 
7900*4882a593Smuzhiyun 		if (!ret) {
7901*4882a593Smuzhiyun 			ret = s2io_test_msi(sp);
7902*4882a593Smuzhiyun 			/* rollback MSI-X, will re-enable during add_isr() */
7903*4882a593Smuzhiyun 			remove_msix_isr(sp);
7904*4882a593Smuzhiyun 		}
7905*4882a593Smuzhiyun 		if (ret) {
7907*4882a593Smuzhiyun 			DBG_PRINT(ERR_DBG,
7908*4882a593Smuzhiyun 				  "MSI-X requested but failed to enable\n");
7909*4882a593Smuzhiyun 			sp->config.intr_type = INTA;
7910*4882a593Smuzhiyun 		}
7911*4882a593Smuzhiyun 	}
7912*4882a593Smuzhiyun 
7913*4882a593Smuzhiyun 	if (config->intr_type == MSI_X) {
7914*4882a593Smuzhiyun 		for (i = 0; i < config->rx_ring_num; i++) {
7915*4882a593Smuzhiyun 			struct ring_info *ring = &mac_control->rings[i];
7916*4882a593Smuzhiyun 
7917*4882a593Smuzhiyun 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7918*4882a593Smuzhiyun 		}
7919*4882a593Smuzhiyun 	} else {
7920*4882a593Smuzhiyun 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7921*4882a593Smuzhiyun 	}
7922*4882a593Smuzhiyun 
7923*4882a593Smuzhiyun 	/* Not needed for Herc */
7924*4882a593Smuzhiyun 	if (sp->device_type & XFRAME_I_DEVICE) {
7925*4882a593Smuzhiyun 		/*
7926*4882a593Smuzhiyun 		 * Fix for all "FFs" MAC address problems observed on
7927*4882a593Smuzhiyun 		 * Alpha platforms
7928*4882a593Smuzhiyun 		 */
7929*4882a593Smuzhiyun 		fix_mac_address(sp);
7930*4882a593Smuzhiyun 		s2io_reset(sp);
7931*4882a593Smuzhiyun 	}
7932*4882a593Smuzhiyun 
7933*4882a593Smuzhiyun 	/*
7934*4882a593Smuzhiyun 	 * MAC address initialization.
7935*4882a593Smuzhiyun 	 * For now only one mac address will be read and used.
7936*4882a593Smuzhiyun 	 */
7937*4882a593Smuzhiyun 	bar0 = sp->bar0;
7938*4882a593Smuzhiyun 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7939*4882a593Smuzhiyun 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7940*4882a593Smuzhiyun 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7941*4882a593Smuzhiyun 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7942*4882a593Smuzhiyun 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7943*4882a593Smuzhiyun 			      S2IO_BIT_RESET);
7944*4882a593Smuzhiyun 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7945*4882a593Smuzhiyun 	mac_down = (u32)tmp64;
7946*4882a593Smuzhiyun 	mac_up = (u32) (tmp64 >> 32);
7947*4882a593Smuzhiyun 
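	/*
	 * The 6-byte MAC address occupies the upper 48 bits of
	 * rmac_addr_data0_mem, most-significant byte first: mac_up
	 * holds bytes 0-3 and the top 16 bits of mac_down hold
	 * bytes 4-5.
	 */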
7948*4882a593Smuzhiyun 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7949*4882a593Smuzhiyun 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7950*4882a593Smuzhiyun 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7951*4882a593Smuzhiyun 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7952*4882a593Smuzhiyun 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7953*4882a593Smuzhiyun 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7954*4882a593Smuzhiyun 
7955*4882a593Smuzhiyun 	/*  Set the factory defined MAC address initially   */
7956*4882a593Smuzhiyun 	dev->addr_len = ETH_ALEN;
7957*4882a593Smuzhiyun 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7958*4882a593Smuzhiyun 
7959*4882a593Smuzhiyun 	/* initialize number of multicast & unicast MAC entries variables */
7960*4882a593Smuzhiyun 	if (sp->device_type == XFRAME_I_DEVICE) {
7961*4882a593Smuzhiyun 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7962*4882a593Smuzhiyun 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7963*4882a593Smuzhiyun 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7964*4882a593Smuzhiyun 	} else if (sp->device_type == XFRAME_II_DEVICE) {
7965*4882a593Smuzhiyun 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7966*4882a593Smuzhiyun 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7967*4882a593Smuzhiyun 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7968*4882a593Smuzhiyun 	}
7969*4882a593Smuzhiyun 
7970*4882a593Smuzhiyun 	/* MTU range: 46 - 9600 */
7971*4882a593Smuzhiyun 	dev->min_mtu = MIN_MTU;
7972*4882a593Smuzhiyun 	dev->max_mtu = S2IO_JUMBO_SIZE;
7973*4882a593Smuzhiyun 
7974*4882a593Smuzhiyun 	/* store mac addresses from CAM to s2io_nic structure */
7975*4882a593Smuzhiyun 	do_s2io_store_unicast_mc(sp);
7976*4882a593Smuzhiyun 
7977*4882a593Smuzhiyun 	/* Configure MSIX vector for number of rings configured plus one */
7978*4882a593Smuzhiyun 	if ((sp->device_type == XFRAME_II_DEVICE) &&
7979*4882a593Smuzhiyun 	    (config->intr_type == MSI_X))
7980*4882a593Smuzhiyun 		sp->num_entries = config->rx_ring_num + 1;
7981*4882a593Smuzhiyun 
7982*4882a593Smuzhiyun 	/* Store the values of the MSIX table in the s2io_nic structure */
7983*4882a593Smuzhiyun 	store_xmsi_data(sp);
7984*4882a593Smuzhiyun 	/* reset Nic and bring it to known state */
7985*4882a593Smuzhiyun 	s2io_reset(sp);
7986*4882a593Smuzhiyun 
7987*4882a593Smuzhiyun 	/*
7988*4882a593Smuzhiyun 	 * Initialize link state flags
7989*4882a593Smuzhiyun 	 * and the card state parameter
7990*4882a593Smuzhiyun 	 */
7991*4882a593Smuzhiyun 	sp->state = 0;
7992*4882a593Smuzhiyun 
7993*4882a593Smuzhiyun 	/* Initialize spinlocks */
7994*4882a593Smuzhiyun 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7995*4882a593Smuzhiyun 		struct fifo_info *fifo = &mac_control->fifos[i];
7996*4882a593Smuzhiyun 
7997*4882a593Smuzhiyun 		spin_lock_init(&fifo->tx_lock);
7998*4882a593Smuzhiyun 	}
7999*4882a593Smuzhiyun 
8000*4882a593Smuzhiyun 	/*
8001*4882a593Smuzhiyun 	 * SXE-002: Configure link and activity LED to init state
8002*4882a593Smuzhiyun 	 * on driver load.
8003*4882a593Smuzhiyun 	 */
8004*4882a593Smuzhiyun 	subid = sp->pdev->subsystem_device;
8005*4882a593Smuzhiyun 	if ((subid & 0xFF) >= 0x07) {
8006*4882a593Smuzhiyun 		val64 = readq(&bar0->gpio_control);
8007*4882a593Smuzhiyun 		val64 |= 0x0000800000000000ULL;
8008*4882a593Smuzhiyun 		writeq(val64, &bar0->gpio_control);
8009*4882a593Smuzhiyun 		val64 = 0x0411040400000000ULL;
8010*4882a593Smuzhiyun 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8011*4882a593Smuzhiyun 		val64 = readq(&bar0->gpio_control);
8012*4882a593Smuzhiyun 	}
8013*4882a593Smuzhiyun 
8014*4882a593Smuzhiyun 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8015*4882a593Smuzhiyun 
8016*4882a593Smuzhiyun 	if (register_netdev(dev)) {
8017*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8018*4882a593Smuzhiyun 		ret = -ENODEV;
8019*4882a593Smuzhiyun 		goto register_failed;
8020*4882a593Smuzhiyun 	}
8021*4882a593Smuzhiyun 	s2io_vpd_read(sp);
8022*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8023*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8024*4882a593Smuzhiyun 		  sp->product_name, pdev->revision);
8025*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8026*4882a593Smuzhiyun 		  s2io_driver_version);
8027*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8028*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8029*4882a593Smuzhiyun 	if (sp->device_type & XFRAME_II_DEVICE) {
8030*4882a593Smuzhiyun 		mode = s2io_print_pci_mode(sp);
8031*4882a593Smuzhiyun 		if (mode < 0) {
8032*4882a593Smuzhiyun 			ret = -EBADSLT;
8033*4882a593Smuzhiyun 			unregister_netdev(dev);
8034*4882a593Smuzhiyun 			goto set_swap_failed;
8035*4882a593Smuzhiyun 		}
8036*4882a593Smuzhiyun 	}
8037*4882a593Smuzhiyun 	switch (sp->rxd_mode) {
8038*4882a593Smuzhiyun 	case RXD_MODE_1:
8039*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8040*4882a593Smuzhiyun 			  dev->name);
8041*4882a593Smuzhiyun 		break;
8042*4882a593Smuzhiyun 	case RXD_MODE_3B:
8043*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8044*4882a593Smuzhiyun 			  dev->name);
8045*4882a593Smuzhiyun 		break;
8046*4882a593Smuzhiyun 	}
8047*4882a593Smuzhiyun 
8048*4882a593Smuzhiyun 	switch (sp->config.napi) {
8049*4882a593Smuzhiyun 	case 0:
8050*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8051*4882a593Smuzhiyun 		break;
8052*4882a593Smuzhiyun 	case 1:
8053*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8054*4882a593Smuzhiyun 		break;
8055*4882a593Smuzhiyun 	}
8056*4882a593Smuzhiyun 
8057*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8058*4882a593Smuzhiyun 		  sp->config.tx_fifo_num);
8059*4882a593Smuzhiyun 
8060*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8061*4882a593Smuzhiyun 		  sp->config.rx_ring_num);
8062*4882a593Smuzhiyun 
8063*4882a593Smuzhiyun 	switch (sp->config.intr_type) {
8064*4882a593Smuzhiyun 	case INTA:
8065*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8066*4882a593Smuzhiyun 		break;
8067*4882a593Smuzhiyun 	case MSI_X:
8068*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8069*4882a593Smuzhiyun 		break;
8070*4882a593Smuzhiyun 	}
8071*4882a593Smuzhiyun 	if (sp->config.multiq) {
8072*4882a593Smuzhiyun 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8073*4882a593Smuzhiyun 			struct fifo_info *fifo = &mac_control->fifos[i];
8074*4882a593Smuzhiyun 
8075*4882a593Smuzhiyun 			fifo->multiq = config->multiq;
8076*4882a593Smuzhiyun 		}
8077*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8078*4882a593Smuzhiyun 			  dev->name);
8079*4882a593Smuzhiyun 	} else
8080*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8081*4882a593Smuzhiyun 			  dev->name);
8082*4882a593Smuzhiyun 
8083*4882a593Smuzhiyun 	switch (sp->config.tx_steering_type) {
8084*4882a593Smuzhiyun 	case NO_STEERING:
8085*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8086*4882a593Smuzhiyun 			  dev->name);
8087*4882a593Smuzhiyun 		break;
8088*4882a593Smuzhiyun 	case TX_PRIORITY_STEERING:
8089*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
8090*4882a593Smuzhiyun 			  "%s: Priority steering enabled for transmit\n",
8091*4882a593Smuzhiyun 			  dev->name);
8092*4882a593Smuzhiyun 		break;
8093*4882a593Smuzhiyun 	case TX_DEFAULT_STEERING:
8094*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG,
8095*4882a593Smuzhiyun 			  "%s: Default steering enabled for transmit\n",
8096*4882a593Smuzhiyun 			  dev->name);
8097*4882a593Smuzhiyun 	}
8098*4882a593Smuzhiyun 
8099*4882a593Smuzhiyun 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8100*4882a593Smuzhiyun 		  dev->name);
8101*4882a593Smuzhiyun 	/* Initialize device name */
8102*4882a593Smuzhiyun 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8103*4882a593Smuzhiyun 		 sp->product_name);
8104*4882a593Smuzhiyun 
8105*4882a593Smuzhiyun 	if (vlan_tag_strip)
8106*4882a593Smuzhiyun 		sp->vlan_strip_flag = 1;
8107*4882a593Smuzhiyun 	else
8108*4882a593Smuzhiyun 		sp->vlan_strip_flag = 0;
8109*4882a593Smuzhiyun 
8110*4882a593Smuzhiyun 	/*
8111*4882a593Smuzhiyun 	 * Make Link state as off at this point, when the Link change
8112*4882a593Smuzhiyun 	 * interrupt comes the state will be automatically changed to
8113*4882a593Smuzhiyun 	 * the right state.
8114*4882a593Smuzhiyun 	 */
8115*4882a593Smuzhiyun 	netif_carrier_off(dev);
8116*4882a593Smuzhiyun 
8117*4882a593Smuzhiyun 	return 0;
8118*4882a593Smuzhiyun 
8119*4882a593Smuzhiyun register_failed:
8120*4882a593Smuzhiyun set_swap_failed:
8121*4882a593Smuzhiyun 	iounmap(sp->bar1);
8122*4882a593Smuzhiyun bar1_remap_failed:
8123*4882a593Smuzhiyun 	iounmap(sp->bar0);
8124*4882a593Smuzhiyun bar0_remap_failed:
8125*4882a593Smuzhiyun mem_alloc_failed:
8126*4882a593Smuzhiyun 	free_shared_mem(sp);
8127*4882a593Smuzhiyun 	pci_disable_device(pdev);
8128*4882a593Smuzhiyun 	pci_release_regions(pdev);
8129*4882a593Smuzhiyun 	free_netdev(dev);
8130*4882a593Smuzhiyun 
8131*4882a593Smuzhiyun 	return ret;
8132*4882a593Smuzhiyun }
8133*4882a593Smuzhiyun 
8134*4882a593Smuzhiyun /**
8135*4882a593Smuzhiyun  * s2io_rem_nic - Free the PCI device
8136*4882a593Smuzhiyun  * @pdev: structure containing the PCI related information of the device.
8137*4882a593Smuzhiyun  * Description: This function is called by the PCI subsystem to release a
8138*4882a593Smuzhiyun  * PCI device and free up all resources held by the device. This could
8139*4882a593Smuzhiyun  * be in response to a hot-plug event or when the driver is to be removed
8140*4882a593Smuzhiyun  * from memory.
8141*4882a593Smuzhiyun  */
8142*4882a593Smuzhiyun 
8143*4882a593Smuzhiyun static void s2io_rem_nic(struct pci_dev *pdev)
8144*4882a593Smuzhiyun {
8145*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata(pdev);
8146*4882a593Smuzhiyun 	struct s2io_nic *sp;
8147*4882a593Smuzhiyun 
8148*4882a593Smuzhiyun 	if (dev == NULL) {
8149*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8150*4882a593Smuzhiyun 		return;
8151*4882a593Smuzhiyun 	}
8152*4882a593Smuzhiyun 
8153*4882a593Smuzhiyun 	sp = netdev_priv(dev);
8154*4882a593Smuzhiyun 
8155*4882a593Smuzhiyun 	cancel_work_sync(&sp->rst_timer_task);
8156*4882a593Smuzhiyun 	cancel_work_sync(&sp->set_link_task);
8157*4882a593Smuzhiyun 
8158*4882a593Smuzhiyun 	unregister_netdev(dev);
8159*4882a593Smuzhiyun 
8160*4882a593Smuzhiyun 	free_shared_mem(sp);
8161*4882a593Smuzhiyun 	iounmap(sp->bar0);
8162*4882a593Smuzhiyun 	iounmap(sp->bar1);
8163*4882a593Smuzhiyun 	pci_release_regions(pdev);
8164*4882a593Smuzhiyun 	free_netdev(dev);
8165*4882a593Smuzhiyun 	pci_disable_device(pdev);
8166*4882a593Smuzhiyun }
8167*4882a593Smuzhiyun 
8168*4882a593Smuzhiyun module_pci_driver(s2io_driver);
8169*4882a593Smuzhiyun 
8170*4882a593Smuzhiyun static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8171*4882a593Smuzhiyun 				struct tcphdr **tcp, struct RxD_t *rxdp,
8172*4882a593Smuzhiyun 				struct s2io_nic *sp)
8173*4882a593Smuzhiyun {
8174*4882a593Smuzhiyun 	int ip_off;
8175*4882a593Smuzhiyun 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8176*4882a593Smuzhiyun 
8177*4882a593Smuzhiyun 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8178*4882a593Smuzhiyun 		DBG_PRINT(INIT_DBG,
8179*4882a593Smuzhiyun 			  "%s: Non-TCP frames not supported for LRO\n",
8180*4882a593Smuzhiyun 			  __func__);
8181*4882a593Smuzhiyun 		return -1;
8182*4882a593Smuzhiyun 	}
8183*4882a593Smuzhiyun 
8184*4882a593Smuzhiyun 	/* Checking for DIX type or DIX type with VLAN */
8185*4882a593Smuzhiyun 	if ((l2_type == 0) || (l2_type == 4)) {
8186*4882a593Smuzhiyun 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8187*4882a593Smuzhiyun 		/*
8188*4882a593Smuzhiyun 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8189*4882a593Smuzhiyun 		 * shift the offset by the VLAN header size bytes.
8190*4882a593Smuzhiyun 		 */
8191*4882a593Smuzhiyun 		if ((!sp->vlan_strip_flag) &&
8192*4882a593Smuzhiyun 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8193*4882a593Smuzhiyun 			ip_off += HEADER_VLAN_SIZE;
8194*4882a593Smuzhiyun 	} else {
8195*4882a593Smuzhiyun 		/* LLC, SNAP etc are considered non-mergeable */
8196*4882a593Smuzhiyun 		return -1;
8197*4882a593Smuzhiyun 	}
8198*4882a593Smuzhiyun 
8199*4882a593Smuzhiyun 	*ip = (struct iphdr *)(buffer + ip_off);
8200*4882a593Smuzhiyun 	ip_len = (u8)((*ip)->ihl);
8201*4882a593Smuzhiyun 	ip_len <<= 2;
8202*4882a593Smuzhiyun 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8203*4882a593Smuzhiyun 
8204*4882a593Smuzhiyun 	return 0;
8205*4882a593Smuzhiyun }
8206*4882a593Smuzhiyun 
8207*4882a593Smuzhiyun static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8208*4882a593Smuzhiyun 				  struct tcphdr *tcp)
8209*4882a593Smuzhiyun {
8210*4882a593Smuzhiyun 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8211*4882a593Smuzhiyun 	if ((lro->iph->saddr != ip->saddr) ||
8212*4882a593Smuzhiyun 	    (lro->iph->daddr != ip->daddr) ||
8213*4882a593Smuzhiyun 	    (lro->tcph->source != tcp->source) ||
8214*4882a593Smuzhiyun 	    (lro->tcph->dest != tcp->dest))
8215*4882a593Smuzhiyun 		return -1;
8216*4882a593Smuzhiyun 	return 0;
8217*4882a593Smuzhiyun }
8218*4882a593Smuzhiyun 
8219*4882a593Smuzhiyun static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8220*4882a593Smuzhiyun {
8221*4882a593Smuzhiyun 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8222*4882a593Smuzhiyun }
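
/*
 * Example: a segment with tot_len == 1500, ihl == 5 (20-byte IP
 * header) and doff == 5 (20-byte TCP header) carries
 * 1500 - 20 - 20 = 1460 payload bytes.
 */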
8223*4882a593Smuzhiyun 
8224*4882a593Smuzhiyun static void initiate_new_session(struct lro *lro, u8 *l2h,
8225*4882a593Smuzhiyun 				 struct iphdr *ip, struct tcphdr *tcp,
8226*4882a593Smuzhiyun 				 u32 tcp_pyld_len, u16 vlan_tag)
8227*4882a593Smuzhiyun {
8228*4882a593Smuzhiyun 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8229*4882a593Smuzhiyun 	lro->l2h = l2h;
8230*4882a593Smuzhiyun 	lro->iph = ip;
8231*4882a593Smuzhiyun 	lro->tcph = tcp;
8232*4882a593Smuzhiyun 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8233*4882a593Smuzhiyun 	lro->tcp_ack = tcp->ack_seq;
8234*4882a593Smuzhiyun 	lro->sg_num = 1;
8235*4882a593Smuzhiyun 	lro->total_len = ntohs(ip->tot_len);
8236*4882a593Smuzhiyun 	lro->frags_len = 0;
8237*4882a593Smuzhiyun 	lro->vlan_tag = vlan_tag;
8238*4882a593Smuzhiyun 	/*
8239*4882a593Smuzhiyun 	 * Check if we saw TCP timestamp.
8240*4882a593Smuzhiyun 	 * Other consistency checks have already been done.
8241*4882a593Smuzhiyun 	 */
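	/*
	 * doff == 8 means a 32-byte TCP header, i.e. exactly 12 option
	 * bytes laid out as NOP, NOP, TIMESTAMP (kind 8, len 10); the
	 * 32-bit words at ptr+1 and ptr+2 are tsval and tsecr.
	 */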
8242*4882a593Smuzhiyun 	if (tcp->doff == 8) {
8243*4882a593Smuzhiyun 		__be32 *ptr;
8244*4882a593Smuzhiyun 		ptr = (__be32 *)(tcp+1);
8245*4882a593Smuzhiyun 		lro->saw_ts = 1;
8246*4882a593Smuzhiyun 		lro->cur_tsval = ntohl(*(ptr+1));
8247*4882a593Smuzhiyun 		lro->cur_tsecr = *(ptr+2);
8248*4882a593Smuzhiyun 	}
8249*4882a593Smuzhiyun 	lro->in_use = 1;
8250*4882a593Smuzhiyun }
8251*4882a593Smuzhiyun 
8252*4882a593Smuzhiyun static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8253*4882a593Smuzhiyun {
8254*4882a593Smuzhiyun 	struct iphdr *ip = lro->iph;
8255*4882a593Smuzhiyun 	struct tcphdr *tcp = lro->tcph;
8256*4882a593Smuzhiyun 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8257*4882a593Smuzhiyun 
8258*4882a593Smuzhiyun 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8259*4882a593Smuzhiyun 
8260*4882a593Smuzhiyun 	/* Update L3 header */
8261*4882a593Smuzhiyun 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8262*4882a593Smuzhiyun 	ip->tot_len = htons(lro->total_len);
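	/*
	 * csum_replace2() patches the IP checksum incrementally for the
	 * tot_len change instead of recomputing it from scratch.
	 */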
8263*4882a593Smuzhiyun 
8264*4882a593Smuzhiyun 	/* Update L4 header */
8265*4882a593Smuzhiyun 	tcp->ack_seq = lro->tcp_ack;
8266*4882a593Smuzhiyun 	tcp->window = lro->window;
8267*4882a593Smuzhiyun 
8268*4882a593Smuzhiyun 	/* Update tsecr field if this session has timestamps enabled */
8269*4882a593Smuzhiyun 	if (lro->saw_ts) {
8270*4882a593Smuzhiyun 		__be32 *ptr = (__be32 *)(tcp + 1);
8271*4882a593Smuzhiyun 		*(ptr+2) = lro->cur_tsecr;
8272*4882a593Smuzhiyun 	}
8273*4882a593Smuzhiyun 
8274*4882a593Smuzhiyun 	/* Update counters required for calculation of
8275*4882a593Smuzhiyun 	 * average no. of packets aggregated.
8276*4882a593Smuzhiyun 	 */
8277*4882a593Smuzhiyun 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8278*4882a593Smuzhiyun 	swstats->num_aggregations++;
8279*4882a593Smuzhiyun }
8280*4882a593Smuzhiyun 
8281*4882a593Smuzhiyun static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8282*4882a593Smuzhiyun 			     struct tcphdr *tcp, u32 l4_pyld)
8283*4882a593Smuzhiyun {
8284*4882a593Smuzhiyun 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8285*4882a593Smuzhiyun 	lro->total_len += l4_pyld;
8286*4882a593Smuzhiyun 	lro->frags_len += l4_pyld;
8287*4882a593Smuzhiyun 	lro->tcp_next_seq += l4_pyld;
8288*4882a593Smuzhiyun 	lro->sg_num++;
8289*4882a593Smuzhiyun 
8290*4882a593Smuzhiyun 	/* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8291*4882a593Smuzhiyun 	lro->tcp_ack = tcp->ack_seq;
8292*4882a593Smuzhiyun 	lro->window = tcp->window;
8293*4882a593Smuzhiyun 
8294*4882a593Smuzhiyun 	if (lro->saw_ts) {
8295*4882a593Smuzhiyun 		__be32 *ptr;
8296*4882a593Smuzhiyun 		/* Update tsecr and tsval from this packet */
8297*4882a593Smuzhiyun 		ptr = (__be32 *)(tcp+1);
8298*4882a593Smuzhiyun 		lro->cur_tsval = ntohl(*(ptr+1));
8299*4882a593Smuzhiyun 		lro->cur_tsecr = *(ptr + 2);
8300*4882a593Smuzhiyun 	}
8301*4882a593Smuzhiyun }
8302*4882a593Smuzhiyun 
8303*4882a593Smuzhiyun static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8304*4882a593Smuzhiyun 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8305*4882a593Smuzhiyun {
8306*4882a593Smuzhiyun 	u8 *ptr;
8307*4882a593Smuzhiyun 
8308*4882a593Smuzhiyun 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8309*4882a593Smuzhiyun 
8310*4882a593Smuzhiyun 	if (!tcp_pyld_len) {
8311*4882a593Smuzhiyun 		/* Runt frame or a pure ack */
8312*4882a593Smuzhiyun 		return -1;
8313*4882a593Smuzhiyun 	}
8314*4882a593Smuzhiyun 
8315*4882a593Smuzhiyun 	if (ip->ihl != 5) /* IP has options */
8316*4882a593Smuzhiyun 		return -1;
8317*4882a593Smuzhiyun 
8318*4882a593Smuzhiyun 	/* If we see CE codepoint in IP header, packet is not mergeable */
8319*4882a593Smuzhiyun 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8320*4882a593Smuzhiyun 		return -1;
8321*4882a593Smuzhiyun 
8322*4882a593Smuzhiyun 	/* Only frames with just the ACK control bit set are mergeable */
8323*4882a593Smuzhiyun 	if (tcp->urg || tcp->psh || tcp->rst ||
8324*4882a593Smuzhiyun 	    tcp->syn || tcp->fin ||
8325*4882a593Smuzhiyun 	    tcp->ece || tcp->cwr || !tcp->ack) {
8326*4882a593Smuzhiyun 		/*
8327*4882a593Smuzhiyun 		 * Currently recognize only the ack control word and
8328*4882a593Smuzhiyun 		 * any other control field being set would result in
8329*4882a593Smuzhiyun 		 * flushing the LRO session
8330*4882a593Smuzhiyun 		 */
8331*4882a593Smuzhiyun 		return -1;
8332*4882a593Smuzhiyun 	}
8333*4882a593Smuzhiyun 
8334*4882a593Smuzhiyun 	/*
8335*4882a593Smuzhiyun 	 * Allow only one TCP timestamp option. Don't aggregate if
8336*4882a593Smuzhiyun 	 * any other options are detected.
8337*4882a593Smuzhiyun 	 */
8338*4882a593Smuzhiyun 	if (tcp->doff != 5 && tcp->doff != 8)
8339*4882a593Smuzhiyun 		return -1;
8340*4882a593Smuzhiyun 
8341*4882a593Smuzhiyun 	if (tcp->doff == 8) {
8342*4882a593Smuzhiyun 		ptr = (u8 *)(tcp + 1);
8343*4882a593Smuzhiyun 		while (*ptr == TCPOPT_NOP)
8344*4882a593Smuzhiyun 			ptr++;
8345*4882a593Smuzhiyun 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8346*4882a593Smuzhiyun 			return -1;
8347*4882a593Smuzhiyun 
8348*4882a593Smuzhiyun 		/* Ensure timestamp value increases monotonically */
8349*4882a593Smuzhiyun 		if (l_lro)
8350*4882a593Smuzhiyun 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8351*4882a593Smuzhiyun 				return -1;
8352*4882a593Smuzhiyun 
8353*4882a593Smuzhiyun 		/* timestamp echo reply should be non-zero */
8354*4882a593Smuzhiyun 		if (*((__be32 *)(ptr+6)) == 0)
8355*4882a593Smuzhiyun 			return -1;
8356*4882a593Smuzhiyun 	}
8357*4882a593Smuzhiyun 
8358*4882a593Smuzhiyun 	return 0;
8359*4882a593Smuzhiyun }
8360*4882a593Smuzhiyun 
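/*
 * Return codes of s2io_club_tcp_session(), as consumed by the Rx path:
 *   0 - all LRO sessions already in use, packet not aggregated
 *   1 - packet aggregated into an existing session
 *   2 - out-of-sequence or unmergeable packet, flush session and packet
 *   3 - a new LRO session was initiated
 *   4 - session reached lro_max_aggr_per_sess, flush it
 *   5 - packet is not L3/L4 aggregatable, send it up as-is
 *  -1 - frame is not LRO capable (non-TCP, LLC/SNAP, etc.)
 */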
8361*4882a593Smuzhiyun static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8362*4882a593Smuzhiyun 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8363*4882a593Smuzhiyun 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8364*4882a593Smuzhiyun {
8365*4882a593Smuzhiyun 	struct iphdr *ip;
8366*4882a593Smuzhiyun 	struct tcphdr *tcph;
8367*4882a593Smuzhiyun 	int ret = 0, i;
8368*4882a593Smuzhiyun 	u16 vlan_tag = 0;
8369*4882a593Smuzhiyun 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8370*4882a593Smuzhiyun 
8371*4882a593Smuzhiyun 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8372*4882a593Smuzhiyun 				   rxdp, sp);
8373*4882a593Smuzhiyun 	if (ret)
8374*4882a593Smuzhiyun 		return ret;
8375*4882a593Smuzhiyun 
8376*4882a593Smuzhiyun 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8377*4882a593Smuzhiyun 
8378*4882a593Smuzhiyun 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8379*4882a593Smuzhiyun 	tcph = (struct tcphdr *)*tcp;
8380*4882a593Smuzhiyun 	*tcp_len = get_l4_pyld_length(ip, tcph);
8381*4882a593Smuzhiyun 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8382*4882a593Smuzhiyun 		struct lro *l_lro = &ring_data->lro0_n[i];
8383*4882a593Smuzhiyun 		if (l_lro->in_use) {
8384*4882a593Smuzhiyun 			if (check_for_socket_match(l_lro, ip, tcph))
8385*4882a593Smuzhiyun 				continue;
8386*4882a593Smuzhiyun 			/* Sock pair matched */
8387*4882a593Smuzhiyun 			*lro = l_lro;
8388*4882a593Smuzhiyun 
8389*4882a593Smuzhiyun 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8390*4882a593Smuzhiyun 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8391*4882a593Smuzhiyun 					  "expected 0x%x, actual 0x%x\n",
8392*4882a593Smuzhiyun 					  __func__,
8393*4882a593Smuzhiyun 					  (*lro)->tcp_next_seq,
8394*4882a593Smuzhiyun 					  ntohl(tcph->seq));
8395*4882a593Smuzhiyun 
8396*4882a593Smuzhiyun 				swstats->outof_sequence_pkts++;
8397*4882a593Smuzhiyun 				ret = 2;
8398*4882a593Smuzhiyun 				break;
8399*4882a593Smuzhiyun 			}
8400*4882a593Smuzhiyun 
8401*4882a593Smuzhiyun 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8402*4882a593Smuzhiyun 						      *tcp_len))
8403*4882a593Smuzhiyun 				ret = 1; /* Aggregate */
8404*4882a593Smuzhiyun 			else
8405*4882a593Smuzhiyun 				ret = 2; /* Flush both */
8406*4882a593Smuzhiyun 			break;
8407*4882a593Smuzhiyun 		}
8408*4882a593Smuzhiyun 	}
8409*4882a593Smuzhiyun 
8410*4882a593Smuzhiyun 	if (ret == 0) {
8411*4882a593Smuzhiyun 		/* Before searching for available LRO objects,
8412*4882a593Smuzhiyun 		 * check if the pkt is L3/L4 aggregatable. If not,
8413*4882a593Smuzhiyun 		 * don't create a new LRO session. Just send this
8414*4882a593Smuzhiyun 		 * packet up.
8415*4882a593Smuzhiyun 		 */
8416*4882a593Smuzhiyun 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8417*4882a593Smuzhiyun 			return 5;
8418*4882a593Smuzhiyun 
8419*4882a593Smuzhiyun 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8420*4882a593Smuzhiyun 			struct lro *l_lro = &ring_data->lro0_n[i];
8421*4882a593Smuzhiyun 			if (!(l_lro->in_use)) {
8422*4882a593Smuzhiyun 				*lro = l_lro;
8423*4882a593Smuzhiyun 				ret = 3; /* Begin anew */
8424*4882a593Smuzhiyun 				break;
8425*4882a593Smuzhiyun 			}
8426*4882a593Smuzhiyun 		}
8427*4882a593Smuzhiyun 	}
8428*4882a593Smuzhiyun 
8429*4882a593Smuzhiyun 	if (ret == 0) { /* sessions exceeded */
8430*4882a593Smuzhiyun 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8431*4882a593Smuzhiyun 			  __func__);
8432*4882a593Smuzhiyun 		*lro = NULL;
8433*4882a593Smuzhiyun 		return ret;
8434*4882a593Smuzhiyun 	}
8435*4882a593Smuzhiyun 
8436*4882a593Smuzhiyun 	switch (ret) {
8437*4882a593Smuzhiyun 	case 3:
8438*4882a593Smuzhiyun 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8439*4882a593Smuzhiyun 				     vlan_tag);
8440*4882a593Smuzhiyun 		break;
8441*4882a593Smuzhiyun 	case 2:
8442*4882a593Smuzhiyun 		update_L3L4_header(sp, *lro);
8443*4882a593Smuzhiyun 		break;
8444*4882a593Smuzhiyun 	case 1:
8445*4882a593Smuzhiyun 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8446*4882a593Smuzhiyun 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8447*4882a593Smuzhiyun 			update_L3L4_header(sp, *lro);
8448*4882a593Smuzhiyun 			ret = 4; /* Flush the LRO */
8449*4882a593Smuzhiyun 		}
8450*4882a593Smuzhiyun 		break;
8451*4882a593Smuzhiyun 	default:
8452*4882a593Smuzhiyun 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8453*4882a593Smuzhiyun 		break;
8454*4882a593Smuzhiyun 	}
8455*4882a593Smuzhiyun 
8456*4882a593Smuzhiyun 	return ret;
8457*4882a593Smuzhiyun }
8458*4882a593Smuzhiyun 
8459*4882a593Smuzhiyun static void clear_lro_session(struct lro *lro)
8460*4882a593Smuzhiyun {
8463*4882a593Smuzhiyun 	memset(lro, 0, sizeof(struct lro));
8464*4882a593Smuzhiyun }
8465*4882a593Smuzhiyun 
8466*4882a593Smuzhiyun static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8467*4882a593Smuzhiyun {
8468*4882a593Smuzhiyun 	struct net_device *dev = skb->dev;
8469*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(dev);
8470*4882a593Smuzhiyun 
8471*4882a593Smuzhiyun 	skb->protocol = eth_type_trans(skb, dev);
8472*4882a593Smuzhiyun 	if (vlan_tag && sp->vlan_strip_flag)
8473*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8474*4882a593Smuzhiyun 	if (sp->config.napi)
8475*4882a593Smuzhiyun 		netif_receive_skb(skb);
8476*4882a593Smuzhiyun 	else
8477*4882a593Smuzhiyun 		netif_rx(skb);
8478*4882a593Smuzhiyun }
8479*4882a593Smuzhiyun 
8480*4882a593Smuzhiyun static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8481*4882a593Smuzhiyun 			   struct sk_buff *skb, u32 tcp_len)
8482*4882a593Smuzhiyun {
8483*4882a593Smuzhiyun 	struct sk_buff *first = lro->parent;
8484*4882a593Smuzhiyun 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8485*4882a593Smuzhiyun 
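	/*
	 * Only the TCP payload of the new skb is kept (its headers are
	 * pulled off below); the skb is then chained onto the parent's
	 * frag_list so the stack sees one large aggregated packet.
	 */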
8486*4882a593Smuzhiyun 	first->len += tcp_len;
8487*4882a593Smuzhiyun 	first->data_len = lro->frags_len;
8488*4882a593Smuzhiyun 	skb_pull(skb, (skb->len - tcp_len));
8489*4882a593Smuzhiyun 	if (skb_shinfo(first)->frag_list)
8490*4882a593Smuzhiyun 		lro->last_frag->next = skb;
8491*4882a593Smuzhiyun 	else
8492*4882a593Smuzhiyun 		skb_shinfo(first)->frag_list = skb;
8493*4882a593Smuzhiyun 	first->truesize += skb->truesize;
8494*4882a593Smuzhiyun 	lro->last_frag = skb;
8495*4882a593Smuzhiyun 	swstats->clubbed_frms_cnt++;
8496*4882a593Smuzhiyun }
8497*4882a593Smuzhiyun 
8498*4882a593Smuzhiyun /**
8499*4882a593Smuzhiyun  * s2io_io_error_detected - called when PCI error is detected
8500*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
8501*4882a593Smuzhiyun  * @state: The current pci connection state
8502*4882a593Smuzhiyun  *
8503*4882a593Smuzhiyun  * This function is called after a PCI bus error affecting
8504*4882a593Smuzhiyun  * this device has been detected.
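 * If recovery is possible, the PCI core follows up with
 * s2io_io_slot_reset() and, once traffic may flow again,
 * s2io_io_resume().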
8505*4882a593Smuzhiyun  */
8506*4882a593Smuzhiyun static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8507*4882a593Smuzhiyun 					       pci_channel_state_t state)
8508*4882a593Smuzhiyun {
8509*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
8510*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(netdev);
8511*4882a593Smuzhiyun 
8512*4882a593Smuzhiyun 	netif_device_detach(netdev);
8513*4882a593Smuzhiyun 
8514*4882a593Smuzhiyun 	if (state == pci_channel_io_perm_failure)
8515*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
8516*4882a593Smuzhiyun 
8517*4882a593Smuzhiyun 	if (netif_running(netdev)) {
8518*4882a593Smuzhiyun 		/* Bring down the card, while avoiding PCI I/O */
8519*4882a593Smuzhiyun 		do_s2io_card_down(sp, 0);
8520*4882a593Smuzhiyun 	}
8521*4882a593Smuzhiyun 	pci_disable_device(pdev);
8522*4882a593Smuzhiyun 
8523*4882a593Smuzhiyun 	return PCI_ERS_RESULT_NEED_RESET;
8524*4882a593Smuzhiyun }
8525*4882a593Smuzhiyun 
8526*4882a593Smuzhiyun /**
8527*4882a593Smuzhiyun  * s2io_io_slot_reset - called after the pci bus has been reset.
8528*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
8529*4882a593Smuzhiyun  *
8530*4882a593Smuzhiyun  * Restart the card from scratch, as if from a cold-boot.
8531*4882a593Smuzhiyun  * At this point, the card has experienced a hard reset,
8532*4882a593Smuzhiyun  * followed by fixups by BIOS, and has its config space
8533*4882a593Smuzhiyun  * set up identically to what it was at cold boot.
8534*4882a593Smuzhiyun  */
8535*4882a593Smuzhiyun static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8536*4882a593Smuzhiyun {
8537*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
8538*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(netdev);
8539*4882a593Smuzhiyun 
8540*4882a593Smuzhiyun 	if (pci_enable_device(pdev)) {
8541*4882a593Smuzhiyun 		pr_err("Cannot re-enable PCI device after reset.\n");
8542*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
8543*4882a593Smuzhiyun 	}
8544*4882a593Smuzhiyun 
8545*4882a593Smuzhiyun 	pci_set_master(pdev);
8546*4882a593Smuzhiyun 	s2io_reset(sp);
8547*4882a593Smuzhiyun 
8548*4882a593Smuzhiyun 	return PCI_ERS_RESULT_RECOVERED;
8549*4882a593Smuzhiyun }
8550*4882a593Smuzhiyun 
8551*4882a593Smuzhiyun /**
8552*4882a593Smuzhiyun  * s2io_io_resume - called when traffic can start flowing again.
8553*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
8554*4882a593Smuzhiyun  *
8555*4882a593Smuzhiyun  * This callback is called when the error recovery driver tells
8556*4882a593Smuzhiyun  * us that it's OK to resume normal operation.
8557*4882a593Smuzhiyun  */
8558*4882a593Smuzhiyun static void s2io_io_resume(struct pci_dev *pdev)
8559*4882a593Smuzhiyun {
8560*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
8561*4882a593Smuzhiyun 	struct s2io_nic *sp = netdev_priv(netdev);
8562*4882a593Smuzhiyun 
8563*4882a593Smuzhiyun 	if (netif_running(netdev)) {
8564*4882a593Smuzhiyun 		if (s2io_card_up(sp)) {
8565*4882a593Smuzhiyun 			pr_err("Can't bring device back up after reset.\n");
8566*4882a593Smuzhiyun 			return;
8567*4882a593Smuzhiyun 		}
8568*4882a593Smuzhiyun 
8569*4882a593Smuzhiyun 		if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
8570*4882a593Smuzhiyun 			s2io_card_down(sp);
8571*4882a593Smuzhiyun 			pr_err("Can't restore mac addr after reset.\n");
8572*4882a593Smuzhiyun 			return;
8573*4882a593Smuzhiyun 		}
8574*4882a593Smuzhiyun 	}
8575*4882a593Smuzhiyun 
8576*4882a593Smuzhiyun 	netif_device_attach(netdev);
8577*4882a593Smuzhiyun 	netif_tx_wake_all_queues(netdev);
8578*4882a593Smuzhiyun }