xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright(c) 1999 - 2006 Intel Corporation. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include "e1000.h"
5*4882a593Smuzhiyun #include <net/ip6_checksum.h>
6*4882a593Smuzhiyun #include <linux/io.h>
7*4882a593Smuzhiyun #include <linux/prefetch.h>
8*4882a593Smuzhiyun #include <linux/bitops.h>
9*4882a593Smuzhiyun #include <linux/if_vlan.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun char e1000_driver_name[] = "e1000";
12*4882a593Smuzhiyun static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
13*4882a593Smuzhiyun static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun /* e1000_pci_tbl - PCI Device ID Table
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  * Last entry must be all 0s
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * Macro expands to...
20*4882a593Smuzhiyun  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
21*4882a593Smuzhiyun  */
22*4882a593Smuzhiyun static const struct pci_device_id e1000_pci_tbl[] = {
23*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
24*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
25*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
26*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
27*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
28*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
29*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
30*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
31*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
32*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
33*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
34*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
35*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
36*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1014),
37*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
38*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
39*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
40*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
41*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
42*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x101A),
43*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
44*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
45*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
46*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
47*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
48*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
49*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
50*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
51*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
52*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
53*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
54*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
55*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
56*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
57*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
58*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
59*4882a593Smuzhiyun 	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
60*4882a593Smuzhiyun 	/* required last entry */
61*4882a593Smuzhiyun 	{0,}
62*4882a593Smuzhiyun };
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun int e1000_up(struct e1000_adapter *adapter);
67*4882a593Smuzhiyun void e1000_down(struct e1000_adapter *adapter);
68*4882a593Smuzhiyun void e1000_reinit_locked(struct e1000_adapter *adapter);
69*4882a593Smuzhiyun void e1000_reset(struct e1000_adapter *adapter);
70*4882a593Smuzhiyun int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
71*4882a593Smuzhiyun int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
72*4882a593Smuzhiyun void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
73*4882a593Smuzhiyun void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
74*4882a593Smuzhiyun static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
75*4882a593Smuzhiyun 				    struct e1000_tx_ring *txdr);
76*4882a593Smuzhiyun static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
77*4882a593Smuzhiyun 				    struct e1000_rx_ring *rxdr);
78*4882a593Smuzhiyun static void e1000_free_tx_resources(struct e1000_adapter *adapter,
79*4882a593Smuzhiyun 				    struct e1000_tx_ring *tx_ring);
80*4882a593Smuzhiyun static void e1000_free_rx_resources(struct e1000_adapter *adapter,
81*4882a593Smuzhiyun 				    struct e1000_rx_ring *rx_ring);
82*4882a593Smuzhiyun void e1000_update_stats(struct e1000_adapter *adapter);
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun static int e1000_init_module(void);
85*4882a593Smuzhiyun static void e1000_exit_module(void);
86*4882a593Smuzhiyun static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
87*4882a593Smuzhiyun static void e1000_remove(struct pci_dev *pdev);
88*4882a593Smuzhiyun static int e1000_alloc_queues(struct e1000_adapter *adapter);
89*4882a593Smuzhiyun static int e1000_sw_init(struct e1000_adapter *adapter);
90*4882a593Smuzhiyun int e1000_open(struct net_device *netdev);
91*4882a593Smuzhiyun int e1000_close(struct net_device *netdev);
92*4882a593Smuzhiyun static void e1000_configure_tx(struct e1000_adapter *adapter);
93*4882a593Smuzhiyun static void e1000_configure_rx(struct e1000_adapter *adapter);
94*4882a593Smuzhiyun static void e1000_setup_rctl(struct e1000_adapter *adapter);
95*4882a593Smuzhiyun static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
96*4882a593Smuzhiyun static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
97*4882a593Smuzhiyun static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
98*4882a593Smuzhiyun 				struct e1000_tx_ring *tx_ring);
99*4882a593Smuzhiyun static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
100*4882a593Smuzhiyun 				struct e1000_rx_ring *rx_ring);
101*4882a593Smuzhiyun static void e1000_set_rx_mode(struct net_device *netdev);
102*4882a593Smuzhiyun static void e1000_update_phy_info_task(struct work_struct *work);
103*4882a593Smuzhiyun static void e1000_watchdog(struct work_struct *work);
104*4882a593Smuzhiyun static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
105*4882a593Smuzhiyun static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
106*4882a593Smuzhiyun 				    struct net_device *netdev);
107*4882a593Smuzhiyun static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
108*4882a593Smuzhiyun static int e1000_set_mac(struct net_device *netdev, void *p);
109*4882a593Smuzhiyun static irqreturn_t e1000_intr(int irq, void *data);
110*4882a593Smuzhiyun static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
111*4882a593Smuzhiyun 			       struct e1000_tx_ring *tx_ring);
112*4882a593Smuzhiyun static int e1000_clean(struct napi_struct *napi, int budget);
113*4882a593Smuzhiyun static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
114*4882a593Smuzhiyun 			       struct e1000_rx_ring *rx_ring,
115*4882a593Smuzhiyun 			       int *work_done, int work_to_do);
116*4882a593Smuzhiyun static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
117*4882a593Smuzhiyun 				     struct e1000_rx_ring *rx_ring,
118*4882a593Smuzhiyun 				     int *work_done, int work_to_do);
119*4882a593Smuzhiyun static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
120*4882a593Smuzhiyun 					 struct e1000_rx_ring *rx_ring,
121*4882a593Smuzhiyun 					 int cleaned_count)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
125*4882a593Smuzhiyun 				   struct e1000_rx_ring *rx_ring,
126*4882a593Smuzhiyun 				   int cleaned_count);
127*4882a593Smuzhiyun static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
128*4882a593Smuzhiyun 					 struct e1000_rx_ring *rx_ring,
129*4882a593Smuzhiyun 					 int cleaned_count);
130*4882a593Smuzhiyun static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
131*4882a593Smuzhiyun static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
132*4882a593Smuzhiyun 			   int cmd);
133*4882a593Smuzhiyun static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
134*4882a593Smuzhiyun static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
135*4882a593Smuzhiyun static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
136*4882a593Smuzhiyun static void e1000_reset_task(struct work_struct *work);
137*4882a593Smuzhiyun static void e1000_smartspeed(struct e1000_adapter *adapter);
138*4882a593Smuzhiyun static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
139*4882a593Smuzhiyun 				       struct sk_buff *skb);
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun static bool e1000_vlan_used(struct e1000_adapter *adapter);
142*4882a593Smuzhiyun static void e1000_vlan_mode(struct net_device *netdev,
143*4882a593Smuzhiyun 			    netdev_features_t features);
144*4882a593Smuzhiyun static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
145*4882a593Smuzhiyun 				     bool filter_on);
146*4882a593Smuzhiyun static int e1000_vlan_rx_add_vid(struct net_device *netdev,
147*4882a593Smuzhiyun 				 __be16 proto, u16 vid);
148*4882a593Smuzhiyun static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
149*4882a593Smuzhiyun 				  __be16 proto, u16 vid);
150*4882a593Smuzhiyun static void e1000_restore_vlan(struct e1000_adapter *adapter);
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun static int __maybe_unused e1000_suspend(struct device *dev);
153*4882a593Smuzhiyun static int __maybe_unused e1000_resume(struct device *dev);
154*4882a593Smuzhiyun static void e1000_shutdown(struct pci_dev *pdev);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
157*4882a593Smuzhiyun /* for netdump / net console */
158*4882a593Smuzhiyun static void e1000_netpoll (struct net_device *netdev);
159*4882a593Smuzhiyun #endif
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun #define COPYBREAK_DEFAULT 256
162*4882a593Smuzhiyun static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
163*4882a593Smuzhiyun module_param(copybreak, uint, 0644);
164*4882a593Smuzhiyun MODULE_PARM_DESC(copybreak,
165*4882a593Smuzhiyun 	"Maximum size of packet that is copied to a new buffer on receive");
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
168*4882a593Smuzhiyun 						pci_channel_state_t state);
169*4882a593Smuzhiyun static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
170*4882a593Smuzhiyun static void e1000_io_resume(struct pci_dev *pdev);
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun static const struct pci_error_handlers e1000_err_handler = {
173*4882a593Smuzhiyun 	.error_detected = e1000_io_error_detected,
174*4882a593Smuzhiyun 	.slot_reset = e1000_io_slot_reset,
175*4882a593Smuzhiyun 	.resume = e1000_io_resume,
176*4882a593Smuzhiyun };
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun static struct pci_driver e1000_driver = {
181*4882a593Smuzhiyun 	.name     = e1000_driver_name,
182*4882a593Smuzhiyun 	.id_table = e1000_pci_tbl,
183*4882a593Smuzhiyun 	.probe    = e1000_probe,
184*4882a593Smuzhiyun 	.remove   = e1000_remove,
185*4882a593Smuzhiyun 	.driver = {
186*4882a593Smuzhiyun 		.pm = &e1000_pm_ops,
187*4882a593Smuzhiyun 	},
188*4882a593Smuzhiyun 	.shutdown = e1000_shutdown,
189*4882a593Smuzhiyun 	.err_handler = &e1000_err_handler
190*4882a593Smuzhiyun };
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
193*4882a593Smuzhiyun MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
194*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
197*4882a593Smuzhiyun static int debug = -1;
198*4882a593Smuzhiyun module_param(debug, int, 0);
199*4882a593Smuzhiyun MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun /**
202*4882a593Smuzhiyun  * e1000_get_hw_dev - helper function for getting netdev
203*4882a593Smuzhiyun  * @hw: pointer to HW struct
204*4882a593Smuzhiyun  *
205*4882a593Smuzhiyun  * return device used by hardware layer to print debugging information
206*4882a593Smuzhiyun  *
207*4882a593Smuzhiyun  **/
208*4882a593Smuzhiyun struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun 	struct e1000_adapter *adapter = hw->back;
211*4882a593Smuzhiyun 	return adapter->netdev;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun /**
215*4882a593Smuzhiyun  * e1000_init_module - Driver Registration Routine
216*4882a593Smuzhiyun  *
217*4882a593Smuzhiyun  * e1000_init_module is the first routine called when the driver is
218*4882a593Smuzhiyun  * loaded. All it does is register with the PCI subsystem.
219*4882a593Smuzhiyun  **/
220*4882a593Smuzhiyun static int __init e1000_init_module(void)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun 	int ret;
223*4882a593Smuzhiyun 	pr_info("%s\n", e1000_driver_string);
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	pr_info("%s\n", e1000_copyright);
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	ret = pci_register_driver(&e1000_driver);
228*4882a593Smuzhiyun 	if (copybreak != COPYBREAK_DEFAULT) {
229*4882a593Smuzhiyun 		if (copybreak == 0)
230*4882a593Smuzhiyun 			pr_info("copybreak disabled\n");
231*4882a593Smuzhiyun 		else
232*4882a593Smuzhiyun 			pr_info("copybreak enabled for "
233*4882a593Smuzhiyun 				   "packets <= %u bytes\n", copybreak);
234*4882a593Smuzhiyun 	}
235*4882a593Smuzhiyun 	return ret;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun module_init(e1000_init_module);
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun /**
241*4882a593Smuzhiyun  * e1000_exit_module - Driver Exit Cleanup Routine
242*4882a593Smuzhiyun  *
243*4882a593Smuzhiyun  * e1000_exit_module is called just before the driver is removed
244*4882a593Smuzhiyun  * from memory.
245*4882a593Smuzhiyun  **/
246*4882a593Smuzhiyun static void __exit e1000_exit_module(void)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun 	pci_unregister_driver(&e1000_driver);
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun module_exit(e1000_exit_module);
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun static int e1000_request_irq(struct e1000_adapter *adapter)
254*4882a593Smuzhiyun {
255*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
256*4882a593Smuzhiyun 	irq_handler_t handler = e1000_intr;
257*4882a593Smuzhiyun 	int irq_flags = IRQF_SHARED;
258*4882a593Smuzhiyun 	int err;
259*4882a593Smuzhiyun 
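	/* This driver uses the device's single legacy INTx vector, which may
	 * be shared with other devices, hence IRQF_SHARED.
	 */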
260*4882a593Smuzhiyun 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
261*4882a593Smuzhiyun 			  netdev);
262*4882a593Smuzhiyun 	if (err) {
263*4882a593Smuzhiyun 		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
264*4882a593Smuzhiyun 	}
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	return err;
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun static void e1000_free_irq(struct e1000_adapter *adapter)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	free_irq(adapter->pdev->irq, netdev);
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun /**
277*4882a593Smuzhiyun  * e1000_irq_disable - Mask off interrupt generation on the NIC
278*4882a593Smuzhiyun  * @adapter: board private structure
279*4882a593Smuzhiyun  **/
280*4882a593Smuzhiyun static void e1000_irq_disable(struct e1000_adapter *adapter)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
283*4882a593Smuzhiyun 
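	/* Writing all 1s to IMC masks every interrupt cause; the flush plus
	 * synchronize_irq() ensures no handler is still running on return.
	 */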
284*4882a593Smuzhiyun 	ew32(IMC, ~0);
285*4882a593Smuzhiyun 	E1000_WRITE_FLUSH();
286*4882a593Smuzhiyun 	synchronize_irq(adapter->pdev->irq);
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun /**
290*4882a593Smuzhiyun  * e1000_irq_enable - Enable default interrupt generation settings
291*4882a593Smuzhiyun  * @adapter: board private structure
292*4882a593Smuzhiyun  **/
293*4882a593Smuzhiyun static void e1000_irq_enable(struct e1000_adapter *adapter)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	ew32(IMS, IMS_ENABLE_MASK);
298*4882a593Smuzhiyun 	E1000_WRITE_FLUSH();
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun 
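/* e1000_update_mng_vlan - keep the manageability VLAN in the VLAN filter
 *
 * If the firmware manageability (DHCP) cookie carries a VLAN id, make sure
 * that id stays programmed in the active VLAN filter while VLAN filtering is
 * in use, and drop the previously tracked id once it is no longer needed.
 */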
301*4882a593Smuzhiyun static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
302*4882a593Smuzhiyun {
303*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
304*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
305*4882a593Smuzhiyun 	u16 vid = hw->mng_cookie.vlan_id;
306*4882a593Smuzhiyun 	u16 old_vid = adapter->mng_vlan_id;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	if (!e1000_vlan_used(adapter))
309*4882a593Smuzhiyun 		return;
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	if (!test_bit(vid, adapter->active_vlans)) {
312*4882a593Smuzhiyun 		if (hw->mng_cookie.status &
313*4882a593Smuzhiyun 		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
314*4882a593Smuzhiyun 			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
315*4882a593Smuzhiyun 			adapter->mng_vlan_id = vid;
316*4882a593Smuzhiyun 		} else {
317*4882a593Smuzhiyun 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
318*4882a593Smuzhiyun 		}
319*4882a593Smuzhiyun 		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
320*4882a593Smuzhiyun 		    (vid != old_vid) &&
321*4882a593Smuzhiyun 		    !test_bit(old_vid, adapter->active_vlans))
322*4882a593Smuzhiyun 			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
323*4882a593Smuzhiyun 					       old_vid);
324*4882a593Smuzhiyun 	} else {
325*4882a593Smuzhiyun 		adapter->mng_vlan_id = vid;
326*4882a593Smuzhiyun 	}
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun static void e1000_init_manageability(struct e1000_adapter *adapter)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	if (adapter->en_mng_pt) {
334*4882a593Smuzhiyun 		u32 manc = er32(MANC);
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 		/* disable hardware interception of ARP */
337*4882a593Smuzhiyun 		manc &= ~(E1000_MANC_ARP_EN);
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 		ew32(MANC, manc);
340*4882a593Smuzhiyun 	}
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun static void e1000_release_manageability(struct e1000_adapter *adapter)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	if (adapter->en_mng_pt) {
348*4882a593Smuzhiyun 		u32 manc = er32(MANC);
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 		/* re-enable hardware interception of ARP */
351*4882a593Smuzhiyun 		manc |= E1000_MANC_ARP_EN;
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 		ew32(MANC, manc);
354*4882a593Smuzhiyun 	}
355*4882a593Smuzhiyun }
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun /**
358*4882a593Smuzhiyun  * e1000_configure - configure the hardware for RX and TX
359*4882a593Smuzhiyun  * @adapter: private board structure
360*4882a593Smuzhiyun  **/
361*4882a593Smuzhiyun static void e1000_configure(struct e1000_adapter *adapter)
362*4882a593Smuzhiyun {
363*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
364*4882a593Smuzhiyun 	int i;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	e1000_set_rx_mode(netdev);
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	e1000_restore_vlan(adapter);
369*4882a593Smuzhiyun 	e1000_init_manageability(adapter);
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	e1000_configure_tx(adapter);
372*4882a593Smuzhiyun 	e1000_setup_rctl(adapter);
373*4882a593Smuzhiyun 	e1000_configure_rx(adapter);
374*4882a593Smuzhiyun 	/* call E1000_DESC_UNUSED which always leaves
375*4882a593Smuzhiyun 	 * at least 1 descriptor unused to make sure
376*4882a593Smuzhiyun 	 * next_to_use != next_to_clean
377*4882a593Smuzhiyun 	 */
378*4882a593Smuzhiyun 	for (i = 0; i < adapter->num_rx_queues; i++) {
379*4882a593Smuzhiyun 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
380*4882a593Smuzhiyun 		adapter->alloc_rx_buf(adapter, ring,
381*4882a593Smuzhiyun 				      E1000_DESC_UNUSED(ring));
382*4882a593Smuzhiyun 	}
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun int e1000_up(struct e1000_adapter *adapter)
386*4882a593Smuzhiyun {
387*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	/* hardware has been reset, we need to reload some things */
390*4882a593Smuzhiyun 	e1000_configure(adapter);
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	clear_bit(__E1000_DOWN, &adapter->flags);
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun 	napi_enable(&adapter->napi);
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	e1000_irq_enable(adapter);
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	netif_wake_queue(adapter->netdev);
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	/* fire a link change interrupt to start the watchdog */
401*4882a593Smuzhiyun 	ew32(ICS, E1000_ICS_LSC);
402*4882a593Smuzhiyun 	return 0;
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun /**
406*4882a593Smuzhiyun  * e1000_power_up_phy - restore link in case the phy was powered down
407*4882a593Smuzhiyun  * @adapter: address of board private structure
408*4882a593Smuzhiyun  *
409*4882a593Smuzhiyun  * The phy may be powered down to save power and turn off link when the
410*4882a593Smuzhiyun  * driver is unloaded and wake on lan is not enabled (among others)
411*4882a593Smuzhiyun  * *** this routine MUST be followed by a call to e1000_reset ***
412*4882a593Smuzhiyun  **/
413*4882a593Smuzhiyun void e1000_power_up_phy(struct e1000_adapter *adapter)
414*4882a593Smuzhiyun {
415*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
416*4882a593Smuzhiyun 	u16 mii_reg = 0;
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun 	/* Just clear the power down bit to wake the phy back up */
419*4882a593Smuzhiyun 	if (hw->media_type == e1000_media_type_copper) {
420*4882a593Smuzhiyun 		/* according to the manual, the phy will retain its
421*4882a593Smuzhiyun 		 * settings across a power-down/up cycle
422*4882a593Smuzhiyun 		 */
423*4882a593Smuzhiyun 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
424*4882a593Smuzhiyun 		mii_reg &= ~MII_CR_POWER_DOWN;
425*4882a593Smuzhiyun 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
426*4882a593Smuzhiyun 	}
427*4882a593Smuzhiyun }
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun static void e1000_power_down_phy(struct e1000_adapter *adapter)
430*4882a593Smuzhiyun {
431*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun 	/* Power down the PHY so no link is implied when interface is down *
434*4882a593Smuzhiyun 	 * The PHY cannot be powered down if any of the following is true *
435*4882a593Smuzhiyun 	 * (a) WoL is enabled
436*4882a593Smuzhiyun 	 * (b) AMT is active
437*4882a593Smuzhiyun 	 * (c) SoL/IDER session is active
438*4882a593Smuzhiyun 	 */
439*4882a593Smuzhiyun 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
440*4882a593Smuzhiyun 	   hw->media_type == e1000_media_type_copper) {
441*4882a593Smuzhiyun 		u16 mii_reg = 0;
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 		switch (hw->mac_type) {
444*4882a593Smuzhiyun 		case e1000_82540:
445*4882a593Smuzhiyun 		case e1000_82545:
446*4882a593Smuzhiyun 		case e1000_82545_rev_3:
447*4882a593Smuzhiyun 		case e1000_82546:
448*4882a593Smuzhiyun 		case e1000_ce4100:
449*4882a593Smuzhiyun 		case e1000_82546_rev_3:
450*4882a593Smuzhiyun 		case e1000_82541:
451*4882a593Smuzhiyun 		case e1000_82541_rev_2:
452*4882a593Smuzhiyun 		case e1000_82547:
453*4882a593Smuzhiyun 		case e1000_82547_rev_2:
454*4882a593Smuzhiyun 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
455*4882a593Smuzhiyun 				goto out;
456*4882a593Smuzhiyun 			break;
457*4882a593Smuzhiyun 		default:
458*4882a593Smuzhiyun 			goto out;
459*4882a593Smuzhiyun 		}
460*4882a593Smuzhiyun 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
461*4882a593Smuzhiyun 		mii_reg |= MII_CR_POWER_DOWN;
462*4882a593Smuzhiyun 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
463*4882a593Smuzhiyun 		msleep(1);
464*4882a593Smuzhiyun 	}
465*4882a593Smuzhiyun out:
466*4882a593Smuzhiyun 	return;
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun static void e1000_down_and_stop(struct e1000_adapter *adapter)
470*4882a593Smuzhiyun {
471*4882a593Smuzhiyun 	set_bit(__E1000_DOWN, &adapter->flags);
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	cancel_delayed_work_sync(&adapter->watchdog_task);
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 	/*
476*4882a593Smuzhiyun 	 * Since the watchdog task can reschedule other tasks, we should cancel
477*4882a593Smuzhiyun 	 * it first, otherwise we can run into the situation when a work is
478*4882a593Smuzhiyun 	 * still running after the adapter has been turned down.
479*4882a593Smuzhiyun 	 */
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	cancel_delayed_work_sync(&adapter->phy_info_task);
482*4882a593Smuzhiyun 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 	/* Only kill reset task if adapter is not resetting */
485*4882a593Smuzhiyun 	if (!test_bit(__E1000_RESETTING, &adapter->flags))
486*4882a593Smuzhiyun 		cancel_work_sync(&adapter->reset_task);
487*4882a593Smuzhiyun }
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun void e1000_down(struct e1000_adapter *adapter)
490*4882a593Smuzhiyun {
491*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
492*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
493*4882a593Smuzhiyun 	u32 rctl, tctl;
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	/* disable receives in the hardware */
496*4882a593Smuzhiyun 	rctl = er32(RCTL);
497*4882a593Smuzhiyun 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
498*4882a593Smuzhiyun 	/* flush and sleep below */
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	netif_tx_disable(netdev);
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun 	/* disable transmits in the hardware */
503*4882a593Smuzhiyun 	tctl = er32(TCTL);
504*4882a593Smuzhiyun 	tctl &= ~E1000_TCTL_EN;
505*4882a593Smuzhiyun 	ew32(TCTL, tctl);
506*4882a593Smuzhiyun 	/* flush both disables and wait for them to finish */
507*4882a593Smuzhiyun 	E1000_WRITE_FLUSH();
508*4882a593Smuzhiyun 	msleep(10);
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	/* Set the carrier off after transmits have been disabled in the
511*4882a593Smuzhiyun 	 * hardware, to avoid race conditions with e1000_watchdog() (which
512*4882a593Smuzhiyun 	 * may be running concurrently to us, checking for the carrier
513*4882a593Smuzhiyun 	 * bit to decide whether it should enable transmits again). Such
514*4882a593Smuzhiyun 	 * a race condition would result in transmission being disabled
515*4882a593Smuzhiyun 	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
516*4882a593Smuzhiyun 	 */
517*4882a593Smuzhiyun 	netif_carrier_off(netdev);
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	napi_disable(&adapter->napi);
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	e1000_irq_disable(adapter);
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun 	/* Setting DOWN must be after irq_disable to prevent
524*4882a593Smuzhiyun 	 * a screaming interrupt.  Setting DOWN also prevents
525*4882a593Smuzhiyun 	 * tasks from rescheduling.
526*4882a593Smuzhiyun 	 */
527*4882a593Smuzhiyun 	e1000_down_and_stop(adapter);
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	adapter->link_speed = 0;
530*4882a593Smuzhiyun 	adapter->link_duplex = 0;
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun 	e1000_reset(adapter);
533*4882a593Smuzhiyun 	e1000_clean_all_tx_rings(adapter);
534*4882a593Smuzhiyun 	e1000_clean_all_rx_rings(adapter);
535*4882a593Smuzhiyun }
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun void e1000_reinit_locked(struct e1000_adapter *adapter)
538*4882a593Smuzhiyun {
539*4882a593Smuzhiyun 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
540*4882a593Smuzhiyun 		msleep(1);
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	/* only run the task if not already down */
543*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
544*4882a593Smuzhiyun 		e1000_down(adapter);
545*4882a593Smuzhiyun 		e1000_up(adapter);
546*4882a593Smuzhiyun 	}
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun 	clear_bit(__E1000_RESETTING, &adapter->flags);
549*4882a593Smuzhiyun }
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun void e1000_reset(struct e1000_adapter *adapter)
552*4882a593Smuzhiyun {
553*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
554*4882a593Smuzhiyun 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
555*4882a593Smuzhiyun 	bool legacy_pba_adjust = false;
556*4882a593Smuzhiyun 	u16 hwm;
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	/* Repartition Pba for greater than 9k mtu
559*4882a593Smuzhiyun 	 * To take effect CTRL.RST is required.
560*4882a593Smuzhiyun 	 */
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	switch (hw->mac_type) {
563*4882a593Smuzhiyun 	case e1000_82542_rev2_0:
564*4882a593Smuzhiyun 	case e1000_82542_rev2_1:
565*4882a593Smuzhiyun 	case e1000_82543:
566*4882a593Smuzhiyun 	case e1000_82544:
567*4882a593Smuzhiyun 	case e1000_82540:
568*4882a593Smuzhiyun 	case e1000_82541:
569*4882a593Smuzhiyun 	case e1000_82541_rev_2:
570*4882a593Smuzhiyun 		legacy_pba_adjust = true;
571*4882a593Smuzhiyun 		pba = E1000_PBA_48K;
572*4882a593Smuzhiyun 		break;
573*4882a593Smuzhiyun 	case e1000_82545:
574*4882a593Smuzhiyun 	case e1000_82545_rev_3:
575*4882a593Smuzhiyun 	case e1000_82546:
576*4882a593Smuzhiyun 	case e1000_ce4100:
577*4882a593Smuzhiyun 	case e1000_82546_rev_3:
578*4882a593Smuzhiyun 		pba = E1000_PBA_48K;
579*4882a593Smuzhiyun 		break;
580*4882a593Smuzhiyun 	case e1000_82547:
581*4882a593Smuzhiyun 	case e1000_82547_rev_2:
582*4882a593Smuzhiyun 		legacy_pba_adjust = true;
583*4882a593Smuzhiyun 		pba = E1000_PBA_30K;
584*4882a593Smuzhiyun 		break;
585*4882a593Smuzhiyun 	case e1000_undefined:
586*4882a593Smuzhiyun 	case e1000_num_macs:
587*4882a593Smuzhiyun 		break;
588*4882a593Smuzhiyun 	}
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun 	if (legacy_pba_adjust) {
591*4882a593Smuzhiyun 		if (hw->max_frame_size > E1000_RXBUFFER_8192)
592*4882a593Smuzhiyun 			pba -= 8; /* allocate more FIFO for Tx */
593*4882a593Smuzhiyun 
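		/* The 82547 needs its Tx FIFO head and size tracked in software
		 * as part of the Tx FIFO stall workaround (see
		 * e1000_82547_fifo_workaround()).
		 */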
594*4882a593Smuzhiyun 		if (hw->mac_type == e1000_82547) {
595*4882a593Smuzhiyun 			adapter->tx_fifo_head = 0;
596*4882a593Smuzhiyun 			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
597*4882a593Smuzhiyun 			adapter->tx_fifo_size =
598*4882a593Smuzhiyun 				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
599*4882a593Smuzhiyun 			atomic_set(&adapter->tx_fifo_stall, 0);
600*4882a593Smuzhiyun 		}
601*4882a593Smuzhiyun 	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
602*4882a593Smuzhiyun 		/* adjust PBA for jumbo frames */
603*4882a593Smuzhiyun 		ew32(PBA, pba);
604*4882a593Smuzhiyun 
605*4882a593Smuzhiyun 		/* To maintain wire speed transmits, the Tx FIFO should be
606*4882a593Smuzhiyun 		 * large enough to accommodate two full transmit packets,
607*4882a593Smuzhiyun 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
608*4882a593Smuzhiyun 		 * the Rx FIFO should be large enough to accommodate at least
609*4882a593Smuzhiyun 		 * one full receive packet and is similarly rounded up and
610*4882a593Smuzhiyun 		 * expressed in KB.
611*4882a593Smuzhiyun 		 */
612*4882a593Smuzhiyun 		pba = er32(PBA);
613*4882a593Smuzhiyun 		/* upper 16 bits has Tx packet buffer allocation size in KB */
614*4882a593Smuzhiyun 		tx_space = pba >> 16;
615*4882a593Smuzhiyun 		/* lower 16 bits has Rx packet buffer allocation size in KB */
616*4882a593Smuzhiyun 		pba &= 0xffff;
617*4882a593Smuzhiyun 		/* the Tx fifo also stores 16 bytes of information about the Tx
618*4882a593Smuzhiyun 		 * but don't include ethernet FCS because hardware appends it
619*4882a593Smuzhiyun 		 */
620*4882a593Smuzhiyun 		min_tx_space = (hw->max_frame_size +
621*4882a593Smuzhiyun 				sizeof(struct e1000_tx_desc) -
622*4882a593Smuzhiyun 				ETH_FCS_LEN) * 2;
623*4882a593Smuzhiyun 		min_tx_space = ALIGN(min_tx_space, 1024);
624*4882a593Smuzhiyun 		min_tx_space >>= 10;
625*4882a593Smuzhiyun 		/* software strips receive CRC, so leave room for it */
626*4882a593Smuzhiyun 		min_rx_space = hw->max_frame_size;
627*4882a593Smuzhiyun 		min_rx_space = ALIGN(min_rx_space, 1024);
628*4882a593Smuzhiyun 		min_rx_space >>= 10;
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 		/* If current Tx allocation is less than the min Tx FIFO size,
631*4882a593Smuzhiyun 		 * and the min Tx FIFO size is less than the current Rx FIFO
632*4882a593Smuzhiyun 		 * allocation, take space away from current Rx allocation
633*4882a593Smuzhiyun 		 */
634*4882a593Smuzhiyun 		if (tx_space < min_tx_space &&
635*4882a593Smuzhiyun 		    ((min_tx_space - tx_space) < pba)) {
636*4882a593Smuzhiyun 			pba = pba - (min_tx_space - tx_space);
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 			/* PCI/PCIx hardware has PBA alignment constraints */
639*4882a593Smuzhiyun 			switch (hw->mac_type) {
640*4882a593Smuzhiyun 			case e1000_82545 ... e1000_82546_rev_3:
641*4882a593Smuzhiyun 				pba &= ~(E1000_PBA_8K - 1);
642*4882a593Smuzhiyun 				break;
643*4882a593Smuzhiyun 			default:
644*4882a593Smuzhiyun 				break;
645*4882a593Smuzhiyun 			}
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 			/* if short on Rx space, Rx wins and must trump Tx
648*4882a593Smuzhiyun 			 * adjustment or use Early Receive if available
649*4882a593Smuzhiyun 			 */
650*4882a593Smuzhiyun 			if (pba < min_rx_space)
651*4882a593Smuzhiyun 				pba = min_rx_space;
652*4882a593Smuzhiyun 		}
653*4882a593Smuzhiyun 	}
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	ew32(PBA, pba);
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun 	/* flow control settings:
658*4882a593Smuzhiyun 	 * The high water mark must be low enough to fit one full frame
659*4882a593Smuzhiyun 	 * (or the size used for early receive) above it in the Rx FIFO.
660*4882a593Smuzhiyun 	 * Set it to the lower of:
661*4882a593Smuzhiyun 	 * - 90% of the Rx FIFO size, and
662*4882a593Smuzhiyun 	 * - the full Rx FIFO size minus the early receive size (for parts
663*4882a593Smuzhiyun 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
664*4882a593Smuzhiyun 	 * - the full Rx FIFO size minus one full frame
665*4882a593Smuzhiyun 	 */
666*4882a593Smuzhiyun 	hwm = min(((pba << 10) * 9 / 10),
667*4882a593Smuzhiyun 		  ((pba << 10) - hw->max_frame_size));
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
670*4882a593Smuzhiyun 	hw->fc_low_water = hw->fc_high_water - 8;
671*4882a593Smuzhiyun 	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
672*4882a593Smuzhiyun 	hw->fc_send_xon = 1;
673*4882a593Smuzhiyun 	hw->fc = hw->original_fc;
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun 	/* Allow time for pending master requests to run */
676*4882a593Smuzhiyun 	e1000_reset_hw(hw);
677*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82544)
678*4882a593Smuzhiyun 		ew32(WUC, 0);
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	if (e1000_init_hw(hw))
681*4882a593Smuzhiyun 		e_dev_err("Hardware Error\n");
682*4882a593Smuzhiyun 	e1000_update_mng_vlan(adapter);
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
685*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82544 &&
686*4882a593Smuzhiyun 	    hw->autoneg == 1 &&
687*4882a593Smuzhiyun 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
688*4882a593Smuzhiyun 		u32 ctrl = er32(CTRL);
689*4882a593Smuzhiyun 		/* clear phy power management bit if we are in gig only mode,
690*4882a593Smuzhiyun 		 * which if enabled will attempt negotiation to 100Mb, which
691*4882a593Smuzhiyun 		 * can cause a loss of link at power off or driver unload
692*4882a593Smuzhiyun 		 */
693*4882a593Smuzhiyun 		ctrl &= ~E1000_CTRL_SWDPIN3;
694*4882a593Smuzhiyun 		ew32(CTRL, ctrl);
695*4882a593Smuzhiyun 	}
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
698*4882a593Smuzhiyun 	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	e1000_reset_adaptive(hw);
701*4882a593Smuzhiyun 	e1000_phy_get_info(hw, &adapter->phy_info);
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	e1000_release_manageability(adapter);
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun /* Dump the eeprom for users having checksum issues */
707*4882a593Smuzhiyun static void e1000_dump_eeprom(struct e1000_adapter *adapter)
708*4882a593Smuzhiyun {
709*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
710*4882a593Smuzhiyun 	struct ethtool_eeprom eeprom;
711*4882a593Smuzhiyun 	const struct ethtool_ops *ops = netdev->ethtool_ops;
712*4882a593Smuzhiyun 	u8 *data;
713*4882a593Smuzhiyun 	int i;
714*4882a593Smuzhiyun 	u16 csum_old, csum_new = 0;
715*4882a593Smuzhiyun 
716*4882a593Smuzhiyun 	eeprom.len = ops->get_eeprom_len(netdev);
717*4882a593Smuzhiyun 	eeprom.offset = 0;
718*4882a593Smuzhiyun 
719*4882a593Smuzhiyun 	data = kmalloc(eeprom.len, GFP_KERNEL);
720*4882a593Smuzhiyun 	if (!data)
721*4882a593Smuzhiyun 		return;
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	ops->get_eeprom(netdev, &eeprom, data);
724*4882a593Smuzhiyun 
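	/* The EEPROM is valid when the 16-bit words up to and including the
	 * checksum word sum to EEPROM_SUM; recompute the expected checksum
	 * from the dumped bytes (stored little-endian) for comparison.
	 */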
725*4882a593Smuzhiyun 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
726*4882a593Smuzhiyun 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
727*4882a593Smuzhiyun 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
728*4882a593Smuzhiyun 		csum_new += data[i] + (data[i + 1] << 8);
729*4882a593Smuzhiyun 	csum_new = EEPROM_SUM - csum_new;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	pr_err("/*********************/\n");
732*4882a593Smuzhiyun 	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
733*4882a593Smuzhiyun 	pr_err("Calculated              : 0x%04x\n", csum_new);
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	pr_err("Offset    Values\n");
736*4882a593Smuzhiyun 	pr_err("========  ======\n");
737*4882a593Smuzhiyun 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 	pr_err("Include this output when contacting your support provider.\n");
740*4882a593Smuzhiyun 	pr_err("This is not a software error! Something bad happened to\n");
741*4882a593Smuzhiyun 	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
742*4882a593Smuzhiyun 	pr_err("result in further problems, possibly loss of data,\n");
743*4882a593Smuzhiyun 	pr_err("corruption or system hangs!\n");
744*4882a593Smuzhiyun 	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
745*4882a593Smuzhiyun 	pr_err("which is invalid and requires you to set the proper MAC\n");
746*4882a593Smuzhiyun 	pr_err("address manually before continuing to enable this network\n");
747*4882a593Smuzhiyun 	pr_err("device. Please inspect the EEPROM dump and report the\n");
748*4882a593Smuzhiyun 	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
749*4882a593Smuzhiyun 	pr_err("/*********************/\n");
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	kfree(data);
752*4882a593Smuzhiyun }
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun /**
755*4882a593Smuzhiyun  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
756*4882a593Smuzhiyun  * @pdev: PCI device information struct
757*4882a593Smuzhiyun  *
758*4882a593Smuzhiyun  * Return true if an adapter needs ioport resources
759*4882a593Smuzhiyun  **/
760*4882a593Smuzhiyun static int e1000_is_need_ioport(struct pci_dev *pdev)
761*4882a593Smuzhiyun {
762*4882a593Smuzhiyun 	switch (pdev->device) {
763*4882a593Smuzhiyun 	case E1000_DEV_ID_82540EM:
764*4882a593Smuzhiyun 	case E1000_DEV_ID_82540EM_LOM:
765*4882a593Smuzhiyun 	case E1000_DEV_ID_82540EP:
766*4882a593Smuzhiyun 	case E1000_DEV_ID_82540EP_LOM:
767*4882a593Smuzhiyun 	case E1000_DEV_ID_82540EP_LP:
768*4882a593Smuzhiyun 	case E1000_DEV_ID_82541EI:
769*4882a593Smuzhiyun 	case E1000_DEV_ID_82541EI_MOBILE:
770*4882a593Smuzhiyun 	case E1000_DEV_ID_82541ER:
771*4882a593Smuzhiyun 	case E1000_DEV_ID_82541ER_LOM:
772*4882a593Smuzhiyun 	case E1000_DEV_ID_82541GI:
773*4882a593Smuzhiyun 	case E1000_DEV_ID_82541GI_LF:
774*4882a593Smuzhiyun 	case E1000_DEV_ID_82541GI_MOBILE:
775*4882a593Smuzhiyun 	case E1000_DEV_ID_82544EI_COPPER:
776*4882a593Smuzhiyun 	case E1000_DEV_ID_82544EI_FIBER:
777*4882a593Smuzhiyun 	case E1000_DEV_ID_82544GC_COPPER:
778*4882a593Smuzhiyun 	case E1000_DEV_ID_82544GC_LOM:
779*4882a593Smuzhiyun 	case E1000_DEV_ID_82545EM_COPPER:
780*4882a593Smuzhiyun 	case E1000_DEV_ID_82545EM_FIBER:
781*4882a593Smuzhiyun 	case E1000_DEV_ID_82546EB_COPPER:
782*4882a593Smuzhiyun 	case E1000_DEV_ID_82546EB_FIBER:
783*4882a593Smuzhiyun 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
784*4882a593Smuzhiyun 		return true;
785*4882a593Smuzhiyun 	default:
786*4882a593Smuzhiyun 		return false;
787*4882a593Smuzhiyun 	}
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun static netdev_features_t e1000_fix_features(struct net_device *netdev,
791*4882a593Smuzhiyun 	netdev_features_t features)
792*4882a593Smuzhiyun {
793*4882a593Smuzhiyun 	/* Since there is no support for separate Rx/Tx vlan accel
794*4882a593Smuzhiyun 	 * enable/disable make sure Tx flag is always in same state as Rx.
795*4882a593Smuzhiyun 	 */
796*4882a593Smuzhiyun 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
797*4882a593Smuzhiyun 		features |= NETIF_F_HW_VLAN_CTAG_TX;
798*4882a593Smuzhiyun 	else
799*4882a593Smuzhiyun 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	return features;
802*4882a593Smuzhiyun }
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun static int e1000_set_features(struct net_device *netdev,
805*4882a593Smuzhiyun 	netdev_features_t features)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
808*4882a593Smuzhiyun 	netdev_features_t changed = features ^ netdev->features;
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
811*4882a593Smuzhiyun 		e1000_vlan_mode(netdev, features);
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun 	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
814*4882a593Smuzhiyun 		return 0;
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	netdev->features = features;
817*4882a593Smuzhiyun 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
818*4882a593Smuzhiyun 
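	/* The updated Rx checksum/RXALL settings only take effect once the
	 * receive path is reprogrammed, so reinitialize a running interface
	 * or simply reset the hardware when it is down.
	 */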
819*4882a593Smuzhiyun 	if (netif_running(netdev))
820*4882a593Smuzhiyun 		e1000_reinit_locked(adapter);
821*4882a593Smuzhiyun 	else
822*4882a593Smuzhiyun 		e1000_reset(adapter);
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	return 1;
825*4882a593Smuzhiyun }
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun static const struct net_device_ops e1000_netdev_ops = {
828*4882a593Smuzhiyun 	.ndo_open		= e1000_open,
829*4882a593Smuzhiyun 	.ndo_stop		= e1000_close,
830*4882a593Smuzhiyun 	.ndo_start_xmit		= e1000_xmit_frame,
831*4882a593Smuzhiyun 	.ndo_set_rx_mode	= e1000_set_rx_mode,
832*4882a593Smuzhiyun 	.ndo_set_mac_address	= e1000_set_mac,
833*4882a593Smuzhiyun 	.ndo_tx_timeout		= e1000_tx_timeout,
834*4882a593Smuzhiyun 	.ndo_change_mtu		= e1000_change_mtu,
835*4882a593Smuzhiyun 	.ndo_do_ioctl		= e1000_ioctl,
836*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
837*4882a593Smuzhiyun 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
838*4882a593Smuzhiyun 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
839*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
840*4882a593Smuzhiyun 	.ndo_poll_controller	= e1000_netpoll,
841*4882a593Smuzhiyun #endif
842*4882a593Smuzhiyun 	.ndo_fix_features	= e1000_fix_features,
843*4882a593Smuzhiyun 	.ndo_set_features	= e1000_set_features,
844*4882a593Smuzhiyun };
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun /**
847*4882a593Smuzhiyun  * e1000_init_hw_struct - initialize members of hw struct
848*4882a593Smuzhiyun  * @adapter: board private struct
849*4882a593Smuzhiyun  * @hw: structure used by e1000_hw.c
850*4882a593Smuzhiyun  *
851*4882a593Smuzhiyun  * Factors out initialization of the e1000_hw struct to its own function
852*4882a593Smuzhiyun  * that can be called very early at init (just after struct allocation).
853*4882a593Smuzhiyun  * Fields are initialized based on PCI device information and
854*4882a593Smuzhiyun  * OS network device settings (MTU size).
855*4882a593Smuzhiyun  * Returns negative error codes if MAC type setup fails.
856*4882a593Smuzhiyun  */
857*4882a593Smuzhiyun static int e1000_init_hw_struct(struct e1000_adapter *adapter,
858*4882a593Smuzhiyun 				struct e1000_hw *hw)
859*4882a593Smuzhiyun {
860*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun 	/* PCI config space info */
863*4882a593Smuzhiyun 	hw->vendor_id = pdev->vendor;
864*4882a593Smuzhiyun 	hw->device_id = pdev->device;
865*4882a593Smuzhiyun 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
866*4882a593Smuzhiyun 	hw->subsystem_id = pdev->subsystem_device;
867*4882a593Smuzhiyun 	hw->revision_id = pdev->revision;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	hw->max_frame_size = adapter->netdev->mtu +
872*4882a593Smuzhiyun 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
873*4882a593Smuzhiyun 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	/* identify the MAC */
876*4882a593Smuzhiyun 	if (e1000_set_mac_type(hw)) {
877*4882a593Smuzhiyun 		e_err(probe, "Unknown MAC Type\n");
878*4882a593Smuzhiyun 		return -EIO;
879*4882a593Smuzhiyun 	}
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	switch (hw->mac_type) {
882*4882a593Smuzhiyun 	default:
883*4882a593Smuzhiyun 		break;
884*4882a593Smuzhiyun 	case e1000_82541:
885*4882a593Smuzhiyun 	case e1000_82547:
886*4882a593Smuzhiyun 	case e1000_82541_rev_2:
887*4882a593Smuzhiyun 	case e1000_82547_rev_2:
888*4882a593Smuzhiyun 		hw->phy_init_script = 1;
889*4882a593Smuzhiyun 		break;
890*4882a593Smuzhiyun 	}
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun 	e1000_set_media_type(hw);
893*4882a593Smuzhiyun 	e1000_get_bus_info(hw);
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 	hw->wait_autoneg_complete = false;
896*4882a593Smuzhiyun 	hw->tbi_compatibility_en = true;
897*4882a593Smuzhiyun 	hw->adaptive_ifs = true;
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	/* Copper options */
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	if (hw->media_type == e1000_media_type_copper) {
902*4882a593Smuzhiyun 		hw->mdix = AUTO_ALL_MODES;
903*4882a593Smuzhiyun 		hw->disable_polarity_correction = false;
904*4882a593Smuzhiyun 		hw->master_slave = E1000_MASTER_SLAVE;
905*4882a593Smuzhiyun 	}
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun 	return 0;
908*4882a593Smuzhiyun }
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun /**
911*4882a593Smuzhiyun  * e1000_probe - Device Initialization Routine
912*4882a593Smuzhiyun  * @pdev: PCI device information struct
913*4882a593Smuzhiyun  * @ent: entry in e1000_pci_tbl
914*4882a593Smuzhiyun  *
915*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
916*4882a593Smuzhiyun  *
917*4882a593Smuzhiyun  * e1000_probe initializes an adapter identified by a pci_dev structure.
918*4882a593Smuzhiyun  * The OS initialization, configuring of the adapter private structure,
919*4882a593Smuzhiyun  * and a hardware reset occur.
920*4882a593Smuzhiyun  **/
921*4882a593Smuzhiyun static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
922*4882a593Smuzhiyun {
923*4882a593Smuzhiyun 	struct net_device *netdev;
924*4882a593Smuzhiyun 	struct e1000_adapter *adapter = NULL;
925*4882a593Smuzhiyun 	struct e1000_hw *hw;
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 	static int cards_found;
928*4882a593Smuzhiyun 	static int global_quad_port_a; /* global ksp3 port a indication */
929*4882a593Smuzhiyun 	int i, err, pci_using_dac;
930*4882a593Smuzhiyun 	u16 eeprom_data = 0;
931*4882a593Smuzhiyun 	u16 tmp = 0;
932*4882a593Smuzhiyun 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
933*4882a593Smuzhiyun 	int bars, need_ioport;
934*4882a593Smuzhiyun 	bool disable_dev = false;
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	/* do not allocate ioport bars when not needed */
937*4882a593Smuzhiyun 	need_ioport = e1000_is_need_ioport(pdev);
938*4882a593Smuzhiyun 	if (need_ioport) {
939*4882a593Smuzhiyun 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
940*4882a593Smuzhiyun 		err = pci_enable_device(pdev);
941*4882a593Smuzhiyun 	} else {
942*4882a593Smuzhiyun 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
943*4882a593Smuzhiyun 		err = pci_enable_device_mem(pdev);
944*4882a593Smuzhiyun 	}
945*4882a593Smuzhiyun 	if (err)
946*4882a593Smuzhiyun 		return err;
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
949*4882a593Smuzhiyun 	if (err)
950*4882a593Smuzhiyun 		goto err_pci_reg;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	pci_set_master(pdev);
953*4882a593Smuzhiyun 	err = pci_save_state(pdev);
954*4882a593Smuzhiyun 	if (err)
955*4882a593Smuzhiyun 		goto err_alloc_etherdev;
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 	err = -ENOMEM;
958*4882a593Smuzhiyun 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
959*4882a593Smuzhiyun 	if (!netdev)
960*4882a593Smuzhiyun 		goto err_alloc_etherdev;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	SET_NETDEV_DEV(netdev, &pdev->dev);
963*4882a593Smuzhiyun 
964*4882a593Smuzhiyun 	pci_set_drvdata(pdev, netdev);
965*4882a593Smuzhiyun 	adapter = netdev_priv(netdev);
966*4882a593Smuzhiyun 	adapter->netdev = netdev;
967*4882a593Smuzhiyun 	adapter->pdev = pdev;
968*4882a593Smuzhiyun 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
969*4882a593Smuzhiyun 	adapter->bars = bars;
970*4882a593Smuzhiyun 	adapter->need_ioport = need_ioport;
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 	hw = &adapter->hw;
973*4882a593Smuzhiyun 	hw->back = adapter;
974*4882a593Smuzhiyun 
975*4882a593Smuzhiyun 	err = -EIO;
976*4882a593Smuzhiyun 	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
977*4882a593Smuzhiyun 	if (!hw->hw_addr)
978*4882a593Smuzhiyun 		goto err_ioremap;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	if (adapter->need_ioport) {
981*4882a593Smuzhiyun 		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
982*4882a593Smuzhiyun 			if (pci_resource_len(pdev, i) == 0)
983*4882a593Smuzhiyun 				continue;
984*4882a593Smuzhiyun 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
985*4882a593Smuzhiyun 				hw->io_base = pci_resource_start(pdev, i);
986*4882a593Smuzhiyun 				break;
987*4882a593Smuzhiyun 			}
988*4882a593Smuzhiyun 		}
989*4882a593Smuzhiyun 	}
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	/* make ready for any if (hw->...) below */
992*4882a593Smuzhiyun 	err = e1000_init_hw_struct(adapter, hw);
993*4882a593Smuzhiyun 	if (err)
994*4882a593Smuzhiyun 		goto err_sw_init;
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	/* there is a workaround being applied below that limits
997*4882a593Smuzhiyun 	 * 64-bit DMA addresses to 64-bit hardware.  There are some
998*4882a593Smuzhiyun 	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
999*4882a593Smuzhiyun 	 */
1000*4882a593Smuzhiyun 	pci_using_dac = 0;
1001*4882a593Smuzhiyun 	if ((hw->bus_type == e1000_bus_type_pcix) &&
1002*4882a593Smuzhiyun 	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1003*4882a593Smuzhiyun 		pci_using_dac = 1;
1004*4882a593Smuzhiyun 	} else {
1005*4882a593Smuzhiyun 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1006*4882a593Smuzhiyun 		if (err) {
1007*4882a593Smuzhiyun 			pr_err("No usable DMA config, aborting\n");
1008*4882a593Smuzhiyun 			goto err_dma;
1009*4882a593Smuzhiyun 		}
1010*4882a593Smuzhiyun 	}
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	netdev->netdev_ops = &e1000_netdev_ops;
1013*4882a593Smuzhiyun 	e1000_set_ethtool_ops(netdev);
1014*4882a593Smuzhiyun 	netdev->watchdog_timeo = 5 * HZ;
1015*4882a593Smuzhiyun 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	adapter->bd_number = cards_found;
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	/* setup the private structure */
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 	err = e1000_sw_init(adapter);
1024*4882a593Smuzhiyun 	if (err)
1025*4882a593Smuzhiyun 		goto err_sw_init;
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	err = -EIO;
1028*4882a593Smuzhiyun 	if (hw->mac_type == e1000_ce4100) {
1029*4882a593Smuzhiyun 		hw->ce4100_gbe_mdio_base_virt =
1030*4882a593Smuzhiyun 					ioremap(pci_resource_start(pdev, BAR_1),
1031*4882a593Smuzhiyun 						pci_resource_len(pdev, BAR_1));
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 		if (!hw->ce4100_gbe_mdio_base_virt)
1034*4882a593Smuzhiyun 			goto err_mdio_ioremap;
1035*4882a593Smuzhiyun 	}
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82543) {
1038*4882a593Smuzhiyun 		netdev->hw_features = NETIF_F_SG |
1039*4882a593Smuzhiyun 				   NETIF_F_HW_CSUM |
1040*4882a593Smuzhiyun 				   NETIF_F_HW_VLAN_CTAG_RX;
1041*4882a593Smuzhiyun 		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1042*4882a593Smuzhiyun 				   NETIF_F_HW_VLAN_CTAG_FILTER;
1043*4882a593Smuzhiyun 	}
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	if ((hw->mac_type >= e1000_82544) &&
1046*4882a593Smuzhiyun 	   (hw->mac_type != e1000_82547))
1047*4882a593Smuzhiyun 		netdev->hw_features |= NETIF_F_TSO;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	netdev->priv_flags |= IFF_SUPP_NOFCS;
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	netdev->features |= netdev->hw_features;
1052*4882a593Smuzhiyun 	netdev->hw_features |= (NETIF_F_RXCSUM |
1053*4882a593Smuzhiyun 				NETIF_F_RXALL |
1054*4882a593Smuzhiyun 				NETIF_F_RXFCS);
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	if (pci_using_dac) {
1057*4882a593Smuzhiyun 		netdev->features |= NETIF_F_HIGHDMA;
1058*4882a593Smuzhiyun 		netdev->vlan_features |= NETIF_F_HIGHDMA;
1059*4882a593Smuzhiyun 	}
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	netdev->vlan_features |= (NETIF_F_TSO |
1062*4882a593Smuzhiyun 				  NETIF_F_HW_CSUM |
1063*4882a593Smuzhiyun 				  NETIF_F_SG);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1066*4882a593Smuzhiyun 	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1067*4882a593Smuzhiyun 	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1068*4882a593Smuzhiyun 		netdev->priv_flags |= IFF_UNICAST_FLT;
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 	/* MTU range: 46 - 16110 */
1071*4882a593Smuzhiyun 	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1072*4882a593Smuzhiyun 	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	/* initialize eeprom parameters */
1077*4882a593Smuzhiyun 	if (e1000_init_eeprom_params(hw)) {
1078*4882a593Smuzhiyun 		e_err(probe, "EEPROM initialization failed\n");
1079*4882a593Smuzhiyun 		goto err_eeprom;
1080*4882a593Smuzhiyun 	}
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	/* before reading the EEPROM, reset the controller to
1083*4882a593Smuzhiyun 	 * put the device in a known good starting state
1084*4882a593Smuzhiyun 	 */
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	e1000_reset_hw(hw);
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	/* make sure the EEPROM is good */
1089*4882a593Smuzhiyun 	if (e1000_validate_eeprom_checksum(hw) < 0) {
1090*4882a593Smuzhiyun 		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1091*4882a593Smuzhiyun 		e1000_dump_eeprom(adapter);
1092*4882a593Smuzhiyun 		/* set MAC address to all zeroes to invalidate and temporarily
1093*4882a593Smuzhiyun 		 * disable this device for the user. This blocks regular
1094*4882a593Smuzhiyun 		 * traffic while still permitting ethtool ioctls from reaching
1095*4882a593Smuzhiyun 		 * the hardware as well as allowing the user to run the
1096*4882a593Smuzhiyun 		 * interface after manually setting a hw addr using
1097*4882a593Smuzhiyun 		 * `ip link set address`
1098*4882a593Smuzhiyun 		 */
1099*4882a593Smuzhiyun 		memset(hw->mac_addr, 0, netdev->addr_len);
1100*4882a593Smuzhiyun 	} else {
1101*4882a593Smuzhiyun 		/* copy the MAC address out of the EEPROM */
1102*4882a593Smuzhiyun 		if (e1000_read_mac_addr(hw))
1103*4882a593Smuzhiyun 			e_err(probe, "EEPROM Read Error\n");
1104*4882a593Smuzhiyun 	}
1105*4882a593Smuzhiyun 	/* don't block initialization here due to bad MAC address */
1106*4882a593Smuzhiyun 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	if (!is_valid_ether_addr(netdev->dev_addr))
1109*4882a593Smuzhiyun 		e_err(probe, "Invalid MAC Address\n");
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1113*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1114*4882a593Smuzhiyun 			  e1000_82547_tx_fifo_stall_task);
1115*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1116*4882a593Smuzhiyun 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	e1000_check_options(adapter);
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	/* Initial Wake on LAN setting
1121*4882a593Smuzhiyun 	 * If APM wake is enabled in the EEPROM,
1122*4882a593Smuzhiyun 	 * enable the ACPI Magic Packet filter
1123*4882a593Smuzhiyun 	 */
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	switch (hw->mac_type) {
1126*4882a593Smuzhiyun 	case e1000_82542_rev2_0:
1127*4882a593Smuzhiyun 	case e1000_82542_rev2_1:
1128*4882a593Smuzhiyun 	case e1000_82543:
1129*4882a593Smuzhiyun 		break;
1130*4882a593Smuzhiyun 	case e1000_82544:
1131*4882a593Smuzhiyun 		e1000_read_eeprom(hw,
1132*4882a593Smuzhiyun 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1133*4882a593Smuzhiyun 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1134*4882a593Smuzhiyun 		break;
1135*4882a593Smuzhiyun 	case e1000_82546:
1136*4882a593Smuzhiyun 	case e1000_82546_rev_3:
1137*4882a593Smuzhiyun 		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1138*4882a593Smuzhiyun 			e1000_read_eeprom(hw,
1139*4882a593Smuzhiyun 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1140*4882a593Smuzhiyun 			break;
1141*4882a593Smuzhiyun 		}
1142*4882a593Smuzhiyun 		fallthrough;
1143*4882a593Smuzhiyun 	default:
1144*4882a593Smuzhiyun 		e1000_read_eeprom(hw,
1145*4882a593Smuzhiyun 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1146*4882a593Smuzhiyun 		break;
1147*4882a593Smuzhiyun 	}
1148*4882a593Smuzhiyun 	if (eeprom_data & eeprom_apme_mask)
1149*4882a593Smuzhiyun 		adapter->eeprom_wol |= E1000_WUFC_MAG;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	/* now that we have the eeprom settings, apply the special cases
1152*4882a593Smuzhiyun 	 * where the eeprom may be wrong or the board simply won't support
1153*4882a593Smuzhiyun 	 * wake on lan on a particular port
1154*4882a593Smuzhiyun 	 */
1155*4882a593Smuzhiyun 	switch (pdev->device) {
1156*4882a593Smuzhiyun 	case E1000_DEV_ID_82546GB_PCIE:
1157*4882a593Smuzhiyun 		adapter->eeprom_wol = 0;
1158*4882a593Smuzhiyun 		break;
1159*4882a593Smuzhiyun 	case E1000_DEV_ID_82546EB_FIBER:
1160*4882a593Smuzhiyun 	case E1000_DEV_ID_82546GB_FIBER:
1161*4882a593Smuzhiyun 		/* Wake events only supported on port A for dual fiber
1162*4882a593Smuzhiyun 		 * regardless of eeprom setting
1163*4882a593Smuzhiyun 		 */
1164*4882a593Smuzhiyun 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1165*4882a593Smuzhiyun 			adapter->eeprom_wol = 0;
1166*4882a593Smuzhiyun 		break;
1167*4882a593Smuzhiyun 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1168*4882a593Smuzhiyun 		/* if quad port adapter, disable WoL on all but port A */
1169*4882a593Smuzhiyun 		if (global_quad_port_a != 0)
1170*4882a593Smuzhiyun 			adapter->eeprom_wol = 0;
1171*4882a593Smuzhiyun 		else
1172*4882a593Smuzhiyun 			adapter->quad_port_a = true;
1173*4882a593Smuzhiyun 		/* Reset for multiple quad port adapters */
1174*4882a593Smuzhiyun 		if (++global_quad_port_a == 4)
1175*4882a593Smuzhiyun 			global_quad_port_a = 0;
1176*4882a593Smuzhiyun 		break;
1177*4882a593Smuzhiyun 	}
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	/* initialize the wol settings based on the eeprom settings */
1180*4882a593Smuzhiyun 	adapter->wol = adapter->eeprom_wol;
1181*4882a593Smuzhiyun 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	/* Auto detect PHY address */
1184*4882a593Smuzhiyun 	if (hw->mac_type == e1000_ce4100) {
1185*4882a593Smuzhiyun 		for (i = 0; i < 32; i++) {
1186*4882a593Smuzhiyun 			hw->phy_addr = i;
1187*4882a593Smuzhiyun 			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 			if (tmp != 0 && tmp != 0xFF)
1190*4882a593Smuzhiyun 				break;
1191*4882a593Smuzhiyun 		}
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 		if (i >= 32)
1194*4882a593Smuzhiyun 			goto err_eeprom;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	/* reset the hardware with the new settings */
1198*4882a593Smuzhiyun 	e1000_reset(adapter);
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	strcpy(netdev->name, "eth%d");
1201*4882a593Smuzhiyun 	err = register_netdev(netdev);
1202*4882a593Smuzhiyun 	if (err)
1203*4882a593Smuzhiyun 		goto err_register;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	e1000_vlan_filter_on_off(adapter, false);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	/* print bus type/speed/width info */
1208*4882a593Smuzhiyun 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1209*4882a593Smuzhiyun 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1210*4882a593Smuzhiyun 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1211*4882a593Smuzhiyun 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1212*4882a593Smuzhiyun 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1213*4882a593Smuzhiyun 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1214*4882a593Smuzhiyun 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1215*4882a593Smuzhiyun 	       netdev->dev_addr);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	/* carrier off reporting is important to ethtool even BEFORE open */
1218*4882a593Smuzhiyun 	netif_carrier_off(netdev);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	cards_found++;
1223*4882a593Smuzhiyun 	return 0;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun err_register:
1226*4882a593Smuzhiyun err_eeprom:
1227*4882a593Smuzhiyun 	e1000_phy_hw_reset(hw);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	if (hw->flash_address)
1230*4882a593Smuzhiyun 		iounmap(hw->flash_address);
1231*4882a593Smuzhiyun 	kfree(adapter->tx_ring);
1232*4882a593Smuzhiyun 	kfree(adapter->rx_ring);
1233*4882a593Smuzhiyun err_dma:
1234*4882a593Smuzhiyun err_sw_init:
1235*4882a593Smuzhiyun err_mdio_ioremap:
1236*4882a593Smuzhiyun 	iounmap(hw->ce4100_gbe_mdio_base_virt);
1237*4882a593Smuzhiyun 	iounmap(hw->hw_addr);
1238*4882a593Smuzhiyun err_ioremap:
1239*4882a593Smuzhiyun 	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1240*4882a593Smuzhiyun 	free_netdev(netdev);
1241*4882a593Smuzhiyun err_alloc_etherdev:
1242*4882a593Smuzhiyun 	pci_release_selected_regions(pdev, bars);
1243*4882a593Smuzhiyun err_pci_reg:
1244*4882a593Smuzhiyun 	if (!adapter || disable_dev)
1245*4882a593Smuzhiyun 		pci_disable_device(pdev);
1246*4882a593Smuzhiyun 	return err;
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun /**
1250*4882a593Smuzhiyun  * e1000_remove - Device Removal Routine
1251*4882a593Smuzhiyun  * @pdev: PCI device information struct
1252*4882a593Smuzhiyun  *
1253*4882a593Smuzhiyun  * e1000_remove is called by the PCI subsystem to alert the driver
1254*4882a593Smuzhiyun  * that it should release a PCI device. That could be caused by a
1255*4882a593Smuzhiyun  * Hot-Plug event, or because the driver is going to be removed from
1256*4882a593Smuzhiyun  * memory.
1257*4882a593Smuzhiyun  **/
1258*4882a593Smuzhiyun static void e1000_remove(struct pci_dev *pdev)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
1261*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1262*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1263*4882a593Smuzhiyun 	bool disable_dev;
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	e1000_down_and_stop(adapter);
1266*4882a593Smuzhiyun 	e1000_release_manageability(adapter);
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	unregister_netdev(netdev);
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	e1000_phy_hw_reset(hw);
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	kfree(adapter->tx_ring);
1273*4882a593Smuzhiyun 	kfree(adapter->rx_ring);
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	if (hw->mac_type == e1000_ce4100)
1276*4882a593Smuzhiyun 		iounmap(hw->ce4100_gbe_mdio_base_virt);
1277*4882a593Smuzhiyun 	iounmap(hw->hw_addr);
1278*4882a593Smuzhiyun 	if (hw->flash_address)
1279*4882a593Smuzhiyun 		iounmap(hw->flash_address);
1280*4882a593Smuzhiyun 	pci_release_selected_regions(pdev, adapter->bars);
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1283*4882a593Smuzhiyun 	free_netdev(netdev);
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	if (disable_dev)
1286*4882a593Smuzhiyun 		pci_disable_device(pdev);
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun /**
1290*4882a593Smuzhiyun  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1291*4882a593Smuzhiyun  * @adapter: board private structure to initialize
1292*4882a593Smuzhiyun  *
1293*4882a593Smuzhiyun  * e1000_sw_init initializes the Adapter private data structure.
1294*4882a593Smuzhiyun  * e1000_init_hw_struct MUST be called before this function
1295*4882a593Smuzhiyun  **/
1296*4882a593Smuzhiyun static int e1000_sw_init(struct e1000_adapter *adapter)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	adapter->num_tx_queues = 1;
1301*4882a593Smuzhiyun 	adapter->num_rx_queues = 1;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	if (e1000_alloc_queues(adapter)) {
1304*4882a593Smuzhiyun 		e_err(probe, "Unable to allocate memory for queues\n");
1305*4882a593Smuzhiyun 		return -ENOMEM;
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	/* Explicitly disable IRQ since the NIC can be in any state. */
1309*4882a593Smuzhiyun 	e1000_irq_disable(adapter);
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	spin_lock_init(&adapter->stats_lock);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	set_bit(__E1000_DOWN, &adapter->flags);
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	return 0;
1316*4882a593Smuzhiyun }
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun /**
1319*4882a593Smuzhiyun  * e1000_alloc_queues - Allocate memory for all rings
1320*4882a593Smuzhiyun  * @adapter: board private structure to initialize
1321*4882a593Smuzhiyun  *
1322*4882a593Smuzhiyun  * We allocate one ring per queue at run-time since we don't know the
1323*4882a593Smuzhiyun  * number of queues at compile-time.
1324*4882a593Smuzhiyun  **/
1325*4882a593Smuzhiyun static int e1000_alloc_queues(struct e1000_adapter *adapter)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1328*4882a593Smuzhiyun 				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1329*4882a593Smuzhiyun 	if (!adapter->tx_ring)
1330*4882a593Smuzhiyun 		return -ENOMEM;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1333*4882a593Smuzhiyun 				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1334*4882a593Smuzhiyun 	if (!adapter->rx_ring) {
1335*4882a593Smuzhiyun 		kfree(adapter->tx_ring);
1336*4882a593Smuzhiyun 		return -ENOMEM;
1337*4882a593Smuzhiyun 	}
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	return E1000_SUCCESS;
1340*4882a593Smuzhiyun }
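/* Note: although the allocation above is written per queue, this driver only
 * ever configures a single Tx and a single Rx queue (num_tx_queues and
 * num_rx_queues are hard-coded to 1 in e1000_sw_init()), so each kcalloc()
 * in practice allocates exactly one ring.
 */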
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun /**
1343*4882a593Smuzhiyun  * e1000_open - Called when a network interface is made active
1344*4882a593Smuzhiyun  * @netdev: network interface device structure
1345*4882a593Smuzhiyun  *
1346*4882a593Smuzhiyun  * Returns 0 on success, negative value on failure
1347*4882a593Smuzhiyun  *
1348*4882a593Smuzhiyun  * The open entry point is called when a network interface is made
1349*4882a593Smuzhiyun  * active by the system (IFF_UP).  At this point all resources needed
1350*4882a593Smuzhiyun  * for transmit and receive operations are allocated, the interrupt
1351*4882a593Smuzhiyun  * handler is registered with the OS, the watchdog task is started,
1352*4882a593Smuzhiyun  * and the stack is notified that the interface is ready.
1353*4882a593Smuzhiyun  **/
1354*4882a593Smuzhiyun int e1000_open(struct net_device *netdev)
1355*4882a593Smuzhiyun {
1356*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1357*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1358*4882a593Smuzhiyun 	int err;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	/* disallow open during test */
1361*4882a593Smuzhiyun 	if (test_bit(__E1000_TESTING, &adapter->flags))
1362*4882a593Smuzhiyun 		return -EBUSY;
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	netif_carrier_off(netdev);
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 	/* allocate transmit descriptors */
1367*4882a593Smuzhiyun 	err = e1000_setup_all_tx_resources(adapter);
1368*4882a593Smuzhiyun 	if (err)
1369*4882a593Smuzhiyun 		goto err_setup_tx;
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	/* allocate receive descriptors */
1372*4882a593Smuzhiyun 	err = e1000_setup_all_rx_resources(adapter);
1373*4882a593Smuzhiyun 	if (err)
1374*4882a593Smuzhiyun 		goto err_setup_rx;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	e1000_power_up_phy(adapter);
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1379*4882a593Smuzhiyun 	if ((hw->mng_cookie.status &
1380*4882a593Smuzhiyun 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1381*4882a593Smuzhiyun 		e1000_update_mng_vlan(adapter);
1382*4882a593Smuzhiyun 	}
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	/* before we allocate an interrupt, we must be ready to handle it.
1385*4882a593Smuzhiyun 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1386*4882a593Smuzhiyun 	 * as soon as we call pci_request_irq, so we have to setup our
1387*4882a593Smuzhiyun 	 * clean_rx handler before we do so.
1388*4882a593Smuzhiyun 	 */
1389*4882a593Smuzhiyun 	e1000_configure(adapter);
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	err = e1000_request_irq(adapter);
1392*4882a593Smuzhiyun 	if (err)
1393*4882a593Smuzhiyun 		goto err_req_irq;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	/* From here on the code is the same as e1000_up() */
1396*4882a593Smuzhiyun 	clear_bit(__E1000_DOWN, &adapter->flags);
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	napi_enable(&adapter->napi);
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	e1000_irq_enable(adapter);
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	netif_start_queue(netdev);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	/* fire a link status change interrupt to start the watchdog */
1405*4882a593Smuzhiyun 	ew32(ICS, E1000_ICS_LSC);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	return E1000_SUCCESS;
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun err_req_irq:
1410*4882a593Smuzhiyun 	e1000_power_down_phy(adapter);
1411*4882a593Smuzhiyun 	e1000_free_all_rx_resources(adapter);
1412*4882a593Smuzhiyun err_setup_rx:
1413*4882a593Smuzhiyun 	e1000_free_all_tx_resources(adapter);
1414*4882a593Smuzhiyun err_setup_tx:
1415*4882a593Smuzhiyun 	e1000_reset(adapter);
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	return err;
1418*4882a593Smuzhiyun }
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun /**
1421*4882a593Smuzhiyun  * e1000_close - Disables a network interface
1422*4882a593Smuzhiyun  * @netdev: network interface device structure
1423*4882a593Smuzhiyun  *
1424*4882a593Smuzhiyun  * Returns 0, this is not allowed to fail
1425*4882a593Smuzhiyun  *
1426*4882a593Smuzhiyun  * The close entry point is called when an interface is de-activated
1427*4882a593Smuzhiyun  * by the OS.  The hardware is still under the drivers control, but
1428*4882a593Smuzhiyun  * needs to be disabled.  A global MAC reset is issued to stop the
1429*4882a593Smuzhiyun  * hardware, and all transmit and receive resources are freed.
1430*4882a593Smuzhiyun  **/
1431*4882a593Smuzhiyun int e1000_close(struct net_device *netdev)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1434*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1435*4882a593Smuzhiyun 	int count = E1000_CHECK_RESET_COUNT;
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1438*4882a593Smuzhiyun 		usleep_range(10000, 20000);
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	WARN_ON(count < 0);
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	/* signal that we're down so that the reset task will no longer run */
1443*4882a593Smuzhiyun 	set_bit(__E1000_DOWN, &adapter->flags);
1444*4882a593Smuzhiyun 	clear_bit(__E1000_RESETTING, &adapter->flags);
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	e1000_down(adapter);
1447*4882a593Smuzhiyun 	e1000_power_down_phy(adapter);
1448*4882a593Smuzhiyun 	e1000_free_irq(adapter);
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	e1000_free_all_tx_resources(adapter);
1451*4882a593Smuzhiyun 	e1000_free_all_rx_resources(adapter);
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 	/* kill manageability vlan ID if supported, but not if a vlan with
1454*4882a593Smuzhiyun 	 * the same ID is registered on the host OS (let 8021q kill it)
1455*4882a593Smuzhiyun 	 */
1456*4882a593Smuzhiyun 	if ((hw->mng_cookie.status &
1457*4882a593Smuzhiyun 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1458*4882a593Smuzhiyun 	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1459*4882a593Smuzhiyun 		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1460*4882a593Smuzhiyun 				       adapter->mng_vlan_id);
1461*4882a593Smuzhiyun 	}
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	return 0;
1464*4882a593Smuzhiyun }
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun /**
1467*4882a593Smuzhiyun  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1468*4882a593Smuzhiyun  * @adapter: address of board private structure
1469*4882a593Smuzhiyun  * @start: address of beginning of memory
1470*4882a593Smuzhiyun  * @len: length of memory
1471*4882a593Smuzhiyun  **/
1472*4882a593Smuzhiyun static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1473*4882a593Smuzhiyun 				  unsigned long len)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1476*4882a593Smuzhiyun 	unsigned long begin = (unsigned long)start;
1477*4882a593Smuzhiyun 	unsigned long end = begin + len;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	/* First rev 82545 and 82546 need to not allow any memory
1480*4882a593Smuzhiyun 	 * write location to cross 64k boundary due to errata 23
1481*4882a593Smuzhiyun 	 */
1482*4882a593Smuzhiyun 	if (hw->mac_type == e1000_82545 ||
1483*4882a593Smuzhiyun 	    hw->mac_type == e1000_ce4100 ||
1484*4882a593Smuzhiyun 	    hw->mac_type == e1000_82546) {
1485*4882a593Smuzhiyun 		return ((begin ^ (end - 1)) >> 16) == 0;
1486*4882a593Smuzhiyun 	}
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	return true;
1489*4882a593Smuzhiyun }
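/* Worked example of the check above: with begin = 0x0f000 and len = 0x2000,
 * end - 1 = 0x10fff, begin ^ (end - 1) = 0x1ffff and (0x1ffff >> 16) != 0,
 * so the buffer straddles a 64 KiB boundary and the callers below retry the
 * descriptor allocation.  With begin = 0x20000 and the same length the XOR
 * is 0x01fff, the shift yields 0, and the buffer is accepted.
 */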
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun /**
1492*4882a593Smuzhiyun  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1493*4882a593Smuzhiyun  * @adapter: board private structure
1494*4882a593Smuzhiyun  * @txdr:    tx descriptor ring (for a specific queue) to setup
1495*4882a593Smuzhiyun  *
1496*4882a593Smuzhiyun  * Return 0 on success, negative on failure
1497*4882a593Smuzhiyun  **/
1498*4882a593Smuzhiyun static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1499*4882a593Smuzhiyun 				    struct e1000_tx_ring *txdr)
1500*4882a593Smuzhiyun {
1501*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
1502*4882a593Smuzhiyun 	int size;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1505*4882a593Smuzhiyun 	txdr->buffer_info = vzalloc(size);
1506*4882a593Smuzhiyun 	if (!txdr->buffer_info)
1507*4882a593Smuzhiyun 		return -ENOMEM;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	/* round up to nearest 4K */
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1512*4882a593Smuzhiyun 	txdr->size = ALIGN(txdr->size, 4096);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1515*4882a593Smuzhiyun 					GFP_KERNEL);
1516*4882a593Smuzhiyun 	if (!txdr->desc) {
1517*4882a593Smuzhiyun setup_tx_desc_die:
1518*4882a593Smuzhiyun 		vfree(txdr->buffer_info);
1519*4882a593Smuzhiyun 		return -ENOMEM;
1520*4882a593Smuzhiyun 	}
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	/* Fix for errata 23, can't cross 64kB boundary */
1523*4882a593Smuzhiyun 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1524*4882a593Smuzhiyun 		void *olddesc = txdr->desc;
1525*4882a593Smuzhiyun 		dma_addr_t olddma = txdr->dma;
1526*4882a593Smuzhiyun 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1527*4882a593Smuzhiyun 		      txdr->size, txdr->desc);
1528*4882a593Smuzhiyun 		/* Try again, without freeing the previous */
1529*4882a593Smuzhiyun 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1530*4882a593Smuzhiyun 						&txdr->dma, GFP_KERNEL);
1531*4882a593Smuzhiyun 		/* Failed allocation, critical failure */
1532*4882a593Smuzhiyun 		if (!txdr->desc) {
1533*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1534*4882a593Smuzhiyun 					  olddma);
1535*4882a593Smuzhiyun 			goto setup_tx_desc_die;
1536*4882a593Smuzhiyun 		}
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1539*4882a593Smuzhiyun 			/* give up */
1540*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1541*4882a593Smuzhiyun 					  txdr->dma);
1542*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1543*4882a593Smuzhiyun 					  olddma);
1544*4882a593Smuzhiyun 			e_err(probe, "Unable to allocate aligned memory "
1545*4882a593Smuzhiyun 			      "for the transmit descriptor ring\n");
1546*4882a593Smuzhiyun 			vfree(txdr->buffer_info);
1547*4882a593Smuzhiyun 			return -ENOMEM;
1548*4882a593Smuzhiyun 		} else {
1549*4882a593Smuzhiyun 			/* Free old allocation, new allocation was successful */
1550*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1551*4882a593Smuzhiyun 					  olddma);
1552*4882a593Smuzhiyun 		}
1553*4882a593Smuzhiyun 	}
1554*4882a593Smuzhiyun 	memset(txdr->desc, 0, txdr->size);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	txdr->next_to_use = 0;
1557*4882a593Smuzhiyun 	txdr->next_to_clean = 0;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	return 0;
1560*4882a593Smuzhiyun }
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun /**
1563*4882a593Smuzhiyun  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1564*4882a593Smuzhiyun  * 				  (Descriptors) for all queues
1565*4882a593Smuzhiyun  * @adapter: board private structure
1566*4882a593Smuzhiyun  *
1567*4882a593Smuzhiyun  * Return 0 on success, negative on failure
1568*4882a593Smuzhiyun  **/
1569*4882a593Smuzhiyun int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1570*4882a593Smuzhiyun {
1571*4882a593Smuzhiyun 	int i, err = 0;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	for (i = 0; i < adapter->num_tx_queues; i++) {
1574*4882a593Smuzhiyun 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1575*4882a593Smuzhiyun 		if (err) {
1576*4882a593Smuzhiyun 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1577*4882a593Smuzhiyun 			for (i-- ; i >= 0; i--)
1578*4882a593Smuzhiyun 				e1000_free_tx_resources(adapter,
1579*4882a593Smuzhiyun 							&adapter->tx_ring[i]);
1580*4882a593Smuzhiyun 			break;
1581*4882a593Smuzhiyun 		}
1582*4882a593Smuzhiyun 	}
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	return err;
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun /**
1588*4882a593Smuzhiyun  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1589*4882a593Smuzhiyun  * @adapter: board private structure
1590*4882a593Smuzhiyun  *
1591*4882a593Smuzhiyun  * Configure the Tx unit of the MAC after a reset.
1592*4882a593Smuzhiyun  **/
1593*4882a593Smuzhiyun static void e1000_configure_tx(struct e1000_adapter *adapter)
1594*4882a593Smuzhiyun {
1595*4882a593Smuzhiyun 	u64 tdba;
1596*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1597*4882a593Smuzhiyun 	u32 tdlen, tctl, tipg;
1598*4882a593Smuzhiyun 	u32 ipgr1, ipgr2;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	/* Setup the HW Tx Head and Tail descriptor pointers */
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	switch (adapter->num_tx_queues) {
1603*4882a593Smuzhiyun 	case 1:
1604*4882a593Smuzhiyun 	default:
1605*4882a593Smuzhiyun 		tdba = adapter->tx_ring[0].dma;
1606*4882a593Smuzhiyun 		tdlen = adapter->tx_ring[0].count *
1607*4882a593Smuzhiyun 			sizeof(struct e1000_tx_desc);
1608*4882a593Smuzhiyun 		ew32(TDLEN, tdlen);
1609*4882a593Smuzhiyun 		ew32(TDBAH, (tdba >> 32));
1610*4882a593Smuzhiyun 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1611*4882a593Smuzhiyun 		ew32(TDT, 0);
1612*4882a593Smuzhiyun 		ew32(TDH, 0);
1613*4882a593Smuzhiyun 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1614*4882a593Smuzhiyun 					   E1000_TDH : E1000_82542_TDH);
1615*4882a593Smuzhiyun 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1616*4882a593Smuzhiyun 					   E1000_TDT : E1000_82542_TDT);
1617*4882a593Smuzhiyun 		break;
1618*4882a593Smuzhiyun 	}
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	/* Set the default values for the Tx Inter Packet Gap timer */
1621*4882a593Smuzhiyun 	if ((hw->media_type == e1000_media_type_fiber ||
1622*4882a593Smuzhiyun 	     hw->media_type == e1000_media_type_internal_serdes))
1623*4882a593Smuzhiyun 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1624*4882a593Smuzhiyun 	else
1625*4882a593Smuzhiyun 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	switch (hw->mac_type) {
1628*4882a593Smuzhiyun 	case e1000_82542_rev2_0:
1629*4882a593Smuzhiyun 	case e1000_82542_rev2_1:
1630*4882a593Smuzhiyun 		tipg = DEFAULT_82542_TIPG_IPGT;
1631*4882a593Smuzhiyun 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1632*4882a593Smuzhiyun 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1633*4882a593Smuzhiyun 		break;
1634*4882a593Smuzhiyun 	default:
1635*4882a593Smuzhiyun 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1636*4882a593Smuzhiyun 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1637*4882a593Smuzhiyun 		break;
1638*4882a593Smuzhiyun 	}
1639*4882a593Smuzhiyun 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1640*4882a593Smuzhiyun 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1641*4882a593Smuzhiyun 	ew32(TIPG, tipg);
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	/* Set the Tx Interrupt Delay register */
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	ew32(TIDV, adapter->tx_int_delay);
1646*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82540)
1647*4882a593Smuzhiyun 		ew32(TADV, adapter->tx_abs_int_delay);
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	/* Program the Transmit Control Register */
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	tctl = er32(TCTL);
1652*4882a593Smuzhiyun 	tctl &= ~E1000_TCTL_CT;
1653*4882a593Smuzhiyun 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1654*4882a593Smuzhiyun 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	e1000_config_collision_dist(hw);
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	/* Setup Transmit Descriptor Settings for eop descriptor */
1659*4882a593Smuzhiyun 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	/* only set IDE if we are delaying interrupts using the timers */
1662*4882a593Smuzhiyun 	if (adapter->tx_int_delay)
1663*4882a593Smuzhiyun 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	if (hw->mac_type < e1000_82543)
1666*4882a593Smuzhiyun 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1667*4882a593Smuzhiyun 	else
1668*4882a593Smuzhiyun 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	/* Cache if we're 82544 running in PCI-X because we'll
1671*4882a593Smuzhiyun 	 * need this to apply a workaround later in the send path.
1672*4882a593Smuzhiyun 	 */
1673*4882a593Smuzhiyun 	if (hw->mac_type == e1000_82544 &&
1674*4882a593Smuzhiyun 	    hw->bus_type == e1000_bus_type_pcix)
1675*4882a593Smuzhiyun 		adapter->pcix_82544 = true;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	ew32(TCTL, tctl);
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun /**
1682*4882a593Smuzhiyun  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1683*4882a593Smuzhiyun  * @adapter: board private structure
1684*4882a593Smuzhiyun  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1685*4882a593Smuzhiyun  *
1686*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
1687*4882a593Smuzhiyun  **/
1688*4882a593Smuzhiyun static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1689*4882a593Smuzhiyun 				    struct e1000_rx_ring *rxdr)
1690*4882a593Smuzhiyun {
1691*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
1692*4882a593Smuzhiyun 	int size, desc_len;
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1695*4882a593Smuzhiyun 	rxdr->buffer_info = vzalloc(size);
1696*4882a593Smuzhiyun 	if (!rxdr->buffer_info)
1697*4882a593Smuzhiyun 		return -ENOMEM;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	desc_len = sizeof(struct e1000_rx_desc);
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	/* Round up to nearest 4K */
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	rxdr->size = rxdr->count * desc_len;
1704*4882a593Smuzhiyun 	rxdr->size = ALIGN(rxdr->size, 4096);
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1707*4882a593Smuzhiyun 					GFP_KERNEL);
1708*4882a593Smuzhiyun 	if (!rxdr->desc) {
1709*4882a593Smuzhiyun setup_rx_desc_die:
1710*4882a593Smuzhiyun 		vfree(rxdr->buffer_info);
1711*4882a593Smuzhiyun 		return -ENOMEM;
1712*4882a593Smuzhiyun 	}
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun 	/* Fix for errata 23, can't cross 64kB boundary */
1715*4882a593Smuzhiyun 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1716*4882a593Smuzhiyun 		void *olddesc = rxdr->desc;
1717*4882a593Smuzhiyun 		dma_addr_t olddma = rxdr->dma;
1718*4882a593Smuzhiyun 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1719*4882a593Smuzhiyun 		      rxdr->size, rxdr->desc);
1720*4882a593Smuzhiyun 		/* Try again, without freeing the previous */
1721*4882a593Smuzhiyun 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1722*4882a593Smuzhiyun 						&rxdr->dma, GFP_KERNEL);
1723*4882a593Smuzhiyun 		/* Failed allocation, critical failure */
1724*4882a593Smuzhiyun 		if (!rxdr->desc) {
1725*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1726*4882a593Smuzhiyun 					  olddma);
1727*4882a593Smuzhiyun 			goto setup_rx_desc_die;
1728*4882a593Smuzhiyun 		}
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1731*4882a593Smuzhiyun 			/* give up */
1732*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1733*4882a593Smuzhiyun 					  rxdr->dma);
1734*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1735*4882a593Smuzhiyun 					  olddma);
1736*4882a593Smuzhiyun 			e_err(probe, "Unable to allocate aligned memory for "
1737*4882a593Smuzhiyun 			      "the Rx descriptor ring\n");
1738*4882a593Smuzhiyun 			goto setup_rx_desc_die;
1739*4882a593Smuzhiyun 		} else {
1740*4882a593Smuzhiyun 			/* Free old allocation, new allocation was successful */
1741*4882a593Smuzhiyun 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1742*4882a593Smuzhiyun 					  olddma);
1743*4882a593Smuzhiyun 		}
1744*4882a593Smuzhiyun 	}
1745*4882a593Smuzhiyun 	memset(rxdr->desc, 0, rxdr->size);
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	rxdr->next_to_clean = 0;
1748*4882a593Smuzhiyun 	rxdr->next_to_use = 0;
1749*4882a593Smuzhiyun 	rxdr->rx_skb_top = NULL;
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	return 0;
1752*4882a593Smuzhiyun }
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun /**
1755*4882a593Smuzhiyun  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1756*4882a593Smuzhiyun  * 				  (Descriptors) for all queues
1757*4882a593Smuzhiyun  * @adapter: board private structure
1758*4882a593Smuzhiyun  *
1759*4882a593Smuzhiyun  * Return 0 on success, negative on failure
1760*4882a593Smuzhiyun  **/
1761*4882a593Smuzhiyun int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1762*4882a593Smuzhiyun {
1763*4882a593Smuzhiyun 	int i, err = 0;
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	for (i = 0; i < adapter->num_rx_queues; i++) {
1766*4882a593Smuzhiyun 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767*4882a593Smuzhiyun 		if (err) {
1768*4882a593Smuzhiyun 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1769*4882a593Smuzhiyun 			for (i-- ; i >= 0; i--)
1770*4882a593Smuzhiyun 				e1000_free_rx_resources(adapter,
1771*4882a593Smuzhiyun 							&adapter->rx_ring[i]);
1772*4882a593Smuzhiyun 			break;
1773*4882a593Smuzhiyun 		}
1774*4882a593Smuzhiyun 	}
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	return err;
1777*4882a593Smuzhiyun }
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun /**
1780*4882a593Smuzhiyun  * e1000_setup_rctl - configure the receive control registers
1781*4882a593Smuzhiyun  * @adapter: Board private structure
1782*4882a593Smuzhiyun  **/
1783*4882a593Smuzhiyun static void e1000_setup_rctl(struct e1000_adapter *adapter)
1784*4882a593Smuzhiyun {
1785*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1786*4882a593Smuzhiyun 	u32 rctl;
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 	rctl = er32(RCTL);
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1793*4882a593Smuzhiyun 		E1000_RCTL_RDMTS_HALF |
1794*4882a593Smuzhiyun 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	if (hw->tbi_compatibility_on == 1)
1797*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SBP;
1798*4882a593Smuzhiyun 	else
1799*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_SBP;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1802*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_LPE;
1803*4882a593Smuzhiyun 	else
1804*4882a593Smuzhiyun 		rctl |= E1000_RCTL_LPE;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	/* Setup buffer sizes */
1807*4882a593Smuzhiyun 	rctl &= ~E1000_RCTL_SZ_4096;
1808*4882a593Smuzhiyun 	rctl |= E1000_RCTL_BSEX;
1809*4882a593Smuzhiyun 	switch (adapter->rx_buffer_len) {
1810*4882a593Smuzhiyun 	case E1000_RXBUFFER_2048:
1811*4882a593Smuzhiyun 	default:
1812*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_2048;
1813*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_BSEX;
1814*4882a593Smuzhiyun 		break;
1815*4882a593Smuzhiyun 	case E1000_RXBUFFER_4096:
1816*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_4096;
1817*4882a593Smuzhiyun 		break;
1818*4882a593Smuzhiyun 	case E1000_RXBUFFER_8192:
1819*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_8192;
1820*4882a593Smuzhiyun 		break;
1821*4882a593Smuzhiyun 	case E1000_RXBUFFER_16384:
1822*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_16384;
1823*4882a593Smuzhiyun 		break;
1824*4882a593Smuzhiyun 	}
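	/* The 4096/8192/16384 cases above reuse the small-buffer BSIZE bit
	 * patterns and (per the 8254x datasheet, to the best of our reading)
	 * are only valid together with RCTL.BSEX, which is why BSEX is set up
	 * front and cleared again only for the default 2048-byte buffers.
	 */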
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	/* This is useful for sniffing bad packets. */
1827*4882a593Smuzhiyun 	if (adapter->netdev->features & NETIF_F_RXALL) {
1828*4882a593Smuzhiyun 		/* UPE and MPE will be handled by normal PROMISC logic
1829*4882a593Smuzhiyun 		 * in e1000_set_rx_mode
1830*4882a593Smuzhiyun 		 */
1831*4882a593Smuzhiyun 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1832*4882a593Smuzhiyun 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1833*4882a593Smuzhiyun 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1836*4882a593Smuzhiyun 			  E1000_RCTL_DPF | /* Allow filtered pause */
1837*4882a593Smuzhiyun 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1838*4882a593Smuzhiyun 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1839*4882a593Smuzhiyun 		 * and that breaks VLANs.
1840*4882a593Smuzhiyun 		 */
1841*4882a593Smuzhiyun 	}
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 	ew32(RCTL, rctl);
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun /**
1847*4882a593Smuzhiyun  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1848*4882a593Smuzhiyun  * @adapter: board private structure
1849*4882a593Smuzhiyun  *
1850*4882a593Smuzhiyun  * Configure the Rx unit of the MAC after a reset.
1851*4882a593Smuzhiyun  **/
1852*4882a593Smuzhiyun static void e1000_configure_rx(struct e1000_adapter *adapter)
1853*4882a593Smuzhiyun {
1854*4882a593Smuzhiyun 	u64 rdba;
1855*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1856*4882a593Smuzhiyun 	u32 rdlen, rctl, rxcsum;
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1859*4882a593Smuzhiyun 		rdlen = adapter->rx_ring[0].count *
1860*4882a593Smuzhiyun 			sizeof(struct e1000_rx_desc);
1861*4882a593Smuzhiyun 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1862*4882a593Smuzhiyun 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1863*4882a593Smuzhiyun 	} else {
1864*4882a593Smuzhiyun 		rdlen = adapter->rx_ring[0].count *
1865*4882a593Smuzhiyun 			sizeof(struct e1000_rx_desc);
1866*4882a593Smuzhiyun 		adapter->clean_rx = e1000_clean_rx_irq;
1867*4882a593Smuzhiyun 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1868*4882a593Smuzhiyun 	}
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	/* disable receives while setting up the descriptors */
1871*4882a593Smuzhiyun 	rctl = er32(RCTL);
1872*4882a593Smuzhiyun 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	/* set the Receive Delay Timer Register */
1875*4882a593Smuzhiyun 	ew32(RDTR, adapter->rx_int_delay);
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82540) {
1878*4882a593Smuzhiyun 		ew32(RADV, adapter->rx_abs_int_delay);
1879*4882a593Smuzhiyun 		if (adapter->itr_setting != 0)
1880*4882a593Smuzhiyun 			ew32(ITR, 1000000000 / (adapter->itr * 256));
1881*4882a593Smuzhiyun 	}
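	/* The ITR throttling interval is programmed in 256 ns units, so
	 * writing 1000000000 / (itr * 256) caps this port at roughly
	 * adapter->itr interrupts per second (e.g. itr = 8000 programs a
	 * minimum gap of 125 us between interrupts).
	 */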
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1884*4882a593Smuzhiyun 	 * the Base and Length of the Rx Descriptor Ring
1885*4882a593Smuzhiyun 	 */
1886*4882a593Smuzhiyun 	switch (adapter->num_rx_queues) {
1887*4882a593Smuzhiyun 	case 1:
1888*4882a593Smuzhiyun 	default:
1889*4882a593Smuzhiyun 		rdba = adapter->rx_ring[0].dma;
1890*4882a593Smuzhiyun 		ew32(RDLEN, rdlen);
1891*4882a593Smuzhiyun 		ew32(RDBAH, (rdba >> 32));
1892*4882a593Smuzhiyun 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1893*4882a593Smuzhiyun 		ew32(RDT, 0);
1894*4882a593Smuzhiyun 		ew32(RDH, 0);
1895*4882a593Smuzhiyun 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1896*4882a593Smuzhiyun 					   E1000_RDH : E1000_82542_RDH);
1897*4882a593Smuzhiyun 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1898*4882a593Smuzhiyun 					   E1000_RDT : E1000_82542_RDT);
1899*4882a593Smuzhiyun 		break;
1900*4882a593Smuzhiyun 	}
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1903*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82543) {
1904*4882a593Smuzhiyun 		rxcsum = er32(RXCSUM);
1905*4882a593Smuzhiyun 		if (adapter->rx_csum)
1906*4882a593Smuzhiyun 			rxcsum |= E1000_RXCSUM_TUOFL;
1907*4882a593Smuzhiyun 		else
1908*4882a593Smuzhiyun 			/* don't need to clear IPPCSE as it defaults to 0 */
1909*4882a593Smuzhiyun 			rxcsum &= ~E1000_RXCSUM_TUOFL;
1910*4882a593Smuzhiyun 		ew32(RXCSUM, rxcsum);
1911*4882a593Smuzhiyun 	}
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	/* Enable Receives */
1914*4882a593Smuzhiyun 	ew32(RCTL, rctl | E1000_RCTL_EN);
1915*4882a593Smuzhiyun }
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun /**
1918*4882a593Smuzhiyun  * e1000_free_tx_resources - Free Tx Resources per Queue
1919*4882a593Smuzhiyun  * @adapter: board private structure
1920*4882a593Smuzhiyun  * @tx_ring: Tx descriptor ring for a specific queue
1921*4882a593Smuzhiyun  *
1922*4882a593Smuzhiyun  * Free all transmit software resources
1923*4882a593Smuzhiyun  **/
1924*4882a593Smuzhiyun static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925*4882a593Smuzhiyun 				    struct e1000_tx_ring *tx_ring)
1926*4882a593Smuzhiyun {
1927*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 	e1000_clean_tx_ring(adapter, tx_ring);
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	vfree(tx_ring->buffer_info);
1932*4882a593Smuzhiyun 	tx_ring->buffer_info = NULL;
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935*4882a593Smuzhiyun 			  tx_ring->dma);
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	tx_ring->desc = NULL;
1938*4882a593Smuzhiyun }
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun /**
1941*4882a593Smuzhiyun  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1942*4882a593Smuzhiyun  * @adapter: board private structure
1943*4882a593Smuzhiyun  *
1944*4882a593Smuzhiyun  * Free all transmit software resources
1945*4882a593Smuzhiyun  **/
1946*4882a593Smuzhiyun void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1947*4882a593Smuzhiyun {
1948*4882a593Smuzhiyun 	int i;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	for (i = 0; i < adapter->num_tx_queues; i++)
1951*4882a593Smuzhiyun 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952*4882a593Smuzhiyun }
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun static void
1955*4882a593Smuzhiyun e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956*4882a593Smuzhiyun 				 struct e1000_tx_buffer *buffer_info)
1957*4882a593Smuzhiyun {
1958*4882a593Smuzhiyun 	if (buffer_info->dma) {
1959*4882a593Smuzhiyun 		if (buffer_info->mapped_as_page)
1960*4882a593Smuzhiyun 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1961*4882a593Smuzhiyun 				       buffer_info->length, DMA_TO_DEVICE);
1962*4882a593Smuzhiyun 		else
1963*4882a593Smuzhiyun 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1964*4882a593Smuzhiyun 					 buffer_info->length,
1965*4882a593Smuzhiyun 					 DMA_TO_DEVICE);
1966*4882a593Smuzhiyun 		buffer_info->dma = 0;
1967*4882a593Smuzhiyun 	}
1968*4882a593Smuzhiyun 	if (buffer_info->skb) {
1969*4882a593Smuzhiyun 		dev_kfree_skb_any(buffer_info->skb);
1970*4882a593Smuzhiyun 		buffer_info->skb = NULL;
1971*4882a593Smuzhiyun 	}
1972*4882a593Smuzhiyun 	buffer_info->time_stamp = 0;
1973*4882a593Smuzhiyun 	/* buffer_info must be completely set up in the transmit path */
1974*4882a593Smuzhiyun }
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun /**
1977*4882a593Smuzhiyun  * e1000_clean_tx_ring - Free Tx Buffers
1978*4882a593Smuzhiyun  * @adapter: board private structure
1979*4882a593Smuzhiyun  * @tx_ring: ring to be cleaned
1980*4882a593Smuzhiyun  **/
1981*4882a593Smuzhiyun static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1982*4882a593Smuzhiyun 				struct e1000_tx_ring *tx_ring)
1983*4882a593Smuzhiyun {
1984*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1985*4882a593Smuzhiyun 	struct e1000_tx_buffer *buffer_info;
1986*4882a593Smuzhiyun 	unsigned long size;
1987*4882a593Smuzhiyun 	unsigned int i;
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun 	/* Free all the Tx ring sk_buffs */
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	for (i = 0; i < tx_ring->count; i++) {
1992*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
1993*4882a593Smuzhiyun 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1994*4882a593Smuzhiyun 	}
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	netdev_reset_queue(adapter->netdev);
1997*4882a593Smuzhiyun 	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1998*4882a593Smuzhiyun 	memset(tx_ring->buffer_info, 0, size);
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	/* Zero out the descriptor ring */
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 	memset(tx_ring->desc, 0, tx_ring->size);
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	tx_ring->next_to_use = 0;
2005*4882a593Smuzhiyun 	tx_ring->next_to_clean = 0;
2006*4882a593Smuzhiyun 	tx_ring->last_tx_tso = false;
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	writel(0, hw->hw_addr + tx_ring->tdh);
2009*4882a593Smuzhiyun 	writel(0, hw->hw_addr + tx_ring->tdt);
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun /**
2013*4882a593Smuzhiyun  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2014*4882a593Smuzhiyun  * @adapter: board private structure
2015*4882a593Smuzhiyun  **/
2016*4882a593Smuzhiyun static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2017*4882a593Smuzhiyun {
2018*4882a593Smuzhiyun 	int i;
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 	for (i = 0; i < adapter->num_tx_queues; i++)
2021*4882a593Smuzhiyun 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2022*4882a593Smuzhiyun }
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun /**
2025*4882a593Smuzhiyun  * e1000_free_rx_resources - Free Rx Resources
2026*4882a593Smuzhiyun  * @adapter: board private structure
2027*4882a593Smuzhiyun  * @rx_ring: ring to clean the resources from
2028*4882a593Smuzhiyun  *
2029*4882a593Smuzhiyun  * Free all receive software resources
2030*4882a593Smuzhiyun  **/
2031*4882a593Smuzhiyun static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2032*4882a593Smuzhiyun 				    struct e1000_rx_ring *rx_ring)
2033*4882a593Smuzhiyun {
2034*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	e1000_clean_rx_ring(adapter, rx_ring);
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	vfree(rx_ring->buffer_info);
2039*4882a593Smuzhiyun 	rx_ring->buffer_info = NULL;
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2042*4882a593Smuzhiyun 			  rx_ring->dma);
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	rx_ring->desc = NULL;
2045*4882a593Smuzhiyun }
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun /**
2048*4882a593Smuzhiyun  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2049*4882a593Smuzhiyun  * @adapter: board private structure
2050*4882a593Smuzhiyun  *
2051*4882a593Smuzhiyun  * Free all receive software resources
2052*4882a593Smuzhiyun  **/
2053*4882a593Smuzhiyun void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2054*4882a593Smuzhiyun {
2055*4882a593Smuzhiyun 	int i;
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	for (i = 0; i < adapter->num_rx_queues; i++)
2058*4882a593Smuzhiyun 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2059*4882a593Smuzhiyun }
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2062*4882a593Smuzhiyun static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2063*4882a593Smuzhiyun {
2064*4882a593Smuzhiyun 	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2065*4882a593Smuzhiyun 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2066*4882a593Smuzhiyun }
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun static void *e1000_alloc_frag(const struct e1000_adapter *a)
2069*4882a593Smuzhiyun {
2070*4882a593Smuzhiyun 	unsigned int len = e1000_frag_len(a);
2071*4882a593Smuzhiyun 	u8 *data = netdev_alloc_frag(len);
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun 	if (likely(data))
2074*4882a593Smuzhiyun 		data += E1000_HEADROOM;
2075*4882a593Smuzhiyun 	return data;
2076*4882a593Smuzhiyun }
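/* Each fragment allocated above is laid out as
 *   [ E1000_HEADROOM | rx_buffer_len bytes of packet data | skb_shared_info ]
 * and e1000_alloc_frag() returns a pointer just past the headroom, which is
 * the address the receive path maps for DMA.  The reserved headroom and the
 * cache-aligned skb_shared_info tail presumably let the driver later wrap
 * the buffer in an sk_buff (build_skb() style) without copying the payload.
 */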
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun /**
2079*4882a593Smuzhiyun  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2080*4882a593Smuzhiyun  * @adapter: board private structure
2081*4882a593Smuzhiyun  * @rx_ring: ring to free buffers from
2082*4882a593Smuzhiyun  **/
2083*4882a593Smuzhiyun static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2084*4882a593Smuzhiyun 				struct e1000_rx_ring *rx_ring)
2085*4882a593Smuzhiyun {
2086*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2087*4882a593Smuzhiyun 	struct e1000_rx_buffer *buffer_info;
2088*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
2089*4882a593Smuzhiyun 	unsigned long size;
2090*4882a593Smuzhiyun 	unsigned int i;
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	/* Free all the Rx netfrags */
2093*4882a593Smuzhiyun 	for (i = 0; i < rx_ring->count; i++) {
2094*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
2095*4882a593Smuzhiyun 		if (adapter->clean_rx == e1000_clean_rx_irq) {
2096*4882a593Smuzhiyun 			if (buffer_info->dma)
2097*4882a593Smuzhiyun 				dma_unmap_single(&pdev->dev, buffer_info->dma,
2098*4882a593Smuzhiyun 						 adapter->rx_buffer_len,
2099*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
2100*4882a593Smuzhiyun 			if (buffer_info->rxbuf.data) {
2101*4882a593Smuzhiyun 				skb_free_frag(buffer_info->rxbuf.data);
2102*4882a593Smuzhiyun 				buffer_info->rxbuf.data = NULL;
2103*4882a593Smuzhiyun 			}
2104*4882a593Smuzhiyun 		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2105*4882a593Smuzhiyun 			if (buffer_info->dma)
2106*4882a593Smuzhiyun 				dma_unmap_page(&pdev->dev, buffer_info->dma,
2107*4882a593Smuzhiyun 					       adapter->rx_buffer_len,
2108*4882a593Smuzhiyun 					       DMA_FROM_DEVICE);
2109*4882a593Smuzhiyun 			if (buffer_info->rxbuf.page) {
2110*4882a593Smuzhiyun 				put_page(buffer_info->rxbuf.page);
2111*4882a593Smuzhiyun 				buffer_info->rxbuf.page = NULL;
2112*4882a593Smuzhiyun 			}
2113*4882a593Smuzhiyun 		}
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 		buffer_info->dma = 0;
2116*4882a593Smuzhiyun 	}
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun 	/* there also may be some cached data from a chained receive */
2119*4882a593Smuzhiyun 	napi_free_frags(&adapter->napi);
2120*4882a593Smuzhiyun 	rx_ring->rx_skb_top = NULL;
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2123*4882a593Smuzhiyun 	memset(rx_ring->buffer_info, 0, size);
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	/* Zero out the descriptor ring */
2126*4882a593Smuzhiyun 	memset(rx_ring->desc, 0, rx_ring->size);
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun 	rx_ring->next_to_clean = 0;
2129*4882a593Smuzhiyun 	rx_ring->next_to_use = 0;
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	writel(0, hw->hw_addr + rx_ring->rdh);
2132*4882a593Smuzhiyun 	writel(0, hw->hw_addr + rx_ring->rdt);
2133*4882a593Smuzhiyun }
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun /**
2136*4882a593Smuzhiyun  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2137*4882a593Smuzhiyun  * @adapter: board private structure
2138*4882a593Smuzhiyun  **/
2139*4882a593Smuzhiyun static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2140*4882a593Smuzhiyun {
2141*4882a593Smuzhiyun 	int i;
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 	for (i = 0; i < adapter->num_rx_queues; i++)
2144*4882a593Smuzhiyun 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2148*4882a593Smuzhiyun  * and memory write and invalidate disabled for certain operations
2149*4882a593Smuzhiyun  */
2150*4882a593Smuzhiyun static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2151*4882a593Smuzhiyun {
2152*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2153*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2154*4882a593Smuzhiyun 	u32 rctl;
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 	e1000_pci_clear_mwi(hw);
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	rctl = er32(RCTL);
2159*4882a593Smuzhiyun 	rctl |= E1000_RCTL_RST;
2160*4882a593Smuzhiyun 	ew32(RCTL, rctl);
2161*4882a593Smuzhiyun 	E1000_WRITE_FLUSH();
2162*4882a593Smuzhiyun 	mdelay(5);
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	if (netif_running(netdev))
2165*4882a593Smuzhiyun 		e1000_clean_all_rx_rings(adapter);
2166*4882a593Smuzhiyun }
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2169*4882a593Smuzhiyun {
2170*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2171*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2172*4882a593Smuzhiyun 	u32 rctl;
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	rctl = er32(RCTL);
2175*4882a593Smuzhiyun 	rctl &= ~E1000_RCTL_RST;
2176*4882a593Smuzhiyun 	ew32(RCTL, rctl);
2177*4882a593Smuzhiyun 	E1000_WRITE_FLUSH();
2178*4882a593Smuzhiyun 	mdelay(5);
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2181*4882a593Smuzhiyun 		e1000_pci_set_mwi(hw);
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	if (netif_running(netdev)) {
2184*4882a593Smuzhiyun 		/* No need to loop, because 82542 supports only 1 queue */
2185*4882a593Smuzhiyun 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2186*4882a593Smuzhiyun 		e1000_configure_rx(adapter);
2187*4882a593Smuzhiyun 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2188*4882a593Smuzhiyun 	}
2189*4882a593Smuzhiyun }
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun /**
2192*4882a593Smuzhiyun  * e1000_set_mac - Change the Ethernet Address of the NIC
2193*4882a593Smuzhiyun  * @netdev: network interface device structure
2194*4882a593Smuzhiyun  * @p: pointer to an address structure
2195*4882a593Smuzhiyun  *
2196*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
2197*4882a593Smuzhiyun  **/
2198*4882a593Smuzhiyun static int e1000_set_mac(struct net_device *netdev, void *p)
2199*4882a593Smuzhiyun {
2200*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
2201*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2202*4882a593Smuzhiyun 	struct sockaddr *addr = p;
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun 	if (!is_valid_ether_addr(addr->sa_data))
2205*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	/* 82542 2.0 needs to be in reset to write receive address registers */
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	if (hw->mac_type == e1000_82542_rev2_0)
2210*4882a593Smuzhiyun 		e1000_enter_82542_rst(adapter);
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2213*4882a593Smuzhiyun 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	e1000_rar_set(hw, hw->mac_addr, 0);
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	if (hw->mac_type == e1000_82542_rev2_0)
2218*4882a593Smuzhiyun 		e1000_leave_82542_rst(adapter);
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	return 0;
2221*4882a593Smuzhiyun }
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun /**
2224*4882a593Smuzhiyun  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2225*4882a593Smuzhiyun  * @netdev: network interface device structure
2226*4882a593Smuzhiyun  *
2227*4882a593Smuzhiyun  * The set_rx_mode entry point is called whenever the unicast or multicast
2228*4882a593Smuzhiyun  * address lists or the network interface flags are updated. This routine is
2229*4882a593Smuzhiyun  * responsible for configuring the hardware for proper unicast, multicast,
2230*4882a593Smuzhiyun  * promiscuous mode, and all-multi behavior.
2231*4882a593Smuzhiyun  **/
2232*4882a593Smuzhiyun static void e1000_set_rx_mode(struct net_device *netdev)
2233*4882a593Smuzhiyun {
2234*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
2235*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2236*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
2237*4882a593Smuzhiyun 	bool use_uc = false;
2238*4882a593Smuzhiyun 	u32 rctl;
2239*4882a593Smuzhiyun 	u32 hash_value;
2240*4882a593Smuzhiyun 	int i, rar_entries = E1000_RAR_ENTRIES;
2241*4882a593Smuzhiyun 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2242*4882a593Smuzhiyun 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	if (!mcarray)
2245*4882a593Smuzhiyun 		return;
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	/* Check for Promiscuous and All Multicast modes */
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	rctl = er32(RCTL);
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun 	if (netdev->flags & IFF_PROMISC) {
2252*4882a593Smuzhiyun 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2253*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_VFE;
2254*4882a593Smuzhiyun 	} else {
2255*4882a593Smuzhiyun 		if (netdev->flags & IFF_ALLMULTI)
2256*4882a593Smuzhiyun 			rctl |= E1000_RCTL_MPE;
2257*4882a593Smuzhiyun 		else
2258*4882a593Smuzhiyun 			rctl &= ~E1000_RCTL_MPE;
2259*4882a593Smuzhiyun 		/* Enable VLAN filter if there is a VLAN */
2260*4882a593Smuzhiyun 		if (e1000_vlan_used(adapter))
2261*4882a593Smuzhiyun 			rctl |= E1000_RCTL_VFE;
2262*4882a593Smuzhiyun 	}
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	if (netdev_uc_count(netdev) > rar_entries - 1) {
2265*4882a593Smuzhiyun 		rctl |= E1000_RCTL_UPE;
2266*4882a593Smuzhiyun 	} else if (!(netdev->flags & IFF_PROMISC)) {
2267*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_UPE;
2268*4882a593Smuzhiyun 		use_uc = true;
2269*4882a593Smuzhiyun 	}
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	ew32(RCTL, rctl);
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	/* 82542 2.0 needs to be in reset to write receive address registers */
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 	if (hw->mac_type == e1000_82542_rev2_0)
2276*4882a593Smuzhiyun 		e1000_enter_82542_rst(adapter);
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	/* load the first 14 addresses into the exact filters 1-14. Unicast
2279*4882a593Smuzhiyun 	 * addresses take precedence to avoid disabling unicast filtering
2280*4882a593Smuzhiyun 	 * when possible.
2281*4882a593Smuzhiyun 	 *
2282*4882a593Smuzhiyun 	 * RAR 0 is used for the station MAC address.
2283*4882a593Smuzhiyun 	 * If there are fewer than 14 addresses, clear the unused filters.
2284*4882a593Smuzhiyun 	 */
2285*4882a593Smuzhiyun 	i = 1;
2286*4882a593Smuzhiyun 	if (use_uc)
2287*4882a593Smuzhiyun 		netdev_for_each_uc_addr(ha, netdev) {
2288*4882a593Smuzhiyun 			if (i == rar_entries)
2289*4882a593Smuzhiyun 				break;
2290*4882a593Smuzhiyun 			e1000_rar_set(hw, ha->addr, i++);
2291*4882a593Smuzhiyun 		}
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 	netdev_for_each_mc_addr(ha, netdev) {
2294*4882a593Smuzhiyun 		if (i == rar_entries) {
2295*4882a593Smuzhiyun 			/* load any remaining addresses into the hash table */
2296*4882a593Smuzhiyun 			u32 hash_reg, hash_bit, mta;
2297*4882a593Smuzhiyun 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2298*4882a593Smuzhiyun 			hash_reg = (hash_value >> 5) & 0x7F;
2299*4882a593Smuzhiyun 			hash_bit = hash_value & 0x1F;
2300*4882a593Smuzhiyun 			mta = (1 << hash_bit);
2301*4882a593Smuzhiyun 			mcarray[hash_reg] |= mta;
2302*4882a593Smuzhiyun 		} else {
2303*4882a593Smuzhiyun 			e1000_rar_set(hw, ha->addr, i++);
2304*4882a593Smuzhiyun 		}
2305*4882a593Smuzhiyun 	}
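	/* Illustrative MTA indexing (hypothetical hash value; the real value
	 * comes from e1000_hash_mc_addr() and the configured filter mode):
	 * hash_value = 0x0ABC gives
	 *   hash_reg = (0x0ABC >> 5) & 0x7F = 0x55 (MTA register 85) and
	 *   hash_bit =  0x0ABC & 0x1F       = 0x1C (bit 28),
	 * so the address maps into one of 128 registers x 32 bits = 4096
	 * hash-table buckets.
	 */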
2306*4882a593Smuzhiyun 
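	/* Each receive-address (RAR) entry spans two 32-bit slots in the RA
	 * array: the low address dword at index i * 2 and the high dword,
	 * which also carries the Address Valid bit, at i * 2 + 1.  Zeroing
	 * both slots below disables the unused exact-match filters.
	 */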
2307*4882a593Smuzhiyun 	for (; i < rar_entries; i++) {
2308*4882a593Smuzhiyun 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2309*4882a593Smuzhiyun 		E1000_WRITE_FLUSH();
2310*4882a593Smuzhiyun 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2311*4882a593Smuzhiyun 		E1000_WRITE_FLUSH();
2312*4882a593Smuzhiyun 	}
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	/* write the hash table completely, from the bottom up, to avoid
2315*4882a593Smuzhiyun 	 * both write-combining chipset quirks and flushing each write
2316*4882a593Smuzhiyun 	 */
2317*4882a593Smuzhiyun 	for (i = mta_reg_count - 1; i >= 0 ; i--) {
2318*4882a593Smuzhiyun 		/* The 82544 has an errata where writing an odd MTA offset
2319*4882a593Smuzhiyun 		 * overwrites the previous even offset; writing the table
2320*4882a593Smuzhiyun 		 * backwards works around this by always writing the odd
2321*4882a593Smuzhiyun 		 * offset first
2322*4882a593Smuzhiyun 		 */
2323*4882a593Smuzhiyun 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2324*4882a593Smuzhiyun 	}
2325*4882a593Smuzhiyun 	E1000_WRITE_FLUSH();
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 	if (hw->mac_type == e1000_82542_rev2_0)
2328*4882a593Smuzhiyun 		e1000_leave_82542_rst(adapter);
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	kfree(mcarray);
2331*4882a593Smuzhiyun }
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun /**
2334*4882a593Smuzhiyun  * e1000_update_phy_info_task - get phy info
2335*4882a593Smuzhiyun  * @work: work struct contained inside adapter struct
2336*4882a593Smuzhiyun  *
2337*4882a593Smuzhiyun  * Need to wait a few seconds after link up to get diagnostic information from
2338*4882a593Smuzhiyun  * the phy
2339*4882a593Smuzhiyun  */
2340*4882a593Smuzhiyun static void e1000_update_phy_info_task(struct work_struct *work)
2341*4882a593Smuzhiyun {
2342*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work,
2343*4882a593Smuzhiyun 						     struct e1000_adapter,
2344*4882a593Smuzhiyun 						     phy_info_task.work);
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2347*4882a593Smuzhiyun }
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun /**
2350*4882a593Smuzhiyun  * e1000_82547_tx_fifo_stall_task - task to complete work
2351*4882a593Smuzhiyun  * @work: work struct contained inside adapter struct
2352*4882a593Smuzhiyun  **/
2353*4882a593Smuzhiyun static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2354*4882a593Smuzhiyun {
2355*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work,
2356*4882a593Smuzhiyun 						     struct e1000_adapter,
2357*4882a593Smuzhiyun 						     fifo_stall_task.work);
2358*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2359*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2360*4882a593Smuzhiyun 	u32 tctl;
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun 	if (atomic_read(&adapter->tx_fifo_stall)) {
2363*4882a593Smuzhiyun 		if ((er32(TDT) == er32(TDH)) &&
2364*4882a593Smuzhiyun 		   (er32(TDFT) == er32(TDFH)) &&
2365*4882a593Smuzhiyun 		   (er32(TDFTS) == er32(TDFHS))) {
2366*4882a593Smuzhiyun 			tctl = er32(TCTL);
2367*4882a593Smuzhiyun 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2368*4882a593Smuzhiyun 			ew32(TDFT, adapter->tx_head_addr);
2369*4882a593Smuzhiyun 			ew32(TDFH, adapter->tx_head_addr);
2370*4882a593Smuzhiyun 			ew32(TDFTS, adapter->tx_head_addr);
2371*4882a593Smuzhiyun 			ew32(TDFHS, adapter->tx_head_addr);
2372*4882a593Smuzhiyun 			ew32(TCTL, tctl);
2373*4882a593Smuzhiyun 			E1000_WRITE_FLUSH();
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun 			adapter->tx_fifo_head = 0;
2376*4882a593Smuzhiyun 			atomic_set(&adapter->tx_fifo_stall, 0);
2377*4882a593Smuzhiyun 			netif_wake_queue(netdev);
2378*4882a593Smuzhiyun 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2379*4882a593Smuzhiyun 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2380*4882a593Smuzhiyun 		}
2381*4882a593Smuzhiyun 	}
2382*4882a593Smuzhiyun }
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun bool e1000_has_link(struct e1000_adapter *adapter)
2385*4882a593Smuzhiyun {
2386*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2387*4882a593Smuzhiyun 	bool link_active = false;
2388*4882a593Smuzhiyun 
2389*4882a593Smuzhiyun 	/* get_link_status is set on LSC (link status) interrupt or rx
2390*4882a593Smuzhiyun 	 * sequence error interrupt (except on intel ce4100).
2391*4882a593Smuzhiyun 	 * get_link_status will stay false until the
2392*4882a593Smuzhiyun 	 * e1000_check_for_link establishes link for copper adapters
2393*4882a593Smuzhiyun 	 * ONLY
2394*4882a593Smuzhiyun 	 */
2395*4882a593Smuzhiyun 	switch (hw->media_type) {
2396*4882a593Smuzhiyun 	case e1000_media_type_copper:
2397*4882a593Smuzhiyun 		if (hw->mac_type == e1000_ce4100)
2398*4882a593Smuzhiyun 			hw->get_link_status = 1;
2399*4882a593Smuzhiyun 		if (hw->get_link_status) {
2400*4882a593Smuzhiyun 			e1000_check_for_link(hw);
2401*4882a593Smuzhiyun 			link_active = !hw->get_link_status;
2402*4882a593Smuzhiyun 		} else {
2403*4882a593Smuzhiyun 			link_active = true;
2404*4882a593Smuzhiyun 		}
2405*4882a593Smuzhiyun 		break;
2406*4882a593Smuzhiyun 	case e1000_media_type_fiber:
2407*4882a593Smuzhiyun 		e1000_check_for_link(hw);
2408*4882a593Smuzhiyun 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2409*4882a593Smuzhiyun 		break;
2410*4882a593Smuzhiyun 	case e1000_media_type_internal_serdes:
2411*4882a593Smuzhiyun 		e1000_check_for_link(hw);
2412*4882a593Smuzhiyun 		link_active = hw->serdes_has_link;
2413*4882a593Smuzhiyun 		break;
2414*4882a593Smuzhiyun 	default:
2415*4882a593Smuzhiyun 		break;
2416*4882a593Smuzhiyun 	}
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 	return link_active;
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun /**
2422*4882a593Smuzhiyun  * e1000_watchdog - work function
2423*4882a593Smuzhiyun  * @work: work struct contained inside adapter struct
2424*4882a593Smuzhiyun  **/
2425*4882a593Smuzhiyun static void e1000_watchdog(struct work_struct *work)
2426*4882a593Smuzhiyun {
2427*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work,
2428*4882a593Smuzhiyun 						     struct e1000_adapter,
2429*4882a593Smuzhiyun 						     watchdog_task.work);
2430*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2431*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2432*4882a593Smuzhiyun 	struct e1000_tx_ring *txdr = adapter->tx_ring;
2433*4882a593Smuzhiyun 	u32 link, tctl;
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	link = e1000_has_link(adapter);
2436*4882a593Smuzhiyun 	if ((netif_carrier_ok(netdev)) && link)
2437*4882a593Smuzhiyun 		goto link_up;
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 	if (link) {
2440*4882a593Smuzhiyun 		if (!netif_carrier_ok(netdev)) {
2441*4882a593Smuzhiyun 			u32 ctrl;
2442*4882a593Smuzhiyun 			/* update snapshot of PHY registers on LSC */
2443*4882a593Smuzhiyun 			e1000_get_speed_and_duplex(hw,
2444*4882a593Smuzhiyun 						   &adapter->link_speed,
2445*4882a593Smuzhiyun 						   &adapter->link_duplex);
2446*4882a593Smuzhiyun 
2447*4882a593Smuzhiyun 			ctrl = er32(CTRL);
2448*4882a593Smuzhiyun 			pr_info("%s NIC Link is Up %d Mbps %s, "
2449*4882a593Smuzhiyun 				"Flow Control: %s\n",
2450*4882a593Smuzhiyun 				netdev->name,
2451*4882a593Smuzhiyun 				adapter->link_speed,
2452*4882a593Smuzhiyun 				adapter->link_duplex == FULL_DUPLEX ?
2453*4882a593Smuzhiyun 				"Full Duplex" : "Half Duplex",
2454*4882a593Smuzhiyun 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2455*4882a593Smuzhiyun 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2456*4882a593Smuzhiyun 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2457*4882a593Smuzhiyun 				E1000_CTRL_TFCE) ? "TX" : "None")));
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 			/* adjust timeout factor according to speed/duplex */
2460*4882a593Smuzhiyun 			adapter->tx_timeout_factor = 1;
2461*4882a593Smuzhiyun 			switch (adapter->link_speed) {
2462*4882a593Smuzhiyun 			case SPEED_10:
2463*4882a593Smuzhiyun 				adapter->tx_timeout_factor = 16;
2464*4882a593Smuzhiyun 				break;
2465*4882a593Smuzhiyun 			case SPEED_100:
2466*4882a593Smuzhiyun 				/* maybe add some timeout factor ? */
2467*4882a593Smuzhiyun 				break;
2468*4882a593Smuzhiyun 			}
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 			/* enable transmits in the hardware */
2471*4882a593Smuzhiyun 			tctl = er32(TCTL);
2472*4882a593Smuzhiyun 			tctl |= E1000_TCTL_EN;
2473*4882a593Smuzhiyun 			ew32(TCTL, tctl);
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 			netif_carrier_on(netdev);
2476*4882a593Smuzhiyun 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2477*4882a593Smuzhiyun 				schedule_delayed_work(&adapter->phy_info_task,
2478*4882a593Smuzhiyun 						      2 * HZ);
2479*4882a593Smuzhiyun 			adapter->smartspeed = 0;
2480*4882a593Smuzhiyun 		}
2481*4882a593Smuzhiyun 	} else {
2482*4882a593Smuzhiyun 		if (netif_carrier_ok(netdev)) {
2483*4882a593Smuzhiyun 			adapter->link_speed = 0;
2484*4882a593Smuzhiyun 			adapter->link_duplex = 0;
2485*4882a593Smuzhiyun 			pr_info("%s NIC Link is Down\n",
2486*4882a593Smuzhiyun 				netdev->name);
2487*4882a593Smuzhiyun 			netif_carrier_off(netdev);
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2490*4882a593Smuzhiyun 				schedule_delayed_work(&adapter->phy_info_task,
2491*4882a593Smuzhiyun 						      2 * HZ);
2492*4882a593Smuzhiyun 		}
2493*4882a593Smuzhiyun 
2494*4882a593Smuzhiyun 		e1000_smartspeed(adapter);
2495*4882a593Smuzhiyun 	}
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun link_up:
2498*4882a593Smuzhiyun 	e1000_update_stats(adapter);
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2501*4882a593Smuzhiyun 	adapter->tpt_old = adapter->stats.tpt;
2502*4882a593Smuzhiyun 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2503*4882a593Smuzhiyun 	adapter->colc_old = adapter->stats.colc;
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2506*4882a593Smuzhiyun 	adapter->gorcl_old = adapter->stats.gorcl;
2507*4882a593Smuzhiyun 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2508*4882a593Smuzhiyun 	adapter->gotcl_old = adapter->stats.gotcl;
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 	e1000_update_adaptive(hw);
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	if (!netif_carrier_ok(netdev)) {
2513*4882a593Smuzhiyun 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2514*4882a593Smuzhiyun 			/* We've lost link, so the controller stops DMA,
2515*4882a593Smuzhiyun 			 * but we've got queued Tx work that's never going
2516*4882a593Smuzhiyun 			 * to get done, so reset controller to flush Tx.
2517*4882a593Smuzhiyun 			 * (Do the reset outside of interrupt context).
2518*4882a593Smuzhiyun 			 */
2519*4882a593Smuzhiyun 			adapter->tx_timeout_count++;
2520*4882a593Smuzhiyun 			schedule_work(&adapter->reset_task);
2521*4882a593Smuzhiyun 			/* exit immediately since reset is imminent */
2522*4882a593Smuzhiyun 			return;
2523*4882a593Smuzhiyun 		}
2524*4882a593Smuzhiyun 	}
2525*4882a593Smuzhiyun 
2526*4882a593Smuzhiyun 	/* Simple mode for Interrupt Throttle Rate (ITR) */
2527*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2528*4882a593Smuzhiyun 		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2529*4882a593Smuzhiyun 		 * Total asymmetrical Tx or Rx gets ITR=8000;
2530*4882a593Smuzhiyun 		 * everyone else is between 2000-8000.
2531*4882a593Smuzhiyun 		 */
2532*4882a593Smuzhiyun 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2533*4882a593Smuzhiyun 		u32 dif = (adapter->gotcl > adapter->gorcl ?
2534*4882a593Smuzhiyun 			    adapter->gotcl - adapter->gorcl :
2535*4882a593Smuzhiyun 			    adapter->gorcl - adapter->gotcl) / 10000;
2536*4882a593Smuzhiyun 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 		ew32(ITR, 1000000000 / (itr * 256));
2539*4882a593Smuzhiyun 	}
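	/* Worked example with illustrative counters: if gotcl + gorcl is
	 * about 80 MB over the 2 second watchdog interval, goc = 8000; a
	 * perfectly symmetric load (dif = 0) gives itr = 2000 ints/s and a
	 * fully one-sided load (dif == goc) gives 6000 + 2000 = 8000.  The
	 * ITR register is programmed in 256 ns units, so
	 * 1000000000 / (itr * 256) converts the target rate into that
	 * interval; itr = 8000 yields ~488, i.e. ~125 us between interrupts.
	 */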
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 	/* Cause software interrupt to ensure rx ring is cleaned */
2542*4882a593Smuzhiyun 	ew32(ICS, E1000_ICS_RXDMT0);
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun 	/* Force detection of hung controller every watchdog period */
2545*4882a593Smuzhiyun 	adapter->detect_tx_hung = true;
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun 	/* Reschedule the task */
2548*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags))
2549*4882a593Smuzhiyun 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2550*4882a593Smuzhiyun }
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun enum latency_range {
2553*4882a593Smuzhiyun 	lowest_latency = 0,
2554*4882a593Smuzhiyun 	low_latency = 1,
2555*4882a593Smuzhiyun 	bulk_latency = 2,
2556*4882a593Smuzhiyun 	latency_invalid = 255
2557*4882a593Smuzhiyun };
2558*4882a593Smuzhiyun 
2559*4882a593Smuzhiyun /**
2560*4882a593Smuzhiyun  * e1000_update_itr - update the dynamic ITR value based on statistics
2561*4882a593Smuzhiyun  * @adapter: pointer to adapter
2562*4882a593Smuzhiyun  * @itr_setting: current adapter->itr
2563*4882a593Smuzhiyun  * @packets: the number of packets during this measurement interval
2564*4882a593Smuzhiyun  * @bytes: the number of bytes during this measurement interval
2565*4882a593Smuzhiyun  *
2566*4882a593Smuzhiyun  *      Stores a new ITR value based on packets and byte
2567*4882a593Smuzhiyun  *      counts during the last interrupt.  The advantage of per interrupt
2568*4882a593Smuzhiyun  *      computation is faster updates and more accurate ITR for the current
2569*4882a593Smuzhiyun  *      traffic pattern.  Constants in this function were computed
2570*4882a593Smuzhiyun  *      based on theoretical maximum wire speed and thresholds were set based
2571*4882a593Smuzhiyun  *      on testing data as well as attempting to minimize response time
2572*4882a593Smuzhiyun  *      while increasing bulk throughput.
2573*4882a593Smuzhiyun  *      this functionality is controlled by the InterruptThrottleRate module
2574*4882a593Smuzhiyun  *      parameter (see e1000_param.c)
2575*4882a593Smuzhiyun  **/
2576*4882a593Smuzhiyun static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2577*4882a593Smuzhiyun 				     u16 itr_setting, int packets, int bytes)
2578*4882a593Smuzhiyun {
2579*4882a593Smuzhiyun 	unsigned int retval = itr_setting;
2580*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2581*4882a593Smuzhiyun 
2582*4882a593Smuzhiyun 	if (unlikely(hw->mac_type < e1000_82540))
2583*4882a593Smuzhiyun 		goto update_itr_done;
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun 	if (packets == 0)
2586*4882a593Smuzhiyun 		goto update_itr_done;
2587*4882a593Smuzhiyun 
2588*4882a593Smuzhiyun 	switch (itr_setting) {
2589*4882a593Smuzhiyun 	case lowest_latency:
2590*4882a593Smuzhiyun 		/* jumbo frames get bulk treatment*/
2591*4882a593Smuzhiyun 		/* jumbo frames get bulk treatment */
2592*4882a593Smuzhiyun 			retval = bulk_latency;
2593*4882a593Smuzhiyun 		else if ((packets < 5) && (bytes > 512))
2594*4882a593Smuzhiyun 			retval = low_latency;
2595*4882a593Smuzhiyun 		break;
2596*4882a593Smuzhiyun 	case low_latency:  /* 50 usec aka 20000 ints/s */
2597*4882a593Smuzhiyun 		if (bytes > 10000) {
2598*4882a593Smuzhiyun 			/* jumbo frames need bulk latency setting */
2599*4882a593Smuzhiyun 			if (bytes/packets > 8000)
2600*4882a593Smuzhiyun 				retval = bulk_latency;
2601*4882a593Smuzhiyun 			else if ((packets < 10) || ((bytes/packets) > 1200))
2602*4882a593Smuzhiyun 				retval = bulk_latency;
2603*4882a593Smuzhiyun 			else if ((packets > 35))
2604*4882a593Smuzhiyun 				retval = lowest_latency;
2605*4882a593Smuzhiyun 		} else if (bytes/packets > 2000)
2606*4882a593Smuzhiyun 			retval = bulk_latency;
2607*4882a593Smuzhiyun 		else if (packets <= 2 && bytes < 512)
2608*4882a593Smuzhiyun 			retval = lowest_latency;
2609*4882a593Smuzhiyun 		break;
2610*4882a593Smuzhiyun 	case bulk_latency: /* 250 usec aka 4000 ints/s */
2611*4882a593Smuzhiyun 		if (bytes > 25000) {
2612*4882a593Smuzhiyun 			if (packets > 35)
2613*4882a593Smuzhiyun 				retval = low_latency;
2614*4882a593Smuzhiyun 		} else if (bytes < 6000) {
2615*4882a593Smuzhiyun 			retval = low_latency;
2616*4882a593Smuzhiyun 		}
2617*4882a593Smuzhiyun 		break;
2618*4882a593Smuzhiyun 	}
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun update_itr_done:
2621*4882a593Smuzhiyun 	return retval;
2622*4882a593Smuzhiyun }
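/* Illustrative classification with hypothetical per-interval counters: at
 * itr_setting == low_latency, 20 packets totalling 30000 bytes give
 * bytes > 10000 and bytes/packets == 1500 > 1200, so the function steps down
 * to bulk_latency; 2 packets totalling 400 bytes would instead step up to
 * lowest_latency.
 */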
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun static void e1000_set_itr(struct e1000_adapter *adapter)
2625*4882a593Smuzhiyun {
2626*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2627*4882a593Smuzhiyun 	u16 current_itr;
2628*4882a593Smuzhiyun 	u32 new_itr = adapter->itr;
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	if (unlikely(hw->mac_type < e1000_82540))
2631*4882a593Smuzhiyun 		return;
2632*4882a593Smuzhiyun 
2633*4882a593Smuzhiyun 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2634*4882a593Smuzhiyun 	if (unlikely(adapter->link_speed != SPEED_1000)) {
2635*4882a593Smuzhiyun 		current_itr = 0;
2636*4882a593Smuzhiyun 		new_itr = 4000;
2637*4882a593Smuzhiyun 		goto set_itr_now;
2638*4882a593Smuzhiyun 	}
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2641*4882a593Smuzhiyun 					   adapter->total_tx_packets,
2642*4882a593Smuzhiyun 					   adapter->total_tx_bytes);
2643*4882a593Smuzhiyun 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2644*4882a593Smuzhiyun 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2645*4882a593Smuzhiyun 		adapter->tx_itr = low_latency;
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun 	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2648*4882a593Smuzhiyun 					   adapter->total_rx_packets,
2649*4882a593Smuzhiyun 					   adapter->total_rx_bytes);
2650*4882a593Smuzhiyun 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2651*4882a593Smuzhiyun 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2652*4882a593Smuzhiyun 		adapter->rx_itr = low_latency;
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2655*4882a593Smuzhiyun 
2656*4882a593Smuzhiyun 	switch (current_itr) {
2657*4882a593Smuzhiyun 	/* counts and packets in update_itr are dependent on these numbers */
2658*4882a593Smuzhiyun 	case lowest_latency:
2659*4882a593Smuzhiyun 		new_itr = 70000;
2660*4882a593Smuzhiyun 		break;
2661*4882a593Smuzhiyun 	case low_latency:
2662*4882a593Smuzhiyun 		new_itr = 20000; /* aka hwitr = ~200 */
2663*4882a593Smuzhiyun 		break;
2664*4882a593Smuzhiyun 	case bulk_latency:
2665*4882a593Smuzhiyun 		new_itr = 4000;
2666*4882a593Smuzhiyun 		break;
2667*4882a593Smuzhiyun 	default:
2668*4882a593Smuzhiyun 		break;
2669*4882a593Smuzhiyun 	}
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun set_itr_now:
2672*4882a593Smuzhiyun 	if (new_itr != adapter->itr) {
2673*4882a593Smuzhiyun 		/* this attempts to bias the interrupt rate towards Bulk
2674*4882a593Smuzhiyun 		 * by adding intermediate steps when interrupt rate is
2675*4882a593Smuzhiyun 		 * increasing
2676*4882a593Smuzhiyun 		 */
2677*4882a593Smuzhiyun 		new_itr = new_itr > adapter->itr ?
2678*4882a593Smuzhiyun 			  min(adapter->itr + (new_itr >> 2), new_itr) :
2679*4882a593Smuzhiyun 			  new_itr;
2680*4882a593Smuzhiyun 		adapter->itr = new_itr;
2681*4882a593Smuzhiyun 		ew32(ITR, 1000000000 / (new_itr * 256));
2682*4882a593Smuzhiyun 	}
2683*4882a593Smuzhiyun }
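/* Ramp-up example with hypothetical values: with adapter->itr == 4000 and a
 * new target of 20000, the bias step above programs
 * min(4000 + (20000 >> 2), 20000) == 9000 first, so the interrupt rate climbs
 * toward the target over several invocations rather than jumping straight up.
 */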
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun #define E1000_TX_FLAGS_CSUM		0x00000001
2686*4882a593Smuzhiyun #define E1000_TX_FLAGS_VLAN		0x00000002
2687*4882a593Smuzhiyun #define E1000_TX_FLAGS_TSO		0x00000004
2688*4882a593Smuzhiyun #define E1000_TX_FLAGS_IPV4		0x00000008
2689*4882a593Smuzhiyun #define E1000_TX_FLAGS_NO_FCS		0x00000010
2690*4882a593Smuzhiyun #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2691*4882a593Smuzhiyun #define E1000_TX_FLAGS_VLAN_SHIFT	16
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun static int e1000_tso(struct e1000_adapter *adapter,
2694*4882a593Smuzhiyun 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2695*4882a593Smuzhiyun 		     __be16 protocol)
2696*4882a593Smuzhiyun {
2697*4882a593Smuzhiyun 	struct e1000_context_desc *context_desc;
2698*4882a593Smuzhiyun 	struct e1000_tx_buffer *buffer_info;
2699*4882a593Smuzhiyun 	unsigned int i;
2700*4882a593Smuzhiyun 	u32 cmd_length = 0;
2701*4882a593Smuzhiyun 	u16 ipcse = 0, tucse, mss;
2702*4882a593Smuzhiyun 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	if (skb_is_gso(skb)) {
2705*4882a593Smuzhiyun 		int err;
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun 		err = skb_cow_head(skb, 0);
2708*4882a593Smuzhiyun 		if (err < 0)
2709*4882a593Smuzhiyun 			return err;
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2712*4882a593Smuzhiyun 		mss = skb_shinfo(skb)->gso_size;
2713*4882a593Smuzhiyun 		if (protocol == htons(ETH_P_IP)) {
2714*4882a593Smuzhiyun 			struct iphdr *iph = ip_hdr(skb);
2715*4882a593Smuzhiyun 			iph->tot_len = 0;
2716*4882a593Smuzhiyun 			iph->check = 0;
2717*4882a593Smuzhiyun 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2718*4882a593Smuzhiyun 								 iph->daddr, 0,
2719*4882a593Smuzhiyun 								 IPPROTO_TCP,
2720*4882a593Smuzhiyun 								 0);
2721*4882a593Smuzhiyun 			cmd_length = E1000_TXD_CMD_IP;
2722*4882a593Smuzhiyun 			ipcse = skb_transport_offset(skb) - 1;
2723*4882a593Smuzhiyun 		} else if (skb_is_gso_v6(skb)) {
2724*4882a593Smuzhiyun 			tcp_v6_gso_csum_prep(skb);
2725*4882a593Smuzhiyun 			ipcse = 0;
2726*4882a593Smuzhiyun 		}
2727*4882a593Smuzhiyun 		ipcss = skb_network_offset(skb);
2728*4882a593Smuzhiyun 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2729*4882a593Smuzhiyun 		tucss = skb_transport_offset(skb);
2730*4882a593Smuzhiyun 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2731*4882a593Smuzhiyun 		tucse = 0;
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2734*4882a593Smuzhiyun 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun 		i = tx_ring->next_to_use;
2737*4882a593Smuzhiyun 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2738*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
2739*4882a593Smuzhiyun 
2740*4882a593Smuzhiyun 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2741*4882a593Smuzhiyun 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2742*4882a593Smuzhiyun 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2743*4882a593Smuzhiyun 		context_desc->upper_setup.tcp_fields.tucss = tucss;
2744*4882a593Smuzhiyun 		context_desc->upper_setup.tcp_fields.tucso = tucso;
2745*4882a593Smuzhiyun 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2746*4882a593Smuzhiyun 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2747*4882a593Smuzhiyun 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2748*4882a593Smuzhiyun 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 		buffer_info->time_stamp = jiffies;
2751*4882a593Smuzhiyun 		buffer_info->next_to_watch = i;
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun 		if (++i == tx_ring->count)
2754*4882a593Smuzhiyun 			i = 0;
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun 		tx_ring->next_to_use = i;
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun 		return true;
2759*4882a593Smuzhiyun 	}
2760*4882a593Smuzhiyun 	return false;
2761*4882a593Smuzhiyun }
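/* Example context-descriptor offsets for a plain untagged IPv4/TCP frame
 * (illustrative; assumes a 14-byte Ethernet header and 20-byte IP header):
 *   ipcss = 14, ipcso = 24 (iphdr->check), ipcse = 33,
 *   tucss = 34, tucso = 50 (tcphdr->check), hdr_len = 54 with a 20-byte
 *   TCP header.
 * mss comes from skb_shinfo(skb)->gso_size, and the payload length encoded
 * in cmd_length is skb->len - hdr_len.
 */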
2762*4882a593Smuzhiyun 
2763*4882a593Smuzhiyun static bool e1000_tx_csum(struct e1000_adapter *adapter,
2764*4882a593Smuzhiyun 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2765*4882a593Smuzhiyun 			  __be16 protocol)
2766*4882a593Smuzhiyun {
2767*4882a593Smuzhiyun 	struct e1000_context_desc *context_desc;
2768*4882a593Smuzhiyun 	struct e1000_tx_buffer *buffer_info;
2769*4882a593Smuzhiyun 	unsigned int i;
2770*4882a593Smuzhiyun 	u8 css;
2771*4882a593Smuzhiyun 	u32 cmd_len = E1000_TXD_CMD_DEXT;
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2774*4882a593Smuzhiyun 		return false;
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun 	switch (protocol) {
2777*4882a593Smuzhiyun 	case cpu_to_be16(ETH_P_IP):
2778*4882a593Smuzhiyun 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2779*4882a593Smuzhiyun 			cmd_len |= E1000_TXD_CMD_TCP;
2780*4882a593Smuzhiyun 		break;
2781*4882a593Smuzhiyun 	case cpu_to_be16(ETH_P_IPV6):
2782*4882a593Smuzhiyun 		/* XXX not handling all IPV6 headers */
2783*4882a593Smuzhiyun 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2784*4882a593Smuzhiyun 			cmd_len |= E1000_TXD_CMD_TCP;
2785*4882a593Smuzhiyun 		break;
2786*4882a593Smuzhiyun 	default:
2787*4882a593Smuzhiyun 		if (unlikely(net_ratelimit()))
2788*4882a593Smuzhiyun 			e_warn(drv, "checksum_partial proto=%x!\n",
2789*4882a593Smuzhiyun 			       skb->protocol);
2790*4882a593Smuzhiyun 		break;
2791*4882a593Smuzhiyun 	}
2792*4882a593Smuzhiyun 
2793*4882a593Smuzhiyun 	css = skb_checksum_start_offset(skb);
2794*4882a593Smuzhiyun 
2795*4882a593Smuzhiyun 	i = tx_ring->next_to_use;
2796*4882a593Smuzhiyun 	buffer_info = &tx_ring->buffer_info[i];
2797*4882a593Smuzhiyun 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun 	context_desc->lower_setup.ip_config = 0;
2800*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucss = css;
2801*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucso =
2802*4882a593Smuzhiyun 		css + skb->csum_offset;
2803*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucse = 0;
2804*4882a593Smuzhiyun 	context_desc->tcp_seg_setup.data = 0;
2805*4882a593Smuzhiyun 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2806*4882a593Smuzhiyun 
2807*4882a593Smuzhiyun 	buffer_info->time_stamp = jiffies;
2808*4882a593Smuzhiyun 	buffer_info->next_to_watch = i;
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 	if (unlikely(++i == tx_ring->count))
2811*4882a593Smuzhiyun 		i = 0;
2812*4882a593Smuzhiyun 
2813*4882a593Smuzhiyun 	tx_ring->next_to_use = i;
2814*4882a593Smuzhiyun 
2815*4882a593Smuzhiyun 	return true;
2816*4882a593Smuzhiyun }
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun #define E1000_MAX_TXD_PWR	12
2819*4882a593Smuzhiyun #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun static int e1000_tx_map(struct e1000_adapter *adapter,
2822*4882a593Smuzhiyun 			struct e1000_tx_ring *tx_ring,
2823*4882a593Smuzhiyun 			struct sk_buff *skb, unsigned int first,
2824*4882a593Smuzhiyun 			unsigned int max_per_txd, unsigned int nr_frags,
2825*4882a593Smuzhiyun 			unsigned int mss)
2826*4882a593Smuzhiyun {
2827*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2828*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
2829*4882a593Smuzhiyun 	struct e1000_tx_buffer *buffer_info;
2830*4882a593Smuzhiyun 	unsigned int len = skb_headlen(skb);
2831*4882a593Smuzhiyun 	unsigned int offset = 0, size, count = 0, i;
2832*4882a593Smuzhiyun 	unsigned int f, bytecount, segs;
2833*4882a593Smuzhiyun 
2834*4882a593Smuzhiyun 	i = tx_ring->next_to_use;
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	while (len) {
2837*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
2838*4882a593Smuzhiyun 		size = min(len, max_per_txd);
2839*4882a593Smuzhiyun 		/* Workaround for controller erratum --
2840*4882a593Smuzhiyun 		 * the descriptor for a non-TSO packet in a linear SKB that
2841*4882a593Smuzhiyun 		 * follows a TSO packet gets written back prematurely, before
2842*4882a593Smuzhiyun 		 * the data is fully DMA'd to the controller
2843*4882a593Smuzhiyun 		 */
2844*4882a593Smuzhiyun 		if (!skb->data_len && tx_ring->last_tx_tso &&
2845*4882a593Smuzhiyun 		    !skb_is_gso(skb)) {
2846*4882a593Smuzhiyun 			tx_ring->last_tx_tso = false;
2847*4882a593Smuzhiyun 			size -= 4;
2848*4882a593Smuzhiyun 		}
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 		/* Workaround for premature desc write-backs
2851*4882a593Smuzhiyun 		 * in TSO mode.  Append 4-byte sentinel desc
2852*4882a593Smuzhiyun 		 */
2853*4882a593Smuzhiyun 		if (unlikely(mss && !nr_frags && size == len && size > 8))
2854*4882a593Smuzhiyun 			size -= 4;
2855*4882a593Smuzhiyun 		/* Work-around for errata 10, which applies to all
2856*4882a593Smuzhiyun 		 * controllers in PCI-X mode.
2857*4882a593Smuzhiyun 		 * The fix is to make sure that the first descriptor of a
2858*4882a593Smuzhiyun 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2859*4882a593Smuzhiyun 		 */
2860*4882a593Smuzhiyun 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2861*4882a593Smuzhiyun 			     (size > 2015) && count == 0))
2862*4882a593Smuzhiyun 			size = 2015;
2863*4882a593Smuzhiyun 
2864*4882a593Smuzhiyun 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2865*4882a593Smuzhiyun 		 * terminating buffers within evenly-aligned dwords.
2866*4882a593Smuzhiyun 		 */
2867*4882a593Smuzhiyun 		if (unlikely(adapter->pcix_82544 &&
2868*4882a593Smuzhiyun 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2869*4882a593Smuzhiyun 		   size > 4))
2870*4882a593Smuzhiyun 			size -= 4;
2871*4882a593Smuzhiyun 
2872*4882a593Smuzhiyun 		buffer_info->length = size;
2873*4882a593Smuzhiyun 		/* set time_stamp *before* dma to help avoid a possible race */
2874*4882a593Smuzhiyun 		buffer_info->time_stamp = jiffies;
2875*4882a593Smuzhiyun 		buffer_info->mapped_as_page = false;
2876*4882a593Smuzhiyun 		buffer_info->dma = dma_map_single(&pdev->dev,
2877*4882a593Smuzhiyun 						  skb->data + offset,
2878*4882a593Smuzhiyun 						  size, DMA_TO_DEVICE);
2879*4882a593Smuzhiyun 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2880*4882a593Smuzhiyun 			goto dma_error;
2881*4882a593Smuzhiyun 		buffer_info->next_to_watch = i;
2882*4882a593Smuzhiyun 
2883*4882a593Smuzhiyun 		len -= size;
2884*4882a593Smuzhiyun 		offset += size;
2885*4882a593Smuzhiyun 		count++;
2886*4882a593Smuzhiyun 		if (len) {
2887*4882a593Smuzhiyun 			i++;
2888*4882a593Smuzhiyun 			if (unlikely(i == tx_ring->count))
2889*4882a593Smuzhiyun 				i = 0;
2890*4882a593Smuzhiyun 		}
2891*4882a593Smuzhiyun 	}
2892*4882a593Smuzhiyun 
2893*4882a593Smuzhiyun 	for (f = 0; f < nr_frags; f++) {
2894*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 		len = skb_frag_size(frag);
2897*4882a593Smuzhiyun 		offset = 0;
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun 		while (len) {
2900*4882a593Smuzhiyun 			unsigned long bufend;
2901*4882a593Smuzhiyun 			i++;
2902*4882a593Smuzhiyun 			if (unlikely(i == tx_ring->count))
2903*4882a593Smuzhiyun 				i = 0;
2904*4882a593Smuzhiyun 
2905*4882a593Smuzhiyun 			buffer_info = &tx_ring->buffer_info[i];
2906*4882a593Smuzhiyun 			size = min(len, max_per_txd);
2907*4882a593Smuzhiyun 			/* Workaround for premature desc write-backs
2908*4882a593Smuzhiyun 			 * in TSO mode.  Append 4-byte sentinel desc
2909*4882a593Smuzhiyun 			 */
2910*4882a593Smuzhiyun 			if (unlikely(mss && f == (nr_frags-1) &&
2911*4882a593Smuzhiyun 			    size == len && size > 8))
2912*4882a593Smuzhiyun 				size -= 4;
2913*4882a593Smuzhiyun 			/* Workaround for potential 82544 hang in PCI-X.
2914*4882a593Smuzhiyun 			 * Avoid terminating buffers within evenly-aligned
2915*4882a593Smuzhiyun 			 * dwords.
2916*4882a593Smuzhiyun 			 */
2917*4882a593Smuzhiyun 			bufend = (unsigned long)
2918*4882a593Smuzhiyun 				page_to_phys(skb_frag_page(frag));
2919*4882a593Smuzhiyun 			bufend += offset + size - 1;
2920*4882a593Smuzhiyun 			if (unlikely(adapter->pcix_82544 &&
2921*4882a593Smuzhiyun 				     !(bufend & 4) &&
2922*4882a593Smuzhiyun 				     size > 4))
2923*4882a593Smuzhiyun 				size -= 4;
2924*4882a593Smuzhiyun 
2925*4882a593Smuzhiyun 			buffer_info->length = size;
2926*4882a593Smuzhiyun 			buffer_info->time_stamp = jiffies;
2927*4882a593Smuzhiyun 			buffer_info->mapped_as_page = true;
2928*4882a593Smuzhiyun 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2929*4882a593Smuzhiyun 						offset, size, DMA_TO_DEVICE);
2930*4882a593Smuzhiyun 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2931*4882a593Smuzhiyun 				goto dma_error;
2932*4882a593Smuzhiyun 			buffer_info->next_to_watch = i;
2933*4882a593Smuzhiyun 
2934*4882a593Smuzhiyun 			len -= size;
2935*4882a593Smuzhiyun 			offset += size;
2936*4882a593Smuzhiyun 			count++;
2937*4882a593Smuzhiyun 		}
2938*4882a593Smuzhiyun 	}
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun 	segs = skb_shinfo(skb)->gso_segs ?: 1;
2941*4882a593Smuzhiyun 	/* multiply data chunks by size of headers */
2942*4882a593Smuzhiyun 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2943*4882a593Smuzhiyun 
2944*4882a593Smuzhiyun 	tx_ring->buffer_info[i].skb = skb;
2945*4882a593Smuzhiyun 	tx_ring->buffer_info[i].segs = segs;
2946*4882a593Smuzhiyun 	tx_ring->buffer_info[i].bytecount = bytecount;
2947*4882a593Smuzhiyun 	tx_ring->buffer_info[first].next_to_watch = i;
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	return count;
2950*4882a593Smuzhiyun 
2951*4882a593Smuzhiyun dma_error:
2952*4882a593Smuzhiyun 	dev_err(&pdev->dev, "TX DMA map failed\n");
2953*4882a593Smuzhiyun 	buffer_info->dma = 0;
2954*4882a593Smuzhiyun 	if (count)
2955*4882a593Smuzhiyun 		count--;
2956*4882a593Smuzhiyun 
2957*4882a593Smuzhiyun 	while (count--) {
2958*4882a593Smuzhiyun 		if (i == 0)
2959*4882a593Smuzhiyun 			i += tx_ring->count;
2960*4882a593Smuzhiyun 		i--;
2961*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
2962*4882a593Smuzhiyun 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2963*4882a593Smuzhiyun 	}
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 	return 0;
2966*4882a593Smuzhiyun }
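/* Accounting example with illustrative numbers: a TSO skb with
 * skb->len == 2974, skb_headlen(skb) == 54 and gso_segs == 2 records
 * bytecount = (2 - 1) * 54 + 2974 = 3028, i.e. the protocol headers are
 * counted once per segment that will appear on the wire.
 */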
2967*4882a593Smuzhiyun 
2968*4882a593Smuzhiyun static void e1000_tx_queue(struct e1000_adapter *adapter,
2969*4882a593Smuzhiyun 			   struct e1000_tx_ring *tx_ring, int tx_flags,
2970*4882a593Smuzhiyun 			   int count)
2971*4882a593Smuzhiyun {
2972*4882a593Smuzhiyun 	struct e1000_tx_desc *tx_desc = NULL;
2973*4882a593Smuzhiyun 	struct e1000_tx_buffer *buffer_info;
2974*4882a593Smuzhiyun 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2975*4882a593Smuzhiyun 	unsigned int i;
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2978*4882a593Smuzhiyun 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2979*4882a593Smuzhiyun 			     E1000_TXD_CMD_TSE;
2980*4882a593Smuzhiyun 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2981*4882a593Smuzhiyun 
2982*4882a593Smuzhiyun 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2983*4882a593Smuzhiyun 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2984*4882a593Smuzhiyun 	}
2985*4882a593Smuzhiyun 
2986*4882a593Smuzhiyun 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2987*4882a593Smuzhiyun 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2988*4882a593Smuzhiyun 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2989*4882a593Smuzhiyun 	}
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2992*4882a593Smuzhiyun 		txd_lower |= E1000_TXD_CMD_VLE;
2993*4882a593Smuzhiyun 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2994*4882a593Smuzhiyun 	}
2995*4882a593Smuzhiyun 
2996*4882a593Smuzhiyun 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2997*4882a593Smuzhiyun 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
2998*4882a593Smuzhiyun 
2999*4882a593Smuzhiyun 	i = tx_ring->next_to_use;
3000*4882a593Smuzhiyun 
3001*4882a593Smuzhiyun 	while (count--) {
3002*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
3003*4882a593Smuzhiyun 		tx_desc = E1000_TX_DESC(*tx_ring, i);
3004*4882a593Smuzhiyun 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3005*4882a593Smuzhiyun 		tx_desc->lower.data =
3006*4882a593Smuzhiyun 			cpu_to_le32(txd_lower | buffer_info->length);
3007*4882a593Smuzhiyun 		tx_desc->upper.data = cpu_to_le32(txd_upper);
3008*4882a593Smuzhiyun 		if (unlikely(++i == tx_ring->count))
3009*4882a593Smuzhiyun 			i = 0;
3010*4882a593Smuzhiyun 	}
3011*4882a593Smuzhiyun 
3012*4882a593Smuzhiyun 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3013*4882a593Smuzhiyun 
3014*4882a593Smuzhiyun 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3015*4882a593Smuzhiyun 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3016*4882a593Smuzhiyun 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3017*4882a593Smuzhiyun 
3018*4882a593Smuzhiyun 	/* Force memory writes to complete before letting h/w
3019*4882a593Smuzhiyun 	 * know there are new descriptors to fetch.  (Only
3020*4882a593Smuzhiyun 	 * applicable for weak-ordered memory model archs,
3021*4882a593Smuzhiyun 	 * such as IA-64).
3022*4882a593Smuzhiyun 	 */
3023*4882a593Smuzhiyun 	dma_wmb();
3024*4882a593Smuzhiyun 
3025*4882a593Smuzhiyun 	tx_ring->next_to_use = i;
3026*4882a593Smuzhiyun }
3027*4882a593Smuzhiyun 
3028*4882a593Smuzhiyun /* 82547 workaround to avoid controller hang in half-duplex environment.
3029*4882a593Smuzhiyun  * The workaround is to avoid queuing a large packet that would span
3030*4882a593Smuzhiyun  * the internal Tx FIFO ring boundary by notifying the stack to resend
3031*4882a593Smuzhiyun  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3032*4882a593Smuzhiyun  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3033*4882a593Smuzhiyun  * to the beginning of the Tx FIFO.
3034*4882a593Smuzhiyun  */
3035*4882a593Smuzhiyun 
3036*4882a593Smuzhiyun #define E1000_FIFO_HDR			0x10
3037*4882a593Smuzhiyun #define E1000_82547_PAD_LEN		0x3E0
3038*4882a593Smuzhiyun 
3039*4882a593Smuzhiyun static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3040*4882a593Smuzhiyun 				       struct sk_buff *skb)
3041*4882a593Smuzhiyun {
3042*4882a593Smuzhiyun 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3043*4882a593Smuzhiyun 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3044*4882a593Smuzhiyun 
3045*4882a593Smuzhiyun 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3046*4882a593Smuzhiyun 
3047*4882a593Smuzhiyun 	if (adapter->link_duplex != HALF_DUPLEX)
3048*4882a593Smuzhiyun 		goto no_fifo_stall_required;
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 	if (atomic_read(&adapter->tx_fifo_stall))
3051*4882a593Smuzhiyun 		return 1;
3052*4882a593Smuzhiyun 
3053*4882a593Smuzhiyun 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3054*4882a593Smuzhiyun 		atomic_set(&adapter->tx_fifo_stall, 1);
3055*4882a593Smuzhiyun 		return 1;
3056*4882a593Smuzhiyun 	}
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun no_fifo_stall_required:
3059*4882a593Smuzhiyun 	adapter->tx_fifo_head += skb_fifo_len;
3060*4882a593Smuzhiyun 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3061*4882a593Smuzhiyun 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3062*4882a593Smuzhiyun 	return 0;
3063*4882a593Smuzhiyun }
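/* Worked example with hypothetical FIFO state: tx_fifo_size = 0x1000 and
 * tx_fifo_head = 0xE00 leave fifo_space = 0x200.  A 600-byte half-duplex
 * frame gives skb_fifo_len = ALIGN(600 + 0x10, 0x10) = 0x270, which is below
 * E1000_82547_PAD_LEN + fifo_space = 0x5E0, so no stall is signalled and the
 * head advances to (0xE00 + 0x270) - 0x1000 = 0x70 after wrapping.
 */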
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3066*4882a593Smuzhiyun {
3067*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3068*4882a593Smuzhiyun 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3069*4882a593Smuzhiyun 
3070*4882a593Smuzhiyun 	netif_stop_queue(netdev);
3071*4882a593Smuzhiyun 	/* Herbert's original patch had:
3072*4882a593Smuzhiyun 	 *  smp_mb__after_netif_stop_queue();
3073*4882a593Smuzhiyun 	 * but since that doesn't exist yet, just open code it.
3074*4882a593Smuzhiyun 	 */
3075*4882a593Smuzhiyun 	smp_mb();
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun 	/* We need to check again in case another CPU has just
3078*4882a593Smuzhiyun 	 * made room available.
3079*4882a593Smuzhiyun 	 */
3080*4882a593Smuzhiyun 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3081*4882a593Smuzhiyun 		return -EBUSY;
3082*4882a593Smuzhiyun 
3083*4882a593Smuzhiyun 	/* A reprieve! */
3084*4882a593Smuzhiyun 	netif_start_queue(netdev);
3085*4882a593Smuzhiyun 	++adapter->restart_queue;
3086*4882a593Smuzhiyun 	return 0;
3087*4882a593Smuzhiyun }
3088*4882a593Smuzhiyun 
3089*4882a593Smuzhiyun static int e1000_maybe_stop_tx(struct net_device *netdev,
3090*4882a593Smuzhiyun 			       struct e1000_tx_ring *tx_ring, int size)
3091*4882a593Smuzhiyun {
3092*4882a593Smuzhiyun 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3093*4882a593Smuzhiyun 		return 0;
3094*4882a593Smuzhiyun 	return __e1000_maybe_stop_tx(netdev, size);
3095*4882a593Smuzhiyun }
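/* The unused-descriptor count checked above is the free ring space modulo the
 * ring size, with one slot always left empty so the tail never catches the
 * head.  Illustrative state: count = 256, next_to_use = 200 and
 * next_to_clean = 10 leave roughly 256 + 10 - 200 - 1 = 65 free descriptors,
 * so a request for size <= 65 proceeds and anything larger stops the queue.
 */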
3096*4882a593Smuzhiyun 
3097*4882a593Smuzhiyun #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
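/* TXD_USE_COUNT() is a ceiling division by 2^X, i.e. how many descriptors of
 * at most (1 << X) bytes are needed to cover S bytes.  With the default
 * max_txd_pwr of 12 (4096-byte descriptors), a 6000-byte buffer needs
 * TXD_USE_COUNT(6000, 12) = (6000 + 4095) >> 12 = 2 descriptors.
 */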
3098*4882a593Smuzhiyun static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3099*4882a593Smuzhiyun 				    struct net_device *netdev)
3100*4882a593Smuzhiyun {
3101*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3102*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3103*4882a593Smuzhiyun 	struct e1000_tx_ring *tx_ring;
3104*4882a593Smuzhiyun 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3105*4882a593Smuzhiyun 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3106*4882a593Smuzhiyun 	unsigned int tx_flags = 0;
3107*4882a593Smuzhiyun 	unsigned int len = skb_headlen(skb);
3108*4882a593Smuzhiyun 	unsigned int nr_frags;
3109*4882a593Smuzhiyun 	unsigned int mss;
3110*4882a593Smuzhiyun 	int count = 0;
3111*4882a593Smuzhiyun 	int tso;
3112*4882a593Smuzhiyun 	unsigned int f;
3113*4882a593Smuzhiyun 	__be16 protocol = vlan_get_protocol(skb);
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	/* This goes back to the question of how to logically map a Tx queue
3116*4882a593Smuzhiyun 	 * to a flow.  Right now, performance is impacted slightly negatively
3117*4882a593Smuzhiyun 	 * if using multiple Tx queues.  If the stack breaks away from a
3118*4882a593Smuzhiyun 	 * single qdisc implementation, we can look at this again.
3119*4882a593Smuzhiyun 	 */
3120*4882a593Smuzhiyun 	tx_ring = adapter->tx_ring;
3121*4882a593Smuzhiyun 
3122*4882a593Smuzhiyun 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3123*4882a593Smuzhiyun 	 * packets may get corrupted during padding by HW.
3124*4882a593Smuzhiyun 	 * To work around this issue, pad all small packets manually.
3125*4882a593Smuzhiyun 	 */
3126*4882a593Smuzhiyun 	if (eth_skb_pad(skb))
3127*4882a593Smuzhiyun 		return NETDEV_TX_OK;
3128*4882a593Smuzhiyun 
3129*4882a593Smuzhiyun 	mss = skb_shinfo(skb)->gso_size;
3130*4882a593Smuzhiyun 	/* The controller does a simple calculation to
3131*4882a593Smuzhiyun 	 * make sure there is enough room in the FIFO before
3132*4882a593Smuzhiyun 	 * initiating the DMA for each buffer.  The calc is:
3133*4882a593Smuzhiyun 	 * 4 = ceil(buffer len/mss).  To make sure we don't
3134*4882a593Smuzhiyun 	 * overrun the FIFO, adjust the max buffer len if mss
3135*4882a593Smuzhiyun 	 * drops.
3136*4882a593Smuzhiyun 	 */
3137*4882a593Smuzhiyun 	if (mss) {
3138*4882a593Smuzhiyun 		u8 hdr_len;
3139*4882a593Smuzhiyun 		max_per_txd = min(mss << 2, max_per_txd);
3140*4882a593Smuzhiyun 		max_txd_pwr = fls(max_per_txd) - 1;
3141*4882a593Smuzhiyun 
3142*4882a593Smuzhiyun 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3143*4882a593Smuzhiyun 		if (skb->data_len && hdr_len == len) {
3144*4882a593Smuzhiyun 			switch (hw->mac_type) {
3145*4882a593Smuzhiyun 			case e1000_82544: {
3146*4882a593Smuzhiyun 				unsigned int pull_size;
3147*4882a593Smuzhiyun 
3148*4882a593Smuzhiyun 				/* Make sure we have room to chop off 4 bytes,
3149*4882a593Smuzhiyun 				 * and that the end alignment will work out to
3150*4882a593Smuzhiyun 				 * this hardware's requirements
3151*4882a593Smuzhiyun 				 * NOTE: this is a TSO-only workaround;
3152*4882a593Smuzhiyun 				 * if the end byte alignment is not correct,
3153*4882a593Smuzhiyun 				 * move us into the next dword
3154*4882a593Smuzhiyun 				 */
3155*4882a593Smuzhiyun 				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3156*4882a593Smuzhiyun 				    & 4)
3157*4882a593Smuzhiyun 					break;
3158*4882a593Smuzhiyun 				pull_size = min((unsigned int)4, skb->data_len);
3159*4882a593Smuzhiyun 				if (!__pskb_pull_tail(skb, pull_size)) {
3160*4882a593Smuzhiyun 					e_err(drv, "__pskb_pull_tail "
3161*4882a593Smuzhiyun 					      "failed.\n");
3162*4882a593Smuzhiyun 					dev_kfree_skb_any(skb);
3163*4882a593Smuzhiyun 					return NETDEV_TX_OK;
3164*4882a593Smuzhiyun 				}
3165*4882a593Smuzhiyun 				len = skb_headlen(skb);
3166*4882a593Smuzhiyun 				break;
3167*4882a593Smuzhiyun 			}
3168*4882a593Smuzhiyun 			default:
3169*4882a593Smuzhiyun 				/* do nothing */
3170*4882a593Smuzhiyun 				break;
3171*4882a593Smuzhiyun 			}
3172*4882a593Smuzhiyun 		}
3173*4882a593Smuzhiyun 	}
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun 	/* reserve a descriptor for the offload context */
3176*4882a593Smuzhiyun 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3177*4882a593Smuzhiyun 		count++;
3178*4882a593Smuzhiyun 	count++;
3179*4882a593Smuzhiyun 
3180*4882a593Smuzhiyun 	/* Controller Erratum workaround */
3181*4882a593Smuzhiyun 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3182*4882a593Smuzhiyun 		count++;
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 	count += TXD_USE_COUNT(len, max_txd_pwr);
3185*4882a593Smuzhiyun 
3186*4882a593Smuzhiyun 	if (adapter->pcix_82544)
3187*4882a593Smuzhiyun 		count++;
3188*4882a593Smuzhiyun 
3189*4882a593Smuzhiyun 	/* Work-around for errata 10, which applies to all controllers
3190*4882a593Smuzhiyun 	 * in PCI-X mode, so add one more descriptor to the count
3191*4882a593Smuzhiyun 	 */
3192*4882a593Smuzhiyun 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3193*4882a593Smuzhiyun 			(len > 2015)))
3194*4882a593Smuzhiyun 		count++;
3195*4882a593Smuzhiyun 
3196*4882a593Smuzhiyun 	nr_frags = skb_shinfo(skb)->nr_frags;
3197*4882a593Smuzhiyun 	for (f = 0; f < nr_frags; f++)
3198*4882a593Smuzhiyun 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3199*4882a593Smuzhiyun 				       max_txd_pwr);
3200*4882a593Smuzhiyun 	if (adapter->pcix_82544)
3201*4882a593Smuzhiyun 		count += nr_frags;
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun 	/* need: count + 2 desc gap to keep tail from touching
3204*4882a593Smuzhiyun 	 * head, otherwise try next time
3205*4882a593Smuzhiyun 	 */
3206*4882a593Smuzhiyun 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3207*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
3208*4882a593Smuzhiyun 
3209*4882a593Smuzhiyun 	if (unlikely((hw->mac_type == e1000_82547) &&
3210*4882a593Smuzhiyun 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3211*4882a593Smuzhiyun 		netif_stop_queue(netdev);
3212*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3213*4882a593Smuzhiyun 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3214*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
3215*4882a593Smuzhiyun 	}
3216*4882a593Smuzhiyun 
3217*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb)) {
3218*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_VLAN;
3219*4882a593Smuzhiyun 		tx_flags |= (skb_vlan_tag_get(skb) <<
3220*4882a593Smuzhiyun 			     E1000_TX_FLAGS_VLAN_SHIFT);
3221*4882a593Smuzhiyun 	}
3222*4882a593Smuzhiyun 
3223*4882a593Smuzhiyun 	first = tx_ring->next_to_use;
3224*4882a593Smuzhiyun 
3225*4882a593Smuzhiyun 	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3226*4882a593Smuzhiyun 	if (tso < 0) {
3227*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
3228*4882a593Smuzhiyun 		return NETDEV_TX_OK;
3229*4882a593Smuzhiyun 	}
3230*4882a593Smuzhiyun 
3231*4882a593Smuzhiyun 	if (likely(tso)) {
3232*4882a593Smuzhiyun 		if (likely(hw->mac_type != e1000_82544))
3233*4882a593Smuzhiyun 			tx_ring->last_tx_tso = true;
3234*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_TSO;
3235*4882a593Smuzhiyun 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3236*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_CSUM;
3237*4882a593Smuzhiyun 
3238*4882a593Smuzhiyun 	if (protocol == htons(ETH_P_IP))
3239*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_IPV4;
3240*4882a593Smuzhiyun 
3241*4882a593Smuzhiyun 	if (unlikely(skb->no_fcs))
3242*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3243*4882a593Smuzhiyun 
3244*4882a593Smuzhiyun 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3245*4882a593Smuzhiyun 			     nr_frags, mss);
3246*4882a593Smuzhiyun 
3247*4882a593Smuzhiyun 	if (count) {
3248*4882a593Smuzhiyun 		/* The number of descriptors needed is higher than in other
3249*4882a593Smuzhiyun 		 * Intel drivers due to workarounds.  The breakdown is below:
3250*4882a593Smuzhiyun 		 * Data descriptors: MAX_SKB_FRAGS + 1
3251*4882a593Smuzhiyun 		 * Context Descriptor: 1
3252*4882a593Smuzhiyun 		 * Keep head from touching tail: 2
3253*4882a593Smuzhiyun 		 * Workarounds: 3
3254*4882a593Smuzhiyun 		 */
3255*4882a593Smuzhiyun 		int desc_needed = MAX_SKB_FRAGS + 7;
3256*4882a593Smuzhiyun 
3257*4882a593Smuzhiyun 		netdev_sent_queue(netdev, skb->len);
3258*4882a593Smuzhiyun 		skb_tx_timestamp(skb);
3259*4882a593Smuzhiyun 
3260*4882a593Smuzhiyun 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3261*4882a593Smuzhiyun 
3262*4882a593Smuzhiyun 		/* 82544 potentially requires twice as many data descriptors
3263*4882a593Smuzhiyun 		 * in order to guarantee buffers don't end on evenly-aligned
3264*4882a593Smuzhiyun 		 * dwords
3265*4882a593Smuzhiyun 		 */
3266*4882a593Smuzhiyun 		if (adapter->pcix_82544)
3267*4882a593Smuzhiyun 			desc_needed += MAX_SKB_FRAGS + 1;
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun 		/* Make sure there is space in the ring for the next send. */
3270*4882a593Smuzhiyun 		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3271*4882a593Smuzhiyun 
3272*4882a593Smuzhiyun 		if (!netdev_xmit_more() ||
3273*4882a593Smuzhiyun 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3274*4882a593Smuzhiyun 			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3275*4882a593Smuzhiyun 		}
3276*4882a593Smuzhiyun 	} else {
3277*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
3278*4882a593Smuzhiyun 		tx_ring->buffer_info[first].time_stamp = 0;
3279*4882a593Smuzhiyun 		tx_ring->next_to_use = first;
3280*4882a593Smuzhiyun 	}
3281*4882a593Smuzhiyun 
3282*4882a593Smuzhiyun 	return NETDEV_TX_OK;
3283*4882a593Smuzhiyun }
3284*4882a593Smuzhiyun 
3285*4882a593Smuzhiyun #define NUM_REGS 38 /* 1 based count */
3286*4882a593Smuzhiyun static void e1000_regdump(struct e1000_adapter *adapter)
3287*4882a593Smuzhiyun {
3288*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3289*4882a593Smuzhiyun 	u32 regs[NUM_REGS];
3290*4882a593Smuzhiyun 	u32 *regs_buff = regs;
3291*4882a593Smuzhiyun 	int i = 0;
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun 	static const char * const reg_name[] = {
3294*4882a593Smuzhiyun 		"CTRL",  "STATUS",
3295*4882a593Smuzhiyun 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3296*4882a593Smuzhiyun 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3297*4882a593Smuzhiyun 		"TIDV", "TXDCTL", "TADV", "TARC0",
3298*4882a593Smuzhiyun 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3299*4882a593Smuzhiyun 		"TXDCTL1", "TARC1",
3300*4882a593Smuzhiyun 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3301*4882a593Smuzhiyun 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3302*4882a593Smuzhiyun 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303*4882a593Smuzhiyun 	};
3304*4882a593Smuzhiyun 
3305*4882a593Smuzhiyun 	regs_buff[0]  = er32(CTRL);
3306*4882a593Smuzhiyun 	regs_buff[1]  = er32(STATUS);
3307*4882a593Smuzhiyun 
3308*4882a593Smuzhiyun 	regs_buff[2]  = er32(RCTL);
3309*4882a593Smuzhiyun 	regs_buff[3]  = er32(RDLEN);
3310*4882a593Smuzhiyun 	regs_buff[4]  = er32(RDH);
3311*4882a593Smuzhiyun 	regs_buff[5]  = er32(RDT);
3312*4882a593Smuzhiyun 	regs_buff[6]  = er32(RDTR);
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun 	regs_buff[7]  = er32(TCTL);
3315*4882a593Smuzhiyun 	regs_buff[8]  = er32(TDBAL);
3316*4882a593Smuzhiyun 	regs_buff[9]  = er32(TDBAH);
3317*4882a593Smuzhiyun 	regs_buff[10] = er32(TDLEN);
3318*4882a593Smuzhiyun 	regs_buff[11] = er32(TDH);
3319*4882a593Smuzhiyun 	regs_buff[12] = er32(TDT);
3320*4882a593Smuzhiyun 	regs_buff[13] = er32(TIDV);
3321*4882a593Smuzhiyun 	regs_buff[14] = er32(TXDCTL);
3322*4882a593Smuzhiyun 	regs_buff[15] = er32(TADV);
3323*4882a593Smuzhiyun 	regs_buff[16] = er32(TARC0);
3324*4882a593Smuzhiyun 
3325*4882a593Smuzhiyun 	regs_buff[17] = er32(TDBAL1);
3326*4882a593Smuzhiyun 	regs_buff[18] = er32(TDBAH1);
3327*4882a593Smuzhiyun 	regs_buff[19] = er32(TDLEN1);
3328*4882a593Smuzhiyun 	regs_buff[20] = er32(TDH1);
3329*4882a593Smuzhiyun 	regs_buff[21] = er32(TDT1);
3330*4882a593Smuzhiyun 	regs_buff[22] = er32(TXDCTL1);
3331*4882a593Smuzhiyun 	regs_buff[23] = er32(TARC1);
3332*4882a593Smuzhiyun 	regs_buff[24] = er32(CTRL_EXT);
3333*4882a593Smuzhiyun 	regs_buff[25] = er32(ERT);
3334*4882a593Smuzhiyun 	regs_buff[26] = er32(RDBAL0);
3335*4882a593Smuzhiyun 	regs_buff[27] = er32(RDBAH0);
3336*4882a593Smuzhiyun 	regs_buff[28] = er32(TDFH);
3337*4882a593Smuzhiyun 	regs_buff[29] = er32(TDFT);
3338*4882a593Smuzhiyun 	regs_buff[30] = er32(TDFHS);
3339*4882a593Smuzhiyun 	regs_buff[31] = er32(TDFTS);
3340*4882a593Smuzhiyun 	regs_buff[32] = er32(TDFPC);
3341*4882a593Smuzhiyun 	regs_buff[33] = er32(RDFH);
3342*4882a593Smuzhiyun 	regs_buff[34] = er32(RDFT);
3343*4882a593Smuzhiyun 	regs_buff[35] = er32(RDFHS);
3344*4882a593Smuzhiyun 	regs_buff[36] = er32(RDFTS);
3345*4882a593Smuzhiyun 	regs_buff[37] = er32(RDFPC);
3346*4882a593Smuzhiyun 
3347*4882a593Smuzhiyun 	pr_info("Register dump\n");
3348*4882a593Smuzhiyun 	for (i = 0; i < NUM_REGS; i++)
3349*4882a593Smuzhiyun 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3350*4882a593Smuzhiyun }
3351*4882a593Smuzhiyun 
3352*4882a593Smuzhiyun /*
3353*4882a593Smuzhiyun  * e1000_dump: Print registers, tx ring and rx ring
3354*4882a593Smuzhiyun  */
3355*4882a593Smuzhiyun static void e1000_dump(struct e1000_adapter *adapter)
3356*4882a593Smuzhiyun {
3357*4882a593Smuzhiyun 	/* this code doesn't handle multiple rings */
3358*4882a593Smuzhiyun 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3359*4882a593Smuzhiyun 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3360*4882a593Smuzhiyun 	int i;
3361*4882a593Smuzhiyun 
3362*4882a593Smuzhiyun 	if (!netif_msg_hw(adapter))
3363*4882a593Smuzhiyun 		return;
3364*4882a593Smuzhiyun 
3365*4882a593Smuzhiyun 	/* Print Registers */
3366*4882a593Smuzhiyun 	e1000_regdump(adapter);
3367*4882a593Smuzhiyun 
3368*4882a593Smuzhiyun 	/* transmit dump */
3369*4882a593Smuzhiyun 	pr_info("TX Desc ring0 dump\n");
3370*4882a593Smuzhiyun 
3371*4882a593Smuzhiyun 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3372*4882a593Smuzhiyun 	 *
3373*4882a593Smuzhiyun 	 * Legacy Transmit Descriptor
3374*4882a593Smuzhiyun 	 *   +--------------------------------------------------------------+
3375*4882a593Smuzhiyun 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3376*4882a593Smuzhiyun 	 *   +--------------------------------------------------------------+
3377*4882a593Smuzhiyun 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3378*4882a593Smuzhiyun 	 *   +--------------------------------------------------------------+
3379*4882a593Smuzhiyun 	 *   63       48 47        36 35    32 31     24 23    16 15        0
3380*4882a593Smuzhiyun 	 *
3381*4882a593Smuzhiyun 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3382*4882a593Smuzhiyun 	 *   63      48 47    40 39       32 31             16 15    8 7      0
3383*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
3384*4882a593Smuzhiyun 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3385*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
3386*4882a593Smuzhiyun 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3387*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
3388*4882a593Smuzhiyun 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3389*4882a593Smuzhiyun 	 *
3390*4882a593Smuzhiyun 	 * Extended Data Descriptor (DTYP=0x1)
3391*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
3392*4882a593Smuzhiyun 	 * 0 |                     Buffer Address [63:0]                      |
3393*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
3394*4882a593Smuzhiyun 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3395*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
3396*4882a593Smuzhiyun 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3397*4882a593Smuzhiyun 	 */
3398*4882a593Smuzhiyun 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3399*4882a593Smuzhiyun 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3400*4882a593Smuzhiyun 
3401*4882a593Smuzhiyun 	if (!netif_msg_tx_done(adapter))
3402*4882a593Smuzhiyun 		goto rx_ring_summary;
3403*4882a593Smuzhiyun 
3404*4882a593Smuzhiyun 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3405*4882a593Smuzhiyun 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3406*4882a593Smuzhiyun 		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3407*4882a593Smuzhiyun 		struct my_u { __le64 a; __le64 b; };
3408*4882a593Smuzhiyun 		struct my_u *u = (struct my_u *)tx_desc;
3409*4882a593Smuzhiyun 		const char *type;
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3412*4882a593Smuzhiyun 			type = "NTC/U";
3413*4882a593Smuzhiyun 		else if (i == tx_ring->next_to_use)
3414*4882a593Smuzhiyun 			type = "NTU";
3415*4882a593Smuzhiyun 		else if (i == tx_ring->next_to_clean)
3416*4882a593Smuzhiyun 			type = "NTC";
3417*4882a593Smuzhiyun 		else
3418*4882a593Smuzhiyun 			type = "";
3419*4882a593Smuzhiyun 
3420*4882a593Smuzhiyun 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3421*4882a593Smuzhiyun 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3422*4882a593Smuzhiyun 			le64_to_cpu(u->a), le64_to_cpu(u->b),
3423*4882a593Smuzhiyun 			(u64)buffer_info->dma, buffer_info->length,
3424*4882a593Smuzhiyun 			buffer_info->next_to_watch,
3425*4882a593Smuzhiyun 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3426*4882a593Smuzhiyun 	}
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun rx_ring_summary:
3429*4882a593Smuzhiyun 	/* receive dump */
3430*4882a593Smuzhiyun 	pr_info("\nRX Desc ring dump\n");
3431*4882a593Smuzhiyun 
3432*4882a593Smuzhiyun 	/* Legacy Receive Descriptor Format
3433*4882a593Smuzhiyun 	 *
3434*4882a593Smuzhiyun 	 * +-----------------------------------------------------+
3435*4882a593Smuzhiyun 	 * |                Buffer Address [63:0]                |
3436*4882a593Smuzhiyun 	 * +-----------------------------------------------------+
3437*4882a593Smuzhiyun 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3438*4882a593Smuzhiyun 	 * +-----------------------------------------------------+
3439*4882a593Smuzhiyun 	 * 63       48 47    40 39      32 31         16 15      0
3440*4882a593Smuzhiyun 	 */
3441*4882a593Smuzhiyun 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3442*4882a593Smuzhiyun 
3443*4882a593Smuzhiyun 	if (!netif_msg_rx_status(adapter))
3444*4882a593Smuzhiyun 		goto exit;
3445*4882a593Smuzhiyun 
3446*4882a593Smuzhiyun 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3447*4882a593Smuzhiyun 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3448*4882a593Smuzhiyun 		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3449*4882a593Smuzhiyun 		struct my_u { __le64 a; __le64 b; };
3450*4882a593Smuzhiyun 		struct my_u *u = (struct my_u *)rx_desc;
3451*4882a593Smuzhiyun 		const char *type;
3452*4882a593Smuzhiyun 
3453*4882a593Smuzhiyun 		if (i == rx_ring->next_to_use)
3454*4882a593Smuzhiyun 			type = "NTU";
3455*4882a593Smuzhiyun 		else if (i == rx_ring->next_to_clean)
3456*4882a593Smuzhiyun 			type = "NTC";
3457*4882a593Smuzhiyun 		else
3458*4882a593Smuzhiyun 			type = "";
3459*4882a593Smuzhiyun 
3460*4882a593Smuzhiyun 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3461*4882a593Smuzhiyun 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3462*4882a593Smuzhiyun 			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3463*4882a593Smuzhiyun 	} /* for */
3464*4882a593Smuzhiyun 
3465*4882a593Smuzhiyun 	/* dump the descriptor caches */
3466*4882a593Smuzhiyun 	/* rx */
3467*4882a593Smuzhiyun 	pr_info("Rx descriptor cache in 64bit format\n");
3468*4882a593Smuzhiyun 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3469*4882a593Smuzhiyun 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3470*4882a593Smuzhiyun 			i,
3471*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i+4),
3472*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i),
3473*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i+12),
3474*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i+8));
3475*4882a593Smuzhiyun 	}
3476*4882a593Smuzhiyun 	/* tx */
3477*4882a593Smuzhiyun 	pr_info("Tx descriptor cache in 64bit format\n");
3478*4882a593Smuzhiyun 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3479*4882a593Smuzhiyun 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3480*4882a593Smuzhiyun 			i,
3481*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i+4),
3482*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i),
3483*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i+12),
3484*4882a593Smuzhiyun 			readl(adapter->hw.hw_addr + i+8));
3485*4882a593Smuzhiyun 	}
3486*4882a593Smuzhiyun exit:
3487*4882a593Smuzhiyun 	return;
3488*4882a593Smuzhiyun }
3489*4882a593Smuzhiyun 
3490*4882a593Smuzhiyun /**
3491*4882a593Smuzhiyun  * e1000_tx_timeout - Respond to a Tx Hang
3492*4882a593Smuzhiyun  * @netdev: network interface device structure
3493*4882a593Smuzhiyun  * @txqueue: number of the Tx queue that hung (unused)
3494*4882a593Smuzhiyun  **/
3495*4882a593Smuzhiyun static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3496*4882a593Smuzhiyun {
3497*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3498*4882a593Smuzhiyun 
3499*4882a593Smuzhiyun 	/* Do the reset outside of interrupt context */
3500*4882a593Smuzhiyun 	adapter->tx_timeout_count++;
3501*4882a593Smuzhiyun 	schedule_work(&adapter->reset_task);
3502*4882a593Smuzhiyun }
3503*4882a593Smuzhiyun 
3504*4882a593Smuzhiyun static void e1000_reset_task(struct work_struct *work)
3505*4882a593Smuzhiyun {
3506*4882a593Smuzhiyun 	struct e1000_adapter *adapter =
3507*4882a593Smuzhiyun 		container_of(work, struct e1000_adapter, reset_task);
3508*4882a593Smuzhiyun 
3509*4882a593Smuzhiyun 	e_err(drv, "Reset adapter\n");
3510*4882a593Smuzhiyun 	e1000_reinit_locked(adapter);
3511*4882a593Smuzhiyun }
3512*4882a593Smuzhiyun 
3513*4882a593Smuzhiyun /**
3514*4882a593Smuzhiyun  * e1000_change_mtu - Change the Maximum Transfer Unit
3515*4882a593Smuzhiyun  * @netdev: network interface device structure
3516*4882a593Smuzhiyun  * @new_mtu: new value for maximum frame size
3517*4882a593Smuzhiyun  *
3518*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
3519*4882a593Smuzhiyun  **/
3520*4882a593Smuzhiyun static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3521*4882a593Smuzhiyun {
3522*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3523*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3524*4882a593Smuzhiyun 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3525*4882a593Smuzhiyun 
3526*4882a593Smuzhiyun 	/* Adapter-specific max frame size limits. */
3527*4882a593Smuzhiyun 	switch (hw->mac_type) {
3528*4882a593Smuzhiyun 	case e1000_undefined ... e1000_82542_rev2_1:
3529*4882a593Smuzhiyun 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3530*4882a593Smuzhiyun 			e_err(probe, "Jumbo Frames not supported.\n");
3531*4882a593Smuzhiyun 			return -EINVAL;
3532*4882a593Smuzhiyun 		}
3533*4882a593Smuzhiyun 		break;
3534*4882a593Smuzhiyun 	default:
3535*4882a593Smuzhiyun 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3536*4882a593Smuzhiyun 		break;
3537*4882a593Smuzhiyun 	}
3538*4882a593Smuzhiyun 
3539*4882a593Smuzhiyun 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3540*4882a593Smuzhiyun 		msleep(1);
3541*4882a593Smuzhiyun 	/* e1000_down has a dependency on max_frame_size */
3542*4882a593Smuzhiyun 	hw->max_frame_size = max_frame;
3543*4882a593Smuzhiyun 	if (netif_running(netdev)) {
3544*4882a593Smuzhiyun 		/* prevent buffers from being reallocated */
3545*4882a593Smuzhiyun 		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3546*4882a593Smuzhiyun 		e1000_down(adapter);
3547*4882a593Smuzhiyun 	}
3548*4882a593Smuzhiyun 
3549*4882a593Smuzhiyun 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3550*4882a593Smuzhiyun 	 * means we reserve 2 more; this pushes us to allocate from the next
3551*4882a593Smuzhiyun 	 * larger slab size,
3552*4882a593Smuzhiyun 	 * i.e. RXBUFFER_2048 --> size-4096 slab.
3553*4882a593Smuzhiyun 	 * However, with the new *_jumbo_rx* routines, jumbo receives will use
3554*4882a593Smuzhiyun 	 * fragmented skbs.
3555*4882a593Smuzhiyun 	 */
3556*4882a593Smuzhiyun 
3557*4882a593Smuzhiyun 	if (max_frame <= E1000_RXBUFFER_2048)
3558*4882a593Smuzhiyun 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3559*4882a593Smuzhiyun 	else
3560*4882a593Smuzhiyun #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3561*4882a593Smuzhiyun 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3562*4882a593Smuzhiyun #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3563*4882a593Smuzhiyun 		adapter->rx_buffer_len = PAGE_SIZE;
3564*4882a593Smuzhiyun #endif
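	/* Illustrative examples of the selection above: a standard 1500-byte
	 * MTU gives max_frame = 1518 <= E1000_RXBUFFER_2048, so 2048-byte
	 * buffers are used; a 9000-byte jumbo MTU gives max_frame = 9018, so
	 * on a 4 KiB-page system rx_buffer_len becomes PAGE_SIZE and the
	 * *_jumbo_rx* routines chain page-sized fragments instead.
	 */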
3565*4882a593Smuzhiyun 
3566*4882a593Smuzhiyun 	/* adjust allocation if LPE protects us, and we aren't using SBP */
3567*4882a593Smuzhiyun 	if (!hw->tbi_compatibility_on &&
3568*4882a593Smuzhiyun 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3569*4882a593Smuzhiyun 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3570*4882a593Smuzhiyun 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3571*4882a593Smuzhiyun 
3572*4882a593Smuzhiyun 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
3573*4882a593Smuzhiyun 		   netdev->mtu, new_mtu);
3574*4882a593Smuzhiyun 	netdev->mtu = new_mtu;
3575*4882a593Smuzhiyun 
3576*4882a593Smuzhiyun 	if (netif_running(netdev))
3577*4882a593Smuzhiyun 		e1000_up(adapter);
3578*4882a593Smuzhiyun 	else
3579*4882a593Smuzhiyun 		e1000_reset(adapter);
3580*4882a593Smuzhiyun 
3581*4882a593Smuzhiyun 	clear_bit(__E1000_RESETTING, &adapter->flags);
3582*4882a593Smuzhiyun 
3583*4882a593Smuzhiyun 	return 0;
3584*4882a593Smuzhiyun }
3585*4882a593Smuzhiyun 
3586*4882a593Smuzhiyun /**
3587*4882a593Smuzhiyun  * e1000_update_stats - Update the board statistics counters
3588*4882a593Smuzhiyun  * @adapter: board private structure
3589*4882a593Smuzhiyun  **/
3590*4882a593Smuzhiyun void e1000_update_stats(struct e1000_adapter *adapter)
3591*4882a593Smuzhiyun {
3592*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
3593*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3594*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
3595*4882a593Smuzhiyun 	unsigned long flags;
3596*4882a593Smuzhiyun 	u16 phy_tmp;
3597*4882a593Smuzhiyun 
3598*4882a593Smuzhiyun #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3599*4882a593Smuzhiyun 
3600*4882a593Smuzhiyun 	/* Prevent stats update while adapter is being reset, or if the pci
3601*4882a593Smuzhiyun 	 * connection is down.
3602*4882a593Smuzhiyun 	 */
3603*4882a593Smuzhiyun 	if (adapter->link_speed == 0)
3604*4882a593Smuzhiyun 		return;
3605*4882a593Smuzhiyun 	if (pci_channel_offline(pdev))
3606*4882a593Smuzhiyun 		return;
3607*4882a593Smuzhiyun 
3608*4882a593Smuzhiyun 	spin_lock_irqsave(&adapter->stats_lock, flags);
3609*4882a593Smuzhiyun 
3610*4882a593Smuzhiyun 	/* these counters are modified from e1000_tbi_adjust_stats,
3611*4882a593Smuzhiyun 	 * called from the interrupt context, so they must only
3612*4882a593Smuzhiyun 	 * be written while holding adapter->stats_lock
3613*4882a593Smuzhiyun 	 */
3614*4882a593Smuzhiyun 
3615*4882a593Smuzhiyun 	adapter->stats.crcerrs += er32(CRCERRS);
3616*4882a593Smuzhiyun 	adapter->stats.gprc += er32(GPRC);
3617*4882a593Smuzhiyun 	adapter->stats.gorcl += er32(GORCL);
3618*4882a593Smuzhiyun 	adapter->stats.gorch += er32(GORCH);
3619*4882a593Smuzhiyun 	adapter->stats.bprc += er32(BPRC);
3620*4882a593Smuzhiyun 	adapter->stats.mprc += er32(MPRC);
3621*4882a593Smuzhiyun 	adapter->stats.roc += er32(ROC);
3622*4882a593Smuzhiyun 
3623*4882a593Smuzhiyun 	adapter->stats.prc64 += er32(PRC64);
3624*4882a593Smuzhiyun 	adapter->stats.prc127 += er32(PRC127);
3625*4882a593Smuzhiyun 	adapter->stats.prc255 += er32(PRC255);
3626*4882a593Smuzhiyun 	adapter->stats.prc511 += er32(PRC511);
3627*4882a593Smuzhiyun 	adapter->stats.prc1023 += er32(PRC1023);
3628*4882a593Smuzhiyun 	adapter->stats.prc1522 += er32(PRC1522);
3629*4882a593Smuzhiyun 
3630*4882a593Smuzhiyun 	adapter->stats.symerrs += er32(SYMERRS);
3631*4882a593Smuzhiyun 	adapter->stats.mpc += er32(MPC);
3632*4882a593Smuzhiyun 	adapter->stats.scc += er32(SCC);
3633*4882a593Smuzhiyun 	adapter->stats.ecol += er32(ECOL);
3634*4882a593Smuzhiyun 	adapter->stats.mcc += er32(MCC);
3635*4882a593Smuzhiyun 	adapter->stats.latecol += er32(LATECOL);
3636*4882a593Smuzhiyun 	adapter->stats.dc += er32(DC);
3637*4882a593Smuzhiyun 	adapter->stats.sec += er32(SEC);
3638*4882a593Smuzhiyun 	adapter->stats.rlec += er32(RLEC);
3639*4882a593Smuzhiyun 	adapter->stats.xonrxc += er32(XONRXC);
3640*4882a593Smuzhiyun 	adapter->stats.xontxc += er32(XONTXC);
3641*4882a593Smuzhiyun 	adapter->stats.xoffrxc += er32(XOFFRXC);
3642*4882a593Smuzhiyun 	adapter->stats.xofftxc += er32(XOFFTXC);
3643*4882a593Smuzhiyun 	adapter->stats.fcruc += er32(FCRUC);
3644*4882a593Smuzhiyun 	adapter->stats.gptc += er32(GPTC);
3645*4882a593Smuzhiyun 	adapter->stats.gotcl += er32(GOTCL);
3646*4882a593Smuzhiyun 	adapter->stats.gotch += er32(GOTCH);
3647*4882a593Smuzhiyun 	adapter->stats.rnbc += er32(RNBC);
3648*4882a593Smuzhiyun 	adapter->stats.ruc += er32(RUC);
3649*4882a593Smuzhiyun 	adapter->stats.rfc += er32(RFC);
3650*4882a593Smuzhiyun 	adapter->stats.rjc += er32(RJC);
3651*4882a593Smuzhiyun 	adapter->stats.torl += er32(TORL);
3652*4882a593Smuzhiyun 	adapter->stats.torh += er32(TORH);
3653*4882a593Smuzhiyun 	adapter->stats.totl += er32(TOTL);
3654*4882a593Smuzhiyun 	adapter->stats.toth += er32(TOTH);
3655*4882a593Smuzhiyun 	adapter->stats.tpr += er32(TPR);
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 	adapter->stats.ptc64 += er32(PTC64);
3658*4882a593Smuzhiyun 	adapter->stats.ptc127 += er32(PTC127);
3659*4882a593Smuzhiyun 	adapter->stats.ptc255 += er32(PTC255);
3660*4882a593Smuzhiyun 	adapter->stats.ptc511 += er32(PTC511);
3661*4882a593Smuzhiyun 	adapter->stats.ptc1023 += er32(PTC1023);
3662*4882a593Smuzhiyun 	adapter->stats.ptc1522 += er32(PTC1522);
3663*4882a593Smuzhiyun 
3664*4882a593Smuzhiyun 	adapter->stats.mptc += er32(MPTC);
3665*4882a593Smuzhiyun 	adapter->stats.bptc += er32(BPTC);
3666*4882a593Smuzhiyun 
3667*4882a593Smuzhiyun 	/* used for adaptive IFS */
3668*4882a593Smuzhiyun 
3669*4882a593Smuzhiyun 	hw->tx_packet_delta = er32(TPT);
3670*4882a593Smuzhiyun 	adapter->stats.tpt += hw->tx_packet_delta;
3671*4882a593Smuzhiyun 	hw->collision_delta = er32(COLC);
3672*4882a593Smuzhiyun 	adapter->stats.colc += hw->collision_delta;
3673*4882a593Smuzhiyun 
3674*4882a593Smuzhiyun 	if (hw->mac_type >= e1000_82543) {
3675*4882a593Smuzhiyun 		adapter->stats.algnerrc += er32(ALGNERRC);
3676*4882a593Smuzhiyun 		adapter->stats.rxerrc += er32(RXERRC);
3677*4882a593Smuzhiyun 		adapter->stats.tncrs += er32(TNCRS);
3678*4882a593Smuzhiyun 		adapter->stats.cexterr += er32(CEXTERR);
3679*4882a593Smuzhiyun 		adapter->stats.tsctc += er32(TSCTC);
3680*4882a593Smuzhiyun 		adapter->stats.tsctfc += er32(TSCTFC);
3681*4882a593Smuzhiyun 	}
3682*4882a593Smuzhiyun 
3683*4882a593Smuzhiyun 	/* Fill out the OS statistics structure */
3684*4882a593Smuzhiyun 	netdev->stats.multicast = adapter->stats.mprc;
3685*4882a593Smuzhiyun 	netdev->stats.collisions = adapter->stats.colc;
3686*4882a593Smuzhiyun 
3687*4882a593Smuzhiyun 	/* Rx Errors */
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun 	/* RLEC on some newer hardware can be incorrect so build
3690*4882a593Smuzhiyun 	 * our own version based on RUC and ROC
3691*4882a593Smuzhiyun 	 */
3692*4882a593Smuzhiyun 	netdev->stats.rx_errors = adapter->stats.rxerrc +
3693*4882a593Smuzhiyun 		adapter->stats.crcerrs + adapter->stats.algnerrc +
3694*4882a593Smuzhiyun 		adapter->stats.ruc + adapter->stats.roc +
3695*4882a593Smuzhiyun 		adapter->stats.cexterr;
3696*4882a593Smuzhiyun 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3697*4882a593Smuzhiyun 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3698*4882a593Smuzhiyun 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3699*4882a593Smuzhiyun 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3700*4882a593Smuzhiyun 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3701*4882a593Smuzhiyun 
3702*4882a593Smuzhiyun 	/* Tx Errors */
3703*4882a593Smuzhiyun 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3704*4882a593Smuzhiyun 	netdev->stats.tx_errors = adapter->stats.txerrc;
3705*4882a593Smuzhiyun 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3706*4882a593Smuzhiyun 	netdev->stats.tx_window_errors = adapter->stats.latecol;
3707*4882a593Smuzhiyun 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3708*4882a593Smuzhiyun 	if (hw->bad_tx_carr_stats_fd &&
3709*4882a593Smuzhiyun 	    adapter->link_duplex == FULL_DUPLEX) {
3710*4882a593Smuzhiyun 		netdev->stats.tx_carrier_errors = 0;
3711*4882a593Smuzhiyun 		adapter->stats.tncrs = 0;
3712*4882a593Smuzhiyun 	}
3713*4882a593Smuzhiyun 
3714*4882a593Smuzhiyun 	/* Tx Dropped needs to be maintained elsewhere */
3715*4882a593Smuzhiyun 
3716*4882a593Smuzhiyun 	/* Phy Stats */
3717*4882a593Smuzhiyun 	if (hw->media_type == e1000_media_type_copper) {
3718*4882a593Smuzhiyun 		if ((adapter->link_speed == SPEED_1000) &&
3719*4882a593Smuzhiyun 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3720*4882a593Smuzhiyun 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3721*4882a593Smuzhiyun 			adapter->phy_stats.idle_errors += phy_tmp;
3722*4882a593Smuzhiyun 		}
3723*4882a593Smuzhiyun 
3724*4882a593Smuzhiyun 		if ((hw->mac_type <= e1000_82546) &&
3725*4882a593Smuzhiyun 		   (hw->phy_type == e1000_phy_m88) &&
3726*4882a593Smuzhiyun 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3727*4882a593Smuzhiyun 			adapter->phy_stats.receive_errors += phy_tmp;
3728*4882a593Smuzhiyun 	}
3729*4882a593Smuzhiyun 
3730*4882a593Smuzhiyun 	/* Management Stats */
3731*4882a593Smuzhiyun 	if (hw->has_smbus) {
3732*4882a593Smuzhiyun 		adapter->stats.mgptc += er32(MGTPTC);
3733*4882a593Smuzhiyun 		adapter->stats.mgprc += er32(MGTPRC);
3734*4882a593Smuzhiyun 		adapter->stats.mgpdc += er32(MGTPDC);
3735*4882a593Smuzhiyun 	}
3736*4882a593Smuzhiyun 
3737*4882a593Smuzhiyun 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3738*4882a593Smuzhiyun }
3739*4882a593Smuzhiyun 
3740*4882a593Smuzhiyun /**
3741*4882a593Smuzhiyun  * e1000_intr - Interrupt Handler
3742*4882a593Smuzhiyun  * @irq: interrupt number
3743*4882a593Smuzhiyun  * @data: pointer to a network interface device structure
3744*4882a593Smuzhiyun  **/
3745*4882a593Smuzhiyun static irqreturn_t e1000_intr(int irq, void *data)
3746*4882a593Smuzhiyun {
3747*4882a593Smuzhiyun 	struct net_device *netdev = data;
3748*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3749*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3750*4882a593Smuzhiyun 	u32 icr = er32(ICR);
3751*4882a593Smuzhiyun 
3752*4882a593Smuzhiyun 	if (unlikely((!icr)))
3753*4882a593Smuzhiyun 		return IRQ_NONE;  /* Not our interrupt */
3754*4882a593Smuzhiyun 
3755*4882a593Smuzhiyun 	/* we might have caused the interrupt, but the above
3756*4882a593Smuzhiyun 	 * read cleared it; and just in case the driver is
3757*4882a593Smuzhiyun 	 * down, there is nothing to do, so return handled
3758*4882a593Smuzhiyun 	 */
3759*4882a593Smuzhiyun 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3760*4882a593Smuzhiyun 		return IRQ_HANDLED;
3761*4882a593Smuzhiyun 
3762*4882a593Smuzhiyun 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3763*4882a593Smuzhiyun 		hw->get_link_status = 1;
3764*4882a593Smuzhiyun 		/* guard against interrupt when we're going down */
3765*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3766*4882a593Smuzhiyun 			schedule_delayed_work(&adapter->watchdog_task, 1);
3767*4882a593Smuzhiyun 	}
3768*4882a593Smuzhiyun 
3769*4882a593Smuzhiyun 	/* disable interrupts, without the synchronize_irq bit */
3770*4882a593Smuzhiyun 	ew32(IMC, ~0);
3771*4882a593Smuzhiyun 	E1000_WRITE_FLUSH();
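	/* Writing ~0 to IMC masks every interrupt cause for the duration of
	 * the NAPI poll; interrupts are re-enabled from e1000_clean() via
	 * e1000_irq_enable() once polling completes (or immediately below if
	 * the NAPI schedule unexpectedly fails).
	 */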
3772*4882a593Smuzhiyun 
3773*4882a593Smuzhiyun 	if (likely(napi_schedule_prep(&adapter->napi))) {
3774*4882a593Smuzhiyun 		adapter->total_tx_bytes = 0;
3775*4882a593Smuzhiyun 		adapter->total_tx_packets = 0;
3776*4882a593Smuzhiyun 		adapter->total_rx_bytes = 0;
3777*4882a593Smuzhiyun 		adapter->total_rx_packets = 0;
3778*4882a593Smuzhiyun 		__napi_schedule(&adapter->napi);
3779*4882a593Smuzhiyun 	} else {
3780*4882a593Smuzhiyun 		/* this really should not happen! If it does, it is basically a
3781*4882a593Smuzhiyun 		 * bug, but not a hard error, so enable ints and continue
3782*4882a593Smuzhiyun 		 */
3783*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3784*4882a593Smuzhiyun 			e1000_irq_enable(adapter);
3785*4882a593Smuzhiyun 	}
3786*4882a593Smuzhiyun 
3787*4882a593Smuzhiyun 	return IRQ_HANDLED;
3788*4882a593Smuzhiyun }
3789*4882a593Smuzhiyun 
3790*4882a593Smuzhiyun /**
3791*4882a593Smuzhiyun  * e1000_clean - NAPI Rx polling callback
3792*4882a593Smuzhiyun  * @napi: napi struct containing references to driver info
3793*4882a593Smuzhiyun  * @budget: budget given to driver for receive packets
3794*4882a593Smuzhiyun  **/
3795*4882a593Smuzhiyun static int e1000_clean(struct napi_struct *napi, int budget)
3796*4882a593Smuzhiyun {
3797*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3798*4882a593Smuzhiyun 						     napi);
3799*4882a593Smuzhiyun 	int tx_clean_complete = 0, work_done = 0;
3800*4882a593Smuzhiyun 
3801*4882a593Smuzhiyun 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3802*4882a593Smuzhiyun 
3803*4882a593Smuzhiyun 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3804*4882a593Smuzhiyun 
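	/* If tx cleaning did not finish or rx consumed the whole budget,
	 * report the full budget back so the NAPI core keeps polling without
	 * re-enabling interrupts.
	 */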
3805*4882a593Smuzhiyun 	if (!tx_clean_complete || work_done == budget)
3806*4882a593Smuzhiyun 		return budget;
3807*4882a593Smuzhiyun 
3808*4882a593Smuzhiyun 	/* Exit the polling mode, but don't re-enable interrupts if stack might
3809*4882a593Smuzhiyun 	 * poll us due to busy-polling
3810*4882a593Smuzhiyun 	 */
3811*4882a593Smuzhiyun 	if (likely(napi_complete_done(napi, work_done))) {
3812*4882a593Smuzhiyun 		if (likely(adapter->itr_setting & 3))
3813*4882a593Smuzhiyun 			e1000_set_itr(adapter);
3814*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3815*4882a593Smuzhiyun 			e1000_irq_enable(adapter);
3816*4882a593Smuzhiyun 	}
3817*4882a593Smuzhiyun 
3818*4882a593Smuzhiyun 	return work_done;
3819*4882a593Smuzhiyun }
3820*4882a593Smuzhiyun 
3821*4882a593Smuzhiyun /**
3822*4882a593Smuzhiyun  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3823*4882a593Smuzhiyun  * @adapter: board private structure
3824*4882a593Smuzhiyun  * @tx_ring: ring to clean
3825*4882a593Smuzhiyun  **/
3826*4882a593Smuzhiyun static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3827*4882a593Smuzhiyun 			       struct e1000_tx_ring *tx_ring)
3828*4882a593Smuzhiyun {
3829*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3830*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
3831*4882a593Smuzhiyun 	struct e1000_tx_desc *tx_desc, *eop_desc;
3832*4882a593Smuzhiyun 	struct e1000_tx_buffer *buffer_info;
3833*4882a593Smuzhiyun 	unsigned int i, eop;
3834*4882a593Smuzhiyun 	unsigned int count = 0;
3835*4882a593Smuzhiyun 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3836*4882a593Smuzhiyun 	unsigned int bytes_compl = 0, pkts_compl = 0;
3837*4882a593Smuzhiyun 
3838*4882a593Smuzhiyun 	i = tx_ring->next_to_clean;
3839*4882a593Smuzhiyun 	eop = tx_ring->buffer_info[i].next_to_watch;
3840*4882a593Smuzhiyun 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3841*4882a593Smuzhiyun 
3842*4882a593Smuzhiyun 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3843*4882a593Smuzhiyun 	       (count < tx_ring->count)) {
3844*4882a593Smuzhiyun 		bool cleaned = false;
3845*4882a593Smuzhiyun 		dma_rmb();	/* read buffer_info after eop_desc */
3846*4882a593Smuzhiyun 		for ( ; !cleaned; count++) {
3847*4882a593Smuzhiyun 			tx_desc = E1000_TX_DESC(*tx_ring, i);
3848*4882a593Smuzhiyun 			buffer_info = &tx_ring->buffer_info[i];
3849*4882a593Smuzhiyun 			cleaned = (i == eop);
3850*4882a593Smuzhiyun 
3851*4882a593Smuzhiyun 			if (cleaned) {
3852*4882a593Smuzhiyun 				total_tx_packets += buffer_info->segs;
3853*4882a593Smuzhiyun 				total_tx_bytes += buffer_info->bytecount;
3854*4882a593Smuzhiyun 				if (buffer_info->skb) {
3855*4882a593Smuzhiyun 					bytes_compl += buffer_info->skb->len;
3856*4882a593Smuzhiyun 					pkts_compl++;
3857*4882a593Smuzhiyun 				}
3858*4882a593Smuzhiyun 
3859*4882a593Smuzhiyun 			}
3860*4882a593Smuzhiyun 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3861*4882a593Smuzhiyun 			tx_desc->upper.data = 0;
3862*4882a593Smuzhiyun 
3863*4882a593Smuzhiyun 			if (unlikely(++i == tx_ring->count))
3864*4882a593Smuzhiyun 				i = 0;
3865*4882a593Smuzhiyun 		}
3866*4882a593Smuzhiyun 
3867*4882a593Smuzhiyun 		eop = tx_ring->buffer_info[i].next_to_watch;
3868*4882a593Smuzhiyun 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3869*4882a593Smuzhiyun 	}
3870*4882a593Smuzhiyun 
3871*4882a593Smuzhiyun 	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3872*4882a593Smuzhiyun 	 * which will reuse the cleaned buffers.
3873*4882a593Smuzhiyun 	 */
3874*4882a593Smuzhiyun 	smp_store_release(&tx_ring->next_to_clean, i);
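	/* The release ordering guarantees that the buffer_info and descriptor
	 * writes above are visible before the new next_to_clean value, so
	 * e1000_xmit_frame, which derives the number of free descriptors from
	 * next_to_clean via E1000_DESC_UNUSED, never reuses an entry that has
	 * not actually been unmapped and freed yet.
	 */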
3875*4882a593Smuzhiyun 
3876*4882a593Smuzhiyun 	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3877*4882a593Smuzhiyun 
3878*4882a593Smuzhiyun #define TX_WAKE_THRESHOLD 32
3879*4882a593Smuzhiyun 	if (unlikely(count && netif_carrier_ok(netdev) &&
3880*4882a593Smuzhiyun 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3881*4882a593Smuzhiyun 		/* Make sure that anybody stopping the queue after this
3882*4882a593Smuzhiyun 		 * sees the new next_to_clean.
3883*4882a593Smuzhiyun 		 */
3884*4882a593Smuzhiyun 		smp_mb();
3885*4882a593Smuzhiyun 
3886*4882a593Smuzhiyun 		if (netif_queue_stopped(netdev) &&
3887*4882a593Smuzhiyun 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3888*4882a593Smuzhiyun 			netif_wake_queue(netdev);
3889*4882a593Smuzhiyun 			++adapter->restart_queue;
3890*4882a593Smuzhiyun 		}
3891*4882a593Smuzhiyun 	}
3892*4882a593Smuzhiyun 
3893*4882a593Smuzhiyun 	if (adapter->detect_tx_hung) {
3894*4882a593Smuzhiyun 		/* Detect a transmit hang in hardware; this serializes the
3895*4882a593Smuzhiyun 		 * check with the clearing of time_stamp and movement of i
3896*4882a593Smuzhiyun 		 */
3897*4882a593Smuzhiyun 		adapter->detect_tx_hung = false;
3898*4882a593Smuzhiyun 		if (tx_ring->buffer_info[eop].time_stamp &&
3899*4882a593Smuzhiyun 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3900*4882a593Smuzhiyun 			       (adapter->tx_timeout_factor * HZ)) &&
3901*4882a593Smuzhiyun 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3902*4882a593Smuzhiyun 
3903*4882a593Smuzhiyun 			/* detected Tx unit hang */
3904*4882a593Smuzhiyun 			e_err(drv, "Detected Tx Unit Hang\n"
3905*4882a593Smuzhiyun 			      "  Tx Queue             <%lu>\n"
3906*4882a593Smuzhiyun 			      "  TDH                  <%x>\n"
3907*4882a593Smuzhiyun 			      "  TDT                  <%x>\n"
3908*4882a593Smuzhiyun 			      "  next_to_use          <%x>\n"
3909*4882a593Smuzhiyun 			      "  next_to_clean        <%x>\n"
3910*4882a593Smuzhiyun 			      "buffer_info[next_to_clean]\n"
3911*4882a593Smuzhiyun 			      "  time_stamp           <%lx>\n"
3912*4882a593Smuzhiyun 			      "  next_to_watch        <%x>\n"
3913*4882a593Smuzhiyun 			      "  jiffies              <%lx>\n"
3914*4882a593Smuzhiyun 			      "  next_to_watch.status <%x>\n",
3915*4882a593Smuzhiyun 				(unsigned long)(tx_ring - adapter->tx_ring),
3916*4882a593Smuzhiyun 				readl(hw->hw_addr + tx_ring->tdh),
3917*4882a593Smuzhiyun 				readl(hw->hw_addr + tx_ring->tdt),
3918*4882a593Smuzhiyun 				tx_ring->next_to_use,
3919*4882a593Smuzhiyun 				tx_ring->next_to_clean,
3920*4882a593Smuzhiyun 				tx_ring->buffer_info[eop].time_stamp,
3921*4882a593Smuzhiyun 				eop,
3922*4882a593Smuzhiyun 				jiffies,
3923*4882a593Smuzhiyun 				eop_desc->upper.fields.status);
3924*4882a593Smuzhiyun 			e1000_dump(adapter);
3925*4882a593Smuzhiyun 			netif_stop_queue(netdev);
3926*4882a593Smuzhiyun 		}
3927*4882a593Smuzhiyun 	}
3928*4882a593Smuzhiyun 	adapter->total_tx_bytes += total_tx_bytes;
3929*4882a593Smuzhiyun 	adapter->total_tx_packets += total_tx_packets;
3930*4882a593Smuzhiyun 	netdev->stats.tx_bytes += total_tx_bytes;
3931*4882a593Smuzhiyun 	netdev->stats.tx_packets += total_tx_packets;
3932*4882a593Smuzhiyun 	return count < tx_ring->count;
3933*4882a593Smuzhiyun }
3934*4882a593Smuzhiyun 
3935*4882a593Smuzhiyun /**
3936*4882a593Smuzhiyun  * e1000_rx_checksum - Receive Checksum Offload for 82543
3937*4882a593Smuzhiyun  * @adapter:     board private structure
3938*4882a593Smuzhiyun  * @status_err:  receive descriptor status and error fields
3939*4882a593Smuzhiyun  * @csum:        receive descriptor csum field
3940*4882a593Smuzhiyun  * @skb:         socket buffer with received data
3941*4882a593Smuzhiyun  **/
3942*4882a593Smuzhiyun static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3943*4882a593Smuzhiyun 			      u32 csum, struct sk_buff *skb)
3944*4882a593Smuzhiyun {
3945*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3946*4882a593Smuzhiyun 	u16 status = (u16)status_err;
3947*4882a593Smuzhiyun 	u8 errors = (u8)(status_err >> 24);
3948*4882a593Smuzhiyun 
3949*4882a593Smuzhiyun 	skb_checksum_none_assert(skb);
3950*4882a593Smuzhiyun 
3951*4882a593Smuzhiyun 	/* 82543 or newer only */
3952*4882a593Smuzhiyun 	if (unlikely(hw->mac_type < e1000_82543))
3953*4882a593Smuzhiyun 		return;
3954*4882a593Smuzhiyun 	/* Ignore Checksum bit is set */
3955*4882a593Smuzhiyun 	if (unlikely(status & E1000_RXD_STAT_IXSM))
3956*4882a593Smuzhiyun 		return;
3957*4882a593Smuzhiyun 	/* TCP/UDP checksum error bit is set */
3958*4882a593Smuzhiyun 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3959*4882a593Smuzhiyun 		/* let the stack verify checksum errors */
3960*4882a593Smuzhiyun 		adapter->hw_csum_err++;
3961*4882a593Smuzhiyun 		return;
3962*4882a593Smuzhiyun 	}
3963*4882a593Smuzhiyun 	/* TCP/UDP Checksum has not been calculated */
3964*4882a593Smuzhiyun 	if (!(status & E1000_RXD_STAT_TCPCS))
3965*4882a593Smuzhiyun 		return;
3966*4882a593Smuzhiyun 
3967*4882a593Smuzhiyun 	/* It must be a TCP or UDP packet with a valid checksum */
3968*4882a593Smuzhiyun 	if (likely(status & E1000_RXD_STAT_TCPCS)) {
3969*4882a593Smuzhiyun 		/* TCP checksum is good */
3970*4882a593Smuzhiyun 		skb->ip_summed = CHECKSUM_UNNECESSARY;
3971*4882a593Smuzhiyun 	}
3972*4882a593Smuzhiyun 	adapter->hw_csum_good++;
3973*4882a593Smuzhiyun }
3974*4882a593Smuzhiyun 
3975*4882a593Smuzhiyun /**
3976*4882a593Smuzhiyun  * e1000_consume_page - helper function for jumbo Rx path
3977*4882a593Smuzhiyun  * @bi: software descriptor shadow data
3978*4882a593Smuzhiyun  * @skb: skb being modified
3979*4882a593Smuzhiyun  * @length: length of data being added
3980*4882a593Smuzhiyun  **/
3981*4882a593Smuzhiyun static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3982*4882a593Smuzhiyun 			       u16 length)
3983*4882a593Smuzhiyun {
3984*4882a593Smuzhiyun 	bi->rxbuf.page = NULL;
3985*4882a593Smuzhiyun 	skb->len += length;
3986*4882a593Smuzhiyun 	skb->data_len += length;
3987*4882a593Smuzhiyun 	skb->truesize += PAGE_SIZE;
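	/* Each chained receive buffer donates its whole page to the skb, so
	 * truesize grows by PAGE_SIZE even when only 'length' bytes of it
	 * hold packet data.
	 */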
3988*4882a593Smuzhiyun }
3989*4882a593Smuzhiyun 
3990*4882a593Smuzhiyun /**
3991*4882a593Smuzhiyun  * e1000_receive_skb - helper function to handle rx indications
3992*4882a593Smuzhiyun  * @adapter: board private structure
3993*4882a593Smuzhiyun  * @status: descriptor status field as written by hardware
3994*4882a593Smuzhiyun  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3995*4882a593Smuzhiyun  * @skb: pointer to sk_buff to be indicated to stack
3996*4882a593Smuzhiyun  */
3997*4882a593Smuzhiyun static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3998*4882a593Smuzhiyun 			      __le16 vlan, struct sk_buff *skb)
3999*4882a593Smuzhiyun {
4000*4882a593Smuzhiyun 	skb->protocol = eth_type_trans(skb, adapter->netdev);
4001*4882a593Smuzhiyun 
4002*4882a593Smuzhiyun 	if (status & E1000_RXD_STAT_VP) {
4003*4882a593Smuzhiyun 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4004*4882a593Smuzhiyun 
4005*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4006*4882a593Smuzhiyun 	}
4007*4882a593Smuzhiyun 	napi_gro_receive(&adapter->napi, skb);
4008*4882a593Smuzhiyun }
4009*4882a593Smuzhiyun 
4010*4882a593Smuzhiyun /**
4011*4882a593Smuzhiyun  * e1000_tbi_adjust_stats
4012*4882a593Smuzhiyun  * @hw: Struct containing variables accessed by shared code
4013*4882a593Smuzhiyun  * @stats: point to stats struct
4014*4882a593Smuzhiyun  * @frame_len: The length of the frame in question
4015*4882a593Smuzhiyun  * @mac_addr: The Ethernet destination address of the frame in question
4016*4882a593Smuzhiyun  *
4017*4882a593Smuzhiyun  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4018*4882a593Smuzhiyun  */
4019*4882a593Smuzhiyun static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4020*4882a593Smuzhiyun 				   struct e1000_hw_stats *stats,
4021*4882a593Smuzhiyun 				   u32 frame_len, const u8 *mac_addr)
4022*4882a593Smuzhiyun {
4023*4882a593Smuzhiyun 	u64 carry_bit;
4024*4882a593Smuzhiyun 
4025*4882a593Smuzhiyun 	/* First adjust the frame length. */
4026*4882a593Smuzhiyun 	frame_len--;
4027*4882a593Smuzhiyun 	/* We need to adjust the statistics counters, since the hardware
4028*4882a593Smuzhiyun 	 * counters overcount this packet as a CRC error and undercount
4029*4882a593Smuzhiyun 	 * the packet as a good packet
4030*4882a593Smuzhiyun 	 */
4031*4882a593Smuzhiyun 	/* This packet should not be counted as a CRC error. */
4032*4882a593Smuzhiyun 	stats->crcerrs--;
4033*4882a593Smuzhiyun 	/* This packet does count as a Good Packet Received. */
4034*4882a593Smuzhiyun 	stats->gprc++;
4035*4882a593Smuzhiyun 
4036*4882a593Smuzhiyun 	/* Adjust the Good Octets received counters */
4037*4882a593Smuzhiyun 	carry_bit = 0x80000000 & stats->gorcl;
4038*4882a593Smuzhiyun 	stats->gorcl += frame_len;
4039*4882a593Smuzhiyun 	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4040*4882a593Smuzhiyun 	 * Received Count) was one before the addition,
4041*4882a593Smuzhiyun 	 * AND it is zero after, then we lost the carry out,
4042*4882a593Smuzhiyun 	 * need to add one to Gorch (Good Octets Received Count High).
4043*4882a593Smuzhiyun 	 * This could be simplified if all environments supported
4044*4882a593Smuzhiyun 	 * 64-bit integers.
4045*4882a593Smuzhiyun 	 */
4046*4882a593Smuzhiyun 	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4047*4882a593Smuzhiyun 		stats->gorch++;
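	/* Worked example: gorcl = 0xFFFFFF00 and frame_len = 0x200.  Bit 31
	 * (carry_bit) was set before the addition and the low 32 bits are
	 * 0x00000100 afterwards, so the carry out of bit 31 is folded into
	 * gorch here.
	 */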
4048*4882a593Smuzhiyun 	/* Is this a broadcast or multicast?  Check broadcast first,
4049*4882a593Smuzhiyun 	 * since the test for a multicast frame will test positive on
4050*4882a593Smuzhiyun 	 * a broadcast frame.
4051*4882a593Smuzhiyun 	 */
4052*4882a593Smuzhiyun 	if (is_broadcast_ether_addr(mac_addr))
4053*4882a593Smuzhiyun 		stats->bprc++;
4054*4882a593Smuzhiyun 	else if (is_multicast_ether_addr(mac_addr))
4055*4882a593Smuzhiyun 		stats->mprc++;
4056*4882a593Smuzhiyun 
4057*4882a593Smuzhiyun 	if (frame_len == hw->max_frame_size) {
4058*4882a593Smuzhiyun 		/* In this case, the hardware has overcounted the number of
4059*4882a593Smuzhiyun 		 * oversize frames.
4060*4882a593Smuzhiyun 		 */
4061*4882a593Smuzhiyun 		if (stats->roc > 0)
4062*4882a593Smuzhiyun 			stats->roc--;
4063*4882a593Smuzhiyun 	}
4064*4882a593Smuzhiyun 
4065*4882a593Smuzhiyun 	/* Adjust the bin counters when the extra byte put the frame in the
4066*4882a593Smuzhiyun 	 * wrong bin. Remember that the frame_len was adjusted above.
4067*4882a593Smuzhiyun 	 */
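	/* Example: a frame the hardware saw as 128 bytes was binned in prc255
	 * (128..255); after the one-byte adjustment frame_len is 127, so it
	 * is moved back into the prc127 bin below.
	 */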
4068*4882a593Smuzhiyun 	if (frame_len == 64) {
4069*4882a593Smuzhiyun 		stats->prc64++;
4070*4882a593Smuzhiyun 		stats->prc127--;
4071*4882a593Smuzhiyun 	} else if (frame_len == 127) {
4072*4882a593Smuzhiyun 		stats->prc127++;
4073*4882a593Smuzhiyun 		stats->prc255--;
4074*4882a593Smuzhiyun 	} else if (frame_len == 255) {
4075*4882a593Smuzhiyun 		stats->prc255++;
4076*4882a593Smuzhiyun 		stats->prc511--;
4077*4882a593Smuzhiyun 	} else if (frame_len == 511) {
4078*4882a593Smuzhiyun 		stats->prc511++;
4079*4882a593Smuzhiyun 		stats->prc1023--;
4080*4882a593Smuzhiyun 	} else if (frame_len == 1023) {
4081*4882a593Smuzhiyun 		stats->prc1023++;
4082*4882a593Smuzhiyun 		stats->prc1522--;
4083*4882a593Smuzhiyun 	} else if (frame_len == 1522) {
4084*4882a593Smuzhiyun 		stats->prc1522++;
4085*4882a593Smuzhiyun 	}
4086*4882a593Smuzhiyun }
4087*4882a593Smuzhiyun 
4088*4882a593Smuzhiyun static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4089*4882a593Smuzhiyun 				    u8 status, u8 errors,
4090*4882a593Smuzhiyun 				    u32 length, const u8 *data)
4091*4882a593Smuzhiyun {
4092*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4093*4882a593Smuzhiyun 	u8 last_byte = *(data + length - 1);
4094*4882a593Smuzhiyun 
4095*4882a593Smuzhiyun 	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4096*4882a593Smuzhiyun 		unsigned long irq_flags;
4097*4882a593Smuzhiyun 
4098*4882a593Smuzhiyun 		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4099*4882a593Smuzhiyun 		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4100*4882a593Smuzhiyun 		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4101*4882a593Smuzhiyun 
4102*4882a593Smuzhiyun 		return true;
4103*4882a593Smuzhiyun 	}
4104*4882a593Smuzhiyun 
4105*4882a593Smuzhiyun 	return false;
4106*4882a593Smuzhiyun }
4107*4882a593Smuzhiyun 
4108*4882a593Smuzhiyun static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4109*4882a593Smuzhiyun 					  unsigned int bufsz)
4110*4882a593Smuzhiyun {
4111*4882a593Smuzhiyun 	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4112*4882a593Smuzhiyun 
4113*4882a593Smuzhiyun 	if (unlikely(!skb))
4114*4882a593Smuzhiyun 		adapter->alloc_rx_buff_failed++;
4115*4882a593Smuzhiyun 	return skb;
4116*4882a593Smuzhiyun }
4117*4882a593Smuzhiyun 
4118*4882a593Smuzhiyun /**
4119*4882a593Smuzhiyun  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4120*4882a593Smuzhiyun  * @adapter: board private structure
4121*4882a593Smuzhiyun  * @rx_ring: ring to clean
4122*4882a593Smuzhiyun  * @work_done: amount of napi work completed this call
4123*4882a593Smuzhiyun  * @work_to_do: max amount of work allowed for this call to do
4124*4882a593Smuzhiyun  *
4125*4882a593Smuzhiyun  * the return value indicates whether actual cleaning was done; there
4126*4882a593Smuzhiyun  * is no guarantee that everything was cleaned
4127*4882a593Smuzhiyun  */
4128*4882a593Smuzhiyun static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4129*4882a593Smuzhiyun 				     struct e1000_rx_ring *rx_ring,
4130*4882a593Smuzhiyun 				     int *work_done, int work_to_do)
4131*4882a593Smuzhiyun {
4132*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4133*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4134*4882a593Smuzhiyun 	struct e1000_rx_desc *rx_desc, *next_rxd;
4135*4882a593Smuzhiyun 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4136*4882a593Smuzhiyun 	u32 length;
4137*4882a593Smuzhiyun 	unsigned int i;
4138*4882a593Smuzhiyun 	int cleaned_count = 0;
4139*4882a593Smuzhiyun 	bool cleaned = false;
4140*4882a593Smuzhiyun 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4141*4882a593Smuzhiyun 
4142*4882a593Smuzhiyun 	i = rx_ring->next_to_clean;
4143*4882a593Smuzhiyun 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4144*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
4145*4882a593Smuzhiyun 
4146*4882a593Smuzhiyun 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4147*4882a593Smuzhiyun 		struct sk_buff *skb;
4148*4882a593Smuzhiyun 		u8 status;
4149*4882a593Smuzhiyun 
4150*4882a593Smuzhiyun 		if (*work_done >= work_to_do)
4151*4882a593Smuzhiyun 			break;
4152*4882a593Smuzhiyun 		(*work_done)++;
4153*4882a593Smuzhiyun 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4154*4882a593Smuzhiyun 
4155*4882a593Smuzhiyun 		status = rx_desc->status;
4156*4882a593Smuzhiyun 
4157*4882a593Smuzhiyun 		if (++i == rx_ring->count)
4158*4882a593Smuzhiyun 			i = 0;
4159*4882a593Smuzhiyun 
4160*4882a593Smuzhiyun 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4161*4882a593Smuzhiyun 		prefetch(next_rxd);
4162*4882a593Smuzhiyun 
4163*4882a593Smuzhiyun 		next_buffer = &rx_ring->buffer_info[i];
4164*4882a593Smuzhiyun 
4165*4882a593Smuzhiyun 		cleaned = true;
4166*4882a593Smuzhiyun 		cleaned_count++;
4167*4882a593Smuzhiyun 		dma_unmap_page(&pdev->dev, buffer_info->dma,
4168*4882a593Smuzhiyun 			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4169*4882a593Smuzhiyun 		buffer_info->dma = 0;
4170*4882a593Smuzhiyun 
4171*4882a593Smuzhiyun 		length = le16_to_cpu(rx_desc->length);
4172*4882a593Smuzhiyun 
4173*4882a593Smuzhiyun 		/* errors is only valid for DD + EOP descriptors */
4174*4882a593Smuzhiyun 		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4175*4882a593Smuzhiyun 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4176*4882a593Smuzhiyun 			u8 *mapped = page_address(buffer_info->rxbuf.page);
4177*4882a593Smuzhiyun 
4178*4882a593Smuzhiyun 			if (e1000_tbi_should_accept(adapter, status,
4179*4882a593Smuzhiyun 						    rx_desc->errors,
4180*4882a593Smuzhiyun 						    length, mapped)) {
4181*4882a593Smuzhiyun 				length--;
4182*4882a593Smuzhiyun 			} else if (netdev->features & NETIF_F_RXALL) {
4183*4882a593Smuzhiyun 				goto process_skb;
4184*4882a593Smuzhiyun 			} else {
4185*4882a593Smuzhiyun 				/* an error means any chain goes out the window
4186*4882a593Smuzhiyun 				 * too
4187*4882a593Smuzhiyun 				 */
4188*4882a593Smuzhiyun 				dev_kfree_skb(rx_ring->rx_skb_top);
4189*4882a593Smuzhiyun 				rx_ring->rx_skb_top = NULL;
4190*4882a593Smuzhiyun 				goto next_desc;
4191*4882a593Smuzhiyun 			}
4192*4882a593Smuzhiyun 		}
4193*4882a593Smuzhiyun 
4194*4882a593Smuzhiyun #define rxtop rx_ring->rx_skb_top
4195*4882a593Smuzhiyun process_skb:
4196*4882a593Smuzhiyun 		if (!(status & E1000_RXD_STAT_EOP)) {
4197*4882a593Smuzhiyun 			/* this descriptor is only the beginning (or middle) */
4198*4882a593Smuzhiyun 			if (!rxtop) {
4199*4882a593Smuzhiyun 				/* this is the beginning of a chain */
4200*4882a593Smuzhiyun 				rxtop = napi_get_frags(&adapter->napi);
4201*4882a593Smuzhiyun 				if (!rxtop)
4202*4882a593Smuzhiyun 					break;
4203*4882a593Smuzhiyun 
4204*4882a593Smuzhiyun 				skb_fill_page_desc(rxtop, 0,
4205*4882a593Smuzhiyun 						   buffer_info->rxbuf.page,
4206*4882a593Smuzhiyun 						   0, length);
4207*4882a593Smuzhiyun 			} else {
4208*4882a593Smuzhiyun 				/* this is the middle of a chain */
4209*4882a593Smuzhiyun 				skb_fill_page_desc(rxtop,
4210*4882a593Smuzhiyun 				    skb_shinfo(rxtop)->nr_frags,
4211*4882a593Smuzhiyun 				    buffer_info->rxbuf.page, 0, length);
4212*4882a593Smuzhiyun 			}
4213*4882a593Smuzhiyun 			e1000_consume_page(buffer_info, rxtop, length);
4214*4882a593Smuzhiyun 			goto next_desc;
4215*4882a593Smuzhiyun 		} else {
4216*4882a593Smuzhiyun 			if (rxtop) {
4217*4882a593Smuzhiyun 				/* end of the chain */
4218*4882a593Smuzhiyun 				skb_fill_page_desc(rxtop,
4219*4882a593Smuzhiyun 				    skb_shinfo(rxtop)->nr_frags,
4220*4882a593Smuzhiyun 				    buffer_info->rxbuf.page, 0, length);
4221*4882a593Smuzhiyun 				skb = rxtop;
4222*4882a593Smuzhiyun 				rxtop = NULL;
4223*4882a593Smuzhiyun 				e1000_consume_page(buffer_info, skb, length);
4224*4882a593Smuzhiyun 			} else {
4225*4882a593Smuzhiyun 				struct page *p;
4226*4882a593Smuzhiyun 				/* no chain, got EOP, this buf is the whole packet;
4227*4882a593Smuzhiyun 				 * use copybreak to save the put_page/alloc_page
4228*4882a593Smuzhiyun 				 */
4229*4882a593Smuzhiyun 				p = buffer_info->rxbuf.page;
4230*4882a593Smuzhiyun 				if (length <= copybreak) {
4231*4882a593Smuzhiyun 					u8 *vaddr;
4232*4882a593Smuzhiyun 
4233*4882a593Smuzhiyun 					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4234*4882a593Smuzhiyun 						length -= 4;
4235*4882a593Smuzhiyun 					skb = e1000_alloc_rx_skb(adapter,
4236*4882a593Smuzhiyun 								 length);
4237*4882a593Smuzhiyun 					if (!skb)
4238*4882a593Smuzhiyun 						break;
4239*4882a593Smuzhiyun 
4240*4882a593Smuzhiyun 					vaddr = kmap_atomic(p);
4241*4882a593Smuzhiyun 					memcpy(skb_tail_pointer(skb), vaddr,
4242*4882a593Smuzhiyun 					       length);
4243*4882a593Smuzhiyun 					kunmap_atomic(vaddr);
4244*4882a593Smuzhiyun 					/* re-use the page, so don't erase
4245*4882a593Smuzhiyun 					 * buffer_info->rxbuf.page
4246*4882a593Smuzhiyun 					 */
4247*4882a593Smuzhiyun 					skb_put(skb, length);
4248*4882a593Smuzhiyun 					e1000_rx_checksum(adapter,
4249*4882a593Smuzhiyun 							  status | rx_desc->errors << 24,
4250*4882a593Smuzhiyun 							  le16_to_cpu(rx_desc->csum), skb);
4251*4882a593Smuzhiyun 
4252*4882a593Smuzhiyun 					total_rx_bytes += skb->len;
4253*4882a593Smuzhiyun 					total_rx_packets++;
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun 					e1000_receive_skb(adapter, status,
4256*4882a593Smuzhiyun 							  rx_desc->special, skb);
4257*4882a593Smuzhiyun 					goto next_desc;
4258*4882a593Smuzhiyun 				} else {
4259*4882a593Smuzhiyun 					skb = napi_get_frags(&adapter->napi);
4260*4882a593Smuzhiyun 					if (!skb) {
4261*4882a593Smuzhiyun 						adapter->alloc_rx_buff_failed++;
4262*4882a593Smuzhiyun 						break;
4263*4882a593Smuzhiyun 					}
4264*4882a593Smuzhiyun 					skb_fill_page_desc(skb, 0, p, 0,
4265*4882a593Smuzhiyun 							   length);
4266*4882a593Smuzhiyun 					e1000_consume_page(buffer_info, skb,
4267*4882a593Smuzhiyun 							   length);
4268*4882a593Smuzhiyun 				}
4269*4882a593Smuzhiyun 			}
4270*4882a593Smuzhiyun 		}
4271*4882a593Smuzhiyun 
4272*4882a593Smuzhiyun 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4273*4882a593Smuzhiyun 		e1000_rx_checksum(adapter,
4274*4882a593Smuzhiyun 				  (u32)(status) |
4275*4882a593Smuzhiyun 				  ((u32)(rx_desc->errors) << 24),
4276*4882a593Smuzhiyun 				  le16_to_cpu(rx_desc->csum), skb);
4277*4882a593Smuzhiyun 
4278*4882a593Smuzhiyun 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4279*4882a593Smuzhiyun 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4280*4882a593Smuzhiyun 			pskb_trim(skb, skb->len - 4);
4281*4882a593Smuzhiyun 		total_rx_packets++;
4282*4882a593Smuzhiyun 
4283*4882a593Smuzhiyun 		if (status & E1000_RXD_STAT_VP) {
4284*4882a593Smuzhiyun 			__le16 vlan = rx_desc->special;
4285*4882a593Smuzhiyun 			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4286*4882a593Smuzhiyun 
4287*4882a593Smuzhiyun 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4288*4882a593Smuzhiyun 		}
4289*4882a593Smuzhiyun 
4290*4882a593Smuzhiyun 		napi_gro_frags(&adapter->napi);
4291*4882a593Smuzhiyun 
4292*4882a593Smuzhiyun next_desc:
4293*4882a593Smuzhiyun 		rx_desc->status = 0;
4294*4882a593Smuzhiyun 
4295*4882a593Smuzhiyun 		/* return some buffers to hardware, one at a time is too slow */
4296*4882a593Smuzhiyun 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4297*4882a593Smuzhiyun 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4298*4882a593Smuzhiyun 			cleaned_count = 0;
4299*4882a593Smuzhiyun 		}
4300*4882a593Smuzhiyun 
4301*4882a593Smuzhiyun 		/* use prefetched values */
4302*4882a593Smuzhiyun 		rx_desc = next_rxd;
4303*4882a593Smuzhiyun 		buffer_info = next_buffer;
4304*4882a593Smuzhiyun 	}
4305*4882a593Smuzhiyun 	rx_ring->next_to_clean = i;
4306*4882a593Smuzhiyun 
4307*4882a593Smuzhiyun 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4308*4882a593Smuzhiyun 	if (cleaned_count)
4309*4882a593Smuzhiyun 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4310*4882a593Smuzhiyun 
4311*4882a593Smuzhiyun 	adapter->total_rx_packets += total_rx_packets;
4312*4882a593Smuzhiyun 	adapter->total_rx_bytes += total_rx_bytes;
4313*4882a593Smuzhiyun 	netdev->stats.rx_bytes += total_rx_bytes;
4314*4882a593Smuzhiyun 	netdev->stats.rx_packets += total_rx_packets;
4315*4882a593Smuzhiyun 	return cleaned;
4316*4882a593Smuzhiyun }
4317*4882a593Smuzhiyun 
4318*4882a593Smuzhiyun /* this should improve performance for small packets with large amounts
4319*4882a593Smuzhiyun  * of reassembly being done in the stack
4320*4882a593Smuzhiyun  */
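/* In this driver the copybreak threshold defined earlier in this file acts
 * as the cut-off: frames no longer than it are copied (after a dma_sync
 * only) into a freshly allocated small skb, so the original DMA buffer
 * stays mapped and can be handed straight back to the hardware; longer
 * frames fall through to the unmap/build_skb path in e1000_clean_rx_irq().
 */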
4321*4882a593Smuzhiyun static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4322*4882a593Smuzhiyun 				       struct e1000_rx_buffer *buffer_info,
4323*4882a593Smuzhiyun 				       u32 length, const void *data)
4324*4882a593Smuzhiyun {
4325*4882a593Smuzhiyun 	struct sk_buff *skb;
4326*4882a593Smuzhiyun 
4327*4882a593Smuzhiyun 	if (length > copybreak)
4328*4882a593Smuzhiyun 		return NULL;
4329*4882a593Smuzhiyun 
4330*4882a593Smuzhiyun 	skb = e1000_alloc_rx_skb(adapter, length);
4331*4882a593Smuzhiyun 	if (!skb)
4332*4882a593Smuzhiyun 		return NULL;
4333*4882a593Smuzhiyun 
4334*4882a593Smuzhiyun 	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4335*4882a593Smuzhiyun 				length, DMA_FROM_DEVICE);
4336*4882a593Smuzhiyun 
4337*4882a593Smuzhiyun 	skb_put_data(skb, data, length);
4338*4882a593Smuzhiyun 
4339*4882a593Smuzhiyun 	return skb;
4340*4882a593Smuzhiyun }
4341*4882a593Smuzhiyun 
4342*4882a593Smuzhiyun /**
4343*4882a593Smuzhiyun  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4344*4882a593Smuzhiyun  * @adapter: board private structure
4345*4882a593Smuzhiyun  * @rx_ring: ring to clean
4346*4882a593Smuzhiyun  * @work_done: amount of napi work completed this call
4347*4882a593Smuzhiyun  * @work_to_do: max amount of work allowed for this call to do
4348*4882a593Smuzhiyun  */
4349*4882a593Smuzhiyun static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4350*4882a593Smuzhiyun 			       struct e1000_rx_ring *rx_ring,
4351*4882a593Smuzhiyun 			       int *work_done, int work_to_do)
4352*4882a593Smuzhiyun {
4353*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4354*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4355*4882a593Smuzhiyun 	struct e1000_rx_desc *rx_desc, *next_rxd;
4356*4882a593Smuzhiyun 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4357*4882a593Smuzhiyun 	u32 length;
4358*4882a593Smuzhiyun 	unsigned int i;
4359*4882a593Smuzhiyun 	int cleaned_count = 0;
4360*4882a593Smuzhiyun 	bool cleaned = false;
4361*4882a593Smuzhiyun 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4362*4882a593Smuzhiyun 
4363*4882a593Smuzhiyun 	i = rx_ring->next_to_clean;
4364*4882a593Smuzhiyun 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4365*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
4366*4882a593Smuzhiyun 
4367*4882a593Smuzhiyun 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4368*4882a593Smuzhiyun 		struct sk_buff *skb;
4369*4882a593Smuzhiyun 		u8 *data;
4370*4882a593Smuzhiyun 		u8 status;
4371*4882a593Smuzhiyun 
4372*4882a593Smuzhiyun 		if (*work_done >= work_to_do)
4373*4882a593Smuzhiyun 			break;
4374*4882a593Smuzhiyun 		(*work_done)++;
4375*4882a593Smuzhiyun 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4376*4882a593Smuzhiyun 
4377*4882a593Smuzhiyun 		status = rx_desc->status;
4378*4882a593Smuzhiyun 		length = le16_to_cpu(rx_desc->length);
4379*4882a593Smuzhiyun 
4380*4882a593Smuzhiyun 		data = buffer_info->rxbuf.data;
4381*4882a593Smuzhiyun 		prefetch(data);
4382*4882a593Smuzhiyun 		skb = e1000_copybreak(adapter, buffer_info, length, data);
4383*4882a593Smuzhiyun 		if (!skb) {
4384*4882a593Smuzhiyun 			unsigned int frag_len = e1000_frag_len(adapter);
4385*4882a593Smuzhiyun 
4386*4882a593Smuzhiyun 			skb = build_skb(data - E1000_HEADROOM, frag_len);
4387*4882a593Smuzhiyun 			if (!skb) {
4388*4882a593Smuzhiyun 				adapter->alloc_rx_buff_failed++;
4389*4882a593Smuzhiyun 				break;
4390*4882a593Smuzhiyun 			}
4391*4882a593Smuzhiyun 
4392*4882a593Smuzhiyun 			skb_reserve(skb, E1000_HEADROOM);
4393*4882a593Smuzhiyun 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4394*4882a593Smuzhiyun 					 adapter->rx_buffer_len,
4395*4882a593Smuzhiyun 					 DMA_FROM_DEVICE);
4396*4882a593Smuzhiyun 			buffer_info->dma = 0;
4397*4882a593Smuzhiyun 			buffer_info->rxbuf.data = NULL;
4398*4882a593Smuzhiyun 		}
4399*4882a593Smuzhiyun 
4400*4882a593Smuzhiyun 		if (++i == rx_ring->count)
4401*4882a593Smuzhiyun 			i = 0;
4402*4882a593Smuzhiyun 
4403*4882a593Smuzhiyun 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4404*4882a593Smuzhiyun 		prefetch(next_rxd);
4405*4882a593Smuzhiyun 
4406*4882a593Smuzhiyun 		next_buffer = &rx_ring->buffer_info[i];
4407*4882a593Smuzhiyun 
4408*4882a593Smuzhiyun 		cleaned = true;
4409*4882a593Smuzhiyun 		cleaned_count++;
4410*4882a593Smuzhiyun 
4411*4882a593Smuzhiyun 		/* !EOP means multiple descriptors were used to store a single
4412*4882a593Smuzhiyun 		 * packet, if that's the case we need to toss it.  In fact, we
4413*4882a593Smuzhiyun 		 * are to toss every packet with the EOP bit clear and the next
4414*4882a593Smuzhiyun 		 * frame that _does_ have the EOP bit set, as it is by
4415*4882a593Smuzhiyun 		 * definition only a frame fragment
4416*4882a593Smuzhiyun 		 */
4417*4882a593Smuzhiyun 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4418*4882a593Smuzhiyun 			adapter->discarding = true;
4419*4882a593Smuzhiyun 
4420*4882a593Smuzhiyun 		if (adapter->discarding) {
4421*4882a593Smuzhiyun 			/* All receives must fit into a single buffer */
4422*4882a593Smuzhiyun 			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4423*4882a593Smuzhiyun 			dev_kfree_skb(skb);
4424*4882a593Smuzhiyun 			if (status & E1000_RXD_STAT_EOP)
4425*4882a593Smuzhiyun 				adapter->discarding = false;
4426*4882a593Smuzhiyun 			goto next_desc;
4427*4882a593Smuzhiyun 		}
4428*4882a593Smuzhiyun 
4429*4882a593Smuzhiyun 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4430*4882a593Smuzhiyun 			if (e1000_tbi_should_accept(adapter, status,
4431*4882a593Smuzhiyun 						    rx_desc->errors,
4432*4882a593Smuzhiyun 						    length, data)) {
4433*4882a593Smuzhiyun 				length--;
4434*4882a593Smuzhiyun 			} else if (netdev->features & NETIF_F_RXALL) {
4435*4882a593Smuzhiyun 				goto process_skb;
4436*4882a593Smuzhiyun 			} else {
4437*4882a593Smuzhiyun 				dev_kfree_skb(skb);
4438*4882a593Smuzhiyun 				goto next_desc;
4439*4882a593Smuzhiyun 			}
4440*4882a593Smuzhiyun 		}
4441*4882a593Smuzhiyun 
4442*4882a593Smuzhiyun process_skb:
4443*4882a593Smuzhiyun 		total_rx_bytes += (length - 4); /* don't count FCS */
4444*4882a593Smuzhiyun 		total_rx_packets++;
4445*4882a593Smuzhiyun 
4446*4882a593Smuzhiyun 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4447*4882a593Smuzhiyun 			/* adjust length to remove Ethernet CRC, this must be
4448*4882a593Smuzhiyun 			 * done after the TBI_ACCEPT workaround above
4449*4882a593Smuzhiyun 			 */
4450*4882a593Smuzhiyun 			length -= 4;
4451*4882a593Smuzhiyun 
4452*4882a593Smuzhiyun 		if (buffer_info->rxbuf.data == NULL)
4453*4882a593Smuzhiyun 			skb_put(skb, length);
4454*4882a593Smuzhiyun 		else /* copybreak skb */
4455*4882a593Smuzhiyun 			skb_trim(skb, length);
4456*4882a593Smuzhiyun 
4457*4882a593Smuzhiyun 		/* Receive Checksum Offload */
4458*4882a593Smuzhiyun 		e1000_rx_checksum(adapter,
4459*4882a593Smuzhiyun 				  (u32)(status) |
4460*4882a593Smuzhiyun 				  ((u32)(rx_desc->errors) << 24),
4461*4882a593Smuzhiyun 				  le16_to_cpu(rx_desc->csum), skb);
4462*4882a593Smuzhiyun 
4463*4882a593Smuzhiyun 		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4464*4882a593Smuzhiyun 
4465*4882a593Smuzhiyun next_desc:
4466*4882a593Smuzhiyun 		rx_desc->status = 0;
4467*4882a593Smuzhiyun 
4468*4882a593Smuzhiyun 		/* return some buffers to hardware, one at a time is too slow */
4469*4882a593Smuzhiyun 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4470*4882a593Smuzhiyun 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4471*4882a593Smuzhiyun 			cleaned_count = 0;
4472*4882a593Smuzhiyun 		}
4473*4882a593Smuzhiyun 
4474*4882a593Smuzhiyun 		/* use prefetched values */
4475*4882a593Smuzhiyun 		rx_desc = next_rxd;
4476*4882a593Smuzhiyun 		buffer_info = next_buffer;
4477*4882a593Smuzhiyun 	}
4478*4882a593Smuzhiyun 	rx_ring->next_to_clean = i;
4479*4882a593Smuzhiyun 
4480*4882a593Smuzhiyun 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4481*4882a593Smuzhiyun 	if (cleaned_count)
4482*4882a593Smuzhiyun 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4483*4882a593Smuzhiyun 
4484*4882a593Smuzhiyun 	adapter->total_rx_packets += total_rx_packets;
4485*4882a593Smuzhiyun 	adapter->total_rx_bytes += total_rx_bytes;
4486*4882a593Smuzhiyun 	netdev->stats.rx_bytes += total_rx_bytes;
4487*4882a593Smuzhiyun 	netdev->stats.rx_packets += total_rx_packets;
4488*4882a593Smuzhiyun 	return cleaned;
4489*4882a593Smuzhiyun }
4490*4882a593Smuzhiyun 
4491*4882a593Smuzhiyun /**
4492*4882a593Smuzhiyun  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4493*4882a593Smuzhiyun  * @adapter: address of board private structure
4494*4882a593Smuzhiyun  * @rx_ring: pointer to receive ring structure
4495*4882a593Smuzhiyun  * @cleaned_count: number of buffers to allocate this pass
4496*4882a593Smuzhiyun  **/
4497*4882a593Smuzhiyun static void
4498*4882a593Smuzhiyun e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4499*4882a593Smuzhiyun 			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4500*4882a593Smuzhiyun {
4501*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4502*4882a593Smuzhiyun 	struct e1000_rx_desc *rx_desc;
4503*4882a593Smuzhiyun 	struct e1000_rx_buffer *buffer_info;
4504*4882a593Smuzhiyun 	unsigned int i;
4505*4882a593Smuzhiyun 
4506*4882a593Smuzhiyun 	i = rx_ring->next_to_use;
4507*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
4508*4882a593Smuzhiyun 
4509*4882a593Smuzhiyun 	while (cleaned_count--) {
4510*4882a593Smuzhiyun 		/* allocate a new page if necessary */
4511*4882a593Smuzhiyun 		if (!buffer_info->rxbuf.page) {
4512*4882a593Smuzhiyun 			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4513*4882a593Smuzhiyun 			if (unlikely(!buffer_info->rxbuf.page)) {
4514*4882a593Smuzhiyun 				adapter->alloc_rx_buff_failed++;
4515*4882a593Smuzhiyun 				break;
4516*4882a593Smuzhiyun 			}
4517*4882a593Smuzhiyun 		}
4518*4882a593Smuzhiyun 
4519*4882a593Smuzhiyun 		if (!buffer_info->dma) {
4520*4882a593Smuzhiyun 			buffer_info->dma = dma_map_page(&pdev->dev,
4521*4882a593Smuzhiyun 							buffer_info->rxbuf.page, 0,
4522*4882a593Smuzhiyun 							adapter->rx_buffer_len,
4523*4882a593Smuzhiyun 							DMA_FROM_DEVICE);
4524*4882a593Smuzhiyun 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4525*4882a593Smuzhiyun 				put_page(buffer_info->rxbuf.page);
4526*4882a593Smuzhiyun 				buffer_info->rxbuf.page = NULL;
4527*4882a593Smuzhiyun 				buffer_info->dma = 0;
4528*4882a593Smuzhiyun 				adapter->alloc_rx_buff_failed++;
4529*4882a593Smuzhiyun 				break;
4530*4882a593Smuzhiyun 			}
4531*4882a593Smuzhiyun 		}
4532*4882a593Smuzhiyun 
4533*4882a593Smuzhiyun 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4534*4882a593Smuzhiyun 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4535*4882a593Smuzhiyun 
4536*4882a593Smuzhiyun 		if (unlikely(++i == rx_ring->count))
4537*4882a593Smuzhiyun 			i = 0;
4538*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
4539*4882a593Smuzhiyun 	}
4540*4882a593Smuzhiyun 
4541*4882a593Smuzhiyun 	if (likely(rx_ring->next_to_use != i)) {
4542*4882a593Smuzhiyun 		rx_ring->next_to_use = i;
4543*4882a593Smuzhiyun 		if (unlikely(i-- == 0))
4544*4882a593Smuzhiyun 			i = (rx_ring->count - 1);
4545*4882a593Smuzhiyun 
4546*4882a593Smuzhiyun 		/* Force memory writes to complete before letting h/w
4547*4882a593Smuzhiyun 		 * know there are new descriptors to fetch.  (Only
4548*4882a593Smuzhiyun 		 * applicable for weak-ordered memory model archs,
4549*4882a593Smuzhiyun 		 * such as IA-64).
4550*4882a593Smuzhiyun 		 */
4551*4882a593Smuzhiyun 		dma_wmb();
4552*4882a593Smuzhiyun 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4553*4882a593Smuzhiyun 	}
4554*4882a593Smuzhiyun }
4555*4882a593Smuzhiyun 
4556*4882a593Smuzhiyun /**
4557*4882a593Smuzhiyun  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4558*4882a593Smuzhiyun  * @adapter: address of board private structure
4559*4882a593Smuzhiyun  * @rx_ring: pointer to ring struct
4560*4882a593Smuzhiyun  * @cleaned_count: number of new Rx buffers to try to allocate
4561*4882a593Smuzhiyun  **/
4562*4882a593Smuzhiyun static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4563*4882a593Smuzhiyun 				   struct e1000_rx_ring *rx_ring,
4564*4882a593Smuzhiyun 				   int cleaned_count)
4565*4882a593Smuzhiyun {
4566*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4567*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4568*4882a593Smuzhiyun 	struct e1000_rx_desc *rx_desc;
4569*4882a593Smuzhiyun 	struct e1000_rx_buffer *buffer_info;
4570*4882a593Smuzhiyun 	unsigned int i;
4571*4882a593Smuzhiyun 	unsigned int bufsz = adapter->rx_buffer_len;
4572*4882a593Smuzhiyun 
4573*4882a593Smuzhiyun 	i = rx_ring->next_to_use;
4574*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
4575*4882a593Smuzhiyun 
4576*4882a593Smuzhiyun 	while (cleaned_count--) {
4577*4882a593Smuzhiyun 		void *data;
4578*4882a593Smuzhiyun 
4579*4882a593Smuzhiyun 		if (buffer_info->rxbuf.data)
4580*4882a593Smuzhiyun 			goto skip;
4581*4882a593Smuzhiyun 
4582*4882a593Smuzhiyun 		data = e1000_alloc_frag(adapter);
4583*4882a593Smuzhiyun 		if (!data) {
4584*4882a593Smuzhiyun 			/* Better luck next round */
4585*4882a593Smuzhiyun 			adapter->alloc_rx_buff_failed++;
4586*4882a593Smuzhiyun 			break;
4587*4882a593Smuzhiyun 		}
4588*4882a593Smuzhiyun 
4589*4882a593Smuzhiyun 		/* Fix for errata 23, can't cross 64kB boundary */
4590*4882a593Smuzhiyun 		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4591*4882a593Smuzhiyun 			void *olddata = data;
4592*4882a593Smuzhiyun 			e_err(rx_err, "skb align check failed: %u bytes at "
4593*4882a593Smuzhiyun 			      "%p\n", bufsz, data);
4594*4882a593Smuzhiyun 			/* Try again, without freeing the previous */
4595*4882a593Smuzhiyun 			data = e1000_alloc_frag(adapter);
4596*4882a593Smuzhiyun 			/* Failed allocation, critical failure */
4597*4882a593Smuzhiyun 			if (!data) {
4598*4882a593Smuzhiyun 				skb_free_frag(olddata);
4599*4882a593Smuzhiyun 				adapter->alloc_rx_buff_failed++;
4600*4882a593Smuzhiyun 				break;
4601*4882a593Smuzhiyun 			}
4602*4882a593Smuzhiyun 
4603*4882a593Smuzhiyun 			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4604*4882a593Smuzhiyun 				/* give up */
4605*4882a593Smuzhiyun 				skb_free_frag(data);
4606*4882a593Smuzhiyun 				skb_free_frag(olddata);
4607*4882a593Smuzhiyun 				adapter->alloc_rx_buff_failed++;
4608*4882a593Smuzhiyun 				break;
4609*4882a593Smuzhiyun 			}
4610*4882a593Smuzhiyun 
4611*4882a593Smuzhiyun 			/* Use new allocation */
4612*4882a593Smuzhiyun 			skb_free_frag(olddata);
4613*4882a593Smuzhiyun 		}
4614*4882a593Smuzhiyun 		buffer_info->dma = dma_map_single(&pdev->dev,
4615*4882a593Smuzhiyun 						  data,
4616*4882a593Smuzhiyun 						  adapter->rx_buffer_len,
4617*4882a593Smuzhiyun 						  DMA_FROM_DEVICE);
4618*4882a593Smuzhiyun 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4619*4882a593Smuzhiyun 			skb_free_frag(data);
4620*4882a593Smuzhiyun 			buffer_info->dma = 0;
4621*4882a593Smuzhiyun 			adapter->alloc_rx_buff_failed++;
4622*4882a593Smuzhiyun 			break;
4623*4882a593Smuzhiyun 		}
4624*4882a593Smuzhiyun 
4625*4882a593Smuzhiyun 		/* XXX if it was allocated cleanly it will never map to a
4626*4882a593Smuzhiyun 		 * boundary crossing
4627*4882a593Smuzhiyun 		 */
4628*4882a593Smuzhiyun 
4629*4882a593Smuzhiyun 		/* Fix for errata 23, can't cross 64kB boundary */
4630*4882a593Smuzhiyun 		if (!e1000_check_64k_bound(adapter,
4631*4882a593Smuzhiyun 					(void *)(unsigned long)buffer_info->dma,
4632*4882a593Smuzhiyun 					adapter->rx_buffer_len)) {
4633*4882a593Smuzhiyun 			e_err(rx_err, "dma align check failed: %u bytes at "
4634*4882a593Smuzhiyun 			      "%p\n", adapter->rx_buffer_len,
4635*4882a593Smuzhiyun 			      (void *)(unsigned long)buffer_info->dma);
4636*4882a593Smuzhiyun 
4637*4882a593Smuzhiyun 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4638*4882a593Smuzhiyun 					 adapter->rx_buffer_len,
4639*4882a593Smuzhiyun 					 DMA_FROM_DEVICE);
4640*4882a593Smuzhiyun 
4641*4882a593Smuzhiyun 			skb_free_frag(data);
4642*4882a593Smuzhiyun 			buffer_info->rxbuf.data = NULL;
4643*4882a593Smuzhiyun 			buffer_info->dma = 0;
4644*4882a593Smuzhiyun 
4645*4882a593Smuzhiyun 			adapter->alloc_rx_buff_failed++;
4646*4882a593Smuzhiyun 			break;
4647*4882a593Smuzhiyun 		}
4648*4882a593Smuzhiyun 		buffer_info->rxbuf.data = data;
4649*4882a593Smuzhiyun  skip:
4650*4882a593Smuzhiyun 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4651*4882a593Smuzhiyun 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4652*4882a593Smuzhiyun 
4653*4882a593Smuzhiyun 		if (unlikely(++i == rx_ring->count))
4654*4882a593Smuzhiyun 			i = 0;
4655*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
4656*4882a593Smuzhiyun 	}
4657*4882a593Smuzhiyun 
4658*4882a593Smuzhiyun 	if (likely(rx_ring->next_to_use != i)) {
4659*4882a593Smuzhiyun 		rx_ring->next_to_use = i;
4660*4882a593Smuzhiyun 		if (unlikely(i-- == 0))
4661*4882a593Smuzhiyun 			i = (rx_ring->count - 1);
4662*4882a593Smuzhiyun 
4663*4882a593Smuzhiyun 		/* Force memory writes to complete before letting h/w
4664*4882a593Smuzhiyun 		 * know there are new descriptors to fetch.  (Only
4665*4882a593Smuzhiyun 		 * applicable for weak-ordered memory model archs,
4666*4882a593Smuzhiyun 		 * such as IA-64).
4667*4882a593Smuzhiyun 		 */
4668*4882a593Smuzhiyun 		dma_wmb();
4669*4882a593Smuzhiyun 		writel(i, hw->hw_addr + rx_ring->rdt);
4670*4882a593Smuzhiyun 	}
4671*4882a593Smuzhiyun }
4672*4882a593Smuzhiyun 
4673*4882a593Smuzhiyun /**
4674*4882a593Smuzhiyun  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4675*4882a593Smuzhiyun  * @adapter: address of board private structure
4676*4882a593Smuzhiyun  **/
4677*4882a593Smuzhiyun static void e1000_smartspeed(struct e1000_adapter *adapter)
4678*4882a593Smuzhiyun {
4679*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4680*4882a593Smuzhiyun 	u16 phy_status;
4681*4882a593Smuzhiyun 	u16 phy_ctrl;
4682*4882a593Smuzhiyun 
4683*4882a593Smuzhiyun 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4684*4882a593Smuzhiyun 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4685*4882a593Smuzhiyun 		return;
4686*4882a593Smuzhiyun 
4687*4882a593Smuzhiyun 	if (adapter->smartspeed == 0) {
4688*4882a593Smuzhiyun 		/* If Master/Slave config fault is asserted twice,
4689*4882a593Smuzhiyun 		 * we assume back-to-back
4690*4882a593Smuzhiyun 		 */
4691*4882a593Smuzhiyun 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4692*4882a593Smuzhiyun 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4693*4882a593Smuzhiyun 			return;
4694*4882a593Smuzhiyun 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4695*4882a593Smuzhiyun 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4696*4882a593Smuzhiyun 			return;
4697*4882a593Smuzhiyun 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4698*4882a593Smuzhiyun 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4699*4882a593Smuzhiyun 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4700*4882a593Smuzhiyun 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4701*4882a593Smuzhiyun 					    phy_ctrl);
4702*4882a593Smuzhiyun 			adapter->smartspeed++;
4703*4882a593Smuzhiyun 			if (!e1000_phy_setup_autoneg(hw) &&
4704*4882a593Smuzhiyun 			   !e1000_read_phy_reg(hw, PHY_CTRL,
4705*4882a593Smuzhiyun 					       &phy_ctrl)) {
4706*4882a593Smuzhiyun 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4707*4882a593Smuzhiyun 					     MII_CR_RESTART_AUTO_NEG);
4708*4882a593Smuzhiyun 				e1000_write_phy_reg(hw, PHY_CTRL,
4709*4882a593Smuzhiyun 						    phy_ctrl);
4710*4882a593Smuzhiyun 			}
4711*4882a593Smuzhiyun 		}
4712*4882a593Smuzhiyun 		return;
4713*4882a593Smuzhiyun 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4714*4882a593Smuzhiyun 		/* If still no link, perhaps using 2/3 pair cable */
4715*4882a593Smuzhiyun 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4716*4882a593Smuzhiyun 		phy_ctrl |= CR_1000T_MS_ENABLE;
4717*4882a593Smuzhiyun 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4718*4882a593Smuzhiyun 		if (!e1000_phy_setup_autoneg(hw) &&
4719*4882a593Smuzhiyun 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4720*4882a593Smuzhiyun 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4721*4882a593Smuzhiyun 				     MII_CR_RESTART_AUTO_NEG);
4722*4882a593Smuzhiyun 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4723*4882a593Smuzhiyun 		}
4724*4882a593Smuzhiyun 	}
4725*4882a593Smuzhiyun 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4726*4882a593Smuzhiyun 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4727*4882a593Smuzhiyun 		adapter->smartspeed = 0;
4728*4882a593Smuzhiyun }
4729*4882a593Smuzhiyun 
4730*4882a593Smuzhiyun /**
4731*4882a593Smuzhiyun  * e1000_ioctl - handle ioctl calls
4732*4882a593Smuzhiyun  * @netdev: pointer to our netdev
4733*4882a593Smuzhiyun  * @ifr: pointer to interface request structure
4734*4882a593Smuzhiyun  * @cmd: ioctl data
4735*4882a593Smuzhiyun  **/
4736*4882a593Smuzhiyun static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4737*4882a593Smuzhiyun {
4738*4882a593Smuzhiyun 	switch (cmd) {
4739*4882a593Smuzhiyun 	case SIOCGMIIPHY:
4740*4882a593Smuzhiyun 	case SIOCGMIIREG:
4741*4882a593Smuzhiyun 	case SIOCSMIIREG:
4742*4882a593Smuzhiyun 		return e1000_mii_ioctl(netdev, ifr, cmd);
4743*4882a593Smuzhiyun 	default:
4744*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4745*4882a593Smuzhiyun 	}
4746*4882a593Smuzhiyun }
4747*4882a593Smuzhiyun 
4748*4882a593Smuzhiyun /**
4749*4882a593Smuzhiyun  * e1000_mii_ioctl - handle MII register ioctl requests
4750*4882a593Smuzhiyun  * @netdev: pointer to our netdev
4751*4882a593Smuzhiyun  * @ifr: pointer to interface request structure
4752*4882a593Smuzhiyun  * @cmd: ioctl data
4753*4882a593Smuzhiyun  **/
4754*4882a593Smuzhiyun static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4755*4882a593Smuzhiyun 			   int cmd)
4756*4882a593Smuzhiyun {
4757*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4758*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4759*4882a593Smuzhiyun 	struct mii_ioctl_data *data = if_mii(ifr);
4760*4882a593Smuzhiyun 	int retval;
4761*4882a593Smuzhiyun 	u16 mii_reg;
4762*4882a593Smuzhiyun 	unsigned long flags;
4763*4882a593Smuzhiyun 
4764*4882a593Smuzhiyun 	if (hw->media_type != e1000_media_type_copper)
4765*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4766*4882a593Smuzhiyun 
4767*4882a593Smuzhiyun 	switch (cmd) {
4768*4882a593Smuzhiyun 	case SIOCGMIIPHY:
4769*4882a593Smuzhiyun 		data->phy_id = hw->phy_addr;
4770*4882a593Smuzhiyun 		break;
4771*4882a593Smuzhiyun 	case SIOCGMIIREG:
4772*4882a593Smuzhiyun 		spin_lock_irqsave(&adapter->stats_lock, flags);
4773*4882a593Smuzhiyun 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4774*4882a593Smuzhiyun 				   &data->val_out)) {
4775*4882a593Smuzhiyun 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4776*4882a593Smuzhiyun 			return -EIO;
4777*4882a593Smuzhiyun 		}
4778*4882a593Smuzhiyun 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4779*4882a593Smuzhiyun 		break;
4780*4882a593Smuzhiyun 	case SIOCSMIIREG:
4781*4882a593Smuzhiyun 		if (data->reg_num & ~(0x1F))
4782*4882a593Smuzhiyun 			return -EFAULT;
4783*4882a593Smuzhiyun 		mii_reg = data->val_in;
4784*4882a593Smuzhiyun 		spin_lock_irqsave(&adapter->stats_lock, flags);
4785*4882a593Smuzhiyun 		if (e1000_write_phy_reg(hw, data->reg_num,
4786*4882a593Smuzhiyun 					mii_reg)) {
4787*4882a593Smuzhiyun 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4788*4882a593Smuzhiyun 			return -EIO;
4789*4882a593Smuzhiyun 		}
4790*4882a593Smuzhiyun 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4791*4882a593Smuzhiyun 		if (hw->media_type == e1000_media_type_copper) {
4792*4882a593Smuzhiyun 			switch (data->reg_num) {
4793*4882a593Smuzhiyun 			case PHY_CTRL:
4794*4882a593Smuzhiyun 				if (mii_reg & MII_CR_POWER_DOWN)
4795*4882a593Smuzhiyun 					break;
4796*4882a593Smuzhiyun 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4797*4882a593Smuzhiyun 					hw->autoneg = 1;
4798*4882a593Smuzhiyun 					hw->autoneg_advertised = 0x2F;
4799*4882a593Smuzhiyun 				} else {
4800*4882a593Smuzhiyun 					u32 speed;
4801*4882a593Smuzhiyun 					if (mii_reg & 0x40)
4802*4882a593Smuzhiyun 						speed = SPEED_1000;
4803*4882a593Smuzhiyun 					else if (mii_reg & 0x2000)
4804*4882a593Smuzhiyun 						speed = SPEED_100;
4805*4882a593Smuzhiyun 					else
4806*4882a593Smuzhiyun 						speed = SPEED_10;
4807*4882a593Smuzhiyun 					retval = e1000_set_spd_dplx(
4808*4882a593Smuzhiyun 						adapter, speed,
4809*4882a593Smuzhiyun 						((mii_reg & 0x100)
4810*4882a593Smuzhiyun 						 ? DUPLEX_FULL :
4811*4882a593Smuzhiyun 						 DUPLEX_HALF));
4812*4882a593Smuzhiyun 					if (retval)
4813*4882a593Smuzhiyun 						return retval;
4814*4882a593Smuzhiyun 				}
4815*4882a593Smuzhiyun 				if (netif_running(adapter->netdev))
4816*4882a593Smuzhiyun 					e1000_reinit_locked(adapter);
4817*4882a593Smuzhiyun 				else
4818*4882a593Smuzhiyun 					e1000_reset(adapter);
4819*4882a593Smuzhiyun 				break;
4820*4882a593Smuzhiyun 			case M88E1000_PHY_SPEC_CTRL:
4821*4882a593Smuzhiyun 			case M88E1000_EXT_PHY_SPEC_CTRL:
4822*4882a593Smuzhiyun 				if (e1000_phy_reset(hw))
4823*4882a593Smuzhiyun 					return -EIO;
4824*4882a593Smuzhiyun 				break;
4825*4882a593Smuzhiyun 			}
4826*4882a593Smuzhiyun 		} else {
4827*4882a593Smuzhiyun 			switch (data->reg_num) {
4828*4882a593Smuzhiyun 			case PHY_CTRL:
4829*4882a593Smuzhiyun 				if (mii_reg & MII_CR_POWER_DOWN)
4830*4882a593Smuzhiyun 					break;
4831*4882a593Smuzhiyun 				if (netif_running(adapter->netdev))
4832*4882a593Smuzhiyun 					e1000_reinit_locked(adapter);
4833*4882a593Smuzhiyun 				else
4834*4882a593Smuzhiyun 					e1000_reset(adapter);
4835*4882a593Smuzhiyun 				break;
4836*4882a593Smuzhiyun 			}
4837*4882a593Smuzhiyun 		}
4838*4882a593Smuzhiyun 		break;
4839*4882a593Smuzhiyun 	default:
4840*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4841*4882a593Smuzhiyun 	}
4842*4882a593Smuzhiyun 	return E1000_SUCCESS;
4843*4882a593Smuzhiyun }
4844*4882a593Smuzhiyun 
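/**
 * e1000_pci_set_mwi - enable PCI Memory Write Invalidate for the adapter
 * @hw: pointer to the HW structure
 **/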
4845*4882a593Smuzhiyun void e1000_pci_set_mwi(struct e1000_hw *hw)
4846*4882a593Smuzhiyun {
4847*4882a593Smuzhiyun 	struct e1000_adapter *adapter = hw->back;
4848*4882a593Smuzhiyun 	int ret_val = pci_set_mwi(adapter->pdev);
4849*4882a593Smuzhiyun 
4850*4882a593Smuzhiyun 	if (ret_val)
4851*4882a593Smuzhiyun 		e_err(probe, "Error in setting MWI\n");
4852*4882a593Smuzhiyun }
4853*4882a593Smuzhiyun 
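/**
 * e1000_pci_clear_mwi - disable PCI Memory Write Invalidate for the adapter
 * @hw: pointer to the HW structure
 **/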
4854*4882a593Smuzhiyun void e1000_pci_clear_mwi(struct e1000_hw *hw)
4855*4882a593Smuzhiyun {
4856*4882a593Smuzhiyun 	struct e1000_adapter *adapter = hw->back;
4857*4882a593Smuzhiyun 
4858*4882a593Smuzhiyun 	pci_clear_mwi(adapter->pdev);
4859*4882a593Smuzhiyun }
4860*4882a593Smuzhiyun 
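/**
 * e1000_pcix_get_mmrbc - read the PCI-X maximum memory read byte count
 * @hw: pointer to the HW structure
 **/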
4861*4882a593Smuzhiyun int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4862*4882a593Smuzhiyun {
4863*4882a593Smuzhiyun 	struct e1000_adapter *adapter = hw->back;
4864*4882a593Smuzhiyun 	return pcix_get_mmrbc(adapter->pdev);
4865*4882a593Smuzhiyun }
4866*4882a593Smuzhiyun 
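/**
 * e1000_pcix_set_mmrbc - set the PCI-X maximum memory read byte count
 * @hw: pointer to the HW structure
 * @mmrbc: maximum memory read byte count to program
 **/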
4867*4882a593Smuzhiyun void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4868*4882a593Smuzhiyun {
4869*4882a593Smuzhiyun 	struct e1000_adapter *adapter = hw->back;
4870*4882a593Smuzhiyun 	pcix_set_mmrbc(adapter->pdev, mmrbc);
4871*4882a593Smuzhiyun }
4872*4882a593Smuzhiyun 
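/**
 * e1000_io_write - write a 32-bit value to an I/O port
 * @hw: pointer to the HW structure (unused)
 * @port: I/O port to write to
 * @value: value to write
 **/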
4873*4882a593Smuzhiyun void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4874*4882a593Smuzhiyun {
4875*4882a593Smuzhiyun 	outl(value, port);
4876*4882a593Smuzhiyun }
4877*4882a593Smuzhiyun 
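/**
 * e1000_vlan_used - check whether any VLAN ID is currently registered
 * @adapter: board private structure
 **/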
4878*4882a593Smuzhiyun static bool e1000_vlan_used(struct e1000_adapter *adapter)
4879*4882a593Smuzhiyun {
4880*4882a593Smuzhiyun 	u16 vid;
4881*4882a593Smuzhiyun 
4882*4882a593Smuzhiyun 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4883*4882a593Smuzhiyun 		return true;
4884*4882a593Smuzhiyun 	return false;
4885*4882a593Smuzhiyun }
4886*4882a593Smuzhiyun 
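/**
 * __e1000_vlan_mode - enable or disable hardware VLAN tag stripping
 * @adapter: board private structure
 * @features: netdev features; NETIF_F_HW_VLAN_CTAG_RX controls CTRL.VME
 **/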
4887*4882a593Smuzhiyun static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4888*4882a593Smuzhiyun 			      netdev_features_t features)
4889*4882a593Smuzhiyun {
4890*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4891*4882a593Smuzhiyun 	u32 ctrl;
4892*4882a593Smuzhiyun 
4893*4882a593Smuzhiyun 	ctrl = er32(CTRL);
4894*4882a593Smuzhiyun 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4895*4882a593Smuzhiyun 		/* enable VLAN tag insert/strip */
4896*4882a593Smuzhiyun 		ctrl |= E1000_CTRL_VME;
4897*4882a593Smuzhiyun 	} else {
4898*4882a593Smuzhiyun 		/* disable VLAN tag insert/strip */
4899*4882a593Smuzhiyun 		ctrl &= ~E1000_CTRL_VME;
4900*4882a593Smuzhiyun 	}
4901*4882a593Smuzhiyun 	ew32(CTRL, ctrl);
4902*4882a593Smuzhiyun }
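
/**
 * e1000_vlan_filter_on_off - enable or disable VLAN receive filtering
 * @adapter: board private structure
 * @filter_on: true to enable the VLAN filter (RCTL.VFE), false to disable
 **/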
4903*4882a593Smuzhiyun static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4904*4882a593Smuzhiyun 				     bool filter_on)
4905*4882a593Smuzhiyun {
4906*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4907*4882a593Smuzhiyun 	u32 rctl;
4908*4882a593Smuzhiyun 
4909*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4910*4882a593Smuzhiyun 		e1000_irq_disable(adapter);
4911*4882a593Smuzhiyun 
4912*4882a593Smuzhiyun 	__e1000_vlan_mode(adapter, adapter->netdev->features);
4913*4882a593Smuzhiyun 	if (filter_on) {
4914*4882a593Smuzhiyun 		/* enable VLAN receive filtering */
4915*4882a593Smuzhiyun 		rctl = er32(RCTL);
4916*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_CFIEN;
4917*4882a593Smuzhiyun 		if (!(adapter->netdev->flags & IFF_PROMISC))
4918*4882a593Smuzhiyun 			rctl |= E1000_RCTL_VFE;
4919*4882a593Smuzhiyun 		ew32(RCTL, rctl);
4920*4882a593Smuzhiyun 		e1000_update_mng_vlan(adapter);
4921*4882a593Smuzhiyun 	} else {
4922*4882a593Smuzhiyun 		/* disable VLAN receive filtering */
4923*4882a593Smuzhiyun 		rctl = er32(RCTL);
4924*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_VFE;
4925*4882a593Smuzhiyun 		ew32(RCTL, rctl);
4926*4882a593Smuzhiyun 	}
4927*4882a593Smuzhiyun 
4928*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4929*4882a593Smuzhiyun 		e1000_irq_enable(adapter);
4930*4882a593Smuzhiyun }
4931*4882a593Smuzhiyun 
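/**
 * e1000_vlan_mode - apply VLAN offload feature settings to the hardware
 * @netdev: network interface device structure
 * @features: netdev features to apply
 **/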
4932*4882a593Smuzhiyun static void e1000_vlan_mode(struct net_device *netdev,
4933*4882a593Smuzhiyun 			    netdev_features_t features)
4934*4882a593Smuzhiyun {
4935*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4936*4882a593Smuzhiyun 
4937*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4938*4882a593Smuzhiyun 		e1000_irq_disable(adapter);
4939*4882a593Smuzhiyun 
4940*4882a593Smuzhiyun 	__e1000_vlan_mode(adapter, features);
4941*4882a593Smuzhiyun 
4942*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4943*4882a593Smuzhiyun 		e1000_irq_enable(adapter);
4944*4882a593Smuzhiyun }
4945*4882a593Smuzhiyun 
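/**
 * e1000_vlan_rx_add_vid - add a VLAN ID to the hardware filter table
 * @netdev: network interface device structure
 * @proto: VLAN protocol (802.1Q)
 * @vid: VLAN ID to add
 **/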
4946*4882a593Smuzhiyun static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4947*4882a593Smuzhiyun 				 __be16 proto, u16 vid)
4948*4882a593Smuzhiyun {
4949*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4950*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4951*4882a593Smuzhiyun 	u32 vfta, index;
4952*4882a593Smuzhiyun 
4953*4882a593Smuzhiyun 	if ((hw->mng_cookie.status &
4954*4882a593Smuzhiyun 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4955*4882a593Smuzhiyun 	    (vid == adapter->mng_vlan_id))
4956*4882a593Smuzhiyun 		return 0;
4957*4882a593Smuzhiyun 
4958*4882a593Smuzhiyun 	if (!e1000_vlan_used(adapter))
4959*4882a593Smuzhiyun 		e1000_vlan_filter_on_off(adapter, true);
4960*4882a593Smuzhiyun 
4961*4882a593Smuzhiyun 	/* add VID to filter table */
4962*4882a593Smuzhiyun 	index = (vid >> 5) & 0x7F;
4963*4882a593Smuzhiyun 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4964*4882a593Smuzhiyun 	vfta |= (1 << (vid & 0x1F));
4965*4882a593Smuzhiyun 	e1000_write_vfta(hw, index, vfta);
4966*4882a593Smuzhiyun 
4967*4882a593Smuzhiyun 	set_bit(vid, adapter->active_vlans);
4968*4882a593Smuzhiyun 
4969*4882a593Smuzhiyun 	return 0;
4970*4882a593Smuzhiyun }
4971*4882a593Smuzhiyun 
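/**
 * e1000_vlan_rx_kill_vid - remove a VLAN ID from the hardware filter table
 * @netdev: network interface device structure
 * @proto: VLAN protocol (802.1Q)
 * @vid: VLAN ID to remove
 **/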
4972*4882a593Smuzhiyun static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4973*4882a593Smuzhiyun 				  __be16 proto, u16 vid)
4974*4882a593Smuzhiyun {
4975*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4976*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4977*4882a593Smuzhiyun 	u32 vfta, index;
4978*4882a593Smuzhiyun 
4979*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4980*4882a593Smuzhiyun 		e1000_irq_disable(adapter);
4981*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4982*4882a593Smuzhiyun 		e1000_irq_enable(adapter);
4983*4882a593Smuzhiyun 
4984*4882a593Smuzhiyun 	/* remove VID from filter table */
4985*4882a593Smuzhiyun 	index = (vid >> 5) & 0x7F;
4986*4882a593Smuzhiyun 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4987*4882a593Smuzhiyun 	vfta &= ~(1 << (vid & 0x1F));
4988*4882a593Smuzhiyun 	e1000_write_vfta(hw, index, vfta);
4989*4882a593Smuzhiyun 
4990*4882a593Smuzhiyun 	clear_bit(vid, adapter->active_vlans);
4991*4882a593Smuzhiyun 
4992*4882a593Smuzhiyun 	if (!e1000_vlan_used(adapter))
4993*4882a593Smuzhiyun 		e1000_vlan_filter_on_off(adapter, false);
4994*4882a593Smuzhiyun 
4995*4882a593Smuzhiyun 	return 0;
4996*4882a593Smuzhiyun }
4997*4882a593Smuzhiyun 
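/**
 * e1000_restore_vlan - re-register active VLAN IDs after a reset
 * @adapter: board private structure
 **/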
4998*4882a593Smuzhiyun static void e1000_restore_vlan(struct e1000_adapter *adapter)
4999*4882a593Smuzhiyun {
5000*4882a593Smuzhiyun 	u16 vid;
5001*4882a593Smuzhiyun 
5002*4882a593Smuzhiyun 	if (!e1000_vlan_used(adapter))
5003*4882a593Smuzhiyun 		return;
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun 	e1000_vlan_filter_on_off(adapter, true);
5006*4882a593Smuzhiyun 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5007*4882a593Smuzhiyun 		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5008*4882a593Smuzhiyun }
5009*4882a593Smuzhiyun 
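/**
 * e1000_set_spd_dplx - set a forced speed/duplex configuration
 * @adapter: board private structure
 * @spd: requested speed (10/100/1000)
 * @dplx: requested duplex (half/full)
 **/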
5010*4882a593Smuzhiyun int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5011*4882a593Smuzhiyun {
5012*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5013*4882a593Smuzhiyun 
5014*4882a593Smuzhiyun 	hw->autoneg = 0;
5015*4882a593Smuzhiyun 
5016*4882a593Smuzhiyun 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5017*4882a593Smuzhiyun 	 * for the switch() below to work
5018*4882a593Smuzhiyun 	 */
5019*4882a593Smuzhiyun 	if ((spd & 1) || (dplx & ~1))
5020*4882a593Smuzhiyun 		goto err_inval;
5021*4882a593Smuzhiyun 
5022*4882a593Smuzhiyun 	/* Fiber NICs only allow 1000 Mbps full duplex */
5023*4882a593Smuzhiyun 	if ((hw->media_type == e1000_media_type_fiber) &&
5024*4882a593Smuzhiyun 	    spd != SPEED_1000 &&
5025*4882a593Smuzhiyun 	    dplx != DUPLEX_FULL)
5026*4882a593Smuzhiyun 		goto err_inval;
5027*4882a593Smuzhiyun 
5028*4882a593Smuzhiyun 	switch (spd + dplx) {
5029*4882a593Smuzhiyun 	case SPEED_10 + DUPLEX_HALF:
5030*4882a593Smuzhiyun 		hw->forced_speed_duplex = e1000_10_half;
5031*4882a593Smuzhiyun 		break;
5032*4882a593Smuzhiyun 	case SPEED_10 + DUPLEX_FULL:
5033*4882a593Smuzhiyun 		hw->forced_speed_duplex = e1000_10_full;
5034*4882a593Smuzhiyun 		break;
5035*4882a593Smuzhiyun 	case SPEED_100 + DUPLEX_HALF:
5036*4882a593Smuzhiyun 		hw->forced_speed_duplex = e1000_100_half;
5037*4882a593Smuzhiyun 		break;
5038*4882a593Smuzhiyun 	case SPEED_100 + DUPLEX_FULL:
5039*4882a593Smuzhiyun 		hw->forced_speed_duplex = e1000_100_full;
5040*4882a593Smuzhiyun 		break;
5041*4882a593Smuzhiyun 	case SPEED_1000 + DUPLEX_FULL:
5042*4882a593Smuzhiyun 		hw->autoneg = 1;
5043*4882a593Smuzhiyun 		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5044*4882a593Smuzhiyun 		break;
5045*4882a593Smuzhiyun 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5046*4882a593Smuzhiyun 	default:
5047*4882a593Smuzhiyun 		goto err_inval;
5048*4882a593Smuzhiyun 	}
5049*4882a593Smuzhiyun 
5050*4882a593Smuzhiyun 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5051*4882a593Smuzhiyun 	hw->mdix = AUTO_ALL_MODES;
5052*4882a593Smuzhiyun 
5053*4882a593Smuzhiyun 	return 0;
5054*4882a593Smuzhiyun 
5055*4882a593Smuzhiyun err_inval:
5056*4882a593Smuzhiyun 	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5057*4882a593Smuzhiyun 	return -EINVAL;
5058*4882a593Smuzhiyun }
5059*4882a593Smuzhiyun 
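/**
 * __e1000_shutdown - quiesce the device and arm Wake-on-LAN if requested
 * @pdev: PCI device information struct
 * @enable_wake: set to true if wake-up should remain enabled
 **/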
5060*4882a593Smuzhiyun static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5061*4882a593Smuzhiyun {
5062*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
5063*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5064*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5065*4882a593Smuzhiyun 	u32 ctrl, ctrl_ext, rctl, status;
5066*4882a593Smuzhiyun 	u32 wufc = adapter->wol;
5067*4882a593Smuzhiyun 
5068*4882a593Smuzhiyun 	netif_device_detach(netdev);
5069*4882a593Smuzhiyun 
5070*4882a593Smuzhiyun 	if (netif_running(netdev)) {
5071*4882a593Smuzhiyun 		int count = E1000_CHECK_RESET_COUNT;
5072*4882a593Smuzhiyun 
5073*4882a593Smuzhiyun 		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5074*4882a593Smuzhiyun 			usleep_range(10000, 20000);
5075*4882a593Smuzhiyun 
5076*4882a593Smuzhiyun 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5077*4882a593Smuzhiyun 		e1000_down(adapter);
5078*4882a593Smuzhiyun 	}
5079*4882a593Smuzhiyun 
5080*4882a593Smuzhiyun 	status = er32(STATUS);
5081*4882a593Smuzhiyun 	if (status & E1000_STATUS_LU)
5082*4882a593Smuzhiyun 		wufc &= ~E1000_WUFC_LNKC;
5083*4882a593Smuzhiyun 
5084*4882a593Smuzhiyun 	if (wufc) {
5085*4882a593Smuzhiyun 		e1000_setup_rctl(adapter);
5086*4882a593Smuzhiyun 		e1000_set_rx_mode(netdev);
5087*4882a593Smuzhiyun 
5088*4882a593Smuzhiyun 		rctl = er32(RCTL);
5089*4882a593Smuzhiyun 
5090*4882a593Smuzhiyun 		/* turn on all-multi mode if wake on multicast is enabled */
5091*4882a593Smuzhiyun 		if (wufc & E1000_WUFC_MC)
5092*4882a593Smuzhiyun 			rctl |= E1000_RCTL_MPE;
5093*4882a593Smuzhiyun 
5094*4882a593Smuzhiyun 		/* enable receives in the hardware */
5095*4882a593Smuzhiyun 		ew32(RCTL, rctl | E1000_RCTL_EN);
5096*4882a593Smuzhiyun 
5097*4882a593Smuzhiyun 		if (hw->mac_type >= e1000_82540) {
5098*4882a593Smuzhiyun 			ctrl = er32(CTRL);
5099*4882a593Smuzhiyun 			/* advertise wake from D3Cold */
5100*4882a593Smuzhiyun 			#define E1000_CTRL_ADVD3WUC 0x00100000
5101*4882a593Smuzhiyun 			/* phy power management enable */
5102*4882a593Smuzhiyun 			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5103*4882a593Smuzhiyun 			ctrl |= E1000_CTRL_ADVD3WUC |
5104*4882a593Smuzhiyun 				E1000_CTRL_EN_PHY_PWR_MGMT;
5105*4882a593Smuzhiyun 			ew32(CTRL, ctrl);
5106*4882a593Smuzhiyun 		}
5107*4882a593Smuzhiyun 
5108*4882a593Smuzhiyun 		if (hw->media_type == e1000_media_type_fiber ||
5109*4882a593Smuzhiyun 		    hw->media_type == e1000_media_type_internal_serdes) {
5110*4882a593Smuzhiyun 			/* keep the laser running in D3 */
5111*4882a593Smuzhiyun 			ctrl_ext = er32(CTRL_EXT);
5112*4882a593Smuzhiyun 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5113*4882a593Smuzhiyun 			ew32(CTRL_EXT, ctrl_ext);
5114*4882a593Smuzhiyun 		}
5115*4882a593Smuzhiyun 
5116*4882a593Smuzhiyun 		ew32(WUC, E1000_WUC_PME_EN);
5117*4882a593Smuzhiyun 		ew32(WUFC, wufc);
5118*4882a593Smuzhiyun 	} else {
5119*4882a593Smuzhiyun 		ew32(WUC, 0);
5120*4882a593Smuzhiyun 		ew32(WUFC, 0);
5121*4882a593Smuzhiyun 	}
5122*4882a593Smuzhiyun 
5123*4882a593Smuzhiyun 	e1000_release_manageability(adapter);
5124*4882a593Smuzhiyun 
5125*4882a593Smuzhiyun 	*enable_wake = !!wufc;
5126*4882a593Smuzhiyun 
5127*4882a593Smuzhiyun 	/* make sure adapter isn't asleep if manageability is enabled */
5128*4882a593Smuzhiyun 	if (adapter->en_mng_pt)
5129*4882a593Smuzhiyun 		*enable_wake = true;
5130*4882a593Smuzhiyun 
5131*4882a593Smuzhiyun 	if (netif_running(netdev))
5132*4882a593Smuzhiyun 		e1000_free_irq(adapter);
5133*4882a593Smuzhiyun 
5134*4882a593Smuzhiyun 	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5135*4882a593Smuzhiyun 		pci_disable_device(pdev);
5136*4882a593Smuzhiyun 
5137*4882a593Smuzhiyun 	return 0;
5138*4882a593Smuzhiyun }
5139*4882a593Smuzhiyun 
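/**
 * e1000_suspend - power management suspend callback
 * @dev: device being suspended
 **/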
5140*4882a593Smuzhiyun static int __maybe_unused e1000_suspend(struct device *dev)
5141*4882a593Smuzhiyun {
5142*4882a593Smuzhiyun 	int retval;
5143*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev);
5144*4882a593Smuzhiyun 	bool wake;
5145*4882a593Smuzhiyun 
5146*4882a593Smuzhiyun 	retval = __e1000_shutdown(pdev, &wake);
5147*4882a593Smuzhiyun 	device_set_wakeup_enable(dev, wake);
5148*4882a593Smuzhiyun 
5149*4882a593Smuzhiyun 	return retval;
5150*4882a593Smuzhiyun }
5151*4882a593Smuzhiyun 
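/**
 * e1000_resume - power management resume callback
 * @dev: device being resumed
 **/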
5152*4882a593Smuzhiyun static int __maybe_unused e1000_resume(struct device *dev)
5153*4882a593Smuzhiyun {
5154*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev);
5155*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
5156*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5157*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5158*4882a593Smuzhiyun 	u32 err;
5159*4882a593Smuzhiyun 
5160*4882a593Smuzhiyun 	if (adapter->need_ioport)
5161*4882a593Smuzhiyun 		err = pci_enable_device(pdev);
5162*4882a593Smuzhiyun 	else
5163*4882a593Smuzhiyun 		err = pci_enable_device_mem(pdev);
5164*4882a593Smuzhiyun 	if (err) {
5165*4882a593Smuzhiyun 		pr_err("Cannot enable PCI device from suspend\n");
5166*4882a593Smuzhiyun 		return err;
5167*4882a593Smuzhiyun 	}
5168*4882a593Smuzhiyun 
5169*4882a593Smuzhiyun 	/* flush memory to make sure state is correct */
5170*4882a593Smuzhiyun 	smp_mb__before_atomic();
5171*4882a593Smuzhiyun 	clear_bit(__E1000_DISABLED, &adapter->flags);
5172*4882a593Smuzhiyun 	pci_set_master(pdev);
5173*4882a593Smuzhiyun 
5174*4882a593Smuzhiyun 	pci_enable_wake(pdev, PCI_D3hot, 0);
5175*4882a593Smuzhiyun 	pci_enable_wake(pdev, PCI_D3cold, 0);
5176*4882a593Smuzhiyun 
5177*4882a593Smuzhiyun 	if (netif_running(netdev)) {
5178*4882a593Smuzhiyun 		err = e1000_request_irq(adapter);
5179*4882a593Smuzhiyun 		if (err)
5180*4882a593Smuzhiyun 			return err;
5181*4882a593Smuzhiyun 	}
5182*4882a593Smuzhiyun 
5183*4882a593Smuzhiyun 	e1000_power_up_phy(adapter);
5184*4882a593Smuzhiyun 	e1000_reset(adapter);
5185*4882a593Smuzhiyun 	ew32(WUS, ~0);
5186*4882a593Smuzhiyun 
5187*4882a593Smuzhiyun 	e1000_init_manageability(adapter);
5188*4882a593Smuzhiyun 
5189*4882a593Smuzhiyun 	if (netif_running(netdev))
5190*4882a593Smuzhiyun 		e1000_up(adapter);
5191*4882a593Smuzhiyun 
5192*4882a593Smuzhiyun 	netif_device_attach(netdev);
5193*4882a593Smuzhiyun 
5194*4882a593Smuzhiyun 	return 0;
5195*4882a593Smuzhiyun }
5196*4882a593Smuzhiyun 
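/**
 * e1000_shutdown - PCI shutdown handler
 * @pdev: PCI device information struct
 **/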
5197*4882a593Smuzhiyun static void e1000_shutdown(struct pci_dev *pdev)
5198*4882a593Smuzhiyun {
5199*4882a593Smuzhiyun 	bool wake;
5200*4882a593Smuzhiyun 
5201*4882a593Smuzhiyun 	__e1000_shutdown(pdev, &wake);
5202*4882a593Smuzhiyun 
5203*4882a593Smuzhiyun 	if (system_state == SYSTEM_POWER_OFF) {
5204*4882a593Smuzhiyun 		pci_wake_from_d3(pdev, wake);
5205*4882a593Smuzhiyun 		pci_set_power_state(pdev, PCI_D3hot);
5206*4882a593Smuzhiyun 	}
5207*4882a593Smuzhiyun }
5208*4882a593Smuzhiyun 
5209*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
5210*4882a593Smuzhiyun /* Polling 'interrupt' - used by things like netconsole to send skbs
5211*4882a593Smuzhiyun  * without having to re-enable interrupts. It's not called while
5212*4882a593Smuzhiyun  * the interrupt routine is executing.
5213*4882a593Smuzhiyun  */
5214*4882a593Smuzhiyun static void e1000_netpoll(struct net_device *netdev)
5215*4882a593Smuzhiyun {
5216*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5217*4882a593Smuzhiyun 
5218*4882a593Smuzhiyun 	if (disable_hardirq(adapter->pdev->irq))
5219*4882a593Smuzhiyun 		e1000_intr(adapter->pdev->irq, netdev);
5220*4882a593Smuzhiyun 	enable_irq(adapter->pdev->irq);
5221*4882a593Smuzhiyun }
5222*4882a593Smuzhiyun #endif
5223*4882a593Smuzhiyun 
5224*4882a593Smuzhiyun /**
5225*4882a593Smuzhiyun  * e1000_io_error_detected - called when PCI error is detected
5226*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
5227*4882a593Smuzhiyun  * @state: The current pci connection state
5228*4882a593Smuzhiyun  *
5229*4882a593Smuzhiyun  * This function is called after a PCI bus error affecting
5230*4882a593Smuzhiyun  * this device has been detected.
5231*4882a593Smuzhiyun  */
5232*4882a593Smuzhiyun static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5233*4882a593Smuzhiyun 						pci_channel_state_t state)
5234*4882a593Smuzhiyun {
5235*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
5236*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5237*4882a593Smuzhiyun 
5238*4882a593Smuzhiyun 	netif_device_detach(netdev);
5239*4882a593Smuzhiyun 
5240*4882a593Smuzhiyun 	if (state == pci_channel_io_perm_failure)
5241*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
5242*4882a593Smuzhiyun 
5243*4882a593Smuzhiyun 	if (netif_running(netdev))
5244*4882a593Smuzhiyun 		e1000_down(adapter);
5245*4882a593Smuzhiyun 
5246*4882a593Smuzhiyun 	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5247*4882a593Smuzhiyun 		pci_disable_device(pdev);
5248*4882a593Smuzhiyun 
5249*4882a593Smuzhiyun 	/* Request a slot reset. */
5250*4882a593Smuzhiyun 	return PCI_ERS_RESULT_NEED_RESET;
5251*4882a593Smuzhiyun }
5252*4882a593Smuzhiyun 
5253*4882a593Smuzhiyun /**
5254*4882a593Smuzhiyun  * e1000_io_slot_reset - called after the pci bus has been reset.
5255*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
5256*4882a593Smuzhiyun  *
5257*4882a593Smuzhiyun  * Restart the card from scratch, as if from a cold-boot. Implementation
5258*4882a593Smuzhiyun  * resembles the first-half of the e1000_resume routine.
5259*4882a593Smuzhiyun  */
5260*4882a593Smuzhiyun static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5261*4882a593Smuzhiyun {
5262*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
5263*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5264*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5265*4882a593Smuzhiyun 	int err;
5266*4882a593Smuzhiyun 
5267*4882a593Smuzhiyun 	if (adapter->need_ioport)
5268*4882a593Smuzhiyun 		err = pci_enable_device(pdev);
5269*4882a593Smuzhiyun 	else
5270*4882a593Smuzhiyun 		err = pci_enable_device_mem(pdev);
5271*4882a593Smuzhiyun 	if (err) {
5272*4882a593Smuzhiyun 		pr_err("Cannot re-enable PCI device after reset.\n");
5273*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
5274*4882a593Smuzhiyun 	}
5275*4882a593Smuzhiyun 
5276*4882a593Smuzhiyun 	/* flush memory to make sure state is correct */
5277*4882a593Smuzhiyun 	smp_mb__before_atomic();
5278*4882a593Smuzhiyun 	clear_bit(__E1000_DISABLED, &adapter->flags);
5279*4882a593Smuzhiyun 	pci_set_master(pdev);
5280*4882a593Smuzhiyun 
5281*4882a593Smuzhiyun 	pci_enable_wake(pdev, PCI_D3hot, 0);
5282*4882a593Smuzhiyun 	pci_enable_wake(pdev, PCI_D3cold, 0);
5283*4882a593Smuzhiyun 
5284*4882a593Smuzhiyun 	e1000_reset(adapter);
5285*4882a593Smuzhiyun 	ew32(WUS, ~0);
5286*4882a593Smuzhiyun 
5287*4882a593Smuzhiyun 	return PCI_ERS_RESULT_RECOVERED;
5288*4882a593Smuzhiyun }
5289*4882a593Smuzhiyun 
5290*4882a593Smuzhiyun /**
5291*4882a593Smuzhiyun  * e1000_io_resume - called when traffic can start flowing again.
5292*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
5293*4882a593Smuzhiyun  *
5294*4882a593Smuzhiyun  * This callback is called when the error recovery driver tells us that
5295*4882a593Smuzhiyun  * its OK to resume normal operation. Implementation resembles the
5296*4882a593Smuzhiyun  * second-half of the e1000_resume routine.
5297*4882a593Smuzhiyun  */
5298*4882a593Smuzhiyun static void e1000_io_resume(struct pci_dev *pdev)
5299*4882a593Smuzhiyun {
5300*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
5301*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5302*4882a593Smuzhiyun 
5303*4882a593Smuzhiyun 	e1000_init_manageability(adapter);
5304*4882a593Smuzhiyun 
5305*4882a593Smuzhiyun 	if (netif_running(netdev)) {
5306*4882a593Smuzhiyun 		if (e1000_up(adapter)) {
5307*4882a593Smuzhiyun 			pr_info("can't bring device back up after reset\n");
5308*4882a593Smuzhiyun 			return;
5309*4882a593Smuzhiyun 		}
5310*4882a593Smuzhiyun 	}
5311*4882a593Smuzhiyun 
5312*4882a593Smuzhiyun 	netif_device_attach(netdev);
5313*4882a593Smuzhiyun }
5314*4882a593Smuzhiyun 
5315*4882a593Smuzhiyun /* e1000_main.c */