xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/e1000e/netdev.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright(c) 1999 - 2018 Intel Corporation. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/types.h>
8*4882a593Smuzhiyun #include <linux/init.h>
9*4882a593Smuzhiyun #include <linux/pci.h>
10*4882a593Smuzhiyun #include <linux/vmalloc.h>
11*4882a593Smuzhiyun #include <linux/pagemap.h>
12*4882a593Smuzhiyun #include <linux/delay.h>
13*4882a593Smuzhiyun #include <linux/netdevice.h>
14*4882a593Smuzhiyun #include <linux/interrupt.h>
15*4882a593Smuzhiyun #include <linux/tcp.h>
16*4882a593Smuzhiyun #include <linux/ipv6.h>
17*4882a593Smuzhiyun #include <linux/slab.h>
18*4882a593Smuzhiyun #include <net/checksum.h>
19*4882a593Smuzhiyun #include <net/ip6_checksum.h>
20*4882a593Smuzhiyun #include <linux/ethtool.h>
21*4882a593Smuzhiyun #include <linux/if_vlan.h>
22*4882a593Smuzhiyun #include <linux/cpu.h>
23*4882a593Smuzhiyun #include <linux/smp.h>
24*4882a593Smuzhiyun #include <linux/pm_qos.h>
25*4882a593Smuzhiyun #include <linux/pm_runtime.h>
26*4882a593Smuzhiyun #include <linux/aer.h>
27*4882a593Smuzhiyun #include <linux/prefetch.h>
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #include "e1000.h"
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun char e1000e_driver_name[] = "e1000e";
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
34*4882a593Smuzhiyun static int debug = -1;
35*4882a593Smuzhiyun module_param(debug, int, 0);
36*4882a593Smuzhiyun MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun static const struct e1000_info *e1000_info_tbl[] = {
39*4882a593Smuzhiyun 	[board_82571]		= &e1000_82571_info,
40*4882a593Smuzhiyun 	[board_82572]		= &e1000_82572_info,
41*4882a593Smuzhiyun 	[board_82573]		= &e1000_82573_info,
42*4882a593Smuzhiyun 	[board_82574]		= &e1000_82574_info,
43*4882a593Smuzhiyun 	[board_82583]		= &e1000_82583_info,
44*4882a593Smuzhiyun 	[board_80003es2lan]	= &e1000_es2_info,
45*4882a593Smuzhiyun 	[board_ich8lan]		= &e1000_ich8_info,
46*4882a593Smuzhiyun 	[board_ich9lan]		= &e1000_ich9_info,
47*4882a593Smuzhiyun 	[board_ich10lan]	= &e1000_ich10_info,
48*4882a593Smuzhiyun 	[board_pchlan]		= &e1000_pch_info,
49*4882a593Smuzhiyun 	[board_pch2lan]		= &e1000_pch2_info,
50*4882a593Smuzhiyun 	[board_pch_lpt]		= &e1000_pch_lpt_info,
51*4882a593Smuzhiyun 	[board_pch_spt]		= &e1000_pch_spt_info,
52*4882a593Smuzhiyun 	[board_pch_cnp]		= &e1000_pch_cnp_info,
53*4882a593Smuzhiyun 	[board_pch_tgp]		= &e1000_pch_tgp_info,
54*4882a593Smuzhiyun };
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun struct e1000_reg_info {
57*4882a593Smuzhiyun 	u32 ofs;
58*4882a593Smuzhiyun 	char *name;
59*4882a593Smuzhiyun };
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun static const struct e1000_reg_info e1000_reg_info_tbl[] = {
62*4882a593Smuzhiyun 	/* General Registers */
63*4882a593Smuzhiyun 	{E1000_CTRL, "CTRL"},
64*4882a593Smuzhiyun 	{E1000_STATUS, "STATUS"},
65*4882a593Smuzhiyun 	{E1000_CTRL_EXT, "CTRL_EXT"},
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	/* Interrupt Registers */
68*4882a593Smuzhiyun 	{E1000_ICR, "ICR"},
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	/* Rx Registers */
71*4882a593Smuzhiyun 	{E1000_RCTL, "RCTL"},
72*4882a593Smuzhiyun 	{E1000_RDLEN(0), "RDLEN"},
73*4882a593Smuzhiyun 	{E1000_RDH(0), "RDH"},
74*4882a593Smuzhiyun 	{E1000_RDT(0), "RDT"},
75*4882a593Smuzhiyun 	{E1000_RDTR, "RDTR"},
76*4882a593Smuzhiyun 	{E1000_RXDCTL(0), "RXDCTL"},
77*4882a593Smuzhiyun 	{E1000_ERT, "ERT"},
78*4882a593Smuzhiyun 	{E1000_RDBAL(0), "RDBAL"},
79*4882a593Smuzhiyun 	{E1000_RDBAH(0), "RDBAH"},
80*4882a593Smuzhiyun 	{E1000_RDFH, "RDFH"},
81*4882a593Smuzhiyun 	{E1000_RDFT, "RDFT"},
82*4882a593Smuzhiyun 	{E1000_RDFHS, "RDFHS"},
83*4882a593Smuzhiyun 	{E1000_RDFTS, "RDFTS"},
84*4882a593Smuzhiyun 	{E1000_RDFPC, "RDFPC"},
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun 	/* Tx Registers */
87*4882a593Smuzhiyun 	{E1000_TCTL, "TCTL"},
88*4882a593Smuzhiyun 	{E1000_TDBAL(0), "TDBAL"},
89*4882a593Smuzhiyun 	{E1000_TDBAH(0), "TDBAH"},
90*4882a593Smuzhiyun 	{E1000_TDLEN(0), "TDLEN"},
91*4882a593Smuzhiyun 	{E1000_TDH(0), "TDH"},
92*4882a593Smuzhiyun 	{E1000_TDT(0), "TDT"},
93*4882a593Smuzhiyun 	{E1000_TIDV, "TIDV"},
94*4882a593Smuzhiyun 	{E1000_TXDCTL(0), "TXDCTL"},
95*4882a593Smuzhiyun 	{E1000_TADV, "TADV"},
96*4882a593Smuzhiyun 	{E1000_TARC(0), "TARC"},
97*4882a593Smuzhiyun 	{E1000_TDFH, "TDFH"},
98*4882a593Smuzhiyun 	{E1000_TDFT, "TDFT"},
99*4882a593Smuzhiyun 	{E1000_TDFHS, "TDFHS"},
100*4882a593Smuzhiyun 	{E1000_TDFTS, "TDFTS"},
101*4882a593Smuzhiyun 	{E1000_TDFPC, "TDFPC"},
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	/* List Terminator */
104*4882a593Smuzhiyun 	{0, NULL}
105*4882a593Smuzhiyun };
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /**
108*4882a593Smuzhiyun  * __ew32_prepare - prepare to write to MAC CSR register on certain parts
109*4882a593Smuzhiyun  * @hw: pointer to the HW structure
110*4882a593Smuzhiyun  *
111*4882a593Smuzhiyun  * When updating the MAC CSR registers, the Manageability Engine (ME) could
112*4882a593Smuzhiyun  * be accessing the registers at the same time.  Normally, this is handled in
113*4882a593Smuzhiyun  * h/w by an arbiter but on some parts there is a bug that acknowledges Host
114*4882a593Smuzhiyun  * accesses later than it should which could result in the register to have
115*4882a593Smuzhiyun  * an incorrect value.  Workaround this by checking the FWSM register which
116*4882a593Smuzhiyun  * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
117*4882a593Smuzhiyun  * and try again a number of times.
118*4882a593Smuzhiyun  **/
__ew32_prepare(struct e1000_hw * hw)119*4882a593Smuzhiyun static void __ew32_prepare(struct e1000_hw *hw)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
124*4882a593Smuzhiyun 		udelay(50);
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun 
__ew32(struct e1000_hw * hw,unsigned long reg,u32 val)127*4882a593Smuzhiyun void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun 	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
130*4882a593Smuzhiyun 		__ew32_prepare(hw);
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	writel(val, hw->hw_addr + reg);
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun /**
136*4882a593Smuzhiyun  * e1000_regdump - register printout routine
137*4882a593Smuzhiyun  * @hw: pointer to the HW structure
138*4882a593Smuzhiyun  * @reginfo: pointer to the register info table
139*4882a593Smuzhiyun  **/
e1000_regdump(struct e1000_hw * hw,struct e1000_reg_info * reginfo)140*4882a593Smuzhiyun static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	int n = 0;
143*4882a593Smuzhiyun 	char rname[16];
144*4882a593Smuzhiyun 	u32 regs[8];
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	switch (reginfo->ofs) {
147*4882a593Smuzhiyun 	case E1000_RXDCTL(0):
148*4882a593Smuzhiyun 		for (n = 0; n < 2; n++)
149*4882a593Smuzhiyun 			regs[n] = __er32(hw, E1000_RXDCTL(n));
150*4882a593Smuzhiyun 		break;
151*4882a593Smuzhiyun 	case E1000_TXDCTL(0):
152*4882a593Smuzhiyun 		for (n = 0; n < 2; n++)
153*4882a593Smuzhiyun 			regs[n] = __er32(hw, E1000_TXDCTL(n));
154*4882a593Smuzhiyun 		break;
155*4882a593Smuzhiyun 	case E1000_TARC(0):
156*4882a593Smuzhiyun 		for (n = 0; n < 2; n++)
157*4882a593Smuzhiyun 			regs[n] = __er32(hw, E1000_TARC(n));
158*4882a593Smuzhiyun 		break;
159*4882a593Smuzhiyun 	default:
160*4882a593Smuzhiyun 		pr_info("%-15s %08x\n",
161*4882a593Smuzhiyun 			reginfo->name, __er32(hw, reginfo->ofs));
162*4882a593Smuzhiyun 		return;
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
166*4882a593Smuzhiyun 	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun 
e1000e_dump_ps_pages(struct e1000_adapter * adapter,struct e1000_buffer * bi)169*4882a593Smuzhiyun static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
170*4882a593Smuzhiyun 				 struct e1000_buffer *bi)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun 	int i;
173*4882a593Smuzhiyun 	struct e1000_ps_page *ps_page;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	for (i = 0; i < adapter->rx_ps_pages; i++) {
176*4882a593Smuzhiyun 		ps_page = &bi->ps_pages[i];
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 		if (ps_page->page) {
179*4882a593Smuzhiyun 			pr_info("packet dump for ps_page %d:\n", i);
180*4882a593Smuzhiyun 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
181*4882a593Smuzhiyun 				       16, 1, page_address(ps_page->page),
182*4882a593Smuzhiyun 				       PAGE_SIZE, true);
183*4882a593Smuzhiyun 		}
184*4882a593Smuzhiyun 	}
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun /**
188*4882a593Smuzhiyun  * e1000e_dump - Print registers, Tx-ring and Rx-ring
189*4882a593Smuzhiyun  * @adapter: board private structure
190*4882a593Smuzhiyun  **/
e1000e_dump(struct e1000_adapter * adapter)191*4882a593Smuzhiyun static void e1000e_dump(struct e1000_adapter *adapter)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
194*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
195*4882a593Smuzhiyun 	struct e1000_reg_info *reginfo;
196*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
197*4882a593Smuzhiyun 	struct e1000_tx_desc *tx_desc;
198*4882a593Smuzhiyun 	struct my_u0 {
199*4882a593Smuzhiyun 		__le64 a;
200*4882a593Smuzhiyun 		__le64 b;
201*4882a593Smuzhiyun 	} *u0;
202*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
203*4882a593Smuzhiyun 	struct e1000_ring *rx_ring = adapter->rx_ring;
204*4882a593Smuzhiyun 	union e1000_rx_desc_packet_split *rx_desc_ps;
205*4882a593Smuzhiyun 	union e1000_rx_desc_extended *rx_desc;
206*4882a593Smuzhiyun 	struct my_u1 {
207*4882a593Smuzhiyun 		__le64 a;
208*4882a593Smuzhiyun 		__le64 b;
209*4882a593Smuzhiyun 		__le64 c;
210*4882a593Smuzhiyun 		__le64 d;
211*4882a593Smuzhiyun 	} *u1;
212*4882a593Smuzhiyun 	u32 staterr;
213*4882a593Smuzhiyun 	int i = 0;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	if (!netif_msg_hw(adapter))
216*4882a593Smuzhiyun 		return;
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	/* Print netdevice Info */
219*4882a593Smuzhiyun 	if (netdev) {
220*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev, "Net device Info\n");
221*4882a593Smuzhiyun 		pr_info("Device Name     state            trans_start\n");
222*4882a593Smuzhiyun 		pr_info("%-15s %016lX %016lX\n", netdev->name,
223*4882a593Smuzhiyun 			netdev->state, dev_trans_start(netdev));
224*4882a593Smuzhiyun 	}
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	/* Print Registers */
227*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "Register Dump\n");
228*4882a593Smuzhiyun 	pr_info(" Register Name   Value\n");
229*4882a593Smuzhiyun 	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
230*4882a593Smuzhiyun 	     reginfo->name; reginfo++) {
231*4882a593Smuzhiyun 		e1000_regdump(hw, reginfo);
232*4882a593Smuzhiyun 	}
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	/* Print Tx Ring Summary */
235*4882a593Smuzhiyun 	if (!netdev || !netif_running(netdev))
236*4882a593Smuzhiyun 		return;
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
239*4882a593Smuzhiyun 	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
240*4882a593Smuzhiyun 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
241*4882a593Smuzhiyun 	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
242*4882a593Smuzhiyun 		0, tx_ring->next_to_use, tx_ring->next_to_clean,
243*4882a593Smuzhiyun 		(unsigned long long)buffer_info->dma,
244*4882a593Smuzhiyun 		buffer_info->length,
245*4882a593Smuzhiyun 		buffer_info->next_to_watch,
246*4882a593Smuzhiyun 		(unsigned long long)buffer_info->time_stamp);
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	/* Print Tx Ring */
249*4882a593Smuzhiyun 	if (!netif_msg_tx_done(adapter))
250*4882a593Smuzhiyun 		goto rx_ring_summary;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
255*4882a593Smuzhiyun 	 *
256*4882a593Smuzhiyun 	 * Legacy Transmit Descriptor
257*4882a593Smuzhiyun 	 *   +--------------------------------------------------------------+
258*4882a593Smuzhiyun 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
259*4882a593Smuzhiyun 	 *   +--------------------------------------------------------------+
260*4882a593Smuzhiyun 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
261*4882a593Smuzhiyun 	 *   +--------------------------------------------------------------+
262*4882a593Smuzhiyun 	 *   63       48 47        36 35    32 31     24 23    16 15        0
263*4882a593Smuzhiyun 	 *
264*4882a593Smuzhiyun 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
265*4882a593Smuzhiyun 	 *   63      48 47    40 39       32 31             16 15    8 7      0
266*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
267*4882a593Smuzhiyun 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
268*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
269*4882a593Smuzhiyun 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
270*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
271*4882a593Smuzhiyun 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
272*4882a593Smuzhiyun 	 *
273*4882a593Smuzhiyun 	 * Extended Data Descriptor (DTYP=0x1)
274*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
275*4882a593Smuzhiyun 	 * 0 |                     Buffer Address [63:0]                      |
276*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
277*4882a593Smuzhiyun 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
278*4882a593Smuzhiyun 	 *   +----------------------------------------------------------------+
279*4882a593Smuzhiyun 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
280*4882a593Smuzhiyun 	 */
281*4882a593Smuzhiyun 	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
282*4882a593Smuzhiyun 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
283*4882a593Smuzhiyun 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
284*4882a593Smuzhiyun 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
285*4882a593Smuzhiyun 		const char *next_desc;
286*4882a593Smuzhiyun 		tx_desc = E1000_TX_DESC(*tx_ring, i);
287*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
288*4882a593Smuzhiyun 		u0 = (struct my_u0 *)tx_desc;
289*4882a593Smuzhiyun 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
290*4882a593Smuzhiyun 			next_desc = " NTC/U";
291*4882a593Smuzhiyun 		else if (i == tx_ring->next_to_use)
292*4882a593Smuzhiyun 			next_desc = " NTU";
293*4882a593Smuzhiyun 		else if (i == tx_ring->next_to_clean)
294*4882a593Smuzhiyun 			next_desc = " NTC";
295*4882a593Smuzhiyun 		else
296*4882a593Smuzhiyun 			next_desc = "";
297*4882a593Smuzhiyun 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
298*4882a593Smuzhiyun 			(!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
299*4882a593Smuzhiyun 			 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
300*4882a593Smuzhiyun 			i,
301*4882a593Smuzhiyun 			(unsigned long long)le64_to_cpu(u0->a),
302*4882a593Smuzhiyun 			(unsigned long long)le64_to_cpu(u0->b),
303*4882a593Smuzhiyun 			(unsigned long long)buffer_info->dma,
304*4882a593Smuzhiyun 			buffer_info->length, buffer_info->next_to_watch,
305*4882a593Smuzhiyun 			(unsigned long long)buffer_info->time_stamp,
306*4882a593Smuzhiyun 			buffer_info->skb, next_desc);
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 		if (netif_msg_pktdata(adapter) && buffer_info->skb)
309*4882a593Smuzhiyun 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
310*4882a593Smuzhiyun 				       16, 1, buffer_info->skb->data,
311*4882a593Smuzhiyun 				       buffer_info->skb->len, true);
312*4882a593Smuzhiyun 	}
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	/* Print Rx Ring Summary */
315*4882a593Smuzhiyun rx_ring_summary:
316*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
317*4882a593Smuzhiyun 	pr_info("Queue [NTU] [NTC]\n");
318*4882a593Smuzhiyun 	pr_info(" %5d %5X %5X\n",
319*4882a593Smuzhiyun 		0, rx_ring->next_to_use, rx_ring->next_to_clean);
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	/* Print Rx Ring */
322*4882a593Smuzhiyun 	if (!netif_msg_rx_status(adapter))
323*4882a593Smuzhiyun 		return;
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
326*4882a593Smuzhiyun 	switch (adapter->rx_ps_pages) {
327*4882a593Smuzhiyun 	case 1:
328*4882a593Smuzhiyun 	case 2:
329*4882a593Smuzhiyun 	case 3:
330*4882a593Smuzhiyun 		/* [Extended] Packet Split Receive Descriptor Format
331*4882a593Smuzhiyun 		 *
332*4882a593Smuzhiyun 		 *    +-----------------------------------------------------+
333*4882a593Smuzhiyun 		 *  0 |                Buffer Address 0 [63:0]              |
334*4882a593Smuzhiyun 		 *    +-----------------------------------------------------+
335*4882a593Smuzhiyun 		 *  8 |                Buffer Address 1 [63:0]              |
336*4882a593Smuzhiyun 		 *    +-----------------------------------------------------+
337*4882a593Smuzhiyun 		 * 16 |                Buffer Address 2 [63:0]              |
338*4882a593Smuzhiyun 		 *    +-----------------------------------------------------+
339*4882a593Smuzhiyun 		 * 24 |                Buffer Address 3 [63:0]              |
340*4882a593Smuzhiyun 		 *    +-----------------------------------------------------+
341*4882a593Smuzhiyun 		 */
342*4882a593Smuzhiyun 		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
343*4882a593Smuzhiyun 		/* [Extended] Receive Descriptor (Write-Back) Format
344*4882a593Smuzhiyun 		 *
345*4882a593Smuzhiyun 		 *   63       48 47    32 31     13 12    8 7    4 3        0
346*4882a593Smuzhiyun 		 *   +------------------------------------------------------+
347*4882a593Smuzhiyun 		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
348*4882a593Smuzhiyun 		 *   | Checksum | Ident  |         | Queue |      |  Type   |
349*4882a593Smuzhiyun 		 *   +------------------------------------------------------+
350*4882a593Smuzhiyun 		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
351*4882a593Smuzhiyun 		 *   +------------------------------------------------------+
352*4882a593Smuzhiyun 		 *   63       48 47    32 31            20 19               0
353*4882a593Smuzhiyun 		 */
354*4882a593Smuzhiyun 		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
355*4882a593Smuzhiyun 		for (i = 0; i < rx_ring->count; i++) {
356*4882a593Smuzhiyun 			const char *next_desc;
357*4882a593Smuzhiyun 			buffer_info = &rx_ring->buffer_info[i];
358*4882a593Smuzhiyun 			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
359*4882a593Smuzhiyun 			u1 = (struct my_u1 *)rx_desc_ps;
360*4882a593Smuzhiyun 			staterr =
361*4882a593Smuzhiyun 			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 			if (i == rx_ring->next_to_use)
364*4882a593Smuzhiyun 				next_desc = " NTU";
365*4882a593Smuzhiyun 			else if (i == rx_ring->next_to_clean)
366*4882a593Smuzhiyun 				next_desc = " NTC";
367*4882a593Smuzhiyun 			else
368*4882a593Smuzhiyun 				next_desc = "";
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 			if (staterr & E1000_RXD_STAT_DD) {
371*4882a593Smuzhiyun 				/* Descriptor Done */
372*4882a593Smuzhiyun 				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
373*4882a593Smuzhiyun 					"RWB", i,
374*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->a),
375*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->b),
376*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->c),
377*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->d),
378*4882a593Smuzhiyun 					buffer_info->skb, next_desc);
379*4882a593Smuzhiyun 			} else {
380*4882a593Smuzhiyun 				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
381*4882a593Smuzhiyun 					"R  ", i,
382*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->a),
383*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->b),
384*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->c),
385*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->d),
386*4882a593Smuzhiyun 					(unsigned long long)buffer_info->dma,
387*4882a593Smuzhiyun 					buffer_info->skb, next_desc);
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 				if (netif_msg_pktdata(adapter))
390*4882a593Smuzhiyun 					e1000e_dump_ps_pages(adapter,
391*4882a593Smuzhiyun 							     buffer_info);
392*4882a593Smuzhiyun 			}
393*4882a593Smuzhiyun 		}
394*4882a593Smuzhiyun 		break;
395*4882a593Smuzhiyun 	default:
396*4882a593Smuzhiyun 	case 0:
397*4882a593Smuzhiyun 		/* Extended Receive Descriptor (Read) Format
398*4882a593Smuzhiyun 		 *
399*4882a593Smuzhiyun 		 *   +-----------------------------------------------------+
400*4882a593Smuzhiyun 		 * 0 |                Buffer Address [63:0]                |
401*4882a593Smuzhiyun 		 *   +-----------------------------------------------------+
402*4882a593Smuzhiyun 		 * 8 |                      Reserved                       |
403*4882a593Smuzhiyun 		 *   +-----------------------------------------------------+
404*4882a593Smuzhiyun 		 */
405*4882a593Smuzhiyun 		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
406*4882a593Smuzhiyun 		/* Extended Receive Descriptor (Write-Back) Format
407*4882a593Smuzhiyun 		 *
408*4882a593Smuzhiyun 		 *   63       48 47    32 31    24 23            4 3        0
409*4882a593Smuzhiyun 		 *   +------------------------------------------------------+
410*4882a593Smuzhiyun 		 *   |     RSS Hash      |        |               |         |
411*4882a593Smuzhiyun 		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
412*4882a593Smuzhiyun 		 *   | Packet   | IP     |        |               |  Type   |
413*4882a593Smuzhiyun 		 *   | Checksum | Ident  |        |               |         |
414*4882a593Smuzhiyun 		 *   +------------------------------------------------------+
415*4882a593Smuzhiyun 		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
416*4882a593Smuzhiyun 		 *   +------------------------------------------------------+
417*4882a593Smuzhiyun 		 *   63       48 47    32 31            20 19               0
418*4882a593Smuzhiyun 		 */
419*4882a593Smuzhiyun 		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 		for (i = 0; i < rx_ring->count; i++) {
422*4882a593Smuzhiyun 			const char *next_desc;
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 			buffer_info = &rx_ring->buffer_info[i];
425*4882a593Smuzhiyun 			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
426*4882a593Smuzhiyun 			u1 = (struct my_u1 *)rx_desc;
427*4882a593Smuzhiyun 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 			if (i == rx_ring->next_to_use)
430*4882a593Smuzhiyun 				next_desc = " NTU";
431*4882a593Smuzhiyun 			else if (i == rx_ring->next_to_clean)
432*4882a593Smuzhiyun 				next_desc = " NTC";
433*4882a593Smuzhiyun 			else
434*4882a593Smuzhiyun 				next_desc = "";
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 			if (staterr & E1000_RXD_STAT_DD) {
437*4882a593Smuzhiyun 				/* Descriptor Done */
438*4882a593Smuzhiyun 				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
439*4882a593Smuzhiyun 					"RWB", i,
440*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->a),
441*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->b),
442*4882a593Smuzhiyun 					buffer_info->skb, next_desc);
443*4882a593Smuzhiyun 			} else {
444*4882a593Smuzhiyun 				pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
445*4882a593Smuzhiyun 					"R  ", i,
446*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->a),
447*4882a593Smuzhiyun 					(unsigned long long)le64_to_cpu(u1->b),
448*4882a593Smuzhiyun 					(unsigned long long)buffer_info->dma,
449*4882a593Smuzhiyun 					buffer_info->skb, next_desc);
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 				if (netif_msg_pktdata(adapter) &&
452*4882a593Smuzhiyun 				    buffer_info->skb)
453*4882a593Smuzhiyun 					print_hex_dump(KERN_INFO, "",
454*4882a593Smuzhiyun 						       DUMP_PREFIX_ADDRESS, 16,
455*4882a593Smuzhiyun 						       1,
456*4882a593Smuzhiyun 						       buffer_info->skb->data,
457*4882a593Smuzhiyun 						       adapter->rx_buffer_len,
458*4882a593Smuzhiyun 						       true);
459*4882a593Smuzhiyun 			}
460*4882a593Smuzhiyun 		}
461*4882a593Smuzhiyun 	}
462*4882a593Smuzhiyun }
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun /**
465*4882a593Smuzhiyun  * e1000_desc_unused - calculate if we have unused descriptors
466*4882a593Smuzhiyun  * @ring: pointer to ring struct to perform calculation on
467*4882a593Smuzhiyun  **/
e1000_desc_unused(struct e1000_ring * ring)468*4882a593Smuzhiyun static int e1000_desc_unused(struct e1000_ring *ring)
469*4882a593Smuzhiyun {
470*4882a593Smuzhiyun 	if (ring->next_to_clean > ring->next_to_use)
471*4882a593Smuzhiyun 		return ring->next_to_clean - ring->next_to_use - 1;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun /**
477*4882a593Smuzhiyun  * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
478*4882a593Smuzhiyun  * @adapter: board private structure
479*4882a593Smuzhiyun  * @hwtstamps: time stamp structure to update
480*4882a593Smuzhiyun  * @systim: unsigned 64bit system time value.
481*4882a593Smuzhiyun  *
482*4882a593Smuzhiyun  * Convert the system time value stored in the RX/TXSTMP registers into a
483*4882a593Smuzhiyun  * hwtstamp which can be used by the upper level time stamping functions.
484*4882a593Smuzhiyun  *
485*4882a593Smuzhiyun  * The 'systim_lock' spinlock is used to protect the consistency of the
486*4882a593Smuzhiyun  * system time value. This is needed because reading the 64 bit time
487*4882a593Smuzhiyun  * value involves reading two 32 bit registers. The first read latches the
488*4882a593Smuzhiyun  * value.
489*4882a593Smuzhiyun  **/
e1000e_systim_to_hwtstamp(struct e1000_adapter * adapter,struct skb_shared_hwtstamps * hwtstamps,u64 systim)490*4882a593Smuzhiyun static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
491*4882a593Smuzhiyun 				      struct skb_shared_hwtstamps *hwtstamps,
492*4882a593Smuzhiyun 				      u64 systim)
493*4882a593Smuzhiyun {
494*4882a593Smuzhiyun 	u64 ns;
495*4882a593Smuzhiyun 	unsigned long flags;
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	spin_lock_irqsave(&adapter->systim_lock, flags);
498*4882a593Smuzhiyun 	ns = timecounter_cyc2time(&adapter->tc, systim);
499*4882a593Smuzhiyun 	spin_unlock_irqrestore(&adapter->systim_lock, flags);
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 	memset(hwtstamps, 0, sizeof(*hwtstamps));
502*4882a593Smuzhiyun 	hwtstamps->hwtstamp = ns_to_ktime(ns);
503*4882a593Smuzhiyun }
504*4882a593Smuzhiyun 
505*4882a593Smuzhiyun /**
506*4882a593Smuzhiyun  * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
507*4882a593Smuzhiyun  * @adapter: board private structure
508*4882a593Smuzhiyun  * @status: descriptor extended error and status field
509*4882a593Smuzhiyun  * @skb: particular skb to include time stamp
510*4882a593Smuzhiyun  *
511*4882a593Smuzhiyun  * If the time stamp is valid, convert it into the timecounter ns value
512*4882a593Smuzhiyun  * and store that result into the shhwtstamps structure which is passed
513*4882a593Smuzhiyun  * up the network stack.
514*4882a593Smuzhiyun  **/
e1000e_rx_hwtstamp(struct e1000_adapter * adapter,u32 status,struct sk_buff * skb)515*4882a593Smuzhiyun static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
516*4882a593Smuzhiyun 			       struct sk_buff *skb)
517*4882a593Smuzhiyun {
518*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
519*4882a593Smuzhiyun 	u64 rxstmp;
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
522*4882a593Smuzhiyun 	    !(status & E1000_RXDEXT_STATERR_TST) ||
523*4882a593Smuzhiyun 	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
524*4882a593Smuzhiyun 		return;
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	/* The Rx time stamp registers contain the time stamp.  No other
527*4882a593Smuzhiyun 	 * received packet will be time stamped until the Rx time stamp
528*4882a593Smuzhiyun 	 * registers are read.  Because only one packet can be time stamped
529*4882a593Smuzhiyun 	 * at a time, the register values must belong to this packet and
530*4882a593Smuzhiyun 	 * therefore none of the other additional attributes need to be
531*4882a593Smuzhiyun 	 * compared.
532*4882a593Smuzhiyun 	 */
533*4882a593Smuzhiyun 	rxstmp = (u64)er32(RXSTMPL);
534*4882a593Smuzhiyun 	rxstmp |= (u64)er32(RXSTMPH) << 32;
535*4882a593Smuzhiyun 	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
538*4882a593Smuzhiyun }
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun /**
541*4882a593Smuzhiyun  * e1000_receive_skb - helper function to handle Rx indications
542*4882a593Smuzhiyun  * @adapter: board private structure
543*4882a593Smuzhiyun  * @netdev: pointer to netdev struct
544*4882a593Smuzhiyun  * @staterr: descriptor extended error and status field as written by hardware
545*4882a593Smuzhiyun  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
546*4882a593Smuzhiyun  * @skb: pointer to sk_buff to be indicated to stack
547*4882a593Smuzhiyun  **/
e1000_receive_skb(struct e1000_adapter * adapter,struct net_device * netdev,struct sk_buff * skb,u32 staterr,__le16 vlan)548*4882a593Smuzhiyun static void e1000_receive_skb(struct e1000_adapter *adapter,
549*4882a593Smuzhiyun 			      struct net_device *netdev, struct sk_buff *skb,
550*4882a593Smuzhiyun 			      u32 staterr, __le16 vlan)
551*4882a593Smuzhiyun {
552*4882a593Smuzhiyun 	u16 tag = le16_to_cpu(vlan);
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	e1000e_rx_hwtstamp(adapter, staterr, skb);
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun 	skb->protocol = eth_type_trans(skb, netdev);
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	if (staterr & E1000_RXD_STAT_VP)
559*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun 	napi_gro_receive(&adapter->napi, skb);
562*4882a593Smuzhiyun }
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun /**
565*4882a593Smuzhiyun  * e1000_rx_checksum - Receive Checksum Offload
566*4882a593Smuzhiyun  * @adapter: board private structure
567*4882a593Smuzhiyun  * @status_err: receive descriptor status and error fields
568*4882a593Smuzhiyun  * @skb: socket buffer with received data
569*4882a593Smuzhiyun  **/
e1000_rx_checksum(struct e1000_adapter * adapter,u32 status_err,struct sk_buff * skb)570*4882a593Smuzhiyun static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
571*4882a593Smuzhiyun 			      struct sk_buff *skb)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun 	u16 status = (u16)status_err;
574*4882a593Smuzhiyun 	u8 errors = (u8)(status_err >> 24);
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	skb_checksum_none_assert(skb);
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	/* Rx checksum disabled */
579*4882a593Smuzhiyun 	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
580*4882a593Smuzhiyun 		return;
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	/* Ignore Checksum bit is set */
583*4882a593Smuzhiyun 	if (status & E1000_RXD_STAT_IXSM)
584*4882a593Smuzhiyun 		return;
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun 	/* TCP/UDP checksum error bit or IP checksum error bit is set */
587*4882a593Smuzhiyun 	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
588*4882a593Smuzhiyun 		/* let the stack verify checksum errors */
589*4882a593Smuzhiyun 		adapter->hw_csum_err++;
590*4882a593Smuzhiyun 		return;
591*4882a593Smuzhiyun 	}
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	/* TCP/UDP Checksum has not been calculated */
594*4882a593Smuzhiyun 	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
595*4882a593Smuzhiyun 		return;
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun 	/* It must be a TCP or UDP packet with a valid checksum */
598*4882a593Smuzhiyun 	skb->ip_summed = CHECKSUM_UNNECESSARY;
599*4882a593Smuzhiyun 	adapter->hw_csum_good++;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun 
e1000e_update_rdt_wa(struct e1000_ring * rx_ring,unsigned int i)602*4882a593Smuzhiyun static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
603*4882a593Smuzhiyun {
604*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
605*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	__ew32_prepare(hw);
608*4882a593Smuzhiyun 	writel(i, rx_ring->tail);
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 	if (unlikely(i != readl(rx_ring->tail))) {
611*4882a593Smuzhiyun 		u32 rctl = er32(RCTL);
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
614*4882a593Smuzhiyun 		e_err("ME firmware caused invalid RDT - resetting\n");
615*4882a593Smuzhiyun 		schedule_work(&adapter->reset_task);
616*4882a593Smuzhiyun 	}
617*4882a593Smuzhiyun }
618*4882a593Smuzhiyun 
e1000e_update_tdt_wa(struct e1000_ring * tx_ring,unsigned int i)619*4882a593Smuzhiyun static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
620*4882a593Smuzhiyun {
621*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
622*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 	__ew32_prepare(hw);
625*4882a593Smuzhiyun 	writel(i, tx_ring->tail);
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun 	if (unlikely(i != readl(tx_ring->tail))) {
628*4882a593Smuzhiyun 		u32 tctl = er32(TCTL);
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 		ew32(TCTL, tctl & ~E1000_TCTL_EN);
631*4882a593Smuzhiyun 		e_err("ME firmware caused invalid TDT - resetting\n");
632*4882a593Smuzhiyun 		schedule_work(&adapter->reset_task);
633*4882a593Smuzhiyun 	}
634*4882a593Smuzhiyun }
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun /**
637*4882a593Smuzhiyun  * e1000_alloc_rx_buffers - Replace used receive buffers
638*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
639*4882a593Smuzhiyun  * @cleaned_count: number to reallocate
640*4882a593Smuzhiyun  * @gfp: flags for allocation
641*4882a593Smuzhiyun  **/
e1000_alloc_rx_buffers(struct e1000_ring * rx_ring,int cleaned_count,gfp_t gfp)642*4882a593Smuzhiyun static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
643*4882a593Smuzhiyun 				   int cleaned_count, gfp_t gfp)
644*4882a593Smuzhiyun {
645*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
646*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
647*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
648*4882a593Smuzhiyun 	union e1000_rx_desc_extended *rx_desc;
649*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
650*4882a593Smuzhiyun 	struct sk_buff *skb;
651*4882a593Smuzhiyun 	unsigned int i;
652*4882a593Smuzhiyun 	unsigned int bufsz = adapter->rx_buffer_len;
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun 	i = rx_ring->next_to_use;
655*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun 	while (cleaned_count--) {
658*4882a593Smuzhiyun 		skb = buffer_info->skb;
659*4882a593Smuzhiyun 		if (skb) {
660*4882a593Smuzhiyun 			skb_trim(skb, 0);
661*4882a593Smuzhiyun 			goto map_skb;
662*4882a593Smuzhiyun 		}
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
665*4882a593Smuzhiyun 		if (!skb) {
666*4882a593Smuzhiyun 			/* Better luck next round */
667*4882a593Smuzhiyun 			adapter->alloc_rx_buff_failed++;
668*4882a593Smuzhiyun 			break;
669*4882a593Smuzhiyun 		}
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 		buffer_info->skb = skb;
672*4882a593Smuzhiyun map_skb:
673*4882a593Smuzhiyun 		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
674*4882a593Smuzhiyun 						  adapter->rx_buffer_len,
675*4882a593Smuzhiyun 						  DMA_FROM_DEVICE);
676*4882a593Smuzhiyun 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
677*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Rx DMA map failed\n");
678*4882a593Smuzhiyun 			adapter->rx_dma_failed++;
679*4882a593Smuzhiyun 			break;
680*4882a593Smuzhiyun 		}
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
683*4882a593Smuzhiyun 		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
686*4882a593Smuzhiyun 			/* Force memory writes to complete before letting h/w
687*4882a593Smuzhiyun 			 * know there are new descriptors to fetch.  (Only
688*4882a593Smuzhiyun 			 * applicable for weak-ordered memory model archs,
689*4882a593Smuzhiyun 			 * such as IA-64).
690*4882a593Smuzhiyun 			 */
691*4882a593Smuzhiyun 			wmb();
692*4882a593Smuzhiyun 			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
693*4882a593Smuzhiyun 				e1000e_update_rdt_wa(rx_ring, i);
694*4882a593Smuzhiyun 			else
695*4882a593Smuzhiyun 				writel(i, rx_ring->tail);
696*4882a593Smuzhiyun 		}
697*4882a593Smuzhiyun 		i++;
698*4882a593Smuzhiyun 		if (i == rx_ring->count)
699*4882a593Smuzhiyun 			i = 0;
700*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
701*4882a593Smuzhiyun 	}
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	rx_ring->next_to_use = i;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun /**
707*4882a593Smuzhiyun  * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
708*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
709*4882a593Smuzhiyun  * @cleaned_count: number to reallocate
710*4882a593Smuzhiyun  * @gfp: flags for allocation
711*4882a593Smuzhiyun  **/
e1000_alloc_rx_buffers_ps(struct e1000_ring * rx_ring,int cleaned_count,gfp_t gfp)712*4882a593Smuzhiyun static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
713*4882a593Smuzhiyun 				      int cleaned_count, gfp_t gfp)
714*4882a593Smuzhiyun {
715*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
716*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
717*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
718*4882a593Smuzhiyun 	union e1000_rx_desc_packet_split *rx_desc;
719*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
720*4882a593Smuzhiyun 	struct e1000_ps_page *ps_page;
721*4882a593Smuzhiyun 	struct sk_buff *skb;
722*4882a593Smuzhiyun 	unsigned int i, j;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	i = rx_ring->next_to_use;
725*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 	while (cleaned_count--) {
728*4882a593Smuzhiyun 		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
731*4882a593Smuzhiyun 			ps_page = &buffer_info->ps_pages[j];
732*4882a593Smuzhiyun 			if (j >= adapter->rx_ps_pages) {
733*4882a593Smuzhiyun 				/* all unused desc entries get hw null ptr */
734*4882a593Smuzhiyun 				rx_desc->read.buffer_addr[j + 1] =
735*4882a593Smuzhiyun 				    ~cpu_to_le64(0);
736*4882a593Smuzhiyun 				continue;
737*4882a593Smuzhiyun 			}
738*4882a593Smuzhiyun 			if (!ps_page->page) {
739*4882a593Smuzhiyun 				ps_page->page = alloc_page(gfp);
740*4882a593Smuzhiyun 				if (!ps_page->page) {
741*4882a593Smuzhiyun 					adapter->alloc_rx_buff_failed++;
742*4882a593Smuzhiyun 					goto no_buffers;
743*4882a593Smuzhiyun 				}
744*4882a593Smuzhiyun 				ps_page->dma = dma_map_page(&pdev->dev,
745*4882a593Smuzhiyun 							    ps_page->page,
746*4882a593Smuzhiyun 							    0, PAGE_SIZE,
747*4882a593Smuzhiyun 							    DMA_FROM_DEVICE);
748*4882a593Smuzhiyun 				if (dma_mapping_error(&pdev->dev,
749*4882a593Smuzhiyun 						      ps_page->dma)) {
750*4882a593Smuzhiyun 					dev_err(&adapter->pdev->dev,
751*4882a593Smuzhiyun 						"Rx DMA page map failed\n");
752*4882a593Smuzhiyun 					adapter->rx_dma_failed++;
753*4882a593Smuzhiyun 					goto no_buffers;
754*4882a593Smuzhiyun 				}
755*4882a593Smuzhiyun 			}
756*4882a593Smuzhiyun 			/* Refresh the desc even if buffer_addrs
757*4882a593Smuzhiyun 			 * didn't change because each write-back
758*4882a593Smuzhiyun 			 * erases this info.
759*4882a593Smuzhiyun 			 */
760*4882a593Smuzhiyun 			rx_desc->read.buffer_addr[j + 1] =
761*4882a593Smuzhiyun 			    cpu_to_le64(ps_page->dma);
762*4882a593Smuzhiyun 		}
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 		skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
765*4882a593Smuzhiyun 						  gfp);
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 		if (!skb) {
768*4882a593Smuzhiyun 			adapter->alloc_rx_buff_failed++;
769*4882a593Smuzhiyun 			break;
770*4882a593Smuzhiyun 		}
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 		buffer_info->skb = skb;
773*4882a593Smuzhiyun 		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
774*4882a593Smuzhiyun 						  adapter->rx_ps_bsize0,
775*4882a593Smuzhiyun 						  DMA_FROM_DEVICE);
776*4882a593Smuzhiyun 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
777*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Rx DMA map failed\n");
778*4882a593Smuzhiyun 			adapter->rx_dma_failed++;
779*4882a593Smuzhiyun 			/* cleanup skb */
780*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
781*4882a593Smuzhiyun 			buffer_info->skb = NULL;
782*4882a593Smuzhiyun 			break;
783*4882a593Smuzhiyun 		}
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
788*4882a593Smuzhiyun 			/* Force memory writes to complete before letting h/w
789*4882a593Smuzhiyun 			 * know there are new descriptors to fetch.  (Only
790*4882a593Smuzhiyun 			 * applicable for weak-ordered memory model archs,
791*4882a593Smuzhiyun 			 * such as IA-64).
792*4882a593Smuzhiyun 			 */
793*4882a593Smuzhiyun 			wmb();
794*4882a593Smuzhiyun 			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
795*4882a593Smuzhiyun 				e1000e_update_rdt_wa(rx_ring, i << 1);
796*4882a593Smuzhiyun 			else
797*4882a593Smuzhiyun 				writel(i << 1, rx_ring->tail);
798*4882a593Smuzhiyun 		}
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 		i++;
801*4882a593Smuzhiyun 		if (i == rx_ring->count)
802*4882a593Smuzhiyun 			i = 0;
803*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
804*4882a593Smuzhiyun 	}
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun no_buffers:
807*4882a593Smuzhiyun 	rx_ring->next_to_use = i;
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun /**
811*4882a593Smuzhiyun  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
812*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
813*4882a593Smuzhiyun  * @cleaned_count: number of buffers to allocate this pass
814*4882a593Smuzhiyun  * @gfp: flags for allocation
815*4882a593Smuzhiyun  **/
816*4882a593Smuzhiyun 
e1000_alloc_jumbo_rx_buffers(struct e1000_ring * rx_ring,int cleaned_count,gfp_t gfp)817*4882a593Smuzhiyun static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
818*4882a593Smuzhiyun 					 int cleaned_count, gfp_t gfp)
819*4882a593Smuzhiyun {
820*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
821*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
822*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
823*4882a593Smuzhiyun 	union e1000_rx_desc_extended *rx_desc;
824*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
825*4882a593Smuzhiyun 	struct sk_buff *skb;
826*4882a593Smuzhiyun 	unsigned int i;
827*4882a593Smuzhiyun 	unsigned int bufsz = 256 - 16;	/* for skb_reserve */
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	i = rx_ring->next_to_use;
830*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	while (cleaned_count--) {
833*4882a593Smuzhiyun 		skb = buffer_info->skb;
834*4882a593Smuzhiyun 		if (skb) {
835*4882a593Smuzhiyun 			skb_trim(skb, 0);
836*4882a593Smuzhiyun 			goto check_page;
837*4882a593Smuzhiyun 		}
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
840*4882a593Smuzhiyun 		if (unlikely(!skb)) {
841*4882a593Smuzhiyun 			/* Better luck next round */
842*4882a593Smuzhiyun 			adapter->alloc_rx_buff_failed++;
843*4882a593Smuzhiyun 			break;
844*4882a593Smuzhiyun 		}
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 		buffer_info->skb = skb;
847*4882a593Smuzhiyun check_page:
848*4882a593Smuzhiyun 		/* allocate a new page if necessary */
849*4882a593Smuzhiyun 		if (!buffer_info->page) {
850*4882a593Smuzhiyun 			buffer_info->page = alloc_page(gfp);
851*4882a593Smuzhiyun 			if (unlikely(!buffer_info->page)) {
852*4882a593Smuzhiyun 				adapter->alloc_rx_buff_failed++;
853*4882a593Smuzhiyun 				break;
854*4882a593Smuzhiyun 			}
855*4882a593Smuzhiyun 		}
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun 		if (!buffer_info->dma) {
858*4882a593Smuzhiyun 			buffer_info->dma = dma_map_page(&pdev->dev,
859*4882a593Smuzhiyun 							buffer_info->page, 0,
860*4882a593Smuzhiyun 							PAGE_SIZE,
861*4882a593Smuzhiyun 							DMA_FROM_DEVICE);
862*4882a593Smuzhiyun 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
863*4882a593Smuzhiyun 				adapter->alloc_rx_buff_failed++;
864*4882a593Smuzhiyun 				break;
865*4882a593Smuzhiyun 			}
866*4882a593Smuzhiyun 		}
867*4882a593Smuzhiyun 
868*4882a593Smuzhiyun 		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
869*4882a593Smuzhiyun 		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 		if (unlikely(++i == rx_ring->count))
872*4882a593Smuzhiyun 			i = 0;
873*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
874*4882a593Smuzhiyun 	}
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun 	if (likely(rx_ring->next_to_use != i)) {
877*4882a593Smuzhiyun 		rx_ring->next_to_use = i;
878*4882a593Smuzhiyun 		if (unlikely(i-- == 0))
879*4882a593Smuzhiyun 			i = (rx_ring->count - 1);
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 		/* Force memory writes to complete before letting h/w
882*4882a593Smuzhiyun 		 * know there are new descriptors to fetch.  (Only
883*4882a593Smuzhiyun 		 * applicable for weak-ordered memory model archs,
884*4882a593Smuzhiyun 		 * such as IA-64).
885*4882a593Smuzhiyun 		 */
886*4882a593Smuzhiyun 		wmb();
887*4882a593Smuzhiyun 		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
888*4882a593Smuzhiyun 			e1000e_update_rdt_wa(rx_ring, i);
889*4882a593Smuzhiyun 		else
890*4882a593Smuzhiyun 			writel(i, rx_ring->tail);
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun }
893*4882a593Smuzhiyun 
e1000_rx_hash(struct net_device * netdev,__le32 rss,struct sk_buff * skb)894*4882a593Smuzhiyun static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
895*4882a593Smuzhiyun 				 struct sk_buff *skb)
896*4882a593Smuzhiyun {
897*4882a593Smuzhiyun 	if (netdev->features & NETIF_F_RXHASH)
898*4882a593Smuzhiyun 		skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun /**
902*4882a593Smuzhiyun  * e1000_clean_rx_irq - Send received data up the network stack
903*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
904*4882a593Smuzhiyun  * @work_done: output parameter for indicating completed work
905*4882a593Smuzhiyun  * @work_to_do: how many packets we can clean
906*4882a593Smuzhiyun  *
907*4882a593Smuzhiyun  * the return value indicates whether actual cleaning was done, there
908*4882a593Smuzhiyun  * is no guarantee that everything was cleaned
909*4882a593Smuzhiyun  **/
e1000_clean_rx_irq(struct e1000_ring * rx_ring,int * work_done,int work_to_do)910*4882a593Smuzhiyun static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
911*4882a593Smuzhiyun 			       int work_to_do)
912*4882a593Smuzhiyun {
913*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
914*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
915*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
916*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
917*4882a593Smuzhiyun 	union e1000_rx_desc_extended *rx_desc, *next_rxd;
918*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info, *next_buffer;
919*4882a593Smuzhiyun 	u32 length, staterr;
920*4882a593Smuzhiyun 	unsigned int i;
921*4882a593Smuzhiyun 	int cleaned_count = 0;
922*4882a593Smuzhiyun 	bool cleaned = false;
923*4882a593Smuzhiyun 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 	i = rx_ring->next_to_clean;
926*4882a593Smuzhiyun 	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
927*4882a593Smuzhiyun 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
928*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun 	while (staterr & E1000_RXD_STAT_DD) {
931*4882a593Smuzhiyun 		struct sk_buff *skb;
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun 		if (*work_done >= work_to_do)
934*4882a593Smuzhiyun 			break;
935*4882a593Smuzhiyun 		(*work_done)++;
936*4882a593Smuzhiyun 		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun 		skb = buffer_info->skb;
939*4882a593Smuzhiyun 		buffer_info->skb = NULL;
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun 		prefetch(skb->data - NET_IP_ALIGN);
942*4882a593Smuzhiyun 
943*4882a593Smuzhiyun 		i++;
944*4882a593Smuzhiyun 		if (i == rx_ring->count)
945*4882a593Smuzhiyun 			i = 0;
946*4882a593Smuzhiyun 		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
947*4882a593Smuzhiyun 		prefetch(next_rxd);
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 		next_buffer = &rx_ring->buffer_info[i];
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 		cleaned = true;
952*4882a593Smuzhiyun 		cleaned_count++;
953*4882a593Smuzhiyun 		dma_unmap_single(&pdev->dev, buffer_info->dma,
954*4882a593Smuzhiyun 				 adapter->rx_buffer_len, DMA_FROM_DEVICE);
955*4882a593Smuzhiyun 		buffer_info->dma = 0;
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 		length = le16_to_cpu(rx_desc->wb.upper.length);
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 		/* !EOP means multiple descriptors were used to store a single
960*4882a593Smuzhiyun 		 * packet, if that's the case we need to toss it.  In fact, we
961*4882a593Smuzhiyun 		 * need to toss every packet with the EOP bit clear and the
962*4882a593Smuzhiyun 		 * next frame that _does_ have the EOP bit set, as it is by
963*4882a593Smuzhiyun 		 * definition only a frame fragment
964*4882a593Smuzhiyun 		 */
965*4882a593Smuzhiyun 		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
966*4882a593Smuzhiyun 			adapter->flags2 |= FLAG2_IS_DISCARDING;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
969*4882a593Smuzhiyun 			/* All receives must fit into a single buffer */
970*4882a593Smuzhiyun 			e_dbg("Receive packet consumed multiple buffers\n");
971*4882a593Smuzhiyun 			/* recycle */
972*4882a593Smuzhiyun 			buffer_info->skb = skb;
973*4882a593Smuzhiyun 			if (staterr & E1000_RXD_STAT_EOP)
974*4882a593Smuzhiyun 				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
975*4882a593Smuzhiyun 			goto next_desc;
976*4882a593Smuzhiyun 		}
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
979*4882a593Smuzhiyun 			     !(netdev->features & NETIF_F_RXALL))) {
980*4882a593Smuzhiyun 			/* recycle */
981*4882a593Smuzhiyun 			buffer_info->skb = skb;
982*4882a593Smuzhiyun 			goto next_desc;
983*4882a593Smuzhiyun 		}
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 		/* adjust length to remove Ethernet CRC */
986*4882a593Smuzhiyun 		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
987*4882a593Smuzhiyun 			/* If configured to store CRC, don't subtract FCS,
988*4882a593Smuzhiyun 			 * but keep the FCS bytes out of the total_rx_bytes
989*4882a593Smuzhiyun 			 * counter
990*4882a593Smuzhiyun 			 */
991*4882a593Smuzhiyun 			if (netdev->features & NETIF_F_RXFCS)
992*4882a593Smuzhiyun 				total_rx_bytes -= 4;
993*4882a593Smuzhiyun 			else
994*4882a593Smuzhiyun 				length -= 4;
995*4882a593Smuzhiyun 		}
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 		total_rx_bytes += length;
998*4882a593Smuzhiyun 		total_rx_packets++;
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun 		/* code added for copybreak, this should improve
1001*4882a593Smuzhiyun 		 * performance for small packets with large amounts
1002*4882a593Smuzhiyun 		 * of reassembly being done in the stack
1003*4882a593Smuzhiyun 		 */
1004*4882a593Smuzhiyun 		if (length < copybreak) {
1005*4882a593Smuzhiyun 			struct sk_buff *new_skb =
1006*4882a593Smuzhiyun 				napi_alloc_skb(&adapter->napi, length);
1007*4882a593Smuzhiyun 			if (new_skb) {
1008*4882a593Smuzhiyun 				skb_copy_to_linear_data_offset(new_skb,
1009*4882a593Smuzhiyun 							       -NET_IP_ALIGN,
1010*4882a593Smuzhiyun 							       (skb->data -
1011*4882a593Smuzhiyun 								NET_IP_ALIGN),
1012*4882a593Smuzhiyun 							       (length +
1013*4882a593Smuzhiyun 								NET_IP_ALIGN));
1014*4882a593Smuzhiyun 				/* save the skb in buffer_info as good */
1015*4882a593Smuzhiyun 				buffer_info->skb = skb;
1016*4882a593Smuzhiyun 				skb = new_skb;
1017*4882a593Smuzhiyun 			}
1018*4882a593Smuzhiyun 			/* else just continue with the old one */
1019*4882a593Smuzhiyun 		}
1020*4882a593Smuzhiyun 		/* end copybreak code */
1021*4882a593Smuzhiyun 		skb_put(skb, length);
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 		/* Receive Checksum Offload */
1024*4882a593Smuzhiyun 		e1000_rx_checksum(adapter, staterr, skb);
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 		e1000_receive_skb(adapter, netdev, skb, staterr,
1029*4882a593Smuzhiyun 				  rx_desc->wb.upper.vlan);
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun next_desc:
1032*4882a593Smuzhiyun 		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 		/* return some buffers to hardware, one at a time is too slow */
1035*4882a593Smuzhiyun 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1036*4882a593Smuzhiyun 			adapter->alloc_rx_buf(rx_ring, cleaned_count,
1037*4882a593Smuzhiyun 					      GFP_ATOMIC);
1038*4882a593Smuzhiyun 			cleaned_count = 0;
1039*4882a593Smuzhiyun 		}
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 		/* use prefetched values */
1042*4882a593Smuzhiyun 		rx_desc = next_rxd;
1043*4882a593Smuzhiyun 		buffer_info = next_buffer;
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1046*4882a593Smuzhiyun 	}
1047*4882a593Smuzhiyun 	rx_ring->next_to_clean = i;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	cleaned_count = e1000_desc_unused(rx_ring);
1050*4882a593Smuzhiyun 	if (cleaned_count)
1051*4882a593Smuzhiyun 		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	adapter->total_rx_bytes += total_rx_bytes;
1054*4882a593Smuzhiyun 	adapter->total_rx_packets += total_rx_packets;
1055*4882a593Smuzhiyun 	return cleaned;
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun 
e1000_put_txbuf(struct e1000_ring * tx_ring,struct e1000_buffer * buffer_info,bool drop)1058*4882a593Smuzhiyun static void e1000_put_txbuf(struct e1000_ring *tx_ring,
1059*4882a593Smuzhiyun 			    struct e1000_buffer *buffer_info,
1060*4882a593Smuzhiyun 			    bool drop)
1061*4882a593Smuzhiyun {
1062*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	if (buffer_info->dma) {
1065*4882a593Smuzhiyun 		if (buffer_info->mapped_as_page)
1066*4882a593Smuzhiyun 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1067*4882a593Smuzhiyun 				       buffer_info->length, DMA_TO_DEVICE);
1068*4882a593Smuzhiyun 		else
1069*4882a593Smuzhiyun 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1070*4882a593Smuzhiyun 					 buffer_info->length, DMA_TO_DEVICE);
1071*4882a593Smuzhiyun 		buffer_info->dma = 0;
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 	if (buffer_info->skb) {
1074*4882a593Smuzhiyun 		if (drop)
1075*4882a593Smuzhiyun 			dev_kfree_skb_any(buffer_info->skb);
1076*4882a593Smuzhiyun 		else
1077*4882a593Smuzhiyun 			dev_consume_skb_any(buffer_info->skb);
1078*4882a593Smuzhiyun 		buffer_info->skb = NULL;
1079*4882a593Smuzhiyun 	}
1080*4882a593Smuzhiyun 	buffer_info->time_stamp = 0;
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun 
e1000_print_hw_hang(struct work_struct * work)1083*4882a593Smuzhiyun static void e1000_print_hw_hang(struct work_struct *work)
1084*4882a593Smuzhiyun {
1085*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work,
1086*4882a593Smuzhiyun 						     struct e1000_adapter,
1087*4882a593Smuzhiyun 						     print_hang_task);
1088*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1089*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
1090*4882a593Smuzhiyun 	unsigned int i = tx_ring->next_to_clean;
1091*4882a593Smuzhiyun 	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
1092*4882a593Smuzhiyun 	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
1093*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1094*4882a593Smuzhiyun 	u16 phy_status, phy_1000t_status, phy_ext_status;
1095*4882a593Smuzhiyun 	u16 pci_status;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	if (test_bit(__E1000_DOWN, &adapter->state))
1098*4882a593Smuzhiyun 		return;
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
1101*4882a593Smuzhiyun 		/* May be block on write-back, flush and detect again
1102*4882a593Smuzhiyun 		 * flush pending descriptor writebacks to memory
1103*4882a593Smuzhiyun 		 */
1104*4882a593Smuzhiyun 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1105*4882a593Smuzhiyun 		/* execute the writes immediately */
1106*4882a593Smuzhiyun 		e1e_flush();
1107*4882a593Smuzhiyun 		/* Due to rare timing issues, write to TIDV again to ensure
1108*4882a593Smuzhiyun 		 * the write is successful
1109*4882a593Smuzhiyun 		 */
1110*4882a593Smuzhiyun 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1111*4882a593Smuzhiyun 		/* execute the writes immediately */
1112*4882a593Smuzhiyun 		e1e_flush();
1113*4882a593Smuzhiyun 		adapter->tx_hang_recheck = true;
1114*4882a593Smuzhiyun 		return;
1115*4882a593Smuzhiyun 	}
1116*4882a593Smuzhiyun 	adapter->tx_hang_recheck = false;
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	if (er32(TDH(0)) == er32(TDT(0))) {
1119*4882a593Smuzhiyun 		e_dbg("false hang detected, ignoring\n");
1120*4882a593Smuzhiyun 		return;
1121*4882a593Smuzhiyun 	}
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	/* Real hang detected */
1124*4882a593Smuzhiyun 	netif_stop_queue(netdev);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	e1e_rphy(hw, MII_BMSR, &phy_status);
1127*4882a593Smuzhiyun 	e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
1128*4882a593Smuzhiyun 	e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	/* detected Hardware unit hang */
1133*4882a593Smuzhiyun 	e_err("Detected Hardware Unit Hang:\n"
1134*4882a593Smuzhiyun 	      "  TDH                  <%x>\n"
1135*4882a593Smuzhiyun 	      "  TDT                  <%x>\n"
1136*4882a593Smuzhiyun 	      "  next_to_use          <%x>\n"
1137*4882a593Smuzhiyun 	      "  next_to_clean        <%x>\n"
1138*4882a593Smuzhiyun 	      "buffer_info[next_to_clean]:\n"
1139*4882a593Smuzhiyun 	      "  time_stamp           <%lx>\n"
1140*4882a593Smuzhiyun 	      "  next_to_watch        <%x>\n"
1141*4882a593Smuzhiyun 	      "  jiffies              <%lx>\n"
1142*4882a593Smuzhiyun 	      "  next_to_watch.status <%x>\n"
1143*4882a593Smuzhiyun 	      "MAC Status             <%x>\n"
1144*4882a593Smuzhiyun 	      "PHY Status             <%x>\n"
1145*4882a593Smuzhiyun 	      "PHY 1000BASE-T Status  <%x>\n"
1146*4882a593Smuzhiyun 	      "PHY Extended Status    <%x>\n"
1147*4882a593Smuzhiyun 	      "PCI Status             <%x>\n",
1148*4882a593Smuzhiyun 	      readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
1149*4882a593Smuzhiyun 	      tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
1150*4882a593Smuzhiyun 	      eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1151*4882a593Smuzhiyun 	      phy_status, phy_1000t_status, phy_ext_status, pci_status);
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	e1000e_dump(adapter);
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	/* Suggest workaround for known h/w issue */
1156*4882a593Smuzhiyun 	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1157*4882a593Smuzhiyun 		e_err("Try turning off Tx pause (flow control) via ethtool\n");
1158*4882a593Smuzhiyun }
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun /**
1161*4882a593Smuzhiyun  * e1000e_tx_hwtstamp_work - check for Tx time stamp
1162*4882a593Smuzhiyun  * @work: pointer to work struct
1163*4882a593Smuzhiyun  *
1164*4882a593Smuzhiyun  * This work function polls the TSYNCTXCTL valid bit to determine when a
1165*4882a593Smuzhiyun  * timestamp has been taken for the current stored skb.  The timestamp must
1166*4882a593Smuzhiyun  * be for this skb because only one such packet is allowed in the queue.
1167*4882a593Smuzhiyun  */
e1000e_tx_hwtstamp_work(struct work_struct * work)1168*4882a593Smuzhiyun static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1169*4882a593Smuzhiyun {
1170*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
1171*4882a593Smuzhiyun 						     tx_hwtstamp_work);
1172*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
1175*4882a593Smuzhiyun 		struct sk_buff *skb = adapter->tx_hwtstamp_skb;
1176*4882a593Smuzhiyun 		struct skb_shared_hwtstamps shhwtstamps;
1177*4882a593Smuzhiyun 		u64 txstmp;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 		txstmp = er32(TXSTMPL);
1180*4882a593Smuzhiyun 		txstmp |= (u64)er32(TXSTMPH) << 32;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 		e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 		/* Clear the global tx_hwtstamp_skb pointer and force writes
1185*4882a593Smuzhiyun 		 * prior to notifying the stack of a Tx timestamp.
1186*4882a593Smuzhiyun 		 */
1187*4882a593Smuzhiyun 		adapter->tx_hwtstamp_skb = NULL;
1188*4882a593Smuzhiyun 		wmb(); /* force write prior to skb_tstamp_tx */
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 		skb_tstamp_tx(skb, &shhwtstamps);
1191*4882a593Smuzhiyun 		dev_consume_skb_any(skb);
1192*4882a593Smuzhiyun 	} else if (time_after(jiffies, adapter->tx_hwtstamp_start
1193*4882a593Smuzhiyun 			      + adapter->tx_timeout_factor * HZ)) {
1194*4882a593Smuzhiyun 		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1195*4882a593Smuzhiyun 		adapter->tx_hwtstamp_skb = NULL;
1196*4882a593Smuzhiyun 		adapter->tx_hwtstamp_timeouts++;
1197*4882a593Smuzhiyun 		e_warn("clearing Tx timestamp hang\n");
1198*4882a593Smuzhiyun 	} else {
1199*4882a593Smuzhiyun 		/* reschedule to check later */
1200*4882a593Smuzhiyun 		schedule_work(&adapter->tx_hwtstamp_work);
1201*4882a593Smuzhiyun 	}
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun /**
1205*4882a593Smuzhiyun  * e1000_clean_tx_irq - Reclaim resources after transmit completes
1206*4882a593Smuzhiyun  * @tx_ring: Tx descriptor ring
1207*4882a593Smuzhiyun  *
1208*4882a593Smuzhiyun  * the return value indicates whether actual cleaning was done, there
1209*4882a593Smuzhiyun  * is no guarantee that everything was cleaned
1210*4882a593Smuzhiyun  **/
e1000_clean_tx_irq(struct e1000_ring * tx_ring)1211*4882a593Smuzhiyun static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1212*4882a593Smuzhiyun {
1213*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
1214*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1215*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1216*4882a593Smuzhiyun 	struct e1000_tx_desc *tx_desc, *eop_desc;
1217*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
1218*4882a593Smuzhiyun 	unsigned int i, eop;
1219*4882a593Smuzhiyun 	unsigned int count = 0;
1220*4882a593Smuzhiyun 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
1221*4882a593Smuzhiyun 	unsigned int bytes_compl = 0, pkts_compl = 0;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	i = tx_ring->next_to_clean;
1224*4882a593Smuzhiyun 	eop = tx_ring->buffer_info[i].next_to_watch;
1225*4882a593Smuzhiyun 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1228*4882a593Smuzhiyun 	       (count < tx_ring->count)) {
1229*4882a593Smuzhiyun 		bool cleaned = false;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 		dma_rmb();		/* read buffer_info after eop_desc */
1232*4882a593Smuzhiyun 		for (; !cleaned; count++) {
1233*4882a593Smuzhiyun 			tx_desc = E1000_TX_DESC(*tx_ring, i);
1234*4882a593Smuzhiyun 			buffer_info = &tx_ring->buffer_info[i];
1235*4882a593Smuzhiyun 			cleaned = (i == eop);
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 			if (cleaned) {
1238*4882a593Smuzhiyun 				total_tx_packets += buffer_info->segs;
1239*4882a593Smuzhiyun 				total_tx_bytes += buffer_info->bytecount;
1240*4882a593Smuzhiyun 				if (buffer_info->skb) {
1241*4882a593Smuzhiyun 					bytes_compl += buffer_info->skb->len;
1242*4882a593Smuzhiyun 					pkts_compl++;
1243*4882a593Smuzhiyun 				}
1244*4882a593Smuzhiyun 			}
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 			e1000_put_txbuf(tx_ring, buffer_info, false);
1247*4882a593Smuzhiyun 			tx_desc->upper.data = 0;
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 			i++;
1250*4882a593Smuzhiyun 			if (i == tx_ring->count)
1251*4882a593Smuzhiyun 				i = 0;
1252*4882a593Smuzhiyun 		}
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 		if (i == tx_ring->next_to_use)
1255*4882a593Smuzhiyun 			break;
1256*4882a593Smuzhiyun 		eop = tx_ring->buffer_info[i].next_to_watch;
1257*4882a593Smuzhiyun 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
1258*4882a593Smuzhiyun 	}
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	tx_ring->next_to_clean = i;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun #define TX_WAKE_THRESHOLD 32
1265*4882a593Smuzhiyun 	if (count && netif_carrier_ok(netdev) &&
1266*4882a593Smuzhiyun 	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
1267*4882a593Smuzhiyun 		/* Make sure that anybody stopping the queue after this
1268*4882a593Smuzhiyun 		 * sees the new next_to_clean.
1269*4882a593Smuzhiyun 		 */
1270*4882a593Smuzhiyun 		smp_mb();
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 		if (netif_queue_stopped(netdev) &&
1273*4882a593Smuzhiyun 		    !(test_bit(__E1000_DOWN, &adapter->state))) {
1274*4882a593Smuzhiyun 			netif_wake_queue(netdev);
1275*4882a593Smuzhiyun 			++adapter->restart_queue;
1276*4882a593Smuzhiyun 		}
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	if (adapter->detect_tx_hung) {
1280*4882a593Smuzhiyun 		/* Detect a transmit hang in hardware, this serializes the
1281*4882a593Smuzhiyun 		 * check with the clearing of time_stamp and movement of i
1282*4882a593Smuzhiyun 		 */
1283*4882a593Smuzhiyun 		adapter->detect_tx_hung = false;
1284*4882a593Smuzhiyun 		if (tx_ring->buffer_info[i].time_stamp &&
1285*4882a593Smuzhiyun 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
1286*4882a593Smuzhiyun 			       + (adapter->tx_timeout_factor * HZ)) &&
1287*4882a593Smuzhiyun 		    !(er32(STATUS) & E1000_STATUS_TXOFF))
1288*4882a593Smuzhiyun 			schedule_work(&adapter->print_hang_task);
1289*4882a593Smuzhiyun 		else
1290*4882a593Smuzhiyun 			adapter->tx_hang_recheck = false;
1291*4882a593Smuzhiyun 	}
1292*4882a593Smuzhiyun 	adapter->total_tx_bytes += total_tx_bytes;
1293*4882a593Smuzhiyun 	adapter->total_tx_packets += total_tx_packets;
1294*4882a593Smuzhiyun 	return count < tx_ring->count;
1295*4882a593Smuzhiyun }
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun /**
1298*4882a593Smuzhiyun  * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1299*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
1300*4882a593Smuzhiyun  * @work_done: output parameter for indicating completed work
1301*4882a593Smuzhiyun  * @work_to_do: how many packets we can clean
1302*4882a593Smuzhiyun  *
1303*4882a593Smuzhiyun  * the return value indicates whether actual cleaning was done, there
1304*4882a593Smuzhiyun  * is no guarantee that everything was cleaned
1305*4882a593Smuzhiyun  **/
e1000_clean_rx_irq_ps(struct e1000_ring * rx_ring,int * work_done,int work_to_do)1306*4882a593Smuzhiyun static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1307*4882a593Smuzhiyun 				  int work_to_do)
1308*4882a593Smuzhiyun {
1309*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
1310*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1311*4882a593Smuzhiyun 	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1312*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1313*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
1314*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info, *next_buffer;
1315*4882a593Smuzhiyun 	struct e1000_ps_page *ps_page;
1316*4882a593Smuzhiyun 	struct sk_buff *skb;
1317*4882a593Smuzhiyun 	unsigned int i, j;
1318*4882a593Smuzhiyun 	u32 length, staterr;
1319*4882a593Smuzhiyun 	int cleaned_count = 0;
1320*4882a593Smuzhiyun 	bool cleaned = false;
1321*4882a593Smuzhiyun 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	i = rx_ring->next_to_clean;
1324*4882a593Smuzhiyun 	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1325*4882a593Smuzhiyun 	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1326*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	while (staterr & E1000_RXD_STAT_DD) {
1329*4882a593Smuzhiyun 		if (*work_done >= work_to_do)
1330*4882a593Smuzhiyun 			break;
1331*4882a593Smuzhiyun 		(*work_done)++;
1332*4882a593Smuzhiyun 		skb = buffer_info->skb;
1333*4882a593Smuzhiyun 		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 		/* in the packet split case this is header only */
1336*4882a593Smuzhiyun 		prefetch(skb->data - NET_IP_ALIGN);
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 		i++;
1339*4882a593Smuzhiyun 		if (i == rx_ring->count)
1340*4882a593Smuzhiyun 			i = 0;
1341*4882a593Smuzhiyun 		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1342*4882a593Smuzhiyun 		prefetch(next_rxd);
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 		next_buffer = &rx_ring->buffer_info[i];
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 		cleaned = true;
1347*4882a593Smuzhiyun 		cleaned_count++;
1348*4882a593Smuzhiyun 		dma_unmap_single(&pdev->dev, buffer_info->dma,
1349*4882a593Smuzhiyun 				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1350*4882a593Smuzhiyun 		buffer_info->dma = 0;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 		/* see !EOP comment in other Rx routine */
1353*4882a593Smuzhiyun 		if (!(staterr & E1000_RXD_STAT_EOP))
1354*4882a593Smuzhiyun 			adapter->flags2 |= FLAG2_IS_DISCARDING;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
1357*4882a593Smuzhiyun 			e_dbg("Packet Split buffers didn't pick up the full packet\n");
1358*4882a593Smuzhiyun 			dev_kfree_skb_irq(skb);
1359*4882a593Smuzhiyun 			if (staterr & E1000_RXD_STAT_EOP)
1360*4882a593Smuzhiyun 				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1361*4882a593Smuzhiyun 			goto next_desc;
1362*4882a593Smuzhiyun 		}
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1365*4882a593Smuzhiyun 			     !(netdev->features & NETIF_F_RXALL))) {
1366*4882a593Smuzhiyun 			dev_kfree_skb_irq(skb);
1367*4882a593Smuzhiyun 			goto next_desc;
1368*4882a593Smuzhiyun 		}
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 		length = le16_to_cpu(rx_desc->wb.middle.length0);
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 		if (!length) {
1373*4882a593Smuzhiyun 			e_dbg("Last part of the packet spanning multiple descriptors\n");
1374*4882a593Smuzhiyun 			dev_kfree_skb_irq(skb);
1375*4882a593Smuzhiyun 			goto next_desc;
1376*4882a593Smuzhiyun 		}
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 		/* Good Receive */
1379*4882a593Smuzhiyun 		skb_put(skb, length);
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 		{
1382*4882a593Smuzhiyun 			/* this looks ugly, but it seems compiler issues make
1383*4882a593Smuzhiyun 			 * it more efficient than reusing j
1384*4882a593Smuzhiyun 			 */
1385*4882a593Smuzhiyun 			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 			/* page alloc/put takes too long and effects small
1388*4882a593Smuzhiyun 			 * packet throughput, so unsplit small packets and
1389*4882a593Smuzhiyun 			 * save the alloc/put only valid in softirq (napi)
1390*4882a593Smuzhiyun 			 * context to call kmap_*
1391*4882a593Smuzhiyun 			 */
1392*4882a593Smuzhiyun 			if (l1 && (l1 <= copybreak) &&
1393*4882a593Smuzhiyun 			    ((length + l1) <= adapter->rx_ps_bsize0)) {
1394*4882a593Smuzhiyun 				u8 *vaddr;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 				ps_page = &buffer_info->ps_pages[0];
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 				/* there is no documentation about how to call
1399*4882a593Smuzhiyun 				 * kmap_atomic, so we can't hold the mapping
1400*4882a593Smuzhiyun 				 * very long
1401*4882a593Smuzhiyun 				 */
1402*4882a593Smuzhiyun 				dma_sync_single_for_cpu(&pdev->dev,
1403*4882a593Smuzhiyun 							ps_page->dma,
1404*4882a593Smuzhiyun 							PAGE_SIZE,
1405*4882a593Smuzhiyun 							DMA_FROM_DEVICE);
1406*4882a593Smuzhiyun 				vaddr = kmap_atomic(ps_page->page);
1407*4882a593Smuzhiyun 				memcpy(skb_tail_pointer(skb), vaddr, l1);
1408*4882a593Smuzhiyun 				kunmap_atomic(vaddr);
1409*4882a593Smuzhiyun 				dma_sync_single_for_device(&pdev->dev,
1410*4882a593Smuzhiyun 							   ps_page->dma,
1411*4882a593Smuzhiyun 							   PAGE_SIZE,
1412*4882a593Smuzhiyun 							   DMA_FROM_DEVICE);
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 				/* remove the CRC */
1415*4882a593Smuzhiyun 				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1416*4882a593Smuzhiyun 					if (!(netdev->features & NETIF_F_RXFCS))
1417*4882a593Smuzhiyun 						l1 -= 4;
1418*4882a593Smuzhiyun 				}
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 				skb_put(skb, l1);
1421*4882a593Smuzhiyun 				goto copydone;
1422*4882a593Smuzhiyun 			}	/* if */
1423*4882a593Smuzhiyun 		}
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1426*4882a593Smuzhiyun 			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1427*4882a593Smuzhiyun 			if (!length)
1428*4882a593Smuzhiyun 				break;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 			ps_page = &buffer_info->ps_pages[j];
1431*4882a593Smuzhiyun 			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1432*4882a593Smuzhiyun 				       DMA_FROM_DEVICE);
1433*4882a593Smuzhiyun 			ps_page->dma = 0;
1434*4882a593Smuzhiyun 			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1435*4882a593Smuzhiyun 			ps_page->page = NULL;
1436*4882a593Smuzhiyun 			skb->len += length;
1437*4882a593Smuzhiyun 			skb->data_len += length;
1438*4882a593Smuzhiyun 			skb->truesize += PAGE_SIZE;
1439*4882a593Smuzhiyun 		}
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 		/* strip the ethernet crc, problem is we're using pages now so
1442*4882a593Smuzhiyun 		 * this whole operation can get a little cpu intensive
1443*4882a593Smuzhiyun 		 */
1444*4882a593Smuzhiyun 		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1445*4882a593Smuzhiyun 			if (!(netdev->features & NETIF_F_RXFCS))
1446*4882a593Smuzhiyun 				pskb_trim(skb, skb->len - 4);
1447*4882a593Smuzhiyun 		}
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun copydone:
1450*4882a593Smuzhiyun 		total_rx_bytes += skb->len;
1451*4882a593Smuzhiyun 		total_rx_packets++;
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 		e1000_rx_checksum(adapter, staterr, skb);
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 		if (rx_desc->wb.upper.header_status &
1458*4882a593Smuzhiyun 		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1459*4882a593Smuzhiyun 			adapter->rx_hdr_split++;
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 		e1000_receive_skb(adapter, netdev, skb, staterr,
1462*4882a593Smuzhiyun 				  rx_desc->wb.middle.vlan);
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun next_desc:
1465*4882a593Smuzhiyun 		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1466*4882a593Smuzhiyun 		buffer_info->skb = NULL;
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 		/* return some buffers to hardware, one at a time is too slow */
1469*4882a593Smuzhiyun 		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1470*4882a593Smuzhiyun 			adapter->alloc_rx_buf(rx_ring, cleaned_count,
1471*4882a593Smuzhiyun 					      GFP_ATOMIC);
1472*4882a593Smuzhiyun 			cleaned_count = 0;
1473*4882a593Smuzhiyun 		}
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 		/* use prefetched values */
1476*4882a593Smuzhiyun 		rx_desc = next_rxd;
1477*4882a593Smuzhiyun 		buffer_info = next_buffer;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1480*4882a593Smuzhiyun 	}
1481*4882a593Smuzhiyun 	rx_ring->next_to_clean = i;
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	cleaned_count = e1000_desc_unused(rx_ring);
1484*4882a593Smuzhiyun 	if (cleaned_count)
1485*4882a593Smuzhiyun 		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	adapter->total_rx_bytes += total_rx_bytes;
1488*4882a593Smuzhiyun 	adapter->total_rx_packets += total_rx_packets;
1489*4882a593Smuzhiyun 	return cleaned;
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun 
e1000_consume_page(struct e1000_buffer * bi,struct sk_buff * skb,u16 length)1492*4882a593Smuzhiyun static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1493*4882a593Smuzhiyun 			       u16 length)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun 	bi->page = NULL;
1496*4882a593Smuzhiyun 	skb->len += length;
1497*4882a593Smuzhiyun 	skb->data_len += length;
1498*4882a593Smuzhiyun 	skb->truesize += PAGE_SIZE;
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun /**
1502*4882a593Smuzhiyun  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1503*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
1504*4882a593Smuzhiyun  * @work_done: output parameter for indicating completed work
1505*4882a593Smuzhiyun  * @work_to_do: how many packets we can clean
1506*4882a593Smuzhiyun  *
1507*4882a593Smuzhiyun  * the return value indicates whether actual cleaning was done, there
1508*4882a593Smuzhiyun  * is no guarantee that everything was cleaned
1509*4882a593Smuzhiyun  **/
e1000_clean_jumbo_rx_irq(struct e1000_ring * rx_ring,int * work_done,int work_to_do)1510*4882a593Smuzhiyun static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1511*4882a593Smuzhiyun 				     int work_to_do)
1512*4882a593Smuzhiyun {
1513*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
1514*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1515*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
1516*4882a593Smuzhiyun 	union e1000_rx_desc_extended *rx_desc, *next_rxd;
1517*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info, *next_buffer;
1518*4882a593Smuzhiyun 	u32 length, staterr;
1519*4882a593Smuzhiyun 	unsigned int i;
1520*4882a593Smuzhiyun 	int cleaned_count = 0;
1521*4882a593Smuzhiyun 	bool cleaned = false;
1522*4882a593Smuzhiyun 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1523*4882a593Smuzhiyun 	struct skb_shared_info *shinfo;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	i = rx_ring->next_to_clean;
1526*4882a593Smuzhiyun 	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1527*4882a593Smuzhiyun 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1528*4882a593Smuzhiyun 	buffer_info = &rx_ring->buffer_info[i];
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	while (staterr & E1000_RXD_STAT_DD) {
1531*4882a593Smuzhiyun 		struct sk_buff *skb;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 		if (*work_done >= work_to_do)
1534*4882a593Smuzhiyun 			break;
1535*4882a593Smuzhiyun 		(*work_done)++;
1536*4882a593Smuzhiyun 		dma_rmb();	/* read descriptor and rx_buffer_info after status DD */
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 		skb = buffer_info->skb;
1539*4882a593Smuzhiyun 		buffer_info->skb = NULL;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 		++i;
1542*4882a593Smuzhiyun 		if (i == rx_ring->count)
1543*4882a593Smuzhiyun 			i = 0;
1544*4882a593Smuzhiyun 		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
1545*4882a593Smuzhiyun 		prefetch(next_rxd);
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 		next_buffer = &rx_ring->buffer_info[i];
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 		cleaned = true;
1550*4882a593Smuzhiyun 		cleaned_count++;
1551*4882a593Smuzhiyun 		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1552*4882a593Smuzhiyun 			       DMA_FROM_DEVICE);
1553*4882a593Smuzhiyun 		buffer_info->dma = 0;
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 		length = le16_to_cpu(rx_desc->wb.upper.length);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 		/* errors is only valid for DD + EOP descriptors */
1558*4882a593Smuzhiyun 		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
1559*4882a593Smuzhiyun 			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1560*4882a593Smuzhiyun 			      !(netdev->features & NETIF_F_RXALL)))) {
1561*4882a593Smuzhiyun 			/* recycle both page and skb */
1562*4882a593Smuzhiyun 			buffer_info->skb = skb;
1563*4882a593Smuzhiyun 			/* an error means any chain goes out the window too */
1564*4882a593Smuzhiyun 			if (rx_ring->rx_skb_top)
1565*4882a593Smuzhiyun 				dev_kfree_skb_irq(rx_ring->rx_skb_top);
1566*4882a593Smuzhiyun 			rx_ring->rx_skb_top = NULL;
1567*4882a593Smuzhiyun 			goto next_desc;
1568*4882a593Smuzhiyun 		}
1569*4882a593Smuzhiyun #define rxtop (rx_ring->rx_skb_top)
1570*4882a593Smuzhiyun 		if (!(staterr & E1000_RXD_STAT_EOP)) {
1571*4882a593Smuzhiyun 			/* this descriptor is only the beginning (or middle) */
1572*4882a593Smuzhiyun 			if (!rxtop) {
1573*4882a593Smuzhiyun 				/* this is the beginning of a chain */
1574*4882a593Smuzhiyun 				rxtop = skb;
1575*4882a593Smuzhiyun 				skb_fill_page_desc(rxtop, 0, buffer_info->page,
1576*4882a593Smuzhiyun 						   0, length);
1577*4882a593Smuzhiyun 			} else {
1578*4882a593Smuzhiyun 				/* this is the middle of a chain */
1579*4882a593Smuzhiyun 				shinfo = skb_shinfo(rxtop);
1580*4882a593Smuzhiyun 				skb_fill_page_desc(rxtop, shinfo->nr_frags,
1581*4882a593Smuzhiyun 						   buffer_info->page, 0,
1582*4882a593Smuzhiyun 						   length);
1583*4882a593Smuzhiyun 				/* re-use the skb, only consumed the page */
1584*4882a593Smuzhiyun 				buffer_info->skb = skb;
1585*4882a593Smuzhiyun 			}
1586*4882a593Smuzhiyun 			e1000_consume_page(buffer_info, rxtop, length);
1587*4882a593Smuzhiyun 			goto next_desc;
1588*4882a593Smuzhiyun 		} else {
1589*4882a593Smuzhiyun 			if (rxtop) {
1590*4882a593Smuzhiyun 				/* end of the chain */
1591*4882a593Smuzhiyun 				shinfo = skb_shinfo(rxtop);
1592*4882a593Smuzhiyun 				skb_fill_page_desc(rxtop, shinfo->nr_frags,
1593*4882a593Smuzhiyun 						   buffer_info->page, 0,
1594*4882a593Smuzhiyun 						   length);
1595*4882a593Smuzhiyun 				/* re-use the current skb, we only consumed the
1596*4882a593Smuzhiyun 				 * page
1597*4882a593Smuzhiyun 				 */
1598*4882a593Smuzhiyun 				buffer_info->skb = skb;
1599*4882a593Smuzhiyun 				skb = rxtop;
1600*4882a593Smuzhiyun 				rxtop = NULL;
1601*4882a593Smuzhiyun 				e1000_consume_page(buffer_info, skb, length);
1602*4882a593Smuzhiyun 			} else {
1603*4882a593Smuzhiyun 				/* no chain, got EOP, this buf is the packet
1604*4882a593Smuzhiyun 				 * copybreak to save the put_page/alloc_page
1605*4882a593Smuzhiyun 				 */
1606*4882a593Smuzhiyun 				if (length <= copybreak &&
1607*4882a593Smuzhiyun 				    skb_tailroom(skb) >= length) {
1608*4882a593Smuzhiyun 					u8 *vaddr;
1609*4882a593Smuzhiyun 					vaddr = kmap_atomic(buffer_info->page);
1610*4882a593Smuzhiyun 					memcpy(skb_tail_pointer(skb), vaddr,
1611*4882a593Smuzhiyun 					       length);
1612*4882a593Smuzhiyun 					kunmap_atomic(vaddr);
1613*4882a593Smuzhiyun 					/* re-use the page, so don't erase
1614*4882a593Smuzhiyun 					 * buffer_info->page
1615*4882a593Smuzhiyun 					 */
1616*4882a593Smuzhiyun 					skb_put(skb, length);
1617*4882a593Smuzhiyun 				} else {
1618*4882a593Smuzhiyun 					skb_fill_page_desc(skb, 0,
1619*4882a593Smuzhiyun 							   buffer_info->page, 0,
1620*4882a593Smuzhiyun 							   length);
1621*4882a593Smuzhiyun 					e1000_consume_page(buffer_info, skb,
1622*4882a593Smuzhiyun 							   length);
1623*4882a593Smuzhiyun 				}
1624*4882a593Smuzhiyun 			}
1625*4882a593Smuzhiyun 		}
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 		/* Receive Checksum Offload */
1628*4882a593Smuzhiyun 		e1000_rx_checksum(adapter, staterr, skb);
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 		/* probably a little skewed due to removing CRC */
1633*4882a593Smuzhiyun 		total_rx_bytes += skb->len;
1634*4882a593Smuzhiyun 		total_rx_packets++;
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 		/* eth type trans needs skb->data to point to something */
1637*4882a593Smuzhiyun 		if (!pskb_may_pull(skb, ETH_HLEN)) {
1638*4882a593Smuzhiyun 			e_err("pskb_may_pull failed.\n");
1639*4882a593Smuzhiyun 			dev_kfree_skb_irq(skb);
1640*4882a593Smuzhiyun 			goto next_desc;
1641*4882a593Smuzhiyun 		}
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 		e1000_receive_skb(adapter, netdev, skb, staterr,
1644*4882a593Smuzhiyun 				  rx_desc->wb.upper.vlan);
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun next_desc:
1647*4882a593Smuzhiyun 		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 		/* return some buffers to hardware, one at a time is too slow */
1650*4882a593Smuzhiyun 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1651*4882a593Smuzhiyun 			adapter->alloc_rx_buf(rx_ring, cleaned_count,
1652*4882a593Smuzhiyun 					      GFP_ATOMIC);
1653*4882a593Smuzhiyun 			cleaned_count = 0;
1654*4882a593Smuzhiyun 		}
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 		/* use prefetched values */
1657*4882a593Smuzhiyun 		rx_desc = next_rxd;
1658*4882a593Smuzhiyun 		buffer_info = next_buffer;
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1661*4882a593Smuzhiyun 	}
1662*4882a593Smuzhiyun 	rx_ring->next_to_clean = i;
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	cleaned_count = e1000_desc_unused(rx_ring);
1665*4882a593Smuzhiyun 	if (cleaned_count)
1666*4882a593Smuzhiyun 		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	adapter->total_rx_bytes += total_rx_bytes;
1669*4882a593Smuzhiyun 	adapter->total_rx_packets += total_rx_packets;
1670*4882a593Smuzhiyun 	return cleaned;
1671*4882a593Smuzhiyun }
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun /**
1674*4882a593Smuzhiyun  * e1000_clean_rx_ring - Free Rx Buffers per Queue
1675*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
1676*4882a593Smuzhiyun  **/
e1000_clean_rx_ring(struct e1000_ring * rx_ring)1677*4882a593Smuzhiyun static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
1680*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
1681*4882a593Smuzhiyun 	struct e1000_ps_page *ps_page;
1682*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
1683*4882a593Smuzhiyun 	unsigned int i, j;
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	/* Free all the Rx ring sk_buffs */
1686*4882a593Smuzhiyun 	for (i = 0; i < rx_ring->count; i++) {
1687*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
1688*4882a593Smuzhiyun 		if (buffer_info->dma) {
1689*4882a593Smuzhiyun 			if (adapter->clean_rx == e1000_clean_rx_irq)
1690*4882a593Smuzhiyun 				dma_unmap_single(&pdev->dev, buffer_info->dma,
1691*4882a593Smuzhiyun 						 adapter->rx_buffer_len,
1692*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
1693*4882a593Smuzhiyun 			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1694*4882a593Smuzhiyun 				dma_unmap_page(&pdev->dev, buffer_info->dma,
1695*4882a593Smuzhiyun 					       PAGE_SIZE, DMA_FROM_DEVICE);
1696*4882a593Smuzhiyun 			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1697*4882a593Smuzhiyun 				dma_unmap_single(&pdev->dev, buffer_info->dma,
1698*4882a593Smuzhiyun 						 adapter->rx_ps_bsize0,
1699*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
1700*4882a593Smuzhiyun 			buffer_info->dma = 0;
1701*4882a593Smuzhiyun 		}
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 		if (buffer_info->page) {
1704*4882a593Smuzhiyun 			put_page(buffer_info->page);
1705*4882a593Smuzhiyun 			buffer_info->page = NULL;
1706*4882a593Smuzhiyun 		}
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 		if (buffer_info->skb) {
1709*4882a593Smuzhiyun 			dev_kfree_skb(buffer_info->skb);
1710*4882a593Smuzhiyun 			buffer_info->skb = NULL;
1711*4882a593Smuzhiyun 		}
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1714*4882a593Smuzhiyun 			ps_page = &buffer_info->ps_pages[j];
1715*4882a593Smuzhiyun 			if (!ps_page->page)
1716*4882a593Smuzhiyun 				break;
1717*4882a593Smuzhiyun 			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1718*4882a593Smuzhiyun 				       DMA_FROM_DEVICE);
1719*4882a593Smuzhiyun 			ps_page->dma = 0;
1720*4882a593Smuzhiyun 			put_page(ps_page->page);
1721*4882a593Smuzhiyun 			ps_page->page = NULL;
1722*4882a593Smuzhiyun 		}
1723*4882a593Smuzhiyun 	}
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	/* there also may be some cached data from a chained receive */
1726*4882a593Smuzhiyun 	if (rx_ring->rx_skb_top) {
1727*4882a593Smuzhiyun 		dev_kfree_skb(rx_ring->rx_skb_top);
1728*4882a593Smuzhiyun 		rx_ring->rx_skb_top = NULL;
1729*4882a593Smuzhiyun 	}
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 	/* Zero out the descriptor ring */
1732*4882a593Smuzhiyun 	memset(rx_ring->desc, 0, rx_ring->size);
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	rx_ring->next_to_clean = 0;
1735*4882a593Smuzhiyun 	rx_ring->next_to_use = 0;
1736*4882a593Smuzhiyun 	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun 
e1000e_downshift_workaround(struct work_struct * work)1739*4882a593Smuzhiyun static void e1000e_downshift_workaround(struct work_struct *work)
1740*4882a593Smuzhiyun {
1741*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work,
1742*4882a593Smuzhiyun 						     struct e1000_adapter,
1743*4882a593Smuzhiyun 						     downshift_task);
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	if (test_bit(__E1000_DOWN, &adapter->state))
1746*4882a593Smuzhiyun 		return;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1749*4882a593Smuzhiyun }
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun /**
1752*4882a593Smuzhiyun  * e1000_intr_msi - Interrupt Handler
1753*4882a593Smuzhiyun  * @irq: interrupt number
1754*4882a593Smuzhiyun  * @data: pointer to a network interface device structure
1755*4882a593Smuzhiyun  **/
e1000_intr_msi(int __always_unused irq,void * data)1756*4882a593Smuzhiyun static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
1757*4882a593Smuzhiyun {
1758*4882a593Smuzhiyun 	struct net_device *netdev = data;
1759*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1760*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1761*4882a593Smuzhiyun 	u32 icr = er32(ICR);
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	/* read ICR disables interrupts using IAM */
1764*4882a593Smuzhiyun 	if (icr & E1000_ICR_LSC) {
1765*4882a593Smuzhiyun 		hw->mac.get_link_status = true;
1766*4882a593Smuzhiyun 		/* ICH8 workaround-- Call gig speed drop workaround on cable
1767*4882a593Smuzhiyun 		 * disconnect (LSC) before accessing any PHY registers
1768*4882a593Smuzhiyun 		 */
1769*4882a593Smuzhiyun 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1770*4882a593Smuzhiyun 		    (!(er32(STATUS) & E1000_STATUS_LU)))
1771*4882a593Smuzhiyun 			schedule_work(&adapter->downshift_task);
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 		/* 80003ES2LAN workaround-- For packet buffer work-around on
1774*4882a593Smuzhiyun 		 * link down event; disable receives here in the ISR and reset
1775*4882a593Smuzhiyun 		 * adapter in watchdog
1776*4882a593Smuzhiyun 		 */
1777*4882a593Smuzhiyun 		if (netif_carrier_ok(netdev) &&
1778*4882a593Smuzhiyun 		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
1779*4882a593Smuzhiyun 			/* disable receives */
1780*4882a593Smuzhiyun 			u32 rctl = er32(RCTL);
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
1783*4882a593Smuzhiyun 			adapter->flags |= FLAG_RESTART_NOW;
1784*4882a593Smuzhiyun 		}
1785*4882a593Smuzhiyun 		/* guard against interrupt when we're going down */
1786*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->state))
1787*4882a593Smuzhiyun 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1788*4882a593Smuzhiyun 	}
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	/* Reset on uncorrectable ECC error */
1791*4882a593Smuzhiyun 	if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
1792*4882a593Smuzhiyun 		u32 pbeccsts = er32(PBECCSTS);
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 		adapter->corr_errors +=
1795*4882a593Smuzhiyun 		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1796*4882a593Smuzhiyun 		adapter->uncorr_errors +=
1797*4882a593Smuzhiyun 		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1798*4882a593Smuzhiyun 		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 		/* Do the reset outside of interrupt context */
1801*4882a593Smuzhiyun 		schedule_work(&adapter->reset_task);
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 		/* return immediately since reset is imminent */
1804*4882a593Smuzhiyun 		return IRQ_HANDLED;
1805*4882a593Smuzhiyun 	}
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun 	if (napi_schedule_prep(&adapter->napi)) {
1808*4882a593Smuzhiyun 		adapter->total_tx_bytes = 0;
1809*4882a593Smuzhiyun 		adapter->total_tx_packets = 0;
1810*4882a593Smuzhiyun 		adapter->total_rx_bytes = 0;
1811*4882a593Smuzhiyun 		adapter->total_rx_packets = 0;
1812*4882a593Smuzhiyun 		__napi_schedule(&adapter->napi);
1813*4882a593Smuzhiyun 	}
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	return IRQ_HANDLED;
1816*4882a593Smuzhiyun }
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun /**
1819*4882a593Smuzhiyun  * e1000_intr - Interrupt Handler
1820*4882a593Smuzhiyun  * @irq: interrupt number
1821*4882a593Smuzhiyun  * @data: pointer to a network interface device structure
1822*4882a593Smuzhiyun  **/
e1000_intr(int __always_unused irq,void * data)1823*4882a593Smuzhiyun static irqreturn_t e1000_intr(int __always_unused irq, void *data)
1824*4882a593Smuzhiyun {
1825*4882a593Smuzhiyun 	struct net_device *netdev = data;
1826*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1827*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1828*4882a593Smuzhiyun 	u32 rctl, icr = er32(ICR);
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1831*4882a593Smuzhiyun 		return IRQ_NONE;	/* Not our interrupt */
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1834*4882a593Smuzhiyun 	 * not set, then the adapter didn't send an interrupt
1835*4882a593Smuzhiyun 	 */
1836*4882a593Smuzhiyun 	if (!(icr & E1000_ICR_INT_ASSERTED))
1837*4882a593Smuzhiyun 		return IRQ_NONE;
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	/* Interrupt Auto-Mask...upon reading ICR,
1840*4882a593Smuzhiyun 	 * interrupts are masked.  No need for the
1841*4882a593Smuzhiyun 	 * IMC write
1842*4882a593Smuzhiyun 	 */
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	if (icr & E1000_ICR_LSC) {
1845*4882a593Smuzhiyun 		hw->mac.get_link_status = true;
1846*4882a593Smuzhiyun 		/* ICH8 workaround-- Call gig speed drop workaround on cable
1847*4882a593Smuzhiyun 		 * disconnect (LSC) before accessing any PHY registers
1848*4882a593Smuzhiyun 		 */
1849*4882a593Smuzhiyun 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1850*4882a593Smuzhiyun 		    (!(er32(STATUS) & E1000_STATUS_LU)))
1851*4882a593Smuzhiyun 			schedule_work(&adapter->downshift_task);
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 		/* 80003ES2LAN workaround--
1854*4882a593Smuzhiyun 		 * For packet buffer work-around on link down event;
1855*4882a593Smuzhiyun 		 * disable receives here in the ISR and
1856*4882a593Smuzhiyun 		 * reset adapter in watchdog
1857*4882a593Smuzhiyun 		 */
1858*4882a593Smuzhiyun 		if (netif_carrier_ok(netdev) &&
1859*4882a593Smuzhiyun 		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1860*4882a593Smuzhiyun 			/* disable receives */
1861*4882a593Smuzhiyun 			rctl = er32(RCTL);
1862*4882a593Smuzhiyun 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
1863*4882a593Smuzhiyun 			adapter->flags |= FLAG_RESTART_NOW;
1864*4882a593Smuzhiyun 		}
1865*4882a593Smuzhiyun 		/* guard against interrupt when we're going down */
1866*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->state))
1867*4882a593Smuzhiyun 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1868*4882a593Smuzhiyun 	}
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	/* Reset on uncorrectable ECC error */
1871*4882a593Smuzhiyun 	if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
1872*4882a593Smuzhiyun 		u32 pbeccsts = er32(PBECCSTS);
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 		adapter->corr_errors +=
1875*4882a593Smuzhiyun 		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1876*4882a593Smuzhiyun 		adapter->uncorr_errors +=
1877*4882a593Smuzhiyun 		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1878*4882a593Smuzhiyun 		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 		/* Do the reset outside of interrupt context */
1881*4882a593Smuzhiyun 		schedule_work(&adapter->reset_task);
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 		/* return immediately since reset is imminent */
1884*4882a593Smuzhiyun 		return IRQ_HANDLED;
1885*4882a593Smuzhiyun 	}
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	if (napi_schedule_prep(&adapter->napi)) {
1888*4882a593Smuzhiyun 		adapter->total_tx_bytes = 0;
1889*4882a593Smuzhiyun 		adapter->total_tx_packets = 0;
1890*4882a593Smuzhiyun 		adapter->total_rx_bytes = 0;
1891*4882a593Smuzhiyun 		adapter->total_rx_packets = 0;
1892*4882a593Smuzhiyun 		__napi_schedule(&adapter->napi);
1893*4882a593Smuzhiyun 	}
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 	return IRQ_HANDLED;
1896*4882a593Smuzhiyun }
1897*4882a593Smuzhiyun 
e1000_msix_other(int __always_unused irq,void * data)1898*4882a593Smuzhiyun static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
1899*4882a593Smuzhiyun {
1900*4882a593Smuzhiyun 	struct net_device *netdev = data;
1901*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1902*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1903*4882a593Smuzhiyun 	u32 icr = er32(ICR);
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 	if (icr & adapter->eiac_mask)
1906*4882a593Smuzhiyun 		ew32(ICS, (icr & adapter->eiac_mask));
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	if (icr & E1000_ICR_LSC) {
1909*4882a593Smuzhiyun 		hw->mac.get_link_status = true;
1910*4882a593Smuzhiyun 		/* guard against interrupt when we're going down */
1911*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->state))
1912*4882a593Smuzhiyun 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
1913*4882a593Smuzhiyun 	}
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->state))
1916*4882a593Smuzhiyun 		ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 	return IRQ_HANDLED;
1919*4882a593Smuzhiyun }
1920*4882a593Smuzhiyun 
e1000_intr_msix_tx(int __always_unused irq,void * data)1921*4882a593Smuzhiyun static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
1922*4882a593Smuzhiyun {
1923*4882a593Smuzhiyun 	struct net_device *netdev = data;
1924*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1925*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1926*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	adapter->total_tx_bytes = 0;
1929*4882a593Smuzhiyun 	adapter->total_tx_packets = 0;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	if (!e1000_clean_tx_irq(tx_ring))
1932*4882a593Smuzhiyun 		/* Ring was not completely cleaned, so fire another interrupt */
1933*4882a593Smuzhiyun 		ew32(ICS, tx_ring->ims_val);
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->state))
1936*4882a593Smuzhiyun 		ew32(IMS, adapter->tx_ring->ims_val);
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	return IRQ_HANDLED;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun 
e1000_intr_msix_rx(int __always_unused irq,void * data)1941*4882a593Smuzhiyun static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
1942*4882a593Smuzhiyun {
1943*4882a593Smuzhiyun 	struct net_device *netdev = data;
1944*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
1945*4882a593Smuzhiyun 	struct e1000_ring *rx_ring = adapter->rx_ring;
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	/* Write the ITR value calculated at the end of the
1948*4882a593Smuzhiyun 	 * previous interrupt.
1949*4882a593Smuzhiyun 	 */
1950*4882a593Smuzhiyun 	if (rx_ring->set_itr) {
1951*4882a593Smuzhiyun 		u32 itr = rx_ring->itr_val ?
1952*4882a593Smuzhiyun 			  1000000000 / (rx_ring->itr_val * 256) : 0;
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 		writel(itr, rx_ring->itr_register);
1955*4882a593Smuzhiyun 		rx_ring->set_itr = 0;
1956*4882a593Smuzhiyun 	}
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	if (napi_schedule_prep(&adapter->napi)) {
1959*4882a593Smuzhiyun 		adapter->total_rx_bytes = 0;
1960*4882a593Smuzhiyun 		adapter->total_rx_packets = 0;
1961*4882a593Smuzhiyun 		__napi_schedule(&adapter->napi);
1962*4882a593Smuzhiyun 	}
1963*4882a593Smuzhiyun 	return IRQ_HANDLED;
1964*4882a593Smuzhiyun }
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun /**
1967*4882a593Smuzhiyun  * e1000_configure_msix - Configure MSI-X hardware
1968*4882a593Smuzhiyun  * @adapter: board private structure
1969*4882a593Smuzhiyun  *
1970*4882a593Smuzhiyun  * e1000_configure_msix sets up the hardware to properly
1971*4882a593Smuzhiyun  * generate MSI-X interrupts.
1972*4882a593Smuzhiyun  **/
e1000_configure_msix(struct e1000_adapter * adapter)1973*4882a593Smuzhiyun static void e1000_configure_msix(struct e1000_adapter *adapter)
1974*4882a593Smuzhiyun {
1975*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
1976*4882a593Smuzhiyun 	struct e1000_ring *rx_ring = adapter->rx_ring;
1977*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
1978*4882a593Smuzhiyun 	int vector = 0;
1979*4882a593Smuzhiyun 	u32 ctrl_ext, ivar = 0;
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 	adapter->eiac_mask = 0;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1984*4882a593Smuzhiyun 	if (hw->mac.type == e1000_82574) {
1985*4882a593Smuzhiyun 		u32 rfctl = er32(RFCTL);
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 		rfctl |= E1000_RFCTL_ACK_DIS;
1988*4882a593Smuzhiyun 		ew32(RFCTL, rfctl);
1989*4882a593Smuzhiyun 	}
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	/* Configure Rx vector */
1992*4882a593Smuzhiyun 	rx_ring->ims_val = E1000_IMS_RXQ0;
1993*4882a593Smuzhiyun 	adapter->eiac_mask |= rx_ring->ims_val;
1994*4882a593Smuzhiyun 	if (rx_ring->itr_val)
1995*4882a593Smuzhiyun 		writel(1000000000 / (rx_ring->itr_val * 256),
1996*4882a593Smuzhiyun 		       rx_ring->itr_register);
1997*4882a593Smuzhiyun 	else
1998*4882a593Smuzhiyun 		writel(1, rx_ring->itr_register);
1999*4882a593Smuzhiyun 	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	/* Configure Tx vector */
2002*4882a593Smuzhiyun 	tx_ring->ims_val = E1000_IMS_TXQ0;
2003*4882a593Smuzhiyun 	vector++;
2004*4882a593Smuzhiyun 	if (tx_ring->itr_val)
2005*4882a593Smuzhiyun 		writel(1000000000 / (tx_ring->itr_val * 256),
2006*4882a593Smuzhiyun 		       tx_ring->itr_register);
2007*4882a593Smuzhiyun 	else
2008*4882a593Smuzhiyun 		writel(1, tx_ring->itr_register);
2009*4882a593Smuzhiyun 	adapter->eiac_mask |= tx_ring->ims_val;
2010*4882a593Smuzhiyun 	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun 	/* set vector for Other Causes, e.g. link changes */
2013*4882a593Smuzhiyun 	vector++;
2014*4882a593Smuzhiyun 	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
2015*4882a593Smuzhiyun 	if (rx_ring->itr_val)
2016*4882a593Smuzhiyun 		writel(1000000000 / (rx_ring->itr_val * 256),
2017*4882a593Smuzhiyun 		       hw->hw_addr + E1000_EITR_82574(vector));
2018*4882a593Smuzhiyun 	else
2019*4882a593Smuzhiyun 		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	/* Cause Tx interrupts on every write back */
2022*4882a593Smuzhiyun 	ivar |= BIT(31);
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 	ew32(IVAR, ivar);
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 	/* enable MSI-X PBA support */
2027*4882a593Smuzhiyun 	ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME;
2028*4882a593Smuzhiyun 	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME;
2029*4882a593Smuzhiyun 	ew32(CTRL_EXT, ctrl_ext);
2030*4882a593Smuzhiyun 	e1e_flush();
2031*4882a593Smuzhiyun }
2032*4882a593Smuzhiyun 
e1000e_reset_interrupt_capability(struct e1000_adapter * adapter)2033*4882a593Smuzhiyun void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
2034*4882a593Smuzhiyun {
2035*4882a593Smuzhiyun 	if (adapter->msix_entries) {
2036*4882a593Smuzhiyun 		pci_disable_msix(adapter->pdev);
2037*4882a593Smuzhiyun 		kfree(adapter->msix_entries);
2038*4882a593Smuzhiyun 		adapter->msix_entries = NULL;
2039*4882a593Smuzhiyun 	} else if (adapter->flags & FLAG_MSI_ENABLED) {
2040*4882a593Smuzhiyun 		pci_disable_msi(adapter->pdev);
2041*4882a593Smuzhiyun 		adapter->flags &= ~FLAG_MSI_ENABLED;
2042*4882a593Smuzhiyun 	}
2043*4882a593Smuzhiyun }
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun /**
2046*4882a593Smuzhiyun  * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2047*4882a593Smuzhiyun  * @adapter: board private structure
2048*4882a593Smuzhiyun  *
2049*4882a593Smuzhiyun  * Attempt to configure interrupts using the best available
2050*4882a593Smuzhiyun  * capabilities of the hardware and kernel.
2051*4882a593Smuzhiyun  **/
e1000e_set_interrupt_capability(struct e1000_adapter * adapter)2052*4882a593Smuzhiyun void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2053*4882a593Smuzhiyun {
2054*4882a593Smuzhiyun 	int err;
2055*4882a593Smuzhiyun 	int i;
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	switch (adapter->int_mode) {
2058*4882a593Smuzhiyun 	case E1000E_INT_MODE_MSIX:
2059*4882a593Smuzhiyun 		if (adapter->flags & FLAG_HAS_MSIX) {
2060*4882a593Smuzhiyun 			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
2061*4882a593Smuzhiyun 			adapter->msix_entries = kcalloc(adapter->num_vectors,
2062*4882a593Smuzhiyun 							sizeof(struct
2063*4882a593Smuzhiyun 							       msix_entry),
2064*4882a593Smuzhiyun 							GFP_KERNEL);
2065*4882a593Smuzhiyun 			if (adapter->msix_entries) {
2066*4882a593Smuzhiyun 				struct e1000_adapter *a = adapter;
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 				for (i = 0; i < adapter->num_vectors; i++)
2069*4882a593Smuzhiyun 					adapter->msix_entries[i].entry = i;
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun 				err = pci_enable_msix_range(a->pdev,
2072*4882a593Smuzhiyun 							    a->msix_entries,
2073*4882a593Smuzhiyun 							    a->num_vectors,
2074*4882a593Smuzhiyun 							    a->num_vectors);
2075*4882a593Smuzhiyun 				if (err > 0)
2076*4882a593Smuzhiyun 					return;
2077*4882a593Smuzhiyun 			}
2078*4882a593Smuzhiyun 			/* MSI-X failed, so fall through and try MSI */
2079*4882a593Smuzhiyun 			e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
2080*4882a593Smuzhiyun 			e1000e_reset_interrupt_capability(adapter);
2081*4882a593Smuzhiyun 		}
2082*4882a593Smuzhiyun 		adapter->int_mode = E1000E_INT_MODE_MSI;
2083*4882a593Smuzhiyun 		fallthrough;
2084*4882a593Smuzhiyun 	case E1000E_INT_MODE_MSI:
2085*4882a593Smuzhiyun 		if (!pci_enable_msi(adapter->pdev)) {
2086*4882a593Smuzhiyun 			adapter->flags |= FLAG_MSI_ENABLED;
2087*4882a593Smuzhiyun 		} else {
2088*4882a593Smuzhiyun 			adapter->int_mode = E1000E_INT_MODE_LEGACY;
2089*4882a593Smuzhiyun 			e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
2090*4882a593Smuzhiyun 		}
2091*4882a593Smuzhiyun 		fallthrough;
2092*4882a593Smuzhiyun 	case E1000E_INT_MODE_LEGACY:
2093*4882a593Smuzhiyun 		/* Don't do anything; this is the system default */
2094*4882a593Smuzhiyun 		break;
2095*4882a593Smuzhiyun 	}
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	/* store the number of vectors being used */
2098*4882a593Smuzhiyun 	adapter->num_vectors = 1;
2099*4882a593Smuzhiyun }
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun /**
2102*4882a593Smuzhiyun  * e1000_request_msix - Initialize MSI-X interrupts
2103*4882a593Smuzhiyun  * @adapter: board private structure
2104*4882a593Smuzhiyun  *
2105*4882a593Smuzhiyun  * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2106*4882a593Smuzhiyun  * kernel.
2107*4882a593Smuzhiyun  **/
e1000_request_msix(struct e1000_adapter * adapter)2108*4882a593Smuzhiyun static int e1000_request_msix(struct e1000_adapter *adapter)
2109*4882a593Smuzhiyun {
2110*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2111*4882a593Smuzhiyun 	int err = 0, vector = 0;
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
2114*4882a593Smuzhiyun 		snprintf(adapter->rx_ring->name,
2115*4882a593Smuzhiyun 			 sizeof(adapter->rx_ring->name) - 1,
2116*4882a593Smuzhiyun 			 "%.14s-rx-0", netdev->name);
2117*4882a593Smuzhiyun 	else
2118*4882a593Smuzhiyun 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2119*4882a593Smuzhiyun 	err = request_irq(adapter->msix_entries[vector].vector,
2120*4882a593Smuzhiyun 			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2121*4882a593Smuzhiyun 			  netdev);
2122*4882a593Smuzhiyun 	if (err)
2123*4882a593Smuzhiyun 		return err;
2124*4882a593Smuzhiyun 	adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2125*4882a593Smuzhiyun 	    E1000_EITR_82574(vector);
2126*4882a593Smuzhiyun 	adapter->rx_ring->itr_val = adapter->itr;
2127*4882a593Smuzhiyun 	vector++;
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
2130*4882a593Smuzhiyun 		snprintf(adapter->tx_ring->name,
2131*4882a593Smuzhiyun 			 sizeof(adapter->tx_ring->name) - 1,
2132*4882a593Smuzhiyun 			 "%.14s-tx-0", netdev->name);
2133*4882a593Smuzhiyun 	else
2134*4882a593Smuzhiyun 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2135*4882a593Smuzhiyun 	err = request_irq(adapter->msix_entries[vector].vector,
2136*4882a593Smuzhiyun 			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2137*4882a593Smuzhiyun 			  netdev);
2138*4882a593Smuzhiyun 	if (err)
2139*4882a593Smuzhiyun 		return err;
2140*4882a593Smuzhiyun 	adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2141*4882a593Smuzhiyun 	    E1000_EITR_82574(vector);
2142*4882a593Smuzhiyun 	adapter->tx_ring->itr_val = adapter->itr;
2143*4882a593Smuzhiyun 	vector++;
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 	err = request_irq(adapter->msix_entries[vector].vector,
2146*4882a593Smuzhiyun 			  e1000_msix_other, 0, netdev->name, netdev);
2147*4882a593Smuzhiyun 	if (err)
2148*4882a593Smuzhiyun 		return err;
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	e1000_configure_msix(adapter);
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	return 0;
2153*4882a593Smuzhiyun }
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun /**
2156*4882a593Smuzhiyun  * e1000_request_irq - initialize interrupts
2157*4882a593Smuzhiyun  * @adapter: board private structure
2158*4882a593Smuzhiyun  *
2159*4882a593Smuzhiyun  * Attempts to configure interrupts using the best available
2160*4882a593Smuzhiyun  * capabilities of the hardware and kernel.
2161*4882a593Smuzhiyun  **/
e1000_request_irq(struct e1000_adapter * adapter)2162*4882a593Smuzhiyun static int e1000_request_irq(struct e1000_adapter *adapter)
2163*4882a593Smuzhiyun {
2164*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2165*4882a593Smuzhiyun 	int err;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	if (adapter->msix_entries) {
2168*4882a593Smuzhiyun 		err = e1000_request_msix(adapter);
2169*4882a593Smuzhiyun 		if (!err)
2170*4882a593Smuzhiyun 			return err;
2171*4882a593Smuzhiyun 		/* fall back to MSI */
2172*4882a593Smuzhiyun 		e1000e_reset_interrupt_capability(adapter);
2173*4882a593Smuzhiyun 		adapter->int_mode = E1000E_INT_MODE_MSI;
2174*4882a593Smuzhiyun 		e1000e_set_interrupt_capability(adapter);
2175*4882a593Smuzhiyun 	}
2176*4882a593Smuzhiyun 	if (adapter->flags & FLAG_MSI_ENABLED) {
2177*4882a593Smuzhiyun 		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2178*4882a593Smuzhiyun 				  netdev->name, netdev);
2179*4882a593Smuzhiyun 		if (!err)
2180*4882a593Smuzhiyun 			return err;
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 		/* fall back to legacy interrupt */
2183*4882a593Smuzhiyun 		e1000e_reset_interrupt_capability(adapter);
2184*4882a593Smuzhiyun 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
2185*4882a593Smuzhiyun 	}
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2188*4882a593Smuzhiyun 			  netdev->name, netdev);
2189*4882a593Smuzhiyun 	if (err)
2190*4882a593Smuzhiyun 		e_err("Unable to allocate interrupt, Error: %d\n", err);
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	return err;
2193*4882a593Smuzhiyun }
2194*4882a593Smuzhiyun 
e1000_free_irq(struct e1000_adapter * adapter)2195*4882a593Smuzhiyun static void e1000_free_irq(struct e1000_adapter *adapter)
2196*4882a593Smuzhiyun {
2197*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 	if (adapter->msix_entries) {
2200*4882a593Smuzhiyun 		int vector = 0;
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 		free_irq(adapter->msix_entries[vector].vector, netdev);
2203*4882a593Smuzhiyun 		vector++;
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 		free_irq(adapter->msix_entries[vector].vector, netdev);
2206*4882a593Smuzhiyun 		vector++;
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 		/* Other Causes interrupt vector */
2209*4882a593Smuzhiyun 		free_irq(adapter->msix_entries[vector].vector, netdev);
2210*4882a593Smuzhiyun 		return;
2211*4882a593Smuzhiyun 	}
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun 	free_irq(adapter->pdev->irq, netdev);
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun /**
2217*4882a593Smuzhiyun  * e1000_irq_disable - Mask off interrupt generation on the NIC
2218*4882a593Smuzhiyun  * @adapter: board private structure
2219*4882a593Smuzhiyun  **/
e1000_irq_disable(struct e1000_adapter * adapter)2220*4882a593Smuzhiyun static void e1000_irq_disable(struct e1000_adapter *adapter)
2221*4882a593Smuzhiyun {
2222*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun 	ew32(IMC, ~0);
2225*4882a593Smuzhiyun 	if (adapter->msix_entries)
2226*4882a593Smuzhiyun 		ew32(EIAC_82574, 0);
2227*4882a593Smuzhiyun 	e1e_flush();
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 	if (adapter->msix_entries) {
2230*4882a593Smuzhiyun 		int i;
2231*4882a593Smuzhiyun 
2232*4882a593Smuzhiyun 		for (i = 0; i < adapter->num_vectors; i++)
2233*4882a593Smuzhiyun 			synchronize_irq(adapter->msix_entries[i].vector);
2234*4882a593Smuzhiyun 	} else {
2235*4882a593Smuzhiyun 		synchronize_irq(adapter->pdev->irq);
2236*4882a593Smuzhiyun 	}
2237*4882a593Smuzhiyun }
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun /**
2240*4882a593Smuzhiyun  * e1000_irq_enable - Enable default interrupt generation settings
2241*4882a593Smuzhiyun  * @adapter: board private structure
2242*4882a593Smuzhiyun  **/
e1000_irq_enable(struct e1000_adapter * adapter)2243*4882a593Smuzhiyun static void e1000_irq_enable(struct e1000_adapter *adapter)
2244*4882a593Smuzhiyun {
2245*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	if (adapter->msix_entries) {
2248*4882a593Smuzhiyun 		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2249*4882a593Smuzhiyun 		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
2250*4882a593Smuzhiyun 		     IMS_OTHER_MASK);
2251*4882a593Smuzhiyun 	} else if (hw->mac.type >= e1000_pch_lpt) {
2252*4882a593Smuzhiyun 		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2253*4882a593Smuzhiyun 	} else {
2254*4882a593Smuzhiyun 		ew32(IMS, IMS_ENABLE_MASK);
2255*4882a593Smuzhiyun 	}
2256*4882a593Smuzhiyun 	e1e_flush();
2257*4882a593Smuzhiyun }
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun /**
2260*4882a593Smuzhiyun  * e1000e_get_hw_control - get control of the h/w from f/w
2261*4882a593Smuzhiyun  * @adapter: address of board private structure
2262*4882a593Smuzhiyun  *
2263*4882a593Smuzhiyun  * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2264*4882a593Smuzhiyun  * For ASF and Pass Through versions of f/w this means that
2265*4882a593Smuzhiyun  * the driver is loaded. For AMT version (only with 82573)
2266*4882a593Smuzhiyun  * of the f/w this means that the network i/f is open.
2267*4882a593Smuzhiyun  **/
e1000e_get_hw_control(struct e1000_adapter * adapter)2268*4882a593Smuzhiyun void e1000e_get_hw_control(struct e1000_adapter *adapter)
2269*4882a593Smuzhiyun {
2270*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2271*4882a593Smuzhiyun 	u32 ctrl_ext;
2272*4882a593Smuzhiyun 	u32 swsm;
2273*4882a593Smuzhiyun 
2274*4882a593Smuzhiyun 	/* Let firmware know the driver has taken over */
2275*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2276*4882a593Smuzhiyun 		swsm = er32(SWSM);
2277*4882a593Smuzhiyun 		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2278*4882a593Smuzhiyun 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2279*4882a593Smuzhiyun 		ctrl_ext = er32(CTRL_EXT);
2280*4882a593Smuzhiyun 		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2281*4882a593Smuzhiyun 	}
2282*4882a593Smuzhiyun }
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun /**
2285*4882a593Smuzhiyun  * e1000e_release_hw_control - release control of the h/w to f/w
2286*4882a593Smuzhiyun  * @adapter: address of board private structure
2287*4882a593Smuzhiyun  *
2288*4882a593Smuzhiyun  * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2289*4882a593Smuzhiyun  * For ASF and Pass Through versions of f/w this means that the
2290*4882a593Smuzhiyun  * driver is no longer loaded. For AMT version (only with 82573) i
2291*4882a593Smuzhiyun  * of the f/w this means that the network i/f is closed.
2292*4882a593Smuzhiyun  *
2293*4882a593Smuzhiyun  **/
e1000e_release_hw_control(struct e1000_adapter * adapter)2294*4882a593Smuzhiyun void e1000e_release_hw_control(struct e1000_adapter *adapter)
2295*4882a593Smuzhiyun {
2296*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2297*4882a593Smuzhiyun 	u32 ctrl_ext;
2298*4882a593Smuzhiyun 	u32 swsm;
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	/* Let firmware taken over control of h/w */
2301*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2302*4882a593Smuzhiyun 		swsm = er32(SWSM);
2303*4882a593Smuzhiyun 		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2304*4882a593Smuzhiyun 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2305*4882a593Smuzhiyun 		ctrl_ext = er32(CTRL_EXT);
2306*4882a593Smuzhiyun 		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2307*4882a593Smuzhiyun 	}
2308*4882a593Smuzhiyun }
2309*4882a593Smuzhiyun 
2310*4882a593Smuzhiyun /**
2311*4882a593Smuzhiyun  * e1000_alloc_ring_dma - allocate memory for a ring structure
2312*4882a593Smuzhiyun  * @adapter: board private structure
2313*4882a593Smuzhiyun  * @ring: ring struct for which to allocate dma
2314*4882a593Smuzhiyun  **/
e1000_alloc_ring_dma(struct e1000_adapter * adapter,struct e1000_ring * ring)2315*4882a593Smuzhiyun static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2316*4882a593Smuzhiyun 				struct e1000_ring *ring)
2317*4882a593Smuzhiyun {
2318*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2321*4882a593Smuzhiyun 					GFP_KERNEL);
2322*4882a593Smuzhiyun 	if (!ring->desc)
2323*4882a593Smuzhiyun 		return -ENOMEM;
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	return 0;
2326*4882a593Smuzhiyun }
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun /**
2329*4882a593Smuzhiyun  * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2330*4882a593Smuzhiyun  * @tx_ring: Tx descriptor ring
2331*4882a593Smuzhiyun  *
2332*4882a593Smuzhiyun  * Return 0 on success, negative on failure
2333*4882a593Smuzhiyun  **/
e1000e_setup_tx_resources(struct e1000_ring * tx_ring)2334*4882a593Smuzhiyun int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2335*4882a593Smuzhiyun {
2336*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
2337*4882a593Smuzhiyun 	int err = -ENOMEM, size;
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	size = sizeof(struct e1000_buffer) * tx_ring->count;
2340*4882a593Smuzhiyun 	tx_ring->buffer_info = vzalloc(size);
2341*4882a593Smuzhiyun 	if (!tx_ring->buffer_info)
2342*4882a593Smuzhiyun 		goto err;
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	/* round up to nearest 4K */
2345*4882a593Smuzhiyun 	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2346*4882a593Smuzhiyun 	tx_ring->size = ALIGN(tx_ring->size, 4096);
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	err = e1000_alloc_ring_dma(adapter, tx_ring);
2349*4882a593Smuzhiyun 	if (err)
2350*4882a593Smuzhiyun 		goto err;
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 	tx_ring->next_to_use = 0;
2353*4882a593Smuzhiyun 	tx_ring->next_to_clean = 0;
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 	return 0;
2356*4882a593Smuzhiyun err:
2357*4882a593Smuzhiyun 	vfree(tx_ring->buffer_info);
2358*4882a593Smuzhiyun 	e_err("Unable to allocate memory for the transmit descriptor ring\n");
2359*4882a593Smuzhiyun 	return err;
2360*4882a593Smuzhiyun }
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun /**
2363*4882a593Smuzhiyun  * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2364*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
2365*4882a593Smuzhiyun  *
2366*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
2367*4882a593Smuzhiyun  **/
e1000e_setup_rx_resources(struct e1000_ring * rx_ring)2368*4882a593Smuzhiyun int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2369*4882a593Smuzhiyun {
2370*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
2371*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
2372*4882a593Smuzhiyun 	int i, size, desc_len, err = -ENOMEM;
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 	size = sizeof(struct e1000_buffer) * rx_ring->count;
2375*4882a593Smuzhiyun 	rx_ring->buffer_info = vzalloc(size);
2376*4882a593Smuzhiyun 	if (!rx_ring->buffer_info)
2377*4882a593Smuzhiyun 		goto err;
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	for (i = 0; i < rx_ring->count; i++) {
2380*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
2381*4882a593Smuzhiyun 		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2382*4882a593Smuzhiyun 						sizeof(struct e1000_ps_page),
2383*4882a593Smuzhiyun 						GFP_KERNEL);
2384*4882a593Smuzhiyun 		if (!buffer_info->ps_pages)
2385*4882a593Smuzhiyun 			goto err_pages;
2386*4882a593Smuzhiyun 	}
2387*4882a593Smuzhiyun 
2388*4882a593Smuzhiyun 	desc_len = sizeof(union e1000_rx_desc_packet_split);
2389*4882a593Smuzhiyun 
2390*4882a593Smuzhiyun 	/* Round up to nearest 4K */
2391*4882a593Smuzhiyun 	rx_ring->size = rx_ring->count * desc_len;
2392*4882a593Smuzhiyun 	rx_ring->size = ALIGN(rx_ring->size, 4096);
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 	err = e1000_alloc_ring_dma(adapter, rx_ring);
2395*4882a593Smuzhiyun 	if (err)
2396*4882a593Smuzhiyun 		goto err_pages;
2397*4882a593Smuzhiyun 
2398*4882a593Smuzhiyun 	rx_ring->next_to_clean = 0;
2399*4882a593Smuzhiyun 	rx_ring->next_to_use = 0;
2400*4882a593Smuzhiyun 	rx_ring->rx_skb_top = NULL;
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 	return 0;
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun err_pages:
2405*4882a593Smuzhiyun 	for (i = 0; i < rx_ring->count; i++) {
2406*4882a593Smuzhiyun 		buffer_info = &rx_ring->buffer_info[i];
2407*4882a593Smuzhiyun 		kfree(buffer_info->ps_pages);
2408*4882a593Smuzhiyun 	}
2409*4882a593Smuzhiyun err:
2410*4882a593Smuzhiyun 	vfree(rx_ring->buffer_info);
2411*4882a593Smuzhiyun 	e_err("Unable to allocate memory for the receive descriptor ring\n");
2412*4882a593Smuzhiyun 	return err;
2413*4882a593Smuzhiyun }
2414*4882a593Smuzhiyun 
2415*4882a593Smuzhiyun /**
2416*4882a593Smuzhiyun  * e1000_clean_tx_ring - Free Tx Buffers
2417*4882a593Smuzhiyun  * @tx_ring: Tx descriptor ring
2418*4882a593Smuzhiyun  **/
e1000_clean_tx_ring(struct e1000_ring * tx_ring)2419*4882a593Smuzhiyun static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2420*4882a593Smuzhiyun {
2421*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
2422*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
2423*4882a593Smuzhiyun 	unsigned long size;
2424*4882a593Smuzhiyun 	unsigned int i;
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun 	for (i = 0; i < tx_ring->count; i++) {
2427*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
2428*4882a593Smuzhiyun 		e1000_put_txbuf(tx_ring, buffer_info, false);
2429*4882a593Smuzhiyun 	}
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	netdev_reset_queue(adapter->netdev);
2432*4882a593Smuzhiyun 	size = sizeof(struct e1000_buffer) * tx_ring->count;
2433*4882a593Smuzhiyun 	memset(tx_ring->buffer_info, 0, size);
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	memset(tx_ring->desc, 0, tx_ring->size);
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun 	tx_ring->next_to_use = 0;
2438*4882a593Smuzhiyun 	tx_ring->next_to_clean = 0;
2439*4882a593Smuzhiyun }
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun /**
2442*4882a593Smuzhiyun  * e1000e_free_tx_resources - Free Tx Resources per Queue
2443*4882a593Smuzhiyun  * @tx_ring: Tx descriptor ring
2444*4882a593Smuzhiyun  *
2445*4882a593Smuzhiyun  * Free all transmit software resources
2446*4882a593Smuzhiyun  **/
e1000e_free_tx_resources(struct e1000_ring * tx_ring)2447*4882a593Smuzhiyun void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2448*4882a593Smuzhiyun {
2449*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
2450*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 	e1000_clean_tx_ring(tx_ring);
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 	vfree(tx_ring->buffer_info);
2455*4882a593Smuzhiyun 	tx_ring->buffer_info = NULL;
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2458*4882a593Smuzhiyun 			  tx_ring->dma);
2459*4882a593Smuzhiyun 	tx_ring->desc = NULL;
2460*4882a593Smuzhiyun }
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun /**
2463*4882a593Smuzhiyun  * e1000e_free_rx_resources - Free Rx Resources
2464*4882a593Smuzhiyun  * @rx_ring: Rx descriptor ring
2465*4882a593Smuzhiyun  *
2466*4882a593Smuzhiyun  * Free all receive software resources
2467*4882a593Smuzhiyun  **/
e1000e_free_rx_resources(struct e1000_ring * rx_ring)2468*4882a593Smuzhiyun void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2469*4882a593Smuzhiyun {
2470*4882a593Smuzhiyun 	struct e1000_adapter *adapter = rx_ring->adapter;
2471*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
2472*4882a593Smuzhiyun 	int i;
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun 	e1000_clean_rx_ring(rx_ring);
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	for (i = 0; i < rx_ring->count; i++)
2477*4882a593Smuzhiyun 		kfree(rx_ring->buffer_info[i].ps_pages);
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	vfree(rx_ring->buffer_info);
2480*4882a593Smuzhiyun 	rx_ring->buffer_info = NULL;
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2483*4882a593Smuzhiyun 			  rx_ring->dma);
2484*4882a593Smuzhiyun 	rx_ring->desc = NULL;
2485*4882a593Smuzhiyun }
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun /**
2488*4882a593Smuzhiyun  * e1000_update_itr - update the dynamic ITR value based on statistics
2489*4882a593Smuzhiyun  * @itr_setting: current adapter->itr
2490*4882a593Smuzhiyun  * @packets: the number of packets during this measurement interval
2491*4882a593Smuzhiyun  * @bytes: the number of bytes during this measurement interval
2492*4882a593Smuzhiyun  *
2493*4882a593Smuzhiyun  *      Stores a new ITR value based on packets and byte
2494*4882a593Smuzhiyun  *      counts during the last interrupt.  The advantage of per interrupt
2495*4882a593Smuzhiyun  *      computation is faster updates and more accurate ITR for the current
2496*4882a593Smuzhiyun  *      traffic pattern.  Constants in this function were computed
2497*4882a593Smuzhiyun  *      based on theoretical maximum wire speed and thresholds were set based
2498*4882a593Smuzhiyun  *      on testing data as well as attempting to minimize response time
2499*4882a593Smuzhiyun  *      while increasing bulk throughput.  This functionality is controlled
2500*4882a593Smuzhiyun  *      by the InterruptThrottleRate module parameter.
2501*4882a593Smuzhiyun  **/
e1000_update_itr(u16 itr_setting,int packets,int bytes)2502*4882a593Smuzhiyun static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2503*4882a593Smuzhiyun {
2504*4882a593Smuzhiyun 	unsigned int retval = itr_setting;
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun 	if (packets == 0)
2507*4882a593Smuzhiyun 		return itr_setting;
2508*4882a593Smuzhiyun 
2509*4882a593Smuzhiyun 	switch (itr_setting) {
2510*4882a593Smuzhiyun 	case lowest_latency:
2511*4882a593Smuzhiyun 		/* handle TSO and jumbo frames */
2512*4882a593Smuzhiyun 		if (bytes / packets > 8000)
2513*4882a593Smuzhiyun 			retval = bulk_latency;
2514*4882a593Smuzhiyun 		else if ((packets < 5) && (bytes > 512))
2515*4882a593Smuzhiyun 			retval = low_latency;
2516*4882a593Smuzhiyun 		break;
2517*4882a593Smuzhiyun 	case low_latency:	/* 50 usec aka 20000 ints/s */
2518*4882a593Smuzhiyun 		if (bytes > 10000) {
2519*4882a593Smuzhiyun 			/* this if handles the TSO accounting */
2520*4882a593Smuzhiyun 			if (bytes / packets > 8000)
2521*4882a593Smuzhiyun 				retval = bulk_latency;
2522*4882a593Smuzhiyun 			else if ((packets < 10) || ((bytes / packets) > 1200))
2523*4882a593Smuzhiyun 				retval = bulk_latency;
2524*4882a593Smuzhiyun 			else if ((packets > 35))
2525*4882a593Smuzhiyun 				retval = lowest_latency;
2526*4882a593Smuzhiyun 		} else if (bytes / packets > 2000) {
2527*4882a593Smuzhiyun 			retval = bulk_latency;
2528*4882a593Smuzhiyun 		} else if (packets <= 2 && bytes < 512) {
2529*4882a593Smuzhiyun 			retval = lowest_latency;
2530*4882a593Smuzhiyun 		}
2531*4882a593Smuzhiyun 		break;
2532*4882a593Smuzhiyun 	case bulk_latency:	/* 250 usec aka 4000 ints/s */
2533*4882a593Smuzhiyun 		if (bytes > 25000) {
2534*4882a593Smuzhiyun 			if (packets > 35)
2535*4882a593Smuzhiyun 				retval = low_latency;
2536*4882a593Smuzhiyun 		} else if (bytes < 6000) {
2537*4882a593Smuzhiyun 			retval = low_latency;
2538*4882a593Smuzhiyun 		}
2539*4882a593Smuzhiyun 		break;
2540*4882a593Smuzhiyun 	}
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 	return retval;
2543*4882a593Smuzhiyun }
2544*4882a593Smuzhiyun 
e1000_set_itr(struct e1000_adapter * adapter)2545*4882a593Smuzhiyun static void e1000_set_itr(struct e1000_adapter *adapter)
2546*4882a593Smuzhiyun {
2547*4882a593Smuzhiyun 	u16 current_itr;
2548*4882a593Smuzhiyun 	u32 new_itr = adapter->itr;
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2551*4882a593Smuzhiyun 	if (adapter->link_speed != SPEED_1000) {
2552*4882a593Smuzhiyun 		current_itr = 0;
2553*4882a593Smuzhiyun 		new_itr = 4000;
2554*4882a593Smuzhiyun 		goto set_itr_now;
2555*4882a593Smuzhiyun 	}
2556*4882a593Smuzhiyun 
2557*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2558*4882a593Smuzhiyun 		new_itr = 0;
2559*4882a593Smuzhiyun 		goto set_itr_now;
2560*4882a593Smuzhiyun 	}
2561*4882a593Smuzhiyun 
2562*4882a593Smuzhiyun 	adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2563*4882a593Smuzhiyun 					   adapter->total_tx_packets,
2564*4882a593Smuzhiyun 					   adapter->total_tx_bytes);
2565*4882a593Smuzhiyun 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2566*4882a593Smuzhiyun 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2567*4882a593Smuzhiyun 		adapter->tx_itr = low_latency;
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2570*4882a593Smuzhiyun 					   adapter->total_rx_packets,
2571*4882a593Smuzhiyun 					   adapter->total_rx_bytes);
2572*4882a593Smuzhiyun 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2573*4882a593Smuzhiyun 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2574*4882a593Smuzhiyun 		adapter->rx_itr = low_latency;
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2577*4882a593Smuzhiyun 
2578*4882a593Smuzhiyun 	/* counts and packets in update_itr are dependent on these numbers */
2579*4882a593Smuzhiyun 	switch (current_itr) {
2580*4882a593Smuzhiyun 	case lowest_latency:
2581*4882a593Smuzhiyun 		new_itr = 70000;
2582*4882a593Smuzhiyun 		break;
2583*4882a593Smuzhiyun 	case low_latency:
2584*4882a593Smuzhiyun 		new_itr = 20000;	/* aka hwitr = ~200 */
2585*4882a593Smuzhiyun 		break;
2586*4882a593Smuzhiyun 	case bulk_latency:
2587*4882a593Smuzhiyun 		new_itr = 4000;
2588*4882a593Smuzhiyun 		break;
2589*4882a593Smuzhiyun 	default:
2590*4882a593Smuzhiyun 		break;
2591*4882a593Smuzhiyun 	}
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun set_itr_now:
2594*4882a593Smuzhiyun 	if (new_itr != adapter->itr) {
2595*4882a593Smuzhiyun 		/* this attempts to bias the interrupt rate towards Bulk
2596*4882a593Smuzhiyun 		 * by adding intermediate steps when interrupt rate is
2597*4882a593Smuzhiyun 		 * increasing
2598*4882a593Smuzhiyun 		 */
2599*4882a593Smuzhiyun 		new_itr = new_itr > adapter->itr ?
2600*4882a593Smuzhiyun 		    min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2601*4882a593Smuzhiyun 		adapter->itr = new_itr;
2602*4882a593Smuzhiyun 		adapter->rx_ring->itr_val = new_itr;
2603*4882a593Smuzhiyun 		if (adapter->msix_entries)
2604*4882a593Smuzhiyun 			adapter->rx_ring->set_itr = 1;
2605*4882a593Smuzhiyun 		else
2606*4882a593Smuzhiyun 			e1000e_write_itr(adapter, new_itr);
2607*4882a593Smuzhiyun 	}
2608*4882a593Smuzhiyun }
2609*4882a593Smuzhiyun 
2610*4882a593Smuzhiyun /**
2611*4882a593Smuzhiyun  * e1000e_write_itr - write the ITR value to the appropriate registers
2612*4882a593Smuzhiyun  * @adapter: address of board private structure
2613*4882a593Smuzhiyun  * @itr: new ITR value to program
2614*4882a593Smuzhiyun  *
2615*4882a593Smuzhiyun  * e1000e_write_itr determines if the adapter is in MSI-X mode
2616*4882a593Smuzhiyun  * and, if so, writes the EITR registers with the ITR value.
2617*4882a593Smuzhiyun  * Otherwise, it writes the ITR value into the ITR register.
2618*4882a593Smuzhiyun  **/
e1000e_write_itr(struct e1000_adapter * adapter,u32 itr)2619*4882a593Smuzhiyun void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2620*4882a593Smuzhiyun {
2621*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2622*4882a593Smuzhiyun 	u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun 	if (adapter->msix_entries) {
2625*4882a593Smuzhiyun 		int vector;
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 		for (vector = 0; vector < adapter->num_vectors; vector++)
2628*4882a593Smuzhiyun 			writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2629*4882a593Smuzhiyun 	} else {
2630*4882a593Smuzhiyun 		ew32(ITR, new_itr);
2631*4882a593Smuzhiyun 	}
2632*4882a593Smuzhiyun }
2633*4882a593Smuzhiyun 
2634*4882a593Smuzhiyun /**
2635*4882a593Smuzhiyun  * e1000_alloc_queues - Allocate memory for all rings
2636*4882a593Smuzhiyun  * @adapter: board private structure to initialize
2637*4882a593Smuzhiyun  **/
e1000_alloc_queues(struct e1000_adapter * adapter)2638*4882a593Smuzhiyun static int e1000_alloc_queues(struct e1000_adapter *adapter)
2639*4882a593Smuzhiyun {
2640*4882a593Smuzhiyun 	int size = sizeof(struct e1000_ring);
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2643*4882a593Smuzhiyun 	if (!adapter->tx_ring)
2644*4882a593Smuzhiyun 		goto err;
2645*4882a593Smuzhiyun 	adapter->tx_ring->count = adapter->tx_ring_count;
2646*4882a593Smuzhiyun 	adapter->tx_ring->adapter = adapter;
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2649*4882a593Smuzhiyun 	if (!adapter->rx_ring)
2650*4882a593Smuzhiyun 		goto err;
2651*4882a593Smuzhiyun 	adapter->rx_ring->count = adapter->rx_ring_count;
2652*4882a593Smuzhiyun 	adapter->rx_ring->adapter = adapter;
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun 	return 0;
2655*4882a593Smuzhiyun err:
2656*4882a593Smuzhiyun 	e_err("Unable to allocate memory for queues\n");
2657*4882a593Smuzhiyun 	kfree(adapter->rx_ring);
2658*4882a593Smuzhiyun 	kfree(adapter->tx_ring);
2659*4882a593Smuzhiyun 	return -ENOMEM;
2660*4882a593Smuzhiyun }
2661*4882a593Smuzhiyun 
2662*4882a593Smuzhiyun /**
2663*4882a593Smuzhiyun  * e1000e_poll - NAPI Rx polling callback
2664*4882a593Smuzhiyun  * @napi: struct associated with this polling callback
2665*4882a593Smuzhiyun  * @budget: number of packets driver is allowed to process this poll
2666*4882a593Smuzhiyun  **/
e1000e_poll(struct napi_struct * napi,int budget)2667*4882a593Smuzhiyun static int e1000e_poll(struct napi_struct *napi, int budget)
2668*4882a593Smuzhiyun {
2669*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2670*4882a593Smuzhiyun 						     napi);
2671*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2672*4882a593Smuzhiyun 	struct net_device *poll_dev = adapter->netdev;
2673*4882a593Smuzhiyun 	int tx_cleaned = 1, work_done = 0;
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 	adapter = netdev_priv(poll_dev);
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun 	if (!adapter->msix_entries ||
2678*4882a593Smuzhiyun 	    (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2679*4882a593Smuzhiyun 		tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 	adapter->clean_rx(adapter->rx_ring, &work_done, budget);
2682*4882a593Smuzhiyun 
2683*4882a593Smuzhiyun 	if (!tx_cleaned || work_done == budget)
2684*4882a593Smuzhiyun 		return budget;
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun 	/* Exit the polling mode, but don't re-enable interrupts if stack might
2687*4882a593Smuzhiyun 	 * poll us due to busy-polling
2688*4882a593Smuzhiyun 	 */
2689*4882a593Smuzhiyun 	if (likely(napi_complete_done(napi, work_done))) {
2690*4882a593Smuzhiyun 		if (adapter->itr_setting & 3)
2691*4882a593Smuzhiyun 			e1000_set_itr(adapter);
2692*4882a593Smuzhiyun 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
2693*4882a593Smuzhiyun 			if (adapter->msix_entries)
2694*4882a593Smuzhiyun 				ew32(IMS, adapter->rx_ring->ims_val);
2695*4882a593Smuzhiyun 			else
2696*4882a593Smuzhiyun 				e1000_irq_enable(adapter);
2697*4882a593Smuzhiyun 		}
2698*4882a593Smuzhiyun 	}
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun 	return work_done;
2701*4882a593Smuzhiyun }
2702*4882a593Smuzhiyun 
e1000_vlan_rx_add_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)2703*4882a593Smuzhiyun static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2704*4882a593Smuzhiyun 				 __always_unused __be16 proto, u16 vid)
2705*4882a593Smuzhiyun {
2706*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
2707*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2708*4882a593Smuzhiyun 	u32 vfta, index;
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 	/* don't update vlan cookie if already programmed */
2711*4882a593Smuzhiyun 	if ((adapter->hw.mng_cookie.status &
2712*4882a593Smuzhiyun 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2713*4882a593Smuzhiyun 	    (vid == adapter->mng_vlan_id))
2714*4882a593Smuzhiyun 		return 0;
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 	/* add VID to filter table */
2717*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2718*4882a593Smuzhiyun 		index = (vid >> 5) & 0x7F;
2719*4882a593Smuzhiyun 		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2720*4882a593Smuzhiyun 		vfta |= BIT((vid & 0x1F));
2721*4882a593Smuzhiyun 		hw->mac.ops.write_vfta(hw, index, vfta);
2722*4882a593Smuzhiyun 	}
2723*4882a593Smuzhiyun 
2724*4882a593Smuzhiyun 	set_bit(vid, adapter->active_vlans);
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 	return 0;
2727*4882a593Smuzhiyun }
2728*4882a593Smuzhiyun 
e1000_vlan_rx_kill_vid(struct net_device * netdev,__always_unused __be16 proto,u16 vid)2729*4882a593Smuzhiyun static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2730*4882a593Smuzhiyun 				  __always_unused __be16 proto, u16 vid)
2731*4882a593Smuzhiyun {
2732*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
2733*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2734*4882a593Smuzhiyun 	u32 vfta, index;
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun 	if ((adapter->hw.mng_cookie.status &
2737*4882a593Smuzhiyun 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2738*4882a593Smuzhiyun 	    (vid == adapter->mng_vlan_id)) {
2739*4882a593Smuzhiyun 		/* release control to f/w */
2740*4882a593Smuzhiyun 		e1000e_release_hw_control(adapter);
2741*4882a593Smuzhiyun 		return 0;
2742*4882a593Smuzhiyun 	}
2743*4882a593Smuzhiyun 
2744*4882a593Smuzhiyun 	/* remove VID from filter table */
2745*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2746*4882a593Smuzhiyun 		index = (vid >> 5) & 0x7F;
2747*4882a593Smuzhiyun 		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2748*4882a593Smuzhiyun 		vfta &= ~BIT((vid & 0x1F));
2749*4882a593Smuzhiyun 		hw->mac.ops.write_vfta(hw, index, vfta);
2750*4882a593Smuzhiyun 	}
2751*4882a593Smuzhiyun 
2752*4882a593Smuzhiyun 	clear_bit(vid, adapter->active_vlans);
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun 	return 0;
2755*4882a593Smuzhiyun }
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun /**
2758*4882a593Smuzhiyun  * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2759*4882a593Smuzhiyun  * @adapter: board private structure to initialize
2760*4882a593Smuzhiyun  **/
e1000e_vlan_filter_disable(struct e1000_adapter * adapter)2761*4882a593Smuzhiyun static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2762*4882a593Smuzhiyun {
2763*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2764*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2765*4882a593Smuzhiyun 	u32 rctl;
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2768*4882a593Smuzhiyun 		/* disable VLAN receive filtering */
2769*4882a593Smuzhiyun 		rctl = er32(RCTL);
2770*4882a593Smuzhiyun 		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2771*4882a593Smuzhiyun 		ew32(RCTL, rctl);
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun 		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2774*4882a593Smuzhiyun 			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2775*4882a593Smuzhiyun 					       adapter->mng_vlan_id);
2776*4882a593Smuzhiyun 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2777*4882a593Smuzhiyun 		}
2778*4882a593Smuzhiyun 	}
2779*4882a593Smuzhiyun }
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun /**
2782*4882a593Smuzhiyun  * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2783*4882a593Smuzhiyun  * @adapter: board private structure to initialize
2784*4882a593Smuzhiyun  **/
e1000e_vlan_filter_enable(struct e1000_adapter * adapter)2785*4882a593Smuzhiyun static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2786*4882a593Smuzhiyun {
2787*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2788*4882a593Smuzhiyun 	u32 rctl;
2789*4882a593Smuzhiyun 
2790*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2791*4882a593Smuzhiyun 		/* enable VLAN receive filtering */
2792*4882a593Smuzhiyun 		rctl = er32(RCTL);
2793*4882a593Smuzhiyun 		rctl |= E1000_RCTL_VFE;
2794*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_CFIEN;
2795*4882a593Smuzhiyun 		ew32(RCTL, rctl);
2796*4882a593Smuzhiyun 	}
2797*4882a593Smuzhiyun }
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun /**
2800*4882a593Smuzhiyun  * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2801*4882a593Smuzhiyun  * @adapter: board private structure to initialize
2802*4882a593Smuzhiyun  **/
e1000e_vlan_strip_disable(struct e1000_adapter * adapter)2803*4882a593Smuzhiyun static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2804*4882a593Smuzhiyun {
2805*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2806*4882a593Smuzhiyun 	u32 ctrl;
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun 	/* disable VLAN tag insert/strip */
2809*4882a593Smuzhiyun 	ctrl = er32(CTRL);
2810*4882a593Smuzhiyun 	ctrl &= ~E1000_CTRL_VME;
2811*4882a593Smuzhiyun 	ew32(CTRL, ctrl);
2812*4882a593Smuzhiyun }
2813*4882a593Smuzhiyun 
2814*4882a593Smuzhiyun /**
2815*4882a593Smuzhiyun  * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2816*4882a593Smuzhiyun  * @adapter: board private structure to initialize
2817*4882a593Smuzhiyun  **/
e1000e_vlan_strip_enable(struct e1000_adapter * adapter)2818*4882a593Smuzhiyun static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2819*4882a593Smuzhiyun {
2820*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2821*4882a593Smuzhiyun 	u32 ctrl;
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun 	/* enable VLAN tag insert/strip */
2824*4882a593Smuzhiyun 	ctrl = er32(CTRL);
2825*4882a593Smuzhiyun 	ctrl |= E1000_CTRL_VME;
2826*4882a593Smuzhiyun 	ew32(CTRL, ctrl);
2827*4882a593Smuzhiyun }
2828*4882a593Smuzhiyun 
e1000_update_mng_vlan(struct e1000_adapter * adapter)2829*4882a593Smuzhiyun static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2830*4882a593Smuzhiyun {
2831*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2832*4882a593Smuzhiyun 	u16 vid = adapter->hw.mng_cookie.vlan_id;
2833*4882a593Smuzhiyun 	u16 old_vid = adapter->mng_vlan_id;
2834*4882a593Smuzhiyun 
2835*4882a593Smuzhiyun 	if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2836*4882a593Smuzhiyun 		e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
2837*4882a593Smuzhiyun 		adapter->mng_vlan_id = vid;
2838*4882a593Smuzhiyun 	}
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun 	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2841*4882a593Smuzhiyun 		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
2842*4882a593Smuzhiyun }
2843*4882a593Smuzhiyun 
e1000_restore_vlan(struct e1000_adapter * adapter)2844*4882a593Smuzhiyun static void e1000_restore_vlan(struct e1000_adapter *adapter)
2845*4882a593Smuzhiyun {
2846*4882a593Smuzhiyun 	u16 vid;
2847*4882a593Smuzhiyun 
2848*4882a593Smuzhiyun 	e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2851*4882a593Smuzhiyun 	    e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2852*4882a593Smuzhiyun }
2853*4882a593Smuzhiyun 
e1000_init_manageability_pt(struct e1000_adapter * adapter)2854*4882a593Smuzhiyun static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2855*4882a593Smuzhiyun {
2856*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2857*4882a593Smuzhiyun 	u32 manc, manc2h, mdef, i, j;
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2860*4882a593Smuzhiyun 		return;
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	manc = er32(MANC);
2863*4882a593Smuzhiyun 
2864*4882a593Smuzhiyun 	/* enable receiving management packets to the host. this will probably
2865*4882a593Smuzhiyun 	 * generate destination unreachable messages from the host OS, but
2866*4882a593Smuzhiyun 	 * the packets will be handled on SMBUS
2867*4882a593Smuzhiyun 	 */
2868*4882a593Smuzhiyun 	manc |= E1000_MANC_EN_MNG2HOST;
2869*4882a593Smuzhiyun 	manc2h = er32(MANC2H);
2870*4882a593Smuzhiyun 
2871*4882a593Smuzhiyun 	switch (hw->mac.type) {
2872*4882a593Smuzhiyun 	default:
2873*4882a593Smuzhiyun 		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2874*4882a593Smuzhiyun 		break;
2875*4882a593Smuzhiyun 	case e1000_82574:
2876*4882a593Smuzhiyun 	case e1000_82583:
2877*4882a593Smuzhiyun 		/* Check if IPMI pass-through decision filter already exists;
2878*4882a593Smuzhiyun 		 * if so, enable it.
2879*4882a593Smuzhiyun 		 */
2880*4882a593Smuzhiyun 		for (i = 0, j = 0; i < 8; i++) {
2881*4882a593Smuzhiyun 			mdef = er32(MDEF(i));
2882*4882a593Smuzhiyun 
2883*4882a593Smuzhiyun 			/* Ignore filters with anything other than IPMI ports */
2884*4882a593Smuzhiyun 			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2885*4882a593Smuzhiyun 				continue;
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 			/* Enable this decision filter in MANC2H */
2888*4882a593Smuzhiyun 			if (mdef)
2889*4882a593Smuzhiyun 				manc2h |= BIT(i);
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 			j |= mdef;
2892*4882a593Smuzhiyun 		}
2893*4882a593Smuzhiyun 
2894*4882a593Smuzhiyun 		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2895*4882a593Smuzhiyun 			break;
2896*4882a593Smuzhiyun 
2897*4882a593Smuzhiyun 		/* Create new decision filter in an empty filter */
2898*4882a593Smuzhiyun 		for (i = 0, j = 0; i < 8; i++)
2899*4882a593Smuzhiyun 			if (er32(MDEF(i)) == 0) {
2900*4882a593Smuzhiyun 				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2901*4882a593Smuzhiyun 					       E1000_MDEF_PORT_664));
2902*4882a593Smuzhiyun 				manc2h |= BIT(1);
2903*4882a593Smuzhiyun 				j++;
2904*4882a593Smuzhiyun 				break;
2905*4882a593Smuzhiyun 			}
2906*4882a593Smuzhiyun 
2907*4882a593Smuzhiyun 		if (!j)
2908*4882a593Smuzhiyun 			e_warn("Unable to create IPMI pass-through filter\n");
2909*4882a593Smuzhiyun 		break;
2910*4882a593Smuzhiyun 	}
2911*4882a593Smuzhiyun 
2912*4882a593Smuzhiyun 	ew32(MANC2H, manc2h);
2913*4882a593Smuzhiyun 	ew32(MANC, manc);
2914*4882a593Smuzhiyun }
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun /**
2917*4882a593Smuzhiyun  * e1000_configure_tx - Configure Transmit Unit after Reset
2918*4882a593Smuzhiyun  * @adapter: board private structure
2919*4882a593Smuzhiyun  *
2920*4882a593Smuzhiyun  * Configure the Tx unit of the MAC after a reset.
2921*4882a593Smuzhiyun  **/
e1000_configure_tx(struct e1000_adapter * adapter)2922*4882a593Smuzhiyun static void e1000_configure_tx(struct e1000_adapter *adapter)
2923*4882a593Smuzhiyun {
2924*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
2925*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
2926*4882a593Smuzhiyun 	u64 tdba;
2927*4882a593Smuzhiyun 	u32 tdlen, tctl, tarc;
2928*4882a593Smuzhiyun 
2929*4882a593Smuzhiyun 	/* Setup the HW Tx Head and Tail descriptor pointers */
2930*4882a593Smuzhiyun 	tdba = tx_ring->dma;
2931*4882a593Smuzhiyun 	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2932*4882a593Smuzhiyun 	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2933*4882a593Smuzhiyun 	ew32(TDBAH(0), (tdba >> 32));
2934*4882a593Smuzhiyun 	ew32(TDLEN(0), tdlen);
2935*4882a593Smuzhiyun 	ew32(TDH(0), 0);
2936*4882a593Smuzhiyun 	ew32(TDT(0), 0);
2937*4882a593Smuzhiyun 	tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2938*4882a593Smuzhiyun 	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun 	writel(0, tx_ring->head);
2941*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2942*4882a593Smuzhiyun 		e1000e_update_tdt_wa(tx_ring, 0);
2943*4882a593Smuzhiyun 	else
2944*4882a593Smuzhiyun 		writel(0, tx_ring->tail);
2945*4882a593Smuzhiyun 
2946*4882a593Smuzhiyun 	/* Set the Tx Interrupt Delay register */
2947*4882a593Smuzhiyun 	ew32(TIDV, adapter->tx_int_delay);
2948*4882a593Smuzhiyun 	/* Tx irq moderation */
2949*4882a593Smuzhiyun 	ew32(TADV, adapter->tx_abs_int_delay);
2950*4882a593Smuzhiyun 
2951*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_DMA_BURST) {
2952*4882a593Smuzhiyun 		u32 txdctl = er32(TXDCTL(0));
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun 		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2955*4882a593Smuzhiyun 			    E1000_TXDCTL_WTHRESH);
2956*4882a593Smuzhiyun 		/* set up some performance related parameters to encourage the
2957*4882a593Smuzhiyun 		 * hardware to use the bus more efficiently in bursts, depends
2958*4882a593Smuzhiyun 		 * on the tx_int_delay to be enabled,
2959*4882a593Smuzhiyun 		 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2960*4882a593Smuzhiyun 		 * hthresh = 1 ==> prefetch when one or more available
2961*4882a593Smuzhiyun 		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2962*4882a593Smuzhiyun 		 * BEWARE: this seems to work but should be considered first if
2963*4882a593Smuzhiyun 		 * there are Tx hangs or other Tx related bugs
2964*4882a593Smuzhiyun 		 */
2965*4882a593Smuzhiyun 		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2966*4882a593Smuzhiyun 		ew32(TXDCTL(0), txdctl);
2967*4882a593Smuzhiyun 	}
2968*4882a593Smuzhiyun 	/* erratum work around: set txdctl the same for both queues */
2969*4882a593Smuzhiyun 	ew32(TXDCTL(1), er32(TXDCTL(0)));
2970*4882a593Smuzhiyun 
2971*4882a593Smuzhiyun 	/* Program the Transmit Control Register */
2972*4882a593Smuzhiyun 	tctl = er32(TCTL);
2973*4882a593Smuzhiyun 	tctl &= ~E1000_TCTL_CT;
2974*4882a593Smuzhiyun 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2975*4882a593Smuzhiyun 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun 	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2978*4882a593Smuzhiyun 		tarc = er32(TARC(0));
2979*4882a593Smuzhiyun 		/* set the speed mode bit, we'll clear it if we're not at
2980*4882a593Smuzhiyun 		 * gigabit link later
2981*4882a593Smuzhiyun 		 */
2982*4882a593Smuzhiyun #define SPEED_MODE_BIT BIT(21)
2983*4882a593Smuzhiyun 		tarc |= SPEED_MODE_BIT;
2984*4882a593Smuzhiyun 		ew32(TARC(0), tarc);
2985*4882a593Smuzhiyun 	}
2986*4882a593Smuzhiyun 
2987*4882a593Smuzhiyun 	/* errata: program both queues to unweighted RR */
2988*4882a593Smuzhiyun 	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2989*4882a593Smuzhiyun 		tarc = er32(TARC(0));
2990*4882a593Smuzhiyun 		tarc |= 1;
2991*4882a593Smuzhiyun 		ew32(TARC(0), tarc);
2992*4882a593Smuzhiyun 		tarc = er32(TARC(1));
2993*4882a593Smuzhiyun 		tarc |= 1;
2994*4882a593Smuzhiyun 		ew32(TARC(1), tarc);
2995*4882a593Smuzhiyun 	}
2996*4882a593Smuzhiyun 
2997*4882a593Smuzhiyun 	/* Setup Transmit Descriptor Settings for eop descriptor */
2998*4882a593Smuzhiyun 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2999*4882a593Smuzhiyun 
3000*4882a593Smuzhiyun 	/* only set IDE if we are delaying interrupts using the timers */
3001*4882a593Smuzhiyun 	if (adapter->tx_int_delay)
3002*4882a593Smuzhiyun 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun 	/* enable Report Status bit */
3005*4882a593Smuzhiyun 	adapter->txd_cmd |= E1000_TXD_CMD_RS;
3006*4882a593Smuzhiyun 
3007*4882a593Smuzhiyun 	ew32(TCTL, tctl);
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun 	hw->mac.ops.config_collision_dist(hw);
3010*4882a593Smuzhiyun 
3011*4882a593Smuzhiyun 	/* SPT and KBL Si errata workaround to avoid data corruption */
3012*4882a593Smuzhiyun 	if (hw->mac.type == e1000_pch_spt) {
3013*4882a593Smuzhiyun 		u32 reg_val;
3014*4882a593Smuzhiyun 
3015*4882a593Smuzhiyun 		reg_val = er32(IOSFPC);
3016*4882a593Smuzhiyun 		reg_val |= E1000_RCTL_RDMTS_HEX;
3017*4882a593Smuzhiyun 		ew32(IOSFPC, reg_val);
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 		reg_val = er32(TARC(0));
3020*4882a593Smuzhiyun 		/* SPT and KBL Si errata workaround to avoid Tx hang.
3021*4882a593Smuzhiyun 		 * Dropping the number of outstanding requests from
3022*4882a593Smuzhiyun 		 * 3 to 2 in order to avoid a buffer overrun.
3023*4882a593Smuzhiyun 		 */
3024*4882a593Smuzhiyun 		reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
3025*4882a593Smuzhiyun 		reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
3026*4882a593Smuzhiyun 		ew32(TARC(0), reg_val);
3027*4882a593Smuzhiyun 	}
3028*4882a593Smuzhiyun }
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
3031*4882a593Smuzhiyun 			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun /**
3034*4882a593Smuzhiyun  * e1000_setup_rctl - configure the receive control registers
3035*4882a593Smuzhiyun  * @adapter: Board private structure
3036*4882a593Smuzhiyun  **/
e1000_setup_rctl(struct e1000_adapter * adapter)3037*4882a593Smuzhiyun static void e1000_setup_rctl(struct e1000_adapter *adapter)
3038*4882a593Smuzhiyun {
3039*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3040*4882a593Smuzhiyun 	u32 rctl, rfctl;
3041*4882a593Smuzhiyun 	u32 pages = 0;
3042*4882a593Smuzhiyun 
3043*4882a593Smuzhiyun 	/* Workaround Si errata on PCHx - configure jumbo frame flow.
3044*4882a593Smuzhiyun 	 * If jumbo frames not set, program related MAC/PHY registers
3045*4882a593Smuzhiyun 	 * to h/w defaults
3046*4882a593Smuzhiyun 	 */
3047*4882a593Smuzhiyun 	if (hw->mac.type >= e1000_pch2lan) {
3048*4882a593Smuzhiyun 		s32 ret_val;
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 		if (adapter->netdev->mtu > ETH_DATA_LEN)
3051*4882a593Smuzhiyun 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
3052*4882a593Smuzhiyun 		else
3053*4882a593Smuzhiyun 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 		if (ret_val)
3056*4882a593Smuzhiyun 			e_dbg("failed to enable|disable jumbo frame workaround mode\n");
3057*4882a593Smuzhiyun 	}
3058*4882a593Smuzhiyun 
3059*4882a593Smuzhiyun 	/* Program MC offset vector base */
3060*4882a593Smuzhiyun 	rctl = er32(RCTL);
3061*4882a593Smuzhiyun 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3062*4882a593Smuzhiyun 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3063*4882a593Smuzhiyun 	    E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3064*4882a593Smuzhiyun 	    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3065*4882a593Smuzhiyun 
3066*4882a593Smuzhiyun 	/* Do not Store bad packets */
3067*4882a593Smuzhiyun 	rctl &= ~E1000_RCTL_SBP;
3068*4882a593Smuzhiyun 
3069*4882a593Smuzhiyun 	/* Enable Long Packet receive */
3070*4882a593Smuzhiyun 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
3071*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_LPE;
3072*4882a593Smuzhiyun 	else
3073*4882a593Smuzhiyun 		rctl |= E1000_RCTL_LPE;
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 	/* Some systems expect that the CRC is included in SMBUS traffic. The
3076*4882a593Smuzhiyun 	 * hardware strips the CRC before sending to both SMBUS (BMC) and to
3077*4882a593Smuzhiyun 	 * host memory when this is enabled
3078*4882a593Smuzhiyun 	 */
3079*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3080*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SECRC;
3081*4882a593Smuzhiyun 
3082*4882a593Smuzhiyun 	/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
3083*4882a593Smuzhiyun 	if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3084*4882a593Smuzhiyun 		u16 phy_data;
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun 		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3087*4882a593Smuzhiyun 		phy_data &= 0xfff8;
3088*4882a593Smuzhiyun 		phy_data |= BIT(2);
3089*4882a593Smuzhiyun 		e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3090*4882a593Smuzhiyun 
3091*4882a593Smuzhiyun 		e1e_rphy(hw, 22, &phy_data);
3092*4882a593Smuzhiyun 		phy_data &= 0x0fff;
3093*4882a593Smuzhiyun 		phy_data |= BIT(14);
3094*4882a593Smuzhiyun 		e1e_wphy(hw, 0x10, 0x2823);
3095*4882a593Smuzhiyun 		e1e_wphy(hw, 0x11, 0x0003);
3096*4882a593Smuzhiyun 		e1e_wphy(hw, 22, phy_data);
3097*4882a593Smuzhiyun 	}
3098*4882a593Smuzhiyun 
3099*4882a593Smuzhiyun 	/* Setup buffer sizes */
3100*4882a593Smuzhiyun 	rctl &= ~E1000_RCTL_SZ_4096;
3101*4882a593Smuzhiyun 	rctl |= E1000_RCTL_BSEX;
3102*4882a593Smuzhiyun 	switch (adapter->rx_buffer_len) {
3103*4882a593Smuzhiyun 	case 2048:
3104*4882a593Smuzhiyun 	default:
3105*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_2048;
3106*4882a593Smuzhiyun 		rctl &= ~E1000_RCTL_BSEX;
3107*4882a593Smuzhiyun 		break;
3108*4882a593Smuzhiyun 	case 4096:
3109*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_4096;
3110*4882a593Smuzhiyun 		break;
3111*4882a593Smuzhiyun 	case 8192:
3112*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_8192;
3113*4882a593Smuzhiyun 		break;
3114*4882a593Smuzhiyun 	case 16384:
3115*4882a593Smuzhiyun 		rctl |= E1000_RCTL_SZ_16384;
3116*4882a593Smuzhiyun 		break;
3117*4882a593Smuzhiyun 	}
3118*4882a593Smuzhiyun 
3119*4882a593Smuzhiyun 	/* Enable Extended Status in all Receive Descriptors */
3120*4882a593Smuzhiyun 	rfctl = er32(RFCTL);
3121*4882a593Smuzhiyun 	rfctl |= E1000_RFCTL_EXTEN;
3122*4882a593Smuzhiyun 	ew32(RFCTL, rfctl);
3123*4882a593Smuzhiyun 
3124*4882a593Smuzhiyun 	/* 82571 and greater support packet-split where the protocol
3125*4882a593Smuzhiyun 	 * header is placed in skb->data and the packet data is
3126*4882a593Smuzhiyun 	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3127*4882a593Smuzhiyun 	 * In the case of a non-split, skb->data is linearly filled,
3128*4882a593Smuzhiyun 	 * followed by the page buffers.  Therefore, skb->data is
3129*4882a593Smuzhiyun 	 * sized to hold the largest protocol header.
3130*4882a593Smuzhiyun 	 *
3131*4882a593Smuzhiyun 	 * allocations using alloc_page take too long for regular MTU
3132*4882a593Smuzhiyun 	 * so only enable packet split for jumbo frames
3133*4882a593Smuzhiyun 	 *
3134*4882a593Smuzhiyun 	 * Using pages when the page size is greater than 16k wastes
3135*4882a593Smuzhiyun 	 * a lot of memory, since we allocate 3 pages at all times
3136*4882a593Smuzhiyun 	 * per packet.
3137*4882a593Smuzhiyun 	 */
3138*4882a593Smuzhiyun 	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3139*4882a593Smuzhiyun 	if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
3140*4882a593Smuzhiyun 		adapter->rx_ps_pages = pages;
3141*4882a593Smuzhiyun 	else
3142*4882a593Smuzhiyun 		adapter->rx_ps_pages = 0;
3143*4882a593Smuzhiyun 
3144*4882a593Smuzhiyun 	if (adapter->rx_ps_pages) {
3145*4882a593Smuzhiyun 		u32 psrctl = 0;
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun 		/* Enable Packet split descriptors */
3148*4882a593Smuzhiyun 		rctl |= E1000_RCTL_DTYP_PS;
3149*4882a593Smuzhiyun 
3150*4882a593Smuzhiyun 		psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3151*4882a593Smuzhiyun 
3152*4882a593Smuzhiyun 		switch (adapter->rx_ps_pages) {
3153*4882a593Smuzhiyun 		case 3:
3154*4882a593Smuzhiyun 			psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3155*4882a593Smuzhiyun 			fallthrough;
3156*4882a593Smuzhiyun 		case 2:
3157*4882a593Smuzhiyun 			psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3158*4882a593Smuzhiyun 			fallthrough;
3159*4882a593Smuzhiyun 		case 1:
3160*4882a593Smuzhiyun 			psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3161*4882a593Smuzhiyun 			break;
3162*4882a593Smuzhiyun 		}
3163*4882a593Smuzhiyun 
3164*4882a593Smuzhiyun 		ew32(PSRCTL, psrctl);
3165*4882a593Smuzhiyun 	}
3166*4882a593Smuzhiyun 
3167*4882a593Smuzhiyun 	/* This is useful for sniffing bad packets. */
3168*4882a593Smuzhiyun 	if (adapter->netdev->features & NETIF_F_RXALL) {
3169*4882a593Smuzhiyun 		/* UPE and MPE will be handled by normal PROMISC logic
3170*4882a593Smuzhiyun 		 * in e1000e_set_rx_mode
3171*4882a593Smuzhiyun 		 */
3172*4882a593Smuzhiyun 		rctl |= (E1000_RCTL_SBP |	/* Receive bad packets */
3173*4882a593Smuzhiyun 			 E1000_RCTL_BAM |	/* RX All Bcast Pkts */
3174*4882a593Smuzhiyun 			 E1000_RCTL_PMCF);	/* RX All MAC Ctrl Pkts */
3175*4882a593Smuzhiyun 
3176*4882a593Smuzhiyun 		rctl &= ~(E1000_RCTL_VFE |	/* Disable VLAN filter */
3177*4882a593Smuzhiyun 			  E1000_RCTL_DPF |	/* Allow filtered pause */
3178*4882a593Smuzhiyun 			  E1000_RCTL_CFIEN);	/* Dis VLAN CFIEN Filter */
3179*4882a593Smuzhiyun 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3180*4882a593Smuzhiyun 		 * and that breaks VLANs.
3181*4882a593Smuzhiyun 		 */
3182*4882a593Smuzhiyun 	}
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 	ew32(RCTL, rctl);
3185*4882a593Smuzhiyun 	/* just started the receive unit, no need to restart */
3186*4882a593Smuzhiyun 	adapter->flags &= ~FLAG_RESTART_NOW;
3187*4882a593Smuzhiyun }
3188*4882a593Smuzhiyun 
3189*4882a593Smuzhiyun /**
3190*4882a593Smuzhiyun  * e1000_configure_rx - Configure Receive Unit after Reset
3191*4882a593Smuzhiyun  * @adapter: board private structure
3192*4882a593Smuzhiyun  *
3193*4882a593Smuzhiyun  * Configure the Rx unit of the MAC after a reset.
3194*4882a593Smuzhiyun  **/
e1000_configure_rx(struct e1000_adapter * adapter)3195*4882a593Smuzhiyun static void e1000_configure_rx(struct e1000_adapter *adapter)
3196*4882a593Smuzhiyun {
3197*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3198*4882a593Smuzhiyun 	struct e1000_ring *rx_ring = adapter->rx_ring;
3199*4882a593Smuzhiyun 	u64 rdba;
3200*4882a593Smuzhiyun 	u32 rdlen, rctl, rxcsum, ctrl_ext;
3201*4882a593Smuzhiyun 
3202*4882a593Smuzhiyun 	if (adapter->rx_ps_pages) {
3203*4882a593Smuzhiyun 		/* this is a 32 byte descriptor */
3204*4882a593Smuzhiyun 		rdlen = rx_ring->count *
3205*4882a593Smuzhiyun 		    sizeof(union e1000_rx_desc_packet_split);
3206*4882a593Smuzhiyun 		adapter->clean_rx = e1000_clean_rx_irq_ps;
3207*4882a593Smuzhiyun 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3208*4882a593Smuzhiyun 	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3209*4882a593Smuzhiyun 		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3210*4882a593Smuzhiyun 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3211*4882a593Smuzhiyun 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3212*4882a593Smuzhiyun 	} else {
3213*4882a593Smuzhiyun 		rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3214*4882a593Smuzhiyun 		adapter->clean_rx = e1000_clean_rx_irq;
3215*4882a593Smuzhiyun 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3216*4882a593Smuzhiyun 	}
3217*4882a593Smuzhiyun 
3218*4882a593Smuzhiyun 	/* disable receives while setting up the descriptors */
3219*4882a593Smuzhiyun 	rctl = er32(RCTL);
3220*4882a593Smuzhiyun 	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3221*4882a593Smuzhiyun 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
3222*4882a593Smuzhiyun 	e1e_flush();
3223*4882a593Smuzhiyun 	usleep_range(10000, 11000);
3224*4882a593Smuzhiyun 
3225*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_DMA_BURST) {
3226*4882a593Smuzhiyun 		/* set the writeback threshold (only takes effect if the RDTR
3227*4882a593Smuzhiyun 		 * is set). set GRAN=1 and write back up to 0x4 worth, and
3228*4882a593Smuzhiyun 		 * enable prefetching of 0x20 Rx descriptors
3229*4882a593Smuzhiyun 		 * granularity = 01
3230*4882a593Smuzhiyun 		 * wthresh = 04,
3231*4882a593Smuzhiyun 		 * hthresh = 04,
3232*4882a593Smuzhiyun 		 * pthresh = 0x20
3233*4882a593Smuzhiyun 		 */
3234*4882a593Smuzhiyun 		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3235*4882a593Smuzhiyun 		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3236*4882a593Smuzhiyun 	}
3237*4882a593Smuzhiyun 
3238*4882a593Smuzhiyun 	/* set the Receive Delay Timer Register */
3239*4882a593Smuzhiyun 	ew32(RDTR, adapter->rx_int_delay);
3240*4882a593Smuzhiyun 
3241*4882a593Smuzhiyun 	/* irq moderation */
3242*4882a593Smuzhiyun 	ew32(RADV, adapter->rx_abs_int_delay);
3243*4882a593Smuzhiyun 	if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3244*4882a593Smuzhiyun 		e1000e_write_itr(adapter, adapter->itr);
3245*4882a593Smuzhiyun 
3246*4882a593Smuzhiyun 	ctrl_ext = er32(CTRL_EXT);
3247*4882a593Smuzhiyun 	/* Auto-Mask interrupts upon ICR access */
3248*4882a593Smuzhiyun 	ctrl_ext |= E1000_CTRL_EXT_IAME;
3249*4882a593Smuzhiyun 	ew32(IAM, 0xffffffff);
3250*4882a593Smuzhiyun 	ew32(CTRL_EXT, ctrl_ext);
3251*4882a593Smuzhiyun 	e1e_flush();
3252*4882a593Smuzhiyun 
3253*4882a593Smuzhiyun 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
3254*4882a593Smuzhiyun 	 * the Base and Length of the Rx Descriptor Ring
3255*4882a593Smuzhiyun 	 */
3256*4882a593Smuzhiyun 	rdba = rx_ring->dma;
3257*4882a593Smuzhiyun 	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3258*4882a593Smuzhiyun 	ew32(RDBAH(0), (rdba >> 32));
3259*4882a593Smuzhiyun 	ew32(RDLEN(0), rdlen);
3260*4882a593Smuzhiyun 	ew32(RDH(0), 0);
3261*4882a593Smuzhiyun 	ew32(RDT(0), 0);
3262*4882a593Smuzhiyun 	rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3263*4882a593Smuzhiyun 	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun 	writel(0, rx_ring->head);
3266*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
3267*4882a593Smuzhiyun 		e1000e_update_rdt_wa(rx_ring, 0);
3268*4882a593Smuzhiyun 	else
3269*4882a593Smuzhiyun 		writel(0, rx_ring->tail);
3270*4882a593Smuzhiyun 
3271*4882a593Smuzhiyun 	/* Enable Receive Checksum Offload for TCP and UDP */
3272*4882a593Smuzhiyun 	rxcsum = er32(RXCSUM);
3273*4882a593Smuzhiyun 	if (adapter->netdev->features & NETIF_F_RXCSUM)
3274*4882a593Smuzhiyun 		rxcsum |= E1000_RXCSUM_TUOFL;
3275*4882a593Smuzhiyun 	else
3276*4882a593Smuzhiyun 		rxcsum &= ~E1000_RXCSUM_TUOFL;
3277*4882a593Smuzhiyun 	ew32(RXCSUM, rxcsum);
3278*4882a593Smuzhiyun 
3279*4882a593Smuzhiyun 	/* With jumbo frames, excessive C-state transition latencies result
3280*4882a593Smuzhiyun 	 * in dropped transactions.
3281*4882a593Smuzhiyun 	 */
3282*4882a593Smuzhiyun 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
3283*4882a593Smuzhiyun 		u32 lat =
3284*4882a593Smuzhiyun 		    ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3285*4882a593Smuzhiyun 		     adapter->max_frame_size) * 8 / 1000;
3286*4882a593Smuzhiyun 
3287*4882a593Smuzhiyun 		if (adapter->flags & FLAG_IS_ICH) {
3288*4882a593Smuzhiyun 			u32 rxdctl = er32(RXDCTL(0));
3289*4882a593Smuzhiyun 
3290*4882a593Smuzhiyun 			ew32(RXDCTL(0), rxdctl | 0x3 | BIT(8));
3291*4882a593Smuzhiyun 		}
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev,
3294*4882a593Smuzhiyun 			 "Some CPU C-states have been disabled in order to enable jumbo frames\n");
3295*4882a593Smuzhiyun 		cpu_latency_qos_update_request(&adapter->pm_qos_req, lat);
3296*4882a593Smuzhiyun 	} else {
3297*4882a593Smuzhiyun 		cpu_latency_qos_update_request(&adapter->pm_qos_req,
3298*4882a593Smuzhiyun 					       PM_QOS_DEFAULT_VALUE);
3299*4882a593Smuzhiyun 	}
3300*4882a593Smuzhiyun 
3301*4882a593Smuzhiyun 	/* Enable Receives */
3302*4882a593Smuzhiyun 	ew32(RCTL, rctl);
3303*4882a593Smuzhiyun }
3304*4882a593Smuzhiyun 
3305*4882a593Smuzhiyun /**
3306*4882a593Smuzhiyun  * e1000e_write_mc_addr_list - write multicast addresses to MTA
3307*4882a593Smuzhiyun  * @netdev: network interface device structure
3308*4882a593Smuzhiyun  *
3309*4882a593Smuzhiyun  * Writes multicast address list to the MTA hash table.
3310*4882a593Smuzhiyun  * Returns: -ENOMEM on failure
3311*4882a593Smuzhiyun  *                0 on no addresses written
3312*4882a593Smuzhiyun  *                X on writing X addresses to MTA
3313*4882a593Smuzhiyun  */
e1000e_write_mc_addr_list(struct net_device * netdev)3314*4882a593Smuzhiyun static int e1000e_write_mc_addr_list(struct net_device *netdev)
3315*4882a593Smuzhiyun {
3316*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3317*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3318*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
3319*4882a593Smuzhiyun 	u8 *mta_list;
3320*4882a593Smuzhiyun 	int i;
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun 	if (netdev_mc_empty(netdev)) {
3323*4882a593Smuzhiyun 		/* nothing to program, so clear mc list */
3324*4882a593Smuzhiyun 		hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3325*4882a593Smuzhiyun 		return 0;
3326*4882a593Smuzhiyun 	}
3327*4882a593Smuzhiyun 
3328*4882a593Smuzhiyun 	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
3329*4882a593Smuzhiyun 	if (!mta_list)
3330*4882a593Smuzhiyun 		return -ENOMEM;
3331*4882a593Smuzhiyun 
3332*4882a593Smuzhiyun 	/* update_mc_addr_list expects a packed array of only addresses. */
3333*4882a593Smuzhiyun 	i = 0;
3334*4882a593Smuzhiyun 	netdev_for_each_mc_addr(ha, netdev)
3335*4882a593Smuzhiyun 	    memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun 	hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3338*4882a593Smuzhiyun 	kfree(mta_list);
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun 	return netdev_mc_count(netdev);
3341*4882a593Smuzhiyun }
3342*4882a593Smuzhiyun 
3343*4882a593Smuzhiyun /**
3344*4882a593Smuzhiyun  * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3345*4882a593Smuzhiyun  * @netdev: network interface device structure
3346*4882a593Smuzhiyun  *
3347*4882a593Smuzhiyun  * Writes unicast address list to the RAR table.
3348*4882a593Smuzhiyun  * Returns: -ENOMEM on failure/insufficient address space
3349*4882a593Smuzhiyun  *                0 on no addresses written
3350*4882a593Smuzhiyun  *                X on writing X addresses to the RAR table
3351*4882a593Smuzhiyun  **/
e1000e_write_uc_addr_list(struct net_device * netdev)3352*4882a593Smuzhiyun static int e1000e_write_uc_addr_list(struct net_device *netdev)
3353*4882a593Smuzhiyun {
3354*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3355*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3356*4882a593Smuzhiyun 	unsigned int rar_entries;
3357*4882a593Smuzhiyun 	int count = 0;
3358*4882a593Smuzhiyun 
3359*4882a593Smuzhiyun 	rar_entries = hw->mac.ops.rar_get_count(hw);
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	/* save a rar entry for our hardware address */
3362*4882a593Smuzhiyun 	rar_entries--;
3363*4882a593Smuzhiyun 
3364*4882a593Smuzhiyun 	/* save a rar entry for the LAA workaround */
3365*4882a593Smuzhiyun 	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3366*4882a593Smuzhiyun 		rar_entries--;
3367*4882a593Smuzhiyun 
3368*4882a593Smuzhiyun 	/* return ENOMEM indicating insufficient memory for addresses */
3369*4882a593Smuzhiyun 	if (netdev_uc_count(netdev) > rar_entries)
3370*4882a593Smuzhiyun 		return -ENOMEM;
3371*4882a593Smuzhiyun 
3372*4882a593Smuzhiyun 	if (!netdev_uc_empty(netdev) && rar_entries) {
3373*4882a593Smuzhiyun 		struct netdev_hw_addr *ha;
3374*4882a593Smuzhiyun 
3375*4882a593Smuzhiyun 		/* write the addresses in reverse order to avoid write
3376*4882a593Smuzhiyun 		 * combining
3377*4882a593Smuzhiyun 		 */
3378*4882a593Smuzhiyun 		netdev_for_each_uc_addr(ha, netdev) {
3379*4882a593Smuzhiyun 			int ret_val;
3380*4882a593Smuzhiyun 
3381*4882a593Smuzhiyun 			if (!rar_entries)
3382*4882a593Smuzhiyun 				break;
3383*4882a593Smuzhiyun 			ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3384*4882a593Smuzhiyun 			if (ret_val < 0)
3385*4882a593Smuzhiyun 				return -ENOMEM;
3386*4882a593Smuzhiyun 			count++;
3387*4882a593Smuzhiyun 		}
3388*4882a593Smuzhiyun 	}
3389*4882a593Smuzhiyun 
3390*4882a593Smuzhiyun 	/* zero out the remaining RAR entries not used above */
3391*4882a593Smuzhiyun 	for (; rar_entries > 0; rar_entries--) {
3392*4882a593Smuzhiyun 		ew32(RAH(rar_entries), 0);
3393*4882a593Smuzhiyun 		ew32(RAL(rar_entries), 0);
3394*4882a593Smuzhiyun 	}
3395*4882a593Smuzhiyun 	e1e_flush();
3396*4882a593Smuzhiyun 
3397*4882a593Smuzhiyun 	return count;
3398*4882a593Smuzhiyun }
3399*4882a593Smuzhiyun 
3400*4882a593Smuzhiyun /**
3401*4882a593Smuzhiyun  * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3402*4882a593Smuzhiyun  * @netdev: network interface device structure
3403*4882a593Smuzhiyun  *
3404*4882a593Smuzhiyun  * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3405*4882a593Smuzhiyun  * address list or the network interface flags are updated.  This routine is
3406*4882a593Smuzhiyun  * responsible for configuring the hardware for proper unicast, multicast,
3407*4882a593Smuzhiyun  * promiscuous mode, and all-multi behavior.
3408*4882a593Smuzhiyun  **/
e1000e_set_rx_mode(struct net_device * netdev)3409*4882a593Smuzhiyun static void e1000e_set_rx_mode(struct net_device *netdev)
3410*4882a593Smuzhiyun {
3411*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
3412*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3413*4882a593Smuzhiyun 	u32 rctl;
3414*4882a593Smuzhiyun 
3415*4882a593Smuzhiyun 	if (pm_runtime_suspended(netdev->dev.parent))
3416*4882a593Smuzhiyun 		return;
3417*4882a593Smuzhiyun 
3418*4882a593Smuzhiyun 	/* Check for Promiscuous and All Multicast modes */
3419*4882a593Smuzhiyun 	rctl = er32(RCTL);
3420*4882a593Smuzhiyun 
3421*4882a593Smuzhiyun 	/* clear the affected bits */
3422*4882a593Smuzhiyun 	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3423*4882a593Smuzhiyun 
3424*4882a593Smuzhiyun 	if (netdev->flags & IFF_PROMISC) {
3425*4882a593Smuzhiyun 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3426*4882a593Smuzhiyun 		/* Do not hardware filter VLANs in promisc mode */
3427*4882a593Smuzhiyun 		e1000e_vlan_filter_disable(adapter);
3428*4882a593Smuzhiyun 	} else {
3429*4882a593Smuzhiyun 		int count;
3430*4882a593Smuzhiyun 
3431*4882a593Smuzhiyun 		if (netdev->flags & IFF_ALLMULTI) {
3432*4882a593Smuzhiyun 			rctl |= E1000_RCTL_MPE;
3433*4882a593Smuzhiyun 		} else {
3434*4882a593Smuzhiyun 			/* Write addresses to the MTA, if the attempt fails
3435*4882a593Smuzhiyun 			 * then we should just turn on promiscuous mode so
3436*4882a593Smuzhiyun 			 * that we can at least receive multicast traffic
3437*4882a593Smuzhiyun 			 */
3438*4882a593Smuzhiyun 			count = e1000e_write_mc_addr_list(netdev);
3439*4882a593Smuzhiyun 			if (count < 0)
3440*4882a593Smuzhiyun 				rctl |= E1000_RCTL_MPE;
3441*4882a593Smuzhiyun 		}
3442*4882a593Smuzhiyun 		e1000e_vlan_filter_enable(adapter);
3443*4882a593Smuzhiyun 		/* Write addresses to available RAR registers, if there is not
3444*4882a593Smuzhiyun 		 * sufficient space to store all the addresses then enable
3445*4882a593Smuzhiyun 		 * unicast promiscuous mode
3446*4882a593Smuzhiyun 		 */
3447*4882a593Smuzhiyun 		count = e1000e_write_uc_addr_list(netdev);
3448*4882a593Smuzhiyun 		if (count < 0)
3449*4882a593Smuzhiyun 			rctl |= E1000_RCTL_UPE;
3450*4882a593Smuzhiyun 	}
3451*4882a593Smuzhiyun 
3452*4882a593Smuzhiyun 	ew32(RCTL, rctl);
3453*4882a593Smuzhiyun 
3454*4882a593Smuzhiyun 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3455*4882a593Smuzhiyun 		e1000e_vlan_strip_enable(adapter);
3456*4882a593Smuzhiyun 	else
3457*4882a593Smuzhiyun 		e1000e_vlan_strip_disable(adapter);
3458*4882a593Smuzhiyun }
3459*4882a593Smuzhiyun 
e1000e_setup_rss_hash(struct e1000_adapter * adapter)3460*4882a593Smuzhiyun static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3461*4882a593Smuzhiyun {
3462*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3463*4882a593Smuzhiyun 	u32 mrqc, rxcsum;
3464*4882a593Smuzhiyun 	u32 rss_key[10];
3465*4882a593Smuzhiyun 	int i;
3466*4882a593Smuzhiyun 
3467*4882a593Smuzhiyun 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
3468*4882a593Smuzhiyun 	for (i = 0; i < 10; i++)
3469*4882a593Smuzhiyun 		ew32(RSSRK(i), rss_key[i]);
3470*4882a593Smuzhiyun 
3471*4882a593Smuzhiyun 	/* Direct all traffic to queue 0 */
3472*4882a593Smuzhiyun 	for (i = 0; i < 32; i++)
3473*4882a593Smuzhiyun 		ew32(RETA(i), 0);
3474*4882a593Smuzhiyun 
3475*4882a593Smuzhiyun 	/* Disable raw packet checksumming so that RSS hash is placed in
3476*4882a593Smuzhiyun 	 * descriptor on writeback.
3477*4882a593Smuzhiyun 	 */
3478*4882a593Smuzhiyun 	rxcsum = er32(RXCSUM);
3479*4882a593Smuzhiyun 	rxcsum |= E1000_RXCSUM_PCSD;
3480*4882a593Smuzhiyun 
3481*4882a593Smuzhiyun 	ew32(RXCSUM, rxcsum);
3482*4882a593Smuzhiyun 
3483*4882a593Smuzhiyun 	mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3484*4882a593Smuzhiyun 		E1000_MRQC_RSS_FIELD_IPV4_TCP |
3485*4882a593Smuzhiyun 		E1000_MRQC_RSS_FIELD_IPV6 |
3486*4882a593Smuzhiyun 		E1000_MRQC_RSS_FIELD_IPV6_TCP |
3487*4882a593Smuzhiyun 		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3488*4882a593Smuzhiyun 
3489*4882a593Smuzhiyun 	ew32(MRQC, mrqc);
3490*4882a593Smuzhiyun }
3491*4882a593Smuzhiyun 
3492*4882a593Smuzhiyun /**
3493*4882a593Smuzhiyun  * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3494*4882a593Smuzhiyun  * @adapter: board private structure
3495*4882a593Smuzhiyun  * @timinca: pointer to returned time increment attributes
3496*4882a593Smuzhiyun  *
3497*4882a593Smuzhiyun  * Get attributes for incrementing the System Time Register SYSTIML/H at
3498*4882a593Smuzhiyun  * the default base frequency, and set the cyclecounter shift value.
3499*4882a593Smuzhiyun  **/
e1000e_get_base_timinca(struct e1000_adapter * adapter,u32 * timinca)3500*4882a593Smuzhiyun s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3501*4882a593Smuzhiyun {
3502*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3503*4882a593Smuzhiyun 	u32 incvalue, incperiod, shift;
3504*4882a593Smuzhiyun 
3505*4882a593Smuzhiyun 	/* Make sure clock is enabled on I217/I218/I219  before checking
3506*4882a593Smuzhiyun 	 * the frequency
3507*4882a593Smuzhiyun 	 */
3508*4882a593Smuzhiyun 	if ((hw->mac.type >= e1000_pch_lpt) &&
3509*4882a593Smuzhiyun 	    !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3510*4882a593Smuzhiyun 	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3511*4882a593Smuzhiyun 		u32 fextnvm7 = er32(FEXTNVM7);
3512*4882a593Smuzhiyun 
3513*4882a593Smuzhiyun 		if (!(fextnvm7 & BIT(0))) {
3514*4882a593Smuzhiyun 			ew32(FEXTNVM7, fextnvm7 | BIT(0));
3515*4882a593Smuzhiyun 			e1e_flush();
3516*4882a593Smuzhiyun 		}
3517*4882a593Smuzhiyun 	}
3518*4882a593Smuzhiyun 
3519*4882a593Smuzhiyun 	switch (hw->mac.type) {
3520*4882a593Smuzhiyun 	case e1000_pch2lan:
3521*4882a593Smuzhiyun 		/* Stable 96MHz frequency */
3522*4882a593Smuzhiyun 		incperiod = INCPERIOD_96MHZ;
3523*4882a593Smuzhiyun 		incvalue = INCVALUE_96MHZ;
3524*4882a593Smuzhiyun 		shift = INCVALUE_SHIFT_96MHZ;
3525*4882a593Smuzhiyun 		adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
3526*4882a593Smuzhiyun 		break;
3527*4882a593Smuzhiyun 	case e1000_pch_lpt:
3528*4882a593Smuzhiyun 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
3529*4882a593Smuzhiyun 			/* Stable 96MHz frequency */
3530*4882a593Smuzhiyun 			incperiod = INCPERIOD_96MHZ;
3531*4882a593Smuzhiyun 			incvalue = INCVALUE_96MHZ;
3532*4882a593Smuzhiyun 			shift = INCVALUE_SHIFT_96MHZ;
3533*4882a593Smuzhiyun 			adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ;
3534*4882a593Smuzhiyun 		} else {
3535*4882a593Smuzhiyun 			/* Stable 25MHz frequency */
3536*4882a593Smuzhiyun 			incperiod = INCPERIOD_25MHZ;
3537*4882a593Smuzhiyun 			incvalue = INCVALUE_25MHZ;
3538*4882a593Smuzhiyun 			shift = INCVALUE_SHIFT_25MHZ;
3539*4882a593Smuzhiyun 			adapter->cc.shift = shift;
3540*4882a593Smuzhiyun 		}
3541*4882a593Smuzhiyun 		break;
3542*4882a593Smuzhiyun 	case e1000_pch_spt:
3543*4882a593Smuzhiyun 		/* Stable 24MHz frequency */
3544*4882a593Smuzhiyun 		incperiod = INCPERIOD_24MHZ;
3545*4882a593Smuzhiyun 		incvalue = INCVALUE_24MHZ;
3546*4882a593Smuzhiyun 		shift = INCVALUE_SHIFT_24MHZ;
3547*4882a593Smuzhiyun 		adapter->cc.shift = shift;
3548*4882a593Smuzhiyun 		break;
3549*4882a593Smuzhiyun 	case e1000_pch_cnp:
3550*4882a593Smuzhiyun 	case e1000_pch_tgp:
3551*4882a593Smuzhiyun 	case e1000_pch_adp:
3552*4882a593Smuzhiyun 	case e1000_pch_mtp:
3553*4882a593Smuzhiyun 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
3554*4882a593Smuzhiyun 			/* Stable 24MHz frequency */
3555*4882a593Smuzhiyun 			incperiod = INCPERIOD_24MHZ;
3556*4882a593Smuzhiyun 			incvalue = INCVALUE_24MHZ;
3557*4882a593Smuzhiyun 			shift = INCVALUE_SHIFT_24MHZ;
3558*4882a593Smuzhiyun 			adapter->cc.shift = shift;
3559*4882a593Smuzhiyun 		} else {
3560*4882a593Smuzhiyun 			/* Stable 38400KHz frequency */
3561*4882a593Smuzhiyun 			incperiod = INCPERIOD_38400KHZ;
3562*4882a593Smuzhiyun 			incvalue = INCVALUE_38400KHZ;
3563*4882a593Smuzhiyun 			shift = INCVALUE_SHIFT_38400KHZ;
3564*4882a593Smuzhiyun 			adapter->cc.shift = shift;
3565*4882a593Smuzhiyun 		}
3566*4882a593Smuzhiyun 		break;
3567*4882a593Smuzhiyun 	case e1000_82574:
3568*4882a593Smuzhiyun 	case e1000_82583:
3569*4882a593Smuzhiyun 		/* Stable 25MHz frequency */
3570*4882a593Smuzhiyun 		incperiod = INCPERIOD_25MHZ;
3571*4882a593Smuzhiyun 		incvalue = INCVALUE_25MHZ;
3572*4882a593Smuzhiyun 		shift = INCVALUE_SHIFT_25MHZ;
3573*4882a593Smuzhiyun 		adapter->cc.shift = shift;
3574*4882a593Smuzhiyun 		break;
3575*4882a593Smuzhiyun 	default:
3576*4882a593Smuzhiyun 		return -EINVAL;
3577*4882a593Smuzhiyun 	}
3578*4882a593Smuzhiyun 
3579*4882a593Smuzhiyun 	*timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3580*4882a593Smuzhiyun 		    ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
3581*4882a593Smuzhiyun 
3582*4882a593Smuzhiyun 	return 0;
3583*4882a593Smuzhiyun }
3584*4882a593Smuzhiyun 
3585*4882a593Smuzhiyun /**
3586*4882a593Smuzhiyun  * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3587*4882a593Smuzhiyun  * @adapter: board private structure
3588*4882a593Smuzhiyun  * @config: timestamp configuration
3589*4882a593Smuzhiyun  *
3590*4882a593Smuzhiyun  * Outgoing time stamping can be enabled and disabled. Play nice and
3591*4882a593Smuzhiyun  * disable it when requested, although it shouldn't cause any overhead
3592*4882a593Smuzhiyun  * when no packet needs it. At most one packet in the queue may be
3593*4882a593Smuzhiyun  * marked for time stamping, otherwise it would be impossible to tell
3594*4882a593Smuzhiyun  * for sure to which packet the hardware time stamp belongs.
3595*4882a593Smuzhiyun  *
3596*4882a593Smuzhiyun  * Incoming time stamping has to be configured via the hardware filters.
3597*4882a593Smuzhiyun  * Not all combinations are supported, in particular event type has to be
3598*4882a593Smuzhiyun  * specified. Matching the kind of event packet is not supported, with the
3599*4882a593Smuzhiyun  * exception of "all V2 events regardless of level 2 or 4".
3600*4882a593Smuzhiyun  **/
e1000e_config_hwtstamp(struct e1000_adapter * adapter,struct hwtstamp_config * config)3601*4882a593Smuzhiyun static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
3602*4882a593Smuzhiyun 				  struct hwtstamp_config *config)
3603*4882a593Smuzhiyun {
3604*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3605*4882a593Smuzhiyun 	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3606*4882a593Smuzhiyun 	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
3607*4882a593Smuzhiyun 	u32 rxmtrl = 0;
3608*4882a593Smuzhiyun 	u16 rxudp = 0;
3609*4882a593Smuzhiyun 	bool is_l4 = false;
3610*4882a593Smuzhiyun 	bool is_l2 = false;
3611*4882a593Smuzhiyun 	u32 regval;
3612*4882a593Smuzhiyun 
3613*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3614*4882a593Smuzhiyun 		return -EINVAL;
3615*4882a593Smuzhiyun 
3616*4882a593Smuzhiyun 	/* flags reserved for future extensions - must be zero */
3617*4882a593Smuzhiyun 	if (config->flags)
3618*4882a593Smuzhiyun 		return -EINVAL;
3619*4882a593Smuzhiyun 
3620*4882a593Smuzhiyun 	switch (config->tx_type) {
3621*4882a593Smuzhiyun 	case HWTSTAMP_TX_OFF:
3622*4882a593Smuzhiyun 		tsync_tx_ctl = 0;
3623*4882a593Smuzhiyun 		break;
3624*4882a593Smuzhiyun 	case HWTSTAMP_TX_ON:
3625*4882a593Smuzhiyun 		break;
3626*4882a593Smuzhiyun 	default:
3627*4882a593Smuzhiyun 		return -ERANGE;
3628*4882a593Smuzhiyun 	}
3629*4882a593Smuzhiyun 
3630*4882a593Smuzhiyun 	switch (config->rx_filter) {
3631*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_NONE:
3632*4882a593Smuzhiyun 		tsync_rx_ctl = 0;
3633*4882a593Smuzhiyun 		break;
3634*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3635*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3636*4882a593Smuzhiyun 		rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3637*4882a593Smuzhiyun 		is_l4 = true;
3638*4882a593Smuzhiyun 		break;
3639*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3640*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3641*4882a593Smuzhiyun 		rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3642*4882a593Smuzhiyun 		is_l4 = true;
3643*4882a593Smuzhiyun 		break;
3644*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3645*4882a593Smuzhiyun 		/* Also time stamps V2 L2 Path Delay Request/Response */
3646*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3647*4882a593Smuzhiyun 		rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3648*4882a593Smuzhiyun 		is_l2 = true;
3649*4882a593Smuzhiyun 		break;
3650*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3651*4882a593Smuzhiyun 		/* Also time stamps V2 L2 Path Delay Request/Response. */
3652*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3653*4882a593Smuzhiyun 		rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3654*4882a593Smuzhiyun 		is_l2 = true;
3655*4882a593Smuzhiyun 		break;
3656*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3657*4882a593Smuzhiyun 		/* Hardware cannot filter just V2 L4 Sync messages */
3658*4882a593Smuzhiyun 		fallthrough;
3659*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
3660*4882a593Smuzhiyun 		/* Also time stamps V2 Path Delay Request/Response. */
3661*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3662*4882a593Smuzhiyun 		rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3663*4882a593Smuzhiyun 		is_l2 = true;
3664*4882a593Smuzhiyun 		is_l4 = true;
3665*4882a593Smuzhiyun 		break;
3666*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3667*4882a593Smuzhiyun 		/* Hardware cannot filter just V2 L4 Delay Request messages */
3668*4882a593Smuzhiyun 		fallthrough;
3669*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3670*4882a593Smuzhiyun 		/* Also time stamps V2 Path Delay Request/Response. */
3671*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3672*4882a593Smuzhiyun 		rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3673*4882a593Smuzhiyun 		is_l2 = true;
3674*4882a593Smuzhiyun 		is_l4 = true;
3675*4882a593Smuzhiyun 		break;
3676*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3677*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3678*4882a593Smuzhiyun 		/* Hardware cannot filter just V2 L4 or L2 Event messages */
3679*4882a593Smuzhiyun 		fallthrough;
3680*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
3681*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3682*4882a593Smuzhiyun 		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3683*4882a593Smuzhiyun 		is_l2 = true;
3684*4882a593Smuzhiyun 		is_l4 = true;
3685*4882a593Smuzhiyun 		break;
3686*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3687*4882a593Smuzhiyun 		/* For V1, the hardware can only filter Sync messages or
3688*4882a593Smuzhiyun 		 * Delay Request messages but not both so fall-through to
3689*4882a593Smuzhiyun 		 * time stamp all packets.
3690*4882a593Smuzhiyun 		 */
3691*4882a593Smuzhiyun 		fallthrough;
3692*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_NTP_ALL:
3693*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_ALL:
3694*4882a593Smuzhiyun 		is_l2 = true;
3695*4882a593Smuzhiyun 		is_l4 = true;
3696*4882a593Smuzhiyun 		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3697*4882a593Smuzhiyun 		config->rx_filter = HWTSTAMP_FILTER_ALL;
3698*4882a593Smuzhiyun 		break;
3699*4882a593Smuzhiyun 	default:
3700*4882a593Smuzhiyun 		return -ERANGE;
3701*4882a593Smuzhiyun 	}
3702*4882a593Smuzhiyun 
3703*4882a593Smuzhiyun 	adapter->hwtstamp_config = *config;
3704*4882a593Smuzhiyun 
3705*4882a593Smuzhiyun 	/* enable/disable Tx h/w time stamping */
3706*4882a593Smuzhiyun 	regval = er32(TSYNCTXCTL);
3707*4882a593Smuzhiyun 	regval &= ~E1000_TSYNCTXCTL_ENABLED;
3708*4882a593Smuzhiyun 	regval |= tsync_tx_ctl;
3709*4882a593Smuzhiyun 	ew32(TSYNCTXCTL, regval);
3710*4882a593Smuzhiyun 	if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3711*4882a593Smuzhiyun 	    (regval & E1000_TSYNCTXCTL_ENABLED)) {
3712*4882a593Smuzhiyun 		e_err("Timesync Tx Control register not set as expected\n");
3713*4882a593Smuzhiyun 		return -EAGAIN;
3714*4882a593Smuzhiyun 	}
3715*4882a593Smuzhiyun 
3716*4882a593Smuzhiyun 	/* enable/disable Rx h/w time stamping */
3717*4882a593Smuzhiyun 	regval = er32(TSYNCRXCTL);
3718*4882a593Smuzhiyun 	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3719*4882a593Smuzhiyun 	regval |= tsync_rx_ctl;
3720*4882a593Smuzhiyun 	ew32(TSYNCRXCTL, regval);
3721*4882a593Smuzhiyun 	if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3722*4882a593Smuzhiyun 				 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3723*4882a593Smuzhiyun 	    (regval & (E1000_TSYNCRXCTL_ENABLED |
3724*4882a593Smuzhiyun 		       E1000_TSYNCRXCTL_TYPE_MASK))) {
3725*4882a593Smuzhiyun 		e_err("Timesync Rx Control register not set as expected\n");
3726*4882a593Smuzhiyun 		return -EAGAIN;
3727*4882a593Smuzhiyun 	}
3728*4882a593Smuzhiyun 
3729*4882a593Smuzhiyun 	/* L2: define ethertype filter for time stamped packets */
3730*4882a593Smuzhiyun 	if (is_l2)
3731*4882a593Smuzhiyun 		rxmtrl |= ETH_P_1588;
3732*4882a593Smuzhiyun 
3733*4882a593Smuzhiyun 	/* define which PTP packets get time stamped */
3734*4882a593Smuzhiyun 	ew32(RXMTRL, rxmtrl);
3735*4882a593Smuzhiyun 
3736*4882a593Smuzhiyun 	/* Filter by destination port */
3737*4882a593Smuzhiyun 	if (is_l4) {
3738*4882a593Smuzhiyun 		rxudp = PTP_EV_PORT;
3739*4882a593Smuzhiyun 		cpu_to_be16s(&rxudp);
3740*4882a593Smuzhiyun 	}
3741*4882a593Smuzhiyun 	ew32(RXUDP, rxudp);
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun 	e1e_flush();
3744*4882a593Smuzhiyun 
3745*4882a593Smuzhiyun 	/* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
3746*4882a593Smuzhiyun 	er32(RXSTMPH);
3747*4882a593Smuzhiyun 	er32(TXSTMPH);
3748*4882a593Smuzhiyun 
3749*4882a593Smuzhiyun 	return 0;
3750*4882a593Smuzhiyun }
3751*4882a593Smuzhiyun 
3752*4882a593Smuzhiyun /**
3753*4882a593Smuzhiyun  * e1000_configure - configure the hardware for Rx and Tx
3754*4882a593Smuzhiyun  * @adapter: private board structure
3755*4882a593Smuzhiyun  **/
e1000_configure(struct e1000_adapter * adapter)3756*4882a593Smuzhiyun static void e1000_configure(struct e1000_adapter *adapter)
3757*4882a593Smuzhiyun {
3758*4882a593Smuzhiyun 	struct e1000_ring *rx_ring = adapter->rx_ring;
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun 	e1000e_set_rx_mode(adapter->netdev);
3761*4882a593Smuzhiyun 
3762*4882a593Smuzhiyun 	e1000_restore_vlan(adapter);
3763*4882a593Smuzhiyun 	e1000_init_manageability_pt(adapter);
3764*4882a593Smuzhiyun 
3765*4882a593Smuzhiyun 	e1000_configure_tx(adapter);
3766*4882a593Smuzhiyun 
3767*4882a593Smuzhiyun 	if (adapter->netdev->features & NETIF_F_RXHASH)
3768*4882a593Smuzhiyun 		e1000e_setup_rss_hash(adapter);
3769*4882a593Smuzhiyun 	e1000_setup_rctl(adapter);
3770*4882a593Smuzhiyun 	e1000_configure_rx(adapter);
3771*4882a593Smuzhiyun 	adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3772*4882a593Smuzhiyun }
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun /**
3775*4882a593Smuzhiyun  * e1000e_power_up_phy - restore link in case the phy was powered down
3776*4882a593Smuzhiyun  * @adapter: address of board private structure
3777*4882a593Smuzhiyun  *
3778*4882a593Smuzhiyun  * The phy may be powered down to save power and turn off link when the
3779*4882a593Smuzhiyun  * driver is unloaded and wake on lan is not enabled (among others)
3780*4882a593Smuzhiyun  * *** this routine MUST be followed by a call to e1000e_reset ***
3781*4882a593Smuzhiyun  **/
e1000e_power_up_phy(struct e1000_adapter * adapter)3782*4882a593Smuzhiyun void e1000e_power_up_phy(struct e1000_adapter *adapter)
3783*4882a593Smuzhiyun {
3784*4882a593Smuzhiyun 	if (adapter->hw.phy.ops.power_up)
3785*4882a593Smuzhiyun 		adapter->hw.phy.ops.power_up(&adapter->hw);
3786*4882a593Smuzhiyun 
3787*4882a593Smuzhiyun 	adapter->hw.mac.ops.setup_link(&adapter->hw);
3788*4882a593Smuzhiyun }
3789*4882a593Smuzhiyun 
3790*4882a593Smuzhiyun /**
3791*4882a593Smuzhiyun  * e1000_power_down_phy - Power down the PHY
3792*4882a593Smuzhiyun  * @adapter: board private structure
3793*4882a593Smuzhiyun  *
3794*4882a593Smuzhiyun  * Power down the PHY so no link is implied when interface is down.
3795*4882a593Smuzhiyun  * The PHY cannot be powered down if management or WoL is active.
3796*4882a593Smuzhiyun  */
e1000_power_down_phy(struct e1000_adapter * adapter)3797*4882a593Smuzhiyun static void e1000_power_down_phy(struct e1000_adapter *adapter)
3798*4882a593Smuzhiyun {
3799*4882a593Smuzhiyun 	if (adapter->hw.phy.ops.power_down)
3800*4882a593Smuzhiyun 		adapter->hw.phy.ops.power_down(&adapter->hw);
3801*4882a593Smuzhiyun }
3802*4882a593Smuzhiyun 
3803*4882a593Smuzhiyun /**
3804*4882a593Smuzhiyun  * e1000_flush_tx_ring - remove all descriptors from the tx_ring
3805*4882a593Smuzhiyun  * @adapter: board private structure
3806*4882a593Smuzhiyun  *
3807*4882a593Smuzhiyun  * We want to clear all pending descriptors from the TX ring.
3808*4882a593Smuzhiyun  * zeroing happens when the HW reads the regs. We  assign the ring itself as
3809*4882a593Smuzhiyun  * the data of the next descriptor. We don't care about the data we are about
3810*4882a593Smuzhiyun  * to reset the HW.
3811*4882a593Smuzhiyun  */
e1000_flush_tx_ring(struct e1000_adapter * adapter)3812*4882a593Smuzhiyun static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
3813*4882a593Smuzhiyun {
3814*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3815*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
3816*4882a593Smuzhiyun 	struct e1000_tx_desc *tx_desc = NULL;
3817*4882a593Smuzhiyun 	u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
3818*4882a593Smuzhiyun 	u16 size = 512;
3819*4882a593Smuzhiyun 
3820*4882a593Smuzhiyun 	tctl = er32(TCTL);
3821*4882a593Smuzhiyun 	ew32(TCTL, tctl | E1000_TCTL_EN);
3822*4882a593Smuzhiyun 	tdt = er32(TDT(0));
3823*4882a593Smuzhiyun 	BUG_ON(tdt != tx_ring->next_to_use);
3824*4882a593Smuzhiyun 	tx_desc =  E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
3825*4882a593Smuzhiyun 	tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
3826*4882a593Smuzhiyun 
3827*4882a593Smuzhiyun 	tx_desc->lower.data = cpu_to_le32(txd_lower | size);
3828*4882a593Smuzhiyun 	tx_desc->upper.data = 0;
3829*4882a593Smuzhiyun 	/* flush descriptors to memory before notifying the HW */
3830*4882a593Smuzhiyun 	wmb();
3831*4882a593Smuzhiyun 	tx_ring->next_to_use++;
3832*4882a593Smuzhiyun 	if (tx_ring->next_to_use == tx_ring->count)
3833*4882a593Smuzhiyun 		tx_ring->next_to_use = 0;
3834*4882a593Smuzhiyun 	ew32(TDT(0), tx_ring->next_to_use);
3835*4882a593Smuzhiyun 	usleep_range(200, 250);
3836*4882a593Smuzhiyun }
3837*4882a593Smuzhiyun 
3838*4882a593Smuzhiyun /**
3839*4882a593Smuzhiyun  * e1000_flush_rx_ring - remove all descriptors from the rx_ring
3840*4882a593Smuzhiyun  * @adapter: board private structure
3841*4882a593Smuzhiyun  *
3842*4882a593Smuzhiyun  * Mark all descriptors in the RX ring as consumed and disable the rx ring
3843*4882a593Smuzhiyun  */
e1000_flush_rx_ring(struct e1000_adapter * adapter)3844*4882a593Smuzhiyun static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
3845*4882a593Smuzhiyun {
3846*4882a593Smuzhiyun 	u32 rctl, rxdctl;
3847*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3848*4882a593Smuzhiyun 
3849*4882a593Smuzhiyun 	rctl = er32(RCTL);
3850*4882a593Smuzhiyun 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
3851*4882a593Smuzhiyun 	e1e_flush();
3852*4882a593Smuzhiyun 	usleep_range(100, 150);
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun 	rxdctl = er32(RXDCTL(0));
3855*4882a593Smuzhiyun 	/* zero the lower 14 bits (prefetch and host thresholds) */
3856*4882a593Smuzhiyun 	rxdctl &= 0xffffc000;
3857*4882a593Smuzhiyun 
3858*4882a593Smuzhiyun 	/* update thresholds: prefetch threshold to 31, host threshold to 1
3859*4882a593Smuzhiyun 	 * and make sure the granularity is "descriptors" and not "cache lines"
3860*4882a593Smuzhiyun 	 */
3861*4882a593Smuzhiyun 	rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
3862*4882a593Smuzhiyun 
3863*4882a593Smuzhiyun 	ew32(RXDCTL(0), rxdctl);
3864*4882a593Smuzhiyun 	/* momentarily enable the RX ring for the changes to take effect */
3865*4882a593Smuzhiyun 	ew32(RCTL, rctl | E1000_RCTL_EN);
3866*4882a593Smuzhiyun 	e1e_flush();
3867*4882a593Smuzhiyun 	usleep_range(100, 150);
3868*4882a593Smuzhiyun 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
3869*4882a593Smuzhiyun }
3870*4882a593Smuzhiyun 
3871*4882a593Smuzhiyun /**
3872*4882a593Smuzhiyun  * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
3873*4882a593Smuzhiyun  * @adapter: board private structure
3874*4882a593Smuzhiyun  *
3875*4882a593Smuzhiyun  * In i219, the descriptor rings must be emptied before resetting the HW
3876*4882a593Smuzhiyun  * or before changing the device state to D3 during runtime (runtime PM).
3877*4882a593Smuzhiyun  *
3878*4882a593Smuzhiyun  * Failure to do this will cause the HW to enter a unit hang state which can
3879*4882a593Smuzhiyun  * only be released by PCI reset on the device
3880*4882a593Smuzhiyun  *
3881*4882a593Smuzhiyun  */
3882*4882a593Smuzhiyun 
e1000_flush_desc_rings(struct e1000_adapter * adapter)3883*4882a593Smuzhiyun static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
3884*4882a593Smuzhiyun {
3885*4882a593Smuzhiyun 	u16 hang_state;
3886*4882a593Smuzhiyun 	u32 fext_nvm11, tdlen;
3887*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3888*4882a593Smuzhiyun 
3889*4882a593Smuzhiyun 	/* First, disable MULR fix in FEXTNVM11 */
3890*4882a593Smuzhiyun 	fext_nvm11 = er32(FEXTNVM11);
3891*4882a593Smuzhiyun 	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
3892*4882a593Smuzhiyun 	ew32(FEXTNVM11, fext_nvm11);
3893*4882a593Smuzhiyun 	/* do nothing if we're not in faulty state, or if the queue is empty */
3894*4882a593Smuzhiyun 	tdlen = er32(TDLEN(0));
3895*4882a593Smuzhiyun 	pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
3896*4882a593Smuzhiyun 			     &hang_state);
3897*4882a593Smuzhiyun 	if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
3898*4882a593Smuzhiyun 		return;
3899*4882a593Smuzhiyun 	e1000_flush_tx_ring(adapter);
3900*4882a593Smuzhiyun 	/* recheck, maybe the fault is caused by the rx ring */
3901*4882a593Smuzhiyun 	pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
3902*4882a593Smuzhiyun 			     &hang_state);
3903*4882a593Smuzhiyun 	if (hang_state & FLUSH_DESC_REQUIRED)
3904*4882a593Smuzhiyun 		e1000_flush_rx_ring(adapter);
3905*4882a593Smuzhiyun }
3906*4882a593Smuzhiyun 
3907*4882a593Smuzhiyun /**
3908*4882a593Smuzhiyun  * e1000e_systim_reset - reset the timesync registers after a hardware reset
3909*4882a593Smuzhiyun  * @adapter: board private structure
3910*4882a593Smuzhiyun  *
3911*4882a593Smuzhiyun  * When the MAC is reset, all hardware bits for timesync will be reset to the
3912*4882a593Smuzhiyun  * default values. This function will restore the settings last in place.
3913*4882a593Smuzhiyun  * Since the clock SYSTIME registers are reset, we will simply restore the
3914*4882a593Smuzhiyun  * cyclecounter to the kernel real clock time.
3915*4882a593Smuzhiyun  **/
e1000e_systim_reset(struct e1000_adapter * adapter)3916*4882a593Smuzhiyun static void e1000e_systim_reset(struct e1000_adapter *adapter)
3917*4882a593Smuzhiyun {
3918*4882a593Smuzhiyun 	struct ptp_clock_info *info = &adapter->ptp_clock_info;
3919*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3920*4882a593Smuzhiyun 	unsigned long flags;
3921*4882a593Smuzhiyun 	u32 timinca;
3922*4882a593Smuzhiyun 	s32 ret_val;
3923*4882a593Smuzhiyun 
3924*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3925*4882a593Smuzhiyun 		return;
3926*4882a593Smuzhiyun 
3927*4882a593Smuzhiyun 	if (info->adjfreq) {
3928*4882a593Smuzhiyun 		/* restore the previous ptp frequency delta */
3929*4882a593Smuzhiyun 		ret_val = info->adjfreq(info, adapter->ptp_delta);
3930*4882a593Smuzhiyun 	} else {
3931*4882a593Smuzhiyun 		/* set the default base frequency if no adjustment possible */
3932*4882a593Smuzhiyun 		ret_val = e1000e_get_base_timinca(adapter, &timinca);
3933*4882a593Smuzhiyun 		if (!ret_val)
3934*4882a593Smuzhiyun 			ew32(TIMINCA, timinca);
3935*4882a593Smuzhiyun 	}
3936*4882a593Smuzhiyun 
3937*4882a593Smuzhiyun 	if (ret_val) {
3938*4882a593Smuzhiyun 		dev_warn(&adapter->pdev->dev,
3939*4882a593Smuzhiyun 			 "Failed to restore TIMINCA clock rate delta: %d\n",
3940*4882a593Smuzhiyun 			 ret_val);
3941*4882a593Smuzhiyun 		return;
3942*4882a593Smuzhiyun 	}
3943*4882a593Smuzhiyun 
3944*4882a593Smuzhiyun 	/* reset the systim ns time counter */
3945*4882a593Smuzhiyun 	spin_lock_irqsave(&adapter->systim_lock, flags);
3946*4882a593Smuzhiyun 	timecounter_init(&adapter->tc, &adapter->cc,
3947*4882a593Smuzhiyun 			 ktime_to_ns(ktime_get_real()));
3948*4882a593Smuzhiyun 	spin_unlock_irqrestore(&adapter->systim_lock, flags);
3949*4882a593Smuzhiyun 
3950*4882a593Smuzhiyun 	/* restore the previous hwtstamp configuration settings */
3951*4882a593Smuzhiyun 	e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
3952*4882a593Smuzhiyun }
3953*4882a593Smuzhiyun 
3954*4882a593Smuzhiyun /**
3955*4882a593Smuzhiyun  * e1000e_reset - bring the hardware into a known good state
3956*4882a593Smuzhiyun  * @adapter: board private structure
3957*4882a593Smuzhiyun  *
3958*4882a593Smuzhiyun  * This function boots the hardware and enables some settings that
3959*4882a593Smuzhiyun  * require a configuration cycle of the hardware - those cannot be
3960*4882a593Smuzhiyun  * set/changed during runtime. After reset the device needs to be
3961*4882a593Smuzhiyun  * properly configured for Rx, Tx etc.
3962*4882a593Smuzhiyun  */
e1000e_reset(struct e1000_adapter * adapter)3963*4882a593Smuzhiyun void e1000e_reset(struct e1000_adapter *adapter)
3964*4882a593Smuzhiyun {
3965*4882a593Smuzhiyun 	struct e1000_mac_info *mac = &adapter->hw.mac;
3966*4882a593Smuzhiyun 	struct e1000_fc_info *fc = &adapter->hw.fc;
3967*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
3968*4882a593Smuzhiyun 	u32 tx_space, min_tx_space, min_rx_space;
3969*4882a593Smuzhiyun 	u32 pba = adapter->pba;
3970*4882a593Smuzhiyun 	u16 hwm;
3971*4882a593Smuzhiyun 
3972*4882a593Smuzhiyun 	/* reset Packet Buffer Allocation to default */
3973*4882a593Smuzhiyun 	ew32(PBA, pba);
3974*4882a593Smuzhiyun 
3975*4882a593Smuzhiyun 	if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
3976*4882a593Smuzhiyun 		/* To maintain wire speed transmits, the Tx FIFO should be
3977*4882a593Smuzhiyun 		 * large enough to accommodate two full transmit packets,
3978*4882a593Smuzhiyun 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
3979*4882a593Smuzhiyun 		 * the Rx FIFO should be large enough to accommodate at least
3980*4882a593Smuzhiyun 		 * one full receive packet and is similarly rounded up and
3981*4882a593Smuzhiyun 		 * expressed in KB.
3982*4882a593Smuzhiyun 		 */
3983*4882a593Smuzhiyun 		pba = er32(PBA);
3984*4882a593Smuzhiyun 		/* upper 16 bits has Tx packet buffer allocation size in KB */
3985*4882a593Smuzhiyun 		tx_space = pba >> 16;
3986*4882a593Smuzhiyun 		/* lower 16 bits has Rx packet buffer allocation size in KB */
3987*4882a593Smuzhiyun 		pba &= 0xffff;
3988*4882a593Smuzhiyun 		/* the Tx fifo also stores 16 bytes of information about the Tx
3989*4882a593Smuzhiyun 		 * but don't include ethernet FCS because hardware appends it
3990*4882a593Smuzhiyun 		 */
3991*4882a593Smuzhiyun 		min_tx_space = (adapter->max_frame_size +
3992*4882a593Smuzhiyun 				sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3993*4882a593Smuzhiyun 		min_tx_space = ALIGN(min_tx_space, 1024);
3994*4882a593Smuzhiyun 		min_tx_space >>= 10;
3995*4882a593Smuzhiyun 		/* software strips receive CRC, so leave room for it */
3996*4882a593Smuzhiyun 		min_rx_space = adapter->max_frame_size;
3997*4882a593Smuzhiyun 		min_rx_space = ALIGN(min_rx_space, 1024);
3998*4882a593Smuzhiyun 		min_rx_space >>= 10;
3999*4882a593Smuzhiyun 
4000*4882a593Smuzhiyun 		/* If current Tx allocation is less than the min Tx FIFO size,
4001*4882a593Smuzhiyun 		 * and the min Tx FIFO size is less than the current Rx FIFO
4002*4882a593Smuzhiyun 		 * allocation, take space away from current Rx allocation
4003*4882a593Smuzhiyun 		 */
4004*4882a593Smuzhiyun 		if ((tx_space < min_tx_space) &&
4005*4882a593Smuzhiyun 		    ((min_tx_space - tx_space) < pba)) {
4006*4882a593Smuzhiyun 			pba -= min_tx_space - tx_space;
4007*4882a593Smuzhiyun 
4008*4882a593Smuzhiyun 			/* if short on Rx space, Rx wins and must trump Tx
4009*4882a593Smuzhiyun 			 * adjustment
4010*4882a593Smuzhiyun 			 */
4011*4882a593Smuzhiyun 			if (pba < min_rx_space)
4012*4882a593Smuzhiyun 				pba = min_rx_space;
4013*4882a593Smuzhiyun 		}
4014*4882a593Smuzhiyun 
4015*4882a593Smuzhiyun 		ew32(PBA, pba);
4016*4882a593Smuzhiyun 	}
4017*4882a593Smuzhiyun 
4018*4882a593Smuzhiyun 	/* flow control settings
4019*4882a593Smuzhiyun 	 *
4020*4882a593Smuzhiyun 	 * The high water mark must be low enough to fit one full frame
4021*4882a593Smuzhiyun 	 * (or the size used for early receive) above it in the Rx FIFO.
4022*4882a593Smuzhiyun 	 * Set it to the lower of:
4023*4882a593Smuzhiyun 	 * - 90% of the Rx FIFO size, and
4024*4882a593Smuzhiyun 	 * - the full Rx FIFO size minus one full frame
4025*4882a593Smuzhiyun 	 */
4026*4882a593Smuzhiyun 	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
4027*4882a593Smuzhiyun 		fc->pause_time = 0xFFFF;
4028*4882a593Smuzhiyun 	else
4029*4882a593Smuzhiyun 		fc->pause_time = E1000_FC_PAUSE_TIME;
4030*4882a593Smuzhiyun 	fc->send_xon = true;
4031*4882a593Smuzhiyun 	fc->current_mode = fc->requested_mode;
4032*4882a593Smuzhiyun 
4033*4882a593Smuzhiyun 	switch (hw->mac.type) {
4034*4882a593Smuzhiyun 	case e1000_ich9lan:
4035*4882a593Smuzhiyun 	case e1000_ich10lan:
4036*4882a593Smuzhiyun 		if (adapter->netdev->mtu > ETH_DATA_LEN) {
4037*4882a593Smuzhiyun 			pba = 14;
4038*4882a593Smuzhiyun 			ew32(PBA, pba);
4039*4882a593Smuzhiyun 			fc->high_water = 0x2800;
4040*4882a593Smuzhiyun 			fc->low_water = fc->high_water - 8;
4041*4882a593Smuzhiyun 			break;
4042*4882a593Smuzhiyun 		}
4043*4882a593Smuzhiyun 		fallthrough;
4044*4882a593Smuzhiyun 	default:
4045*4882a593Smuzhiyun 		hwm = min(((pba << 10) * 9 / 10),
4046*4882a593Smuzhiyun 			  ((pba << 10) - adapter->max_frame_size));
4047*4882a593Smuzhiyun 
4048*4882a593Smuzhiyun 		fc->high_water = hwm & E1000_FCRTH_RTH;	/* 8-byte granularity */
4049*4882a593Smuzhiyun 		fc->low_water = fc->high_water - 8;
4050*4882a593Smuzhiyun 		break;
4051*4882a593Smuzhiyun 	case e1000_pchlan:
4052*4882a593Smuzhiyun 		/* Workaround PCH LOM adapter hangs with certain network
4053*4882a593Smuzhiyun 		 * loads.  If hangs persist, try disabling Tx flow control.
4054*4882a593Smuzhiyun 		 */
4055*4882a593Smuzhiyun 		if (adapter->netdev->mtu > ETH_DATA_LEN) {
4056*4882a593Smuzhiyun 			fc->high_water = 0x3500;
4057*4882a593Smuzhiyun 			fc->low_water = 0x1500;
4058*4882a593Smuzhiyun 		} else {
4059*4882a593Smuzhiyun 			fc->high_water = 0x5000;
4060*4882a593Smuzhiyun 			fc->low_water = 0x3000;
4061*4882a593Smuzhiyun 		}
4062*4882a593Smuzhiyun 		fc->refresh_time = 0x1000;
4063*4882a593Smuzhiyun 		break;
4064*4882a593Smuzhiyun 	case e1000_pch2lan:
4065*4882a593Smuzhiyun 	case e1000_pch_lpt:
4066*4882a593Smuzhiyun 	case e1000_pch_spt:
4067*4882a593Smuzhiyun 	case e1000_pch_cnp:
4068*4882a593Smuzhiyun 	case e1000_pch_tgp:
4069*4882a593Smuzhiyun 	case e1000_pch_adp:
4070*4882a593Smuzhiyun 	case e1000_pch_mtp:
4071*4882a593Smuzhiyun 		fc->refresh_time = 0xFFFF;
4072*4882a593Smuzhiyun 		fc->pause_time = 0xFFFF;
4073*4882a593Smuzhiyun 
4074*4882a593Smuzhiyun 		if (adapter->netdev->mtu <= ETH_DATA_LEN) {
4075*4882a593Smuzhiyun 			fc->high_water = 0x05C20;
4076*4882a593Smuzhiyun 			fc->low_water = 0x05048;
4077*4882a593Smuzhiyun 			break;
4078*4882a593Smuzhiyun 		}
4079*4882a593Smuzhiyun 
4080*4882a593Smuzhiyun 		pba = 14;
4081*4882a593Smuzhiyun 		ew32(PBA, pba);
4082*4882a593Smuzhiyun 		fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
4083*4882a593Smuzhiyun 		fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
4084*4882a593Smuzhiyun 		break;
4085*4882a593Smuzhiyun 	}
4086*4882a593Smuzhiyun 
4087*4882a593Smuzhiyun 	/* Alignment of Tx data is on an arbitrary byte boundary with the
4088*4882a593Smuzhiyun 	 * maximum size per Tx descriptor limited only to the transmit
4089*4882a593Smuzhiyun 	 * allocation of the packet buffer minus 96 bytes with an upper
4090*4882a593Smuzhiyun 	 * limit of 24KB due to receive synchronization limitations.
4091*4882a593Smuzhiyun 	 */
4092*4882a593Smuzhiyun 	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
4093*4882a593Smuzhiyun 				       24 << 10);
4094*4882a593Smuzhiyun 
4095*4882a593Smuzhiyun 	/* Disable Adaptive Interrupt Moderation if 2 full packets cannot
4096*4882a593Smuzhiyun 	 * fit in receive buffer.
4097*4882a593Smuzhiyun 	 */
4098*4882a593Smuzhiyun 	if (adapter->itr_setting & 0x3) {
4099*4882a593Smuzhiyun 		if ((adapter->max_frame_size * 2) > (pba << 10)) {
4100*4882a593Smuzhiyun 			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
4101*4882a593Smuzhiyun 				dev_info(&adapter->pdev->dev,
4102*4882a593Smuzhiyun 					 "Interrupt Throttle Rate off\n");
4103*4882a593Smuzhiyun 				adapter->flags2 |= FLAG2_DISABLE_AIM;
4104*4882a593Smuzhiyun 				e1000e_write_itr(adapter, 0);
4105*4882a593Smuzhiyun 			}
4106*4882a593Smuzhiyun 		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
4107*4882a593Smuzhiyun 			dev_info(&adapter->pdev->dev,
4108*4882a593Smuzhiyun 				 "Interrupt Throttle Rate on\n");
4109*4882a593Smuzhiyun 			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
4110*4882a593Smuzhiyun 			adapter->itr = 20000;
4111*4882a593Smuzhiyun 			e1000e_write_itr(adapter, adapter->itr);
4112*4882a593Smuzhiyun 		}
4113*4882a593Smuzhiyun 	}
4114*4882a593Smuzhiyun 
4115*4882a593Smuzhiyun 	if (hw->mac.type >= e1000_pch_spt)
4116*4882a593Smuzhiyun 		e1000_flush_desc_rings(adapter);
4117*4882a593Smuzhiyun 	/* Allow time for pending master requests to run */
4118*4882a593Smuzhiyun 	mac->ops.reset_hw(hw);
4119*4882a593Smuzhiyun 
4120*4882a593Smuzhiyun 	/* For parts with AMT enabled, let the firmware know
4121*4882a593Smuzhiyun 	 * that the network interface is in control
4122*4882a593Smuzhiyun 	 */
4123*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_AMT)
4124*4882a593Smuzhiyun 		e1000e_get_hw_control(adapter);
4125*4882a593Smuzhiyun 
4126*4882a593Smuzhiyun 	ew32(WUC, 0);
4127*4882a593Smuzhiyun 
4128*4882a593Smuzhiyun 	if (mac->ops.init_hw(hw))
4129*4882a593Smuzhiyun 		e_err("Hardware Error\n");
4130*4882a593Smuzhiyun 
4131*4882a593Smuzhiyun 	e1000_update_mng_vlan(adapter);
4132*4882a593Smuzhiyun 
4133*4882a593Smuzhiyun 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
4134*4882a593Smuzhiyun 	ew32(VET, ETH_P_8021Q);
4135*4882a593Smuzhiyun 
4136*4882a593Smuzhiyun 	e1000e_reset_adaptive(hw);
4137*4882a593Smuzhiyun 
4138*4882a593Smuzhiyun 	/* restore systim and hwtstamp settings */
4139*4882a593Smuzhiyun 	e1000e_systim_reset(adapter);
4140*4882a593Smuzhiyun 
4141*4882a593Smuzhiyun 	/* Set EEE advertisement as appropriate */
4142*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_HAS_EEE) {
4143*4882a593Smuzhiyun 		s32 ret_val;
4144*4882a593Smuzhiyun 		u16 adv_addr;
4145*4882a593Smuzhiyun 
4146*4882a593Smuzhiyun 		switch (hw->phy.type) {
4147*4882a593Smuzhiyun 		case e1000_phy_82579:
4148*4882a593Smuzhiyun 			adv_addr = I82579_EEE_ADVERTISEMENT;
4149*4882a593Smuzhiyun 			break;
4150*4882a593Smuzhiyun 		case e1000_phy_i217:
4151*4882a593Smuzhiyun 			adv_addr = I217_EEE_ADVERTISEMENT;
4152*4882a593Smuzhiyun 			break;
4153*4882a593Smuzhiyun 		default:
4154*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
4155*4882a593Smuzhiyun 				"Invalid PHY type setting EEE advertisement\n");
4156*4882a593Smuzhiyun 			return;
4157*4882a593Smuzhiyun 		}
4158*4882a593Smuzhiyun 
4159*4882a593Smuzhiyun 		ret_val = hw->phy.ops.acquire(hw);
4160*4882a593Smuzhiyun 		if (ret_val) {
4161*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
4162*4882a593Smuzhiyun 				"EEE advertisement - unable to acquire PHY\n");
4163*4882a593Smuzhiyun 			return;
4164*4882a593Smuzhiyun 		}
4165*4882a593Smuzhiyun 
4166*4882a593Smuzhiyun 		e1000_write_emi_reg_locked(hw, adv_addr,
4167*4882a593Smuzhiyun 					   hw->dev_spec.ich8lan.eee_disable ?
4168*4882a593Smuzhiyun 					   0 : adapter->eee_advert);
4169*4882a593Smuzhiyun 
4170*4882a593Smuzhiyun 		hw->phy.ops.release(hw);
4171*4882a593Smuzhiyun 	}
4172*4882a593Smuzhiyun 
4173*4882a593Smuzhiyun 	if (!netif_running(adapter->netdev) &&
4174*4882a593Smuzhiyun 	    !test_bit(__E1000_TESTING, &adapter->state))
4175*4882a593Smuzhiyun 		e1000_power_down_phy(adapter);
4176*4882a593Smuzhiyun 
4177*4882a593Smuzhiyun 	e1000_get_phy_info(hw);
4178*4882a593Smuzhiyun 
4179*4882a593Smuzhiyun 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
4180*4882a593Smuzhiyun 	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
4181*4882a593Smuzhiyun 		u16 phy_data = 0;
4182*4882a593Smuzhiyun 		/* speed up time to link by disabling smart power down, ignore
4183*4882a593Smuzhiyun 		 * the return value of this function because there is nothing
4184*4882a593Smuzhiyun 		 * different we would do if it failed
4185*4882a593Smuzhiyun 		 */
4186*4882a593Smuzhiyun 		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
4187*4882a593Smuzhiyun 		phy_data &= ~IGP02E1000_PM_SPD;
4188*4882a593Smuzhiyun 		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
4189*4882a593Smuzhiyun 	}
4190*4882a593Smuzhiyun 	if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) {
4191*4882a593Smuzhiyun 		u32 reg;
4192*4882a593Smuzhiyun 
4193*4882a593Smuzhiyun 		/* Fextnvm7 @ 0xe4[2] = 1 */
4194*4882a593Smuzhiyun 		reg = er32(FEXTNVM7);
4195*4882a593Smuzhiyun 		reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
4196*4882a593Smuzhiyun 		ew32(FEXTNVM7, reg);
4197*4882a593Smuzhiyun 		/* Fextnvm9 @ 0x5bb4[13:12] = 11 */
4198*4882a593Smuzhiyun 		reg = er32(FEXTNVM9);
4199*4882a593Smuzhiyun 		reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
4200*4882a593Smuzhiyun 		       E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
4201*4882a593Smuzhiyun 		ew32(FEXTNVM9, reg);
4202*4882a593Smuzhiyun 	}
4203*4882a593Smuzhiyun 
4204*4882a593Smuzhiyun }
4205*4882a593Smuzhiyun 
4206*4882a593Smuzhiyun /**
4207*4882a593Smuzhiyun  * e1000e_trigger_lsc - trigger an LSC interrupt
4208*4882a593Smuzhiyun  * @adapter:
4209*4882a593Smuzhiyun  *
4210*4882a593Smuzhiyun  * Fire a link status change interrupt to start the watchdog.
4211*4882a593Smuzhiyun  **/
e1000e_trigger_lsc(struct e1000_adapter * adapter)4212*4882a593Smuzhiyun static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
4213*4882a593Smuzhiyun {
4214*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4215*4882a593Smuzhiyun 
4216*4882a593Smuzhiyun 	if (adapter->msix_entries)
4217*4882a593Smuzhiyun 		ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
4218*4882a593Smuzhiyun 	else
4219*4882a593Smuzhiyun 		ew32(ICS, E1000_ICS_LSC);
4220*4882a593Smuzhiyun }
4221*4882a593Smuzhiyun 
e1000e_up(struct e1000_adapter * adapter)4222*4882a593Smuzhiyun void e1000e_up(struct e1000_adapter *adapter)
4223*4882a593Smuzhiyun {
4224*4882a593Smuzhiyun 	/* hardware has been reset, we need to reload some things */
4225*4882a593Smuzhiyun 	e1000_configure(adapter);
4226*4882a593Smuzhiyun 
4227*4882a593Smuzhiyun 	clear_bit(__E1000_DOWN, &adapter->state);
4228*4882a593Smuzhiyun 
4229*4882a593Smuzhiyun 	if (adapter->msix_entries)
4230*4882a593Smuzhiyun 		e1000_configure_msix(adapter);
4231*4882a593Smuzhiyun 	e1000_irq_enable(adapter);
4232*4882a593Smuzhiyun 
4233*4882a593Smuzhiyun 	/* Tx queue started by watchdog timer when link is up */
4234*4882a593Smuzhiyun 
4235*4882a593Smuzhiyun 	e1000e_trigger_lsc(adapter);
4236*4882a593Smuzhiyun }
4237*4882a593Smuzhiyun 
e1000e_flush_descriptors(struct e1000_adapter * adapter)4238*4882a593Smuzhiyun static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
4239*4882a593Smuzhiyun {
4240*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4241*4882a593Smuzhiyun 
4242*4882a593Smuzhiyun 	if (!(adapter->flags2 & FLAG2_DMA_BURST))
4243*4882a593Smuzhiyun 		return;
4244*4882a593Smuzhiyun 
4245*4882a593Smuzhiyun 	/* flush pending descriptor writebacks to memory */
4246*4882a593Smuzhiyun 	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4247*4882a593Smuzhiyun 	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4248*4882a593Smuzhiyun 
4249*4882a593Smuzhiyun 	/* execute the writes immediately */
4250*4882a593Smuzhiyun 	e1e_flush();
4251*4882a593Smuzhiyun 
4252*4882a593Smuzhiyun 	/* due to rare timing issues, write to TIDV/RDTR again to ensure the
4253*4882a593Smuzhiyun 	 * write is successful
4254*4882a593Smuzhiyun 	 */
4255*4882a593Smuzhiyun 	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4256*4882a593Smuzhiyun 	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4257*4882a593Smuzhiyun 
4258*4882a593Smuzhiyun 	/* execute the writes immediately */
4259*4882a593Smuzhiyun 	e1e_flush();
4260*4882a593Smuzhiyun }
4261*4882a593Smuzhiyun 
4262*4882a593Smuzhiyun static void e1000e_update_stats(struct e1000_adapter *adapter);
4263*4882a593Smuzhiyun 
4264*4882a593Smuzhiyun /**
4265*4882a593Smuzhiyun  * e1000e_down - quiesce the device and optionally reset the hardware
4266*4882a593Smuzhiyun  * @adapter: board private structure
4267*4882a593Smuzhiyun  * @reset: boolean flag to reset the hardware or not
4268*4882a593Smuzhiyun  */
e1000e_down(struct e1000_adapter * adapter,bool reset)4269*4882a593Smuzhiyun void e1000e_down(struct e1000_adapter *adapter, bool reset)
4270*4882a593Smuzhiyun {
4271*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4272*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4273*4882a593Smuzhiyun 	u32 tctl, rctl;
4274*4882a593Smuzhiyun 
4275*4882a593Smuzhiyun 	/* signal that we're down so the interrupt handler does not
4276*4882a593Smuzhiyun 	 * reschedule our watchdog timer
4277*4882a593Smuzhiyun 	 */
4278*4882a593Smuzhiyun 	set_bit(__E1000_DOWN, &adapter->state);
4279*4882a593Smuzhiyun 
4280*4882a593Smuzhiyun 	netif_carrier_off(netdev);
4281*4882a593Smuzhiyun 
4282*4882a593Smuzhiyun 	/* disable receives in the hardware */
4283*4882a593Smuzhiyun 	rctl = er32(RCTL);
4284*4882a593Smuzhiyun 	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
4285*4882a593Smuzhiyun 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
4286*4882a593Smuzhiyun 	/* flush and sleep below */
4287*4882a593Smuzhiyun 
4288*4882a593Smuzhiyun 	netif_stop_queue(netdev);
4289*4882a593Smuzhiyun 
4290*4882a593Smuzhiyun 	/* disable transmits in the hardware */
4291*4882a593Smuzhiyun 	tctl = er32(TCTL);
4292*4882a593Smuzhiyun 	tctl &= ~E1000_TCTL_EN;
4293*4882a593Smuzhiyun 	ew32(TCTL, tctl);
4294*4882a593Smuzhiyun 
4295*4882a593Smuzhiyun 	/* flush both disables and wait for them to finish */
4296*4882a593Smuzhiyun 	e1e_flush();
4297*4882a593Smuzhiyun 	usleep_range(10000, 11000);
4298*4882a593Smuzhiyun 
4299*4882a593Smuzhiyun 	e1000_irq_disable(adapter);
4300*4882a593Smuzhiyun 
4301*4882a593Smuzhiyun 	napi_synchronize(&adapter->napi);
4302*4882a593Smuzhiyun 
4303*4882a593Smuzhiyun 	del_timer_sync(&adapter->watchdog_timer);
4304*4882a593Smuzhiyun 	del_timer_sync(&adapter->phy_info_timer);
4305*4882a593Smuzhiyun 
4306*4882a593Smuzhiyun 	spin_lock(&adapter->stats64_lock);
4307*4882a593Smuzhiyun 	e1000e_update_stats(adapter);
4308*4882a593Smuzhiyun 	spin_unlock(&adapter->stats64_lock);
4309*4882a593Smuzhiyun 
4310*4882a593Smuzhiyun 	e1000e_flush_descriptors(adapter);
4311*4882a593Smuzhiyun 
4312*4882a593Smuzhiyun 	adapter->link_speed = 0;
4313*4882a593Smuzhiyun 	adapter->link_duplex = 0;
4314*4882a593Smuzhiyun 
4315*4882a593Smuzhiyun 	/* Disable Si errata workaround on PCHx for jumbo frame flow */
4316*4882a593Smuzhiyun 	if ((hw->mac.type >= e1000_pch2lan) &&
4317*4882a593Smuzhiyun 	    (adapter->netdev->mtu > ETH_DATA_LEN) &&
4318*4882a593Smuzhiyun 	    e1000_lv_jumbo_workaround_ich8lan(hw, false))
4319*4882a593Smuzhiyun 		e_dbg("failed to disable jumbo frame workaround mode\n");
4320*4882a593Smuzhiyun 
4321*4882a593Smuzhiyun 	if (!pci_channel_offline(adapter->pdev)) {
4322*4882a593Smuzhiyun 		if (reset)
4323*4882a593Smuzhiyun 			e1000e_reset(adapter);
4324*4882a593Smuzhiyun 		else if (hw->mac.type >= e1000_pch_spt)
4325*4882a593Smuzhiyun 			e1000_flush_desc_rings(adapter);
4326*4882a593Smuzhiyun 	}
4327*4882a593Smuzhiyun 	e1000_clean_tx_ring(adapter->tx_ring);
4328*4882a593Smuzhiyun 	e1000_clean_rx_ring(adapter->rx_ring);
4329*4882a593Smuzhiyun }
4330*4882a593Smuzhiyun 
e1000e_reinit_locked(struct e1000_adapter * adapter)4331*4882a593Smuzhiyun void e1000e_reinit_locked(struct e1000_adapter *adapter)
4332*4882a593Smuzhiyun {
4333*4882a593Smuzhiyun 	might_sleep();
4334*4882a593Smuzhiyun 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4335*4882a593Smuzhiyun 		usleep_range(1000, 1100);
4336*4882a593Smuzhiyun 	e1000e_down(adapter, true);
4337*4882a593Smuzhiyun 	e1000e_up(adapter);
4338*4882a593Smuzhiyun 	clear_bit(__E1000_RESETTING, &adapter->state);
4339*4882a593Smuzhiyun }
4340*4882a593Smuzhiyun 
4341*4882a593Smuzhiyun /**
4342*4882a593Smuzhiyun  * e1000e_sanitize_systim - sanitize raw cycle counter reads
4343*4882a593Smuzhiyun  * @hw: pointer to the HW structure
4344*4882a593Smuzhiyun  * @systim: PHC time value read, sanitized and returned
4345*4882a593Smuzhiyun  * @sts: structure to hold system time before and after reading SYSTIML,
4346*4882a593Smuzhiyun  * may be NULL
4347*4882a593Smuzhiyun  *
4348*4882a593Smuzhiyun  * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
4349*4882a593Smuzhiyun  * check to see that the time is incrementing at a reasonable
4350*4882a593Smuzhiyun  * rate and is a multiple of incvalue.
4351*4882a593Smuzhiyun  **/
e1000e_sanitize_systim(struct e1000_hw * hw,u64 systim,struct ptp_system_timestamp * sts)4352*4882a593Smuzhiyun static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
4353*4882a593Smuzhiyun 				  struct ptp_system_timestamp *sts)
4354*4882a593Smuzhiyun {
4355*4882a593Smuzhiyun 	u64 time_delta, rem, temp;
4356*4882a593Smuzhiyun 	u64 systim_next;
4357*4882a593Smuzhiyun 	u32 incvalue;
4358*4882a593Smuzhiyun 	int i;
4359*4882a593Smuzhiyun 
4360*4882a593Smuzhiyun 	incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4361*4882a593Smuzhiyun 	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4362*4882a593Smuzhiyun 		/* latch SYSTIMH on read of SYSTIML */
4363*4882a593Smuzhiyun 		ptp_read_system_prets(sts);
4364*4882a593Smuzhiyun 		systim_next = (u64)er32(SYSTIML);
4365*4882a593Smuzhiyun 		ptp_read_system_postts(sts);
4366*4882a593Smuzhiyun 		systim_next |= (u64)er32(SYSTIMH) << 32;
4367*4882a593Smuzhiyun 
4368*4882a593Smuzhiyun 		time_delta = systim_next - systim;
4369*4882a593Smuzhiyun 		temp = time_delta;
4370*4882a593Smuzhiyun 		/* VMWare users have seen incvalue of zero, don't div / 0 */
4371*4882a593Smuzhiyun 		rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
4372*4882a593Smuzhiyun 
4373*4882a593Smuzhiyun 		systim = systim_next;
4374*4882a593Smuzhiyun 
4375*4882a593Smuzhiyun 		if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
4376*4882a593Smuzhiyun 			break;
4377*4882a593Smuzhiyun 	}
4378*4882a593Smuzhiyun 
4379*4882a593Smuzhiyun 	return systim;
4380*4882a593Smuzhiyun }
4381*4882a593Smuzhiyun 
4382*4882a593Smuzhiyun /**
4383*4882a593Smuzhiyun  * e1000e_read_systim - read SYSTIM register
4384*4882a593Smuzhiyun  * @adapter: board private structure
4385*4882a593Smuzhiyun  * @sts: structure which will contain system time before and after reading
4386*4882a593Smuzhiyun  * SYSTIML, may be NULL
4387*4882a593Smuzhiyun  **/
e1000e_read_systim(struct e1000_adapter * adapter,struct ptp_system_timestamp * sts)4388*4882a593Smuzhiyun u64 e1000e_read_systim(struct e1000_adapter *adapter,
4389*4882a593Smuzhiyun 		       struct ptp_system_timestamp *sts)
4390*4882a593Smuzhiyun {
4391*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4392*4882a593Smuzhiyun 	u32 systimel, systimel_2, systimeh;
4393*4882a593Smuzhiyun 	u64 systim;
4394*4882a593Smuzhiyun 	/* SYSTIMH latching upon SYSTIML read does not work well.
4395*4882a593Smuzhiyun 	 * This means that if SYSTIML overflows after we read it but before
4396*4882a593Smuzhiyun 	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
4397*4882a593Smuzhiyun 	 * will experience a huge non linear increment in the systime value
4398*4882a593Smuzhiyun 	 * to fix that we test for overflow and if true, we re-read systime.
4399*4882a593Smuzhiyun 	 */
4400*4882a593Smuzhiyun 	ptp_read_system_prets(sts);
4401*4882a593Smuzhiyun 	systimel = er32(SYSTIML);
4402*4882a593Smuzhiyun 	ptp_read_system_postts(sts);
4403*4882a593Smuzhiyun 	systimeh = er32(SYSTIMH);
4404*4882a593Smuzhiyun 	/* Is systimel is so large that overflow is possible? */
4405*4882a593Smuzhiyun 	if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
4406*4882a593Smuzhiyun 		ptp_read_system_prets(sts);
4407*4882a593Smuzhiyun 		systimel_2 = er32(SYSTIML);
4408*4882a593Smuzhiyun 		ptp_read_system_postts(sts);
4409*4882a593Smuzhiyun 		if (systimel > systimel_2) {
4410*4882a593Smuzhiyun 			/* There was an overflow, read again SYSTIMH, and use
4411*4882a593Smuzhiyun 			 * systimel_2
4412*4882a593Smuzhiyun 			 */
4413*4882a593Smuzhiyun 			systimeh = er32(SYSTIMH);
4414*4882a593Smuzhiyun 			systimel = systimel_2;
4415*4882a593Smuzhiyun 		}
4416*4882a593Smuzhiyun 	}
4417*4882a593Smuzhiyun 	systim = (u64)systimel;
4418*4882a593Smuzhiyun 	systim |= (u64)systimeh << 32;
4419*4882a593Smuzhiyun 
4420*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
4421*4882a593Smuzhiyun 		systim = e1000e_sanitize_systim(hw, systim, sts);
4422*4882a593Smuzhiyun 
4423*4882a593Smuzhiyun 	return systim;
4424*4882a593Smuzhiyun }
4425*4882a593Smuzhiyun 
4426*4882a593Smuzhiyun /**
4427*4882a593Smuzhiyun  * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4428*4882a593Smuzhiyun  * @cc: cyclecounter structure
4429*4882a593Smuzhiyun  **/
e1000e_cyclecounter_read(const struct cyclecounter * cc)4430*4882a593Smuzhiyun static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
4431*4882a593Smuzhiyun {
4432*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4433*4882a593Smuzhiyun 						     cc);
4434*4882a593Smuzhiyun 
4435*4882a593Smuzhiyun 	return e1000e_read_systim(adapter, NULL);
4436*4882a593Smuzhiyun }
4437*4882a593Smuzhiyun 
4438*4882a593Smuzhiyun /**
4439*4882a593Smuzhiyun  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4440*4882a593Smuzhiyun  * @adapter: board private structure to initialize
4441*4882a593Smuzhiyun  *
4442*4882a593Smuzhiyun  * e1000_sw_init initializes the Adapter private data structure.
4443*4882a593Smuzhiyun  * Fields are initialized based on PCI device information and
4444*4882a593Smuzhiyun  * OS network device settings (MTU size).
4445*4882a593Smuzhiyun  **/
e1000_sw_init(struct e1000_adapter * adapter)4446*4882a593Smuzhiyun static int e1000_sw_init(struct e1000_adapter *adapter)
4447*4882a593Smuzhiyun {
4448*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4449*4882a593Smuzhiyun 
4450*4882a593Smuzhiyun 	adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
4451*4882a593Smuzhiyun 	adapter->rx_ps_bsize0 = 128;
4452*4882a593Smuzhiyun 	adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
4453*4882a593Smuzhiyun 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4454*4882a593Smuzhiyun 	adapter->tx_ring_count = E1000_DEFAULT_TXD;
4455*4882a593Smuzhiyun 	adapter->rx_ring_count = E1000_DEFAULT_RXD;
4456*4882a593Smuzhiyun 
4457*4882a593Smuzhiyun 	spin_lock_init(&adapter->stats64_lock);
4458*4882a593Smuzhiyun 
4459*4882a593Smuzhiyun 	e1000e_set_interrupt_capability(adapter);
4460*4882a593Smuzhiyun 
4461*4882a593Smuzhiyun 	if (e1000_alloc_queues(adapter))
4462*4882a593Smuzhiyun 		return -ENOMEM;
4463*4882a593Smuzhiyun 
4464*4882a593Smuzhiyun 	/* Setup hardware time stamping cyclecounter */
4465*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4466*4882a593Smuzhiyun 		adapter->cc.read = e1000e_cyclecounter_read;
4467*4882a593Smuzhiyun 		adapter->cc.mask = CYCLECOUNTER_MASK(64);
4468*4882a593Smuzhiyun 		adapter->cc.mult = 1;
4469*4882a593Smuzhiyun 		/* cc.shift set in e1000e_get_base_tininca() */
4470*4882a593Smuzhiyun 
4471*4882a593Smuzhiyun 		spin_lock_init(&adapter->systim_lock);
4472*4882a593Smuzhiyun 		INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4473*4882a593Smuzhiyun 	}
4474*4882a593Smuzhiyun 
4475*4882a593Smuzhiyun 	/* Explicitly disable IRQ since the NIC can be in any state. */
4476*4882a593Smuzhiyun 	e1000_irq_disable(adapter);
4477*4882a593Smuzhiyun 
4478*4882a593Smuzhiyun 	set_bit(__E1000_DOWN, &adapter->state);
4479*4882a593Smuzhiyun 	return 0;
4480*4882a593Smuzhiyun }
4481*4882a593Smuzhiyun 
4482*4882a593Smuzhiyun /**
4483*4882a593Smuzhiyun  * e1000_intr_msi_test - Interrupt Handler
4484*4882a593Smuzhiyun  * @irq: interrupt number
4485*4882a593Smuzhiyun  * @data: pointer to a network interface device structure
4486*4882a593Smuzhiyun  **/
e1000_intr_msi_test(int __always_unused irq,void * data)4487*4882a593Smuzhiyun static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
4488*4882a593Smuzhiyun {
4489*4882a593Smuzhiyun 	struct net_device *netdev = data;
4490*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4491*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4492*4882a593Smuzhiyun 	u32 icr = er32(ICR);
4493*4882a593Smuzhiyun 
4494*4882a593Smuzhiyun 	e_dbg("icr is %08X\n", icr);
4495*4882a593Smuzhiyun 	if (icr & E1000_ICR_RXSEQ) {
4496*4882a593Smuzhiyun 		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
4497*4882a593Smuzhiyun 		/* Force memory writes to complete before acknowledging the
4498*4882a593Smuzhiyun 		 * interrupt is handled.
4499*4882a593Smuzhiyun 		 */
4500*4882a593Smuzhiyun 		wmb();
4501*4882a593Smuzhiyun 	}
4502*4882a593Smuzhiyun 
4503*4882a593Smuzhiyun 	return IRQ_HANDLED;
4504*4882a593Smuzhiyun }
4505*4882a593Smuzhiyun 
4506*4882a593Smuzhiyun /**
4507*4882a593Smuzhiyun  * e1000_test_msi_interrupt - Returns 0 for successful test
4508*4882a593Smuzhiyun  * @adapter: board private struct
4509*4882a593Smuzhiyun  *
4510*4882a593Smuzhiyun  * code flow taken from tg3.c
4511*4882a593Smuzhiyun  **/
e1000_test_msi_interrupt(struct e1000_adapter * adapter)4512*4882a593Smuzhiyun static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4513*4882a593Smuzhiyun {
4514*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4515*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4516*4882a593Smuzhiyun 	int err;
4517*4882a593Smuzhiyun 
4518*4882a593Smuzhiyun 	/* poll_enable hasn't been called yet, so don't need disable */
4519*4882a593Smuzhiyun 	/* clear any pending events */
4520*4882a593Smuzhiyun 	er32(ICR);
4521*4882a593Smuzhiyun 
4522*4882a593Smuzhiyun 	/* free the real vector and request a test handler */
4523*4882a593Smuzhiyun 	e1000_free_irq(adapter);
4524*4882a593Smuzhiyun 	e1000e_reset_interrupt_capability(adapter);
4525*4882a593Smuzhiyun 
4526*4882a593Smuzhiyun 	/* Assume that the test fails, if it succeeds then the test
4527*4882a593Smuzhiyun 	 * MSI irq handler will unset this flag
4528*4882a593Smuzhiyun 	 */
4529*4882a593Smuzhiyun 	adapter->flags |= FLAG_MSI_TEST_FAILED;
4530*4882a593Smuzhiyun 
4531*4882a593Smuzhiyun 	err = pci_enable_msi(adapter->pdev);
4532*4882a593Smuzhiyun 	if (err)
4533*4882a593Smuzhiyun 		goto msi_test_failed;
4534*4882a593Smuzhiyun 
4535*4882a593Smuzhiyun 	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
4536*4882a593Smuzhiyun 			  netdev->name, netdev);
4537*4882a593Smuzhiyun 	if (err) {
4538*4882a593Smuzhiyun 		pci_disable_msi(adapter->pdev);
4539*4882a593Smuzhiyun 		goto msi_test_failed;
4540*4882a593Smuzhiyun 	}
4541*4882a593Smuzhiyun 
4542*4882a593Smuzhiyun 	/* Force memory writes to complete before enabling and firing an
4543*4882a593Smuzhiyun 	 * interrupt.
4544*4882a593Smuzhiyun 	 */
4545*4882a593Smuzhiyun 	wmb();
4546*4882a593Smuzhiyun 
4547*4882a593Smuzhiyun 	e1000_irq_enable(adapter);
4548*4882a593Smuzhiyun 
4549*4882a593Smuzhiyun 	/* fire an unusual interrupt on the test handler */
4550*4882a593Smuzhiyun 	ew32(ICS, E1000_ICS_RXSEQ);
4551*4882a593Smuzhiyun 	e1e_flush();
4552*4882a593Smuzhiyun 	msleep(100);
4553*4882a593Smuzhiyun 
4554*4882a593Smuzhiyun 	e1000_irq_disable(adapter);
4555*4882a593Smuzhiyun 
4556*4882a593Smuzhiyun 	rmb();			/* read flags after interrupt has been fired */
4557*4882a593Smuzhiyun 
4558*4882a593Smuzhiyun 	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4559*4882a593Smuzhiyun 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
4560*4882a593Smuzhiyun 		e_info("MSI interrupt test failed, using legacy interrupt.\n");
4561*4882a593Smuzhiyun 	} else {
4562*4882a593Smuzhiyun 		e_dbg("MSI interrupt test succeeded!\n");
4563*4882a593Smuzhiyun 	}
4564*4882a593Smuzhiyun 
4565*4882a593Smuzhiyun 	free_irq(adapter->pdev->irq, netdev);
4566*4882a593Smuzhiyun 	pci_disable_msi(adapter->pdev);
4567*4882a593Smuzhiyun 
4568*4882a593Smuzhiyun msi_test_failed:
4569*4882a593Smuzhiyun 	e1000e_set_interrupt_capability(adapter);
4570*4882a593Smuzhiyun 	return e1000_request_irq(adapter);
4571*4882a593Smuzhiyun }
4572*4882a593Smuzhiyun 
4573*4882a593Smuzhiyun /**
4574*4882a593Smuzhiyun  * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4575*4882a593Smuzhiyun  * @adapter: board private struct
4576*4882a593Smuzhiyun  *
4577*4882a593Smuzhiyun  * code flow taken from tg3.c, called with e1000 interrupts disabled.
4578*4882a593Smuzhiyun  **/
e1000_test_msi(struct e1000_adapter * adapter)4579*4882a593Smuzhiyun static int e1000_test_msi(struct e1000_adapter *adapter)
4580*4882a593Smuzhiyun {
4581*4882a593Smuzhiyun 	int err;
4582*4882a593Smuzhiyun 	u16 pci_cmd;
4583*4882a593Smuzhiyun 
4584*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_MSI_ENABLED))
4585*4882a593Smuzhiyun 		return 0;
4586*4882a593Smuzhiyun 
4587*4882a593Smuzhiyun 	/* disable SERR in case the MSI write causes a master abort */
4588*4882a593Smuzhiyun 	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4589*4882a593Smuzhiyun 	if (pci_cmd & PCI_COMMAND_SERR)
4590*4882a593Smuzhiyun 		pci_write_config_word(adapter->pdev, PCI_COMMAND,
4591*4882a593Smuzhiyun 				      pci_cmd & ~PCI_COMMAND_SERR);
4592*4882a593Smuzhiyun 
4593*4882a593Smuzhiyun 	err = e1000_test_msi_interrupt(adapter);
4594*4882a593Smuzhiyun 
4595*4882a593Smuzhiyun 	/* re-enable SERR */
4596*4882a593Smuzhiyun 	if (pci_cmd & PCI_COMMAND_SERR) {
4597*4882a593Smuzhiyun 		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4598*4882a593Smuzhiyun 		pci_cmd |= PCI_COMMAND_SERR;
4599*4882a593Smuzhiyun 		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4600*4882a593Smuzhiyun 	}
4601*4882a593Smuzhiyun 
4602*4882a593Smuzhiyun 	return err;
4603*4882a593Smuzhiyun }
4604*4882a593Smuzhiyun 
4605*4882a593Smuzhiyun /**
4606*4882a593Smuzhiyun  * e1000e_open - Called when a network interface is made active
4607*4882a593Smuzhiyun  * @netdev: network interface device structure
4608*4882a593Smuzhiyun  *
4609*4882a593Smuzhiyun  * Returns 0 on success, negative value on failure
4610*4882a593Smuzhiyun  *
4611*4882a593Smuzhiyun  * The open entry point is called when a network interface is made
4612*4882a593Smuzhiyun  * active by the system (IFF_UP).  At this point all resources needed
4613*4882a593Smuzhiyun  * for transmit and receive operations are allocated, the interrupt
4614*4882a593Smuzhiyun  * handler is registered with the OS, the watchdog timer is started,
4615*4882a593Smuzhiyun  * and the stack is notified that the interface is ready.
4616*4882a593Smuzhiyun  **/
e1000e_open(struct net_device * netdev)4617*4882a593Smuzhiyun int e1000e_open(struct net_device *netdev)
4618*4882a593Smuzhiyun {
4619*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4620*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4621*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4622*4882a593Smuzhiyun 	int err;
4623*4882a593Smuzhiyun 
4624*4882a593Smuzhiyun 	/* disallow open during test */
4625*4882a593Smuzhiyun 	if (test_bit(__E1000_TESTING, &adapter->state))
4626*4882a593Smuzhiyun 		return -EBUSY;
4627*4882a593Smuzhiyun 
4628*4882a593Smuzhiyun 	pm_runtime_get_sync(&pdev->dev);
4629*4882a593Smuzhiyun 
4630*4882a593Smuzhiyun 	netif_carrier_off(netdev);
4631*4882a593Smuzhiyun 	netif_stop_queue(netdev);
4632*4882a593Smuzhiyun 
4633*4882a593Smuzhiyun 	/* allocate transmit descriptors */
4634*4882a593Smuzhiyun 	err = e1000e_setup_tx_resources(adapter->tx_ring);
4635*4882a593Smuzhiyun 	if (err)
4636*4882a593Smuzhiyun 		goto err_setup_tx;
4637*4882a593Smuzhiyun 
4638*4882a593Smuzhiyun 	/* allocate receive descriptors */
4639*4882a593Smuzhiyun 	err = e1000e_setup_rx_resources(adapter->rx_ring);
4640*4882a593Smuzhiyun 	if (err)
4641*4882a593Smuzhiyun 		goto err_setup_rx;
4642*4882a593Smuzhiyun 
4643*4882a593Smuzhiyun 	/* If AMT is enabled, let the firmware know that the network
4644*4882a593Smuzhiyun 	 * interface is now open and reset the part to a known state.
4645*4882a593Smuzhiyun 	 */
4646*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_AMT) {
4647*4882a593Smuzhiyun 		e1000e_get_hw_control(adapter);
4648*4882a593Smuzhiyun 		e1000e_reset(adapter);
4649*4882a593Smuzhiyun 	}
4650*4882a593Smuzhiyun 
4651*4882a593Smuzhiyun 	e1000e_power_up_phy(adapter);
4652*4882a593Smuzhiyun 
4653*4882a593Smuzhiyun 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4654*4882a593Smuzhiyun 	if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4655*4882a593Smuzhiyun 		e1000_update_mng_vlan(adapter);
4656*4882a593Smuzhiyun 
4657*4882a593Smuzhiyun 	/* DMA latency requirement to workaround jumbo issue */
4658*4882a593Smuzhiyun 	cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);
4659*4882a593Smuzhiyun 
4660*4882a593Smuzhiyun 	/* before we allocate an interrupt, we must be ready to handle it.
4661*4882a593Smuzhiyun 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4662*4882a593Smuzhiyun 	 * as soon as we call pci_request_irq, so we have to setup our
4663*4882a593Smuzhiyun 	 * clean_rx handler before we do so.
4664*4882a593Smuzhiyun 	 */
4665*4882a593Smuzhiyun 	e1000_configure(adapter);
4666*4882a593Smuzhiyun 
4667*4882a593Smuzhiyun 	err = e1000_request_irq(adapter);
4668*4882a593Smuzhiyun 	if (err)
4669*4882a593Smuzhiyun 		goto err_req_irq;
4670*4882a593Smuzhiyun 
4671*4882a593Smuzhiyun 	/* Work around PCIe errata with MSI interrupts causing some chipsets to
4672*4882a593Smuzhiyun 	 * ignore e1000e MSI messages, which means we need to test our MSI
4673*4882a593Smuzhiyun 	 * interrupt now
4674*4882a593Smuzhiyun 	 */
4675*4882a593Smuzhiyun 	if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
4676*4882a593Smuzhiyun 		err = e1000_test_msi(adapter);
4677*4882a593Smuzhiyun 		if (err) {
4678*4882a593Smuzhiyun 			e_err("Interrupt allocation failed\n");
4679*4882a593Smuzhiyun 			goto err_req_irq;
4680*4882a593Smuzhiyun 		}
4681*4882a593Smuzhiyun 	}
4682*4882a593Smuzhiyun 
4683*4882a593Smuzhiyun 	/* From here on the code is the same as e1000e_up() */
4684*4882a593Smuzhiyun 	clear_bit(__E1000_DOWN, &adapter->state);
4685*4882a593Smuzhiyun 
4686*4882a593Smuzhiyun 	napi_enable(&adapter->napi);
4687*4882a593Smuzhiyun 
4688*4882a593Smuzhiyun 	e1000_irq_enable(adapter);
4689*4882a593Smuzhiyun 
4690*4882a593Smuzhiyun 	adapter->tx_hang_recheck = false;
4691*4882a593Smuzhiyun 
4692*4882a593Smuzhiyun 	hw->mac.get_link_status = true;
4693*4882a593Smuzhiyun 	pm_runtime_put(&pdev->dev);
4694*4882a593Smuzhiyun 
4695*4882a593Smuzhiyun 	e1000e_trigger_lsc(adapter);
4696*4882a593Smuzhiyun 
4697*4882a593Smuzhiyun 	return 0;
4698*4882a593Smuzhiyun 
4699*4882a593Smuzhiyun err_req_irq:
4700*4882a593Smuzhiyun 	cpu_latency_qos_remove_request(&adapter->pm_qos_req);
4701*4882a593Smuzhiyun 	e1000e_release_hw_control(adapter);
4702*4882a593Smuzhiyun 	e1000_power_down_phy(adapter);
4703*4882a593Smuzhiyun 	e1000e_free_rx_resources(adapter->rx_ring);
4704*4882a593Smuzhiyun err_setup_rx:
4705*4882a593Smuzhiyun 	e1000e_free_tx_resources(adapter->tx_ring);
4706*4882a593Smuzhiyun err_setup_tx:
4707*4882a593Smuzhiyun 	e1000e_reset(adapter);
4708*4882a593Smuzhiyun 	pm_runtime_put_sync(&pdev->dev);
4709*4882a593Smuzhiyun 
4710*4882a593Smuzhiyun 	return err;
4711*4882a593Smuzhiyun }
4712*4882a593Smuzhiyun 
4713*4882a593Smuzhiyun /**
4714*4882a593Smuzhiyun  * e1000e_close - Disables a network interface
4715*4882a593Smuzhiyun  * @netdev: network interface device structure
4716*4882a593Smuzhiyun  *
4717*4882a593Smuzhiyun  * Returns 0, this is not allowed to fail
4718*4882a593Smuzhiyun  *
4719*4882a593Smuzhiyun  * The close entry point is called when an interface is de-activated
4720*4882a593Smuzhiyun  * by the OS.  The hardware is still under the drivers control, but
4721*4882a593Smuzhiyun  * needs to be disabled.  A global MAC reset is issued to stop the
4722*4882a593Smuzhiyun  * hardware, and all transmit and receive resources are freed.
4723*4882a593Smuzhiyun  **/
e1000e_close(struct net_device * netdev)4724*4882a593Smuzhiyun int e1000e_close(struct net_device *netdev)
4725*4882a593Smuzhiyun {
4726*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4727*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4728*4882a593Smuzhiyun 	int count = E1000_CHECK_RESET_COUNT;
4729*4882a593Smuzhiyun 
4730*4882a593Smuzhiyun 	while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4731*4882a593Smuzhiyun 		usleep_range(10000, 11000);
4732*4882a593Smuzhiyun 
4733*4882a593Smuzhiyun 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
4734*4882a593Smuzhiyun 
4735*4882a593Smuzhiyun 	pm_runtime_get_sync(&pdev->dev);
4736*4882a593Smuzhiyun 
4737*4882a593Smuzhiyun 	if (netif_device_present(netdev)) {
4738*4882a593Smuzhiyun 		e1000e_down(adapter, true);
4739*4882a593Smuzhiyun 		e1000_free_irq(adapter);
4740*4882a593Smuzhiyun 
4741*4882a593Smuzhiyun 		/* Link status message must follow this format */
4742*4882a593Smuzhiyun 		netdev_info(netdev, "NIC Link is Down\n");
4743*4882a593Smuzhiyun 	}
4744*4882a593Smuzhiyun 
4745*4882a593Smuzhiyun 	napi_disable(&adapter->napi);
4746*4882a593Smuzhiyun 
4747*4882a593Smuzhiyun 	e1000e_free_tx_resources(adapter->tx_ring);
4748*4882a593Smuzhiyun 	e1000e_free_rx_resources(adapter->rx_ring);
4749*4882a593Smuzhiyun 
4750*4882a593Smuzhiyun 	/* kill manageability vlan ID if supported, but not if a vlan with
4751*4882a593Smuzhiyun 	 * the same ID is registered on the host OS (let 8021q kill it)
4752*4882a593Smuzhiyun 	 */
4753*4882a593Smuzhiyun 	if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4754*4882a593Smuzhiyun 		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4755*4882a593Smuzhiyun 				       adapter->mng_vlan_id);
4756*4882a593Smuzhiyun 
4757*4882a593Smuzhiyun 	/* If AMT is enabled, let the firmware know that the network
4758*4882a593Smuzhiyun 	 * interface is now closed
4759*4882a593Smuzhiyun 	 */
4760*4882a593Smuzhiyun 	if ((adapter->flags & FLAG_HAS_AMT) &&
4761*4882a593Smuzhiyun 	    !test_bit(__E1000_TESTING, &adapter->state))
4762*4882a593Smuzhiyun 		e1000e_release_hw_control(adapter);
4763*4882a593Smuzhiyun 
4764*4882a593Smuzhiyun 	cpu_latency_qos_remove_request(&adapter->pm_qos_req);
4765*4882a593Smuzhiyun 
4766*4882a593Smuzhiyun 	pm_runtime_put_sync(&pdev->dev);
4767*4882a593Smuzhiyun 
4768*4882a593Smuzhiyun 	return 0;
4769*4882a593Smuzhiyun }
4770*4882a593Smuzhiyun 
4771*4882a593Smuzhiyun /**
4772*4882a593Smuzhiyun  * e1000_set_mac - Change the Ethernet Address of the NIC
4773*4882a593Smuzhiyun  * @netdev: network interface device structure
4774*4882a593Smuzhiyun  * @p: pointer to an address structure
4775*4882a593Smuzhiyun  *
4776*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
4777*4882a593Smuzhiyun  **/
e1000_set_mac(struct net_device * netdev,void * p)4778*4882a593Smuzhiyun static int e1000_set_mac(struct net_device *netdev, void *p)
4779*4882a593Smuzhiyun {
4780*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
4781*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4782*4882a593Smuzhiyun 	struct sockaddr *addr = p;
4783*4882a593Smuzhiyun 
4784*4882a593Smuzhiyun 	if (!is_valid_ether_addr(addr->sa_data))
4785*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
4786*4882a593Smuzhiyun 
4787*4882a593Smuzhiyun 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4788*4882a593Smuzhiyun 	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4789*4882a593Smuzhiyun 
4790*4882a593Smuzhiyun 	hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4791*4882a593Smuzhiyun 
4792*4882a593Smuzhiyun 	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4793*4882a593Smuzhiyun 		/* activate the work around */
4794*4882a593Smuzhiyun 		e1000e_set_laa_state_82571(&adapter->hw, 1);
4795*4882a593Smuzhiyun 
4796*4882a593Smuzhiyun 		/* Hold a copy of the LAA in RAR[14] This is done so that
4797*4882a593Smuzhiyun 		 * between the time RAR[0] gets clobbered  and the time it
4798*4882a593Smuzhiyun 		 * gets fixed (in e1000_watchdog), the actual LAA is in one
4799*4882a593Smuzhiyun 		 * of the RARs and no incoming packets directed to this port
4800*4882a593Smuzhiyun 		 * are dropped. Eventually the LAA will be in RAR[0] and
4801*4882a593Smuzhiyun 		 * RAR[14]
4802*4882a593Smuzhiyun 		 */
4803*4882a593Smuzhiyun 		hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4804*4882a593Smuzhiyun 				    adapter->hw.mac.rar_entry_count - 1);
4805*4882a593Smuzhiyun 	}
4806*4882a593Smuzhiyun 
4807*4882a593Smuzhiyun 	return 0;
4808*4882a593Smuzhiyun }
4809*4882a593Smuzhiyun 
4810*4882a593Smuzhiyun /**
4811*4882a593Smuzhiyun  * e1000e_update_phy_task - work thread to update phy
4812*4882a593Smuzhiyun  * @work: pointer to our work struct
4813*4882a593Smuzhiyun  *
4814*4882a593Smuzhiyun  * this worker thread exists because we must acquire a
4815*4882a593Smuzhiyun  * semaphore to read the phy, which we could msleep while
4816*4882a593Smuzhiyun  * waiting for it, and we can't msleep in a timer.
4817*4882a593Smuzhiyun  **/
e1000e_update_phy_task(struct work_struct * work)4818*4882a593Smuzhiyun static void e1000e_update_phy_task(struct work_struct *work)
4819*4882a593Smuzhiyun {
4820*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work,
4821*4882a593Smuzhiyun 						     struct e1000_adapter,
4822*4882a593Smuzhiyun 						     update_phy_task);
4823*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4824*4882a593Smuzhiyun 
4825*4882a593Smuzhiyun 	if (test_bit(__E1000_DOWN, &adapter->state))
4826*4882a593Smuzhiyun 		return;
4827*4882a593Smuzhiyun 
4828*4882a593Smuzhiyun 	e1000_get_phy_info(hw);
4829*4882a593Smuzhiyun 
4830*4882a593Smuzhiyun 	/* Enable EEE on 82579 after link up */
4831*4882a593Smuzhiyun 	if (hw->phy.type >= e1000_phy_82579)
4832*4882a593Smuzhiyun 		e1000_set_eee_pchlan(hw);
4833*4882a593Smuzhiyun }
4834*4882a593Smuzhiyun 
4835*4882a593Smuzhiyun /**
4836*4882a593Smuzhiyun  * e1000_update_phy_info - timre call-back to update PHY info
4837*4882a593Smuzhiyun  * @t: pointer to timer_list containing private info adapter
4838*4882a593Smuzhiyun  *
4839*4882a593Smuzhiyun  * Need to wait a few seconds after link up to get diagnostic information from
4840*4882a593Smuzhiyun  * the phy
4841*4882a593Smuzhiyun  **/
e1000_update_phy_info(struct timer_list * t)4842*4882a593Smuzhiyun static void e1000_update_phy_info(struct timer_list *t)
4843*4882a593Smuzhiyun {
4844*4882a593Smuzhiyun 	struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4845*4882a593Smuzhiyun 
4846*4882a593Smuzhiyun 	if (test_bit(__E1000_DOWN, &adapter->state))
4847*4882a593Smuzhiyun 		return;
4848*4882a593Smuzhiyun 
4849*4882a593Smuzhiyun 	schedule_work(&adapter->update_phy_task);
4850*4882a593Smuzhiyun }
4851*4882a593Smuzhiyun 
4852*4882a593Smuzhiyun /**
4853*4882a593Smuzhiyun  * e1000e_update_phy_stats - Update the PHY statistics counters
4854*4882a593Smuzhiyun  * @adapter: board private structure
4855*4882a593Smuzhiyun  *
4856*4882a593Smuzhiyun  * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4857*4882a593Smuzhiyun  **/
e1000e_update_phy_stats(struct e1000_adapter * adapter)4858*4882a593Smuzhiyun static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4859*4882a593Smuzhiyun {
4860*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4861*4882a593Smuzhiyun 	s32 ret_val;
4862*4882a593Smuzhiyun 	u16 phy_data;
4863*4882a593Smuzhiyun 
4864*4882a593Smuzhiyun 	ret_val = hw->phy.ops.acquire(hw);
4865*4882a593Smuzhiyun 	if (ret_val)
4866*4882a593Smuzhiyun 		return;
4867*4882a593Smuzhiyun 
4868*4882a593Smuzhiyun 	/* A page set is expensive so check if already on desired page.
4869*4882a593Smuzhiyun 	 * If not, set to the page with the PHY status registers.
4870*4882a593Smuzhiyun 	 */
4871*4882a593Smuzhiyun 	hw->phy.addr = 1;
4872*4882a593Smuzhiyun 	ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4873*4882a593Smuzhiyun 					   &phy_data);
4874*4882a593Smuzhiyun 	if (ret_val)
4875*4882a593Smuzhiyun 		goto release;
4876*4882a593Smuzhiyun 	if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4877*4882a593Smuzhiyun 		ret_val = hw->phy.ops.set_page(hw,
4878*4882a593Smuzhiyun 					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
4879*4882a593Smuzhiyun 		if (ret_val)
4880*4882a593Smuzhiyun 			goto release;
4881*4882a593Smuzhiyun 	}
4882*4882a593Smuzhiyun 
4883*4882a593Smuzhiyun 	/* Single Collision Count */
4884*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4885*4882a593Smuzhiyun 	ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4886*4882a593Smuzhiyun 	if (!ret_val)
4887*4882a593Smuzhiyun 		adapter->stats.scc += phy_data;
4888*4882a593Smuzhiyun 
4889*4882a593Smuzhiyun 	/* Excessive Collision Count */
4890*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4891*4882a593Smuzhiyun 	ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4892*4882a593Smuzhiyun 	if (!ret_val)
4893*4882a593Smuzhiyun 		adapter->stats.ecol += phy_data;
4894*4882a593Smuzhiyun 
4895*4882a593Smuzhiyun 	/* Multiple Collision Count */
4896*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4897*4882a593Smuzhiyun 	ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4898*4882a593Smuzhiyun 	if (!ret_val)
4899*4882a593Smuzhiyun 		adapter->stats.mcc += phy_data;
4900*4882a593Smuzhiyun 
4901*4882a593Smuzhiyun 	/* Late Collision Count */
4902*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4903*4882a593Smuzhiyun 	ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4904*4882a593Smuzhiyun 	if (!ret_val)
4905*4882a593Smuzhiyun 		adapter->stats.latecol += phy_data;
4906*4882a593Smuzhiyun 
4907*4882a593Smuzhiyun 	/* Collision Count - also used for adaptive IFS */
4908*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4909*4882a593Smuzhiyun 	ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4910*4882a593Smuzhiyun 	if (!ret_val)
4911*4882a593Smuzhiyun 		hw->mac.collision_delta = phy_data;
4912*4882a593Smuzhiyun 
4913*4882a593Smuzhiyun 	/* Defer Count */
4914*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4915*4882a593Smuzhiyun 	ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4916*4882a593Smuzhiyun 	if (!ret_val)
4917*4882a593Smuzhiyun 		adapter->stats.dc += phy_data;
4918*4882a593Smuzhiyun 
4919*4882a593Smuzhiyun 	/* Transmit with no CRS */
4920*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4921*4882a593Smuzhiyun 	ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4922*4882a593Smuzhiyun 	if (!ret_val)
4923*4882a593Smuzhiyun 		adapter->stats.tncrs += phy_data;
4924*4882a593Smuzhiyun 
4925*4882a593Smuzhiyun release:
4926*4882a593Smuzhiyun 	hw->phy.ops.release(hw);
4927*4882a593Smuzhiyun }
4928*4882a593Smuzhiyun 
4929*4882a593Smuzhiyun /**
4930*4882a593Smuzhiyun  * e1000e_update_stats - Update the board statistics counters
4931*4882a593Smuzhiyun  * @adapter: board private structure
4932*4882a593Smuzhiyun  **/
e1000e_update_stats(struct e1000_adapter * adapter)4933*4882a593Smuzhiyun static void e1000e_update_stats(struct e1000_adapter *adapter)
4934*4882a593Smuzhiyun {
4935*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4936*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
4937*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4938*4882a593Smuzhiyun 
4939*4882a593Smuzhiyun 	/* Prevent stats update while adapter is being reset, or if the pci
4940*4882a593Smuzhiyun 	 * connection is down.
4941*4882a593Smuzhiyun 	 */
4942*4882a593Smuzhiyun 	if (adapter->link_speed == 0)
4943*4882a593Smuzhiyun 		return;
4944*4882a593Smuzhiyun 	if (pci_channel_offline(pdev))
4945*4882a593Smuzhiyun 		return;
4946*4882a593Smuzhiyun 
4947*4882a593Smuzhiyun 	adapter->stats.crcerrs += er32(CRCERRS);
4948*4882a593Smuzhiyun 	adapter->stats.gprc += er32(GPRC);
4949*4882a593Smuzhiyun 	adapter->stats.gorc += er32(GORCL);
4950*4882a593Smuzhiyun 	er32(GORCH);		/* Clear gorc */
4951*4882a593Smuzhiyun 	adapter->stats.bprc += er32(BPRC);
4952*4882a593Smuzhiyun 	adapter->stats.mprc += er32(MPRC);
4953*4882a593Smuzhiyun 	adapter->stats.roc += er32(ROC);
4954*4882a593Smuzhiyun 
4955*4882a593Smuzhiyun 	adapter->stats.mpc += er32(MPC);
4956*4882a593Smuzhiyun 
4957*4882a593Smuzhiyun 	/* Half-duplex statistics */
4958*4882a593Smuzhiyun 	if (adapter->link_duplex == HALF_DUPLEX) {
4959*4882a593Smuzhiyun 		if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4960*4882a593Smuzhiyun 			e1000e_update_phy_stats(adapter);
4961*4882a593Smuzhiyun 		} else {
4962*4882a593Smuzhiyun 			adapter->stats.scc += er32(SCC);
4963*4882a593Smuzhiyun 			adapter->stats.ecol += er32(ECOL);
4964*4882a593Smuzhiyun 			adapter->stats.mcc += er32(MCC);
4965*4882a593Smuzhiyun 			adapter->stats.latecol += er32(LATECOL);
4966*4882a593Smuzhiyun 			adapter->stats.dc += er32(DC);
4967*4882a593Smuzhiyun 
4968*4882a593Smuzhiyun 			hw->mac.collision_delta = er32(COLC);
4969*4882a593Smuzhiyun 
4970*4882a593Smuzhiyun 			if ((hw->mac.type != e1000_82574) &&
4971*4882a593Smuzhiyun 			    (hw->mac.type != e1000_82583))
4972*4882a593Smuzhiyun 				adapter->stats.tncrs += er32(TNCRS);
4973*4882a593Smuzhiyun 		}
4974*4882a593Smuzhiyun 		adapter->stats.colc += hw->mac.collision_delta;
4975*4882a593Smuzhiyun 	}
4976*4882a593Smuzhiyun 
4977*4882a593Smuzhiyun 	adapter->stats.xonrxc += er32(XONRXC);
4978*4882a593Smuzhiyun 	adapter->stats.xontxc += er32(XONTXC);
4979*4882a593Smuzhiyun 	adapter->stats.xoffrxc += er32(XOFFRXC);
4980*4882a593Smuzhiyun 	adapter->stats.xofftxc += er32(XOFFTXC);
4981*4882a593Smuzhiyun 	adapter->stats.gptc += er32(GPTC);
4982*4882a593Smuzhiyun 	adapter->stats.gotc += er32(GOTCL);
4983*4882a593Smuzhiyun 	er32(GOTCH);		/* Clear gotc */
4984*4882a593Smuzhiyun 	adapter->stats.rnbc += er32(RNBC);
4985*4882a593Smuzhiyun 	adapter->stats.ruc += er32(RUC);
4986*4882a593Smuzhiyun 
4987*4882a593Smuzhiyun 	adapter->stats.mptc += er32(MPTC);
4988*4882a593Smuzhiyun 	adapter->stats.bptc += er32(BPTC);
4989*4882a593Smuzhiyun 
4990*4882a593Smuzhiyun 	/* used for adaptive IFS */
4991*4882a593Smuzhiyun 
4992*4882a593Smuzhiyun 	hw->mac.tx_packet_delta = er32(TPT);
4993*4882a593Smuzhiyun 	adapter->stats.tpt += hw->mac.tx_packet_delta;
4994*4882a593Smuzhiyun 
4995*4882a593Smuzhiyun 	adapter->stats.algnerrc += er32(ALGNERRC);
4996*4882a593Smuzhiyun 	adapter->stats.rxerrc += er32(RXERRC);
4997*4882a593Smuzhiyun 	adapter->stats.cexterr += er32(CEXTERR);
4998*4882a593Smuzhiyun 	adapter->stats.tsctc += er32(TSCTC);
4999*4882a593Smuzhiyun 	adapter->stats.tsctfc += er32(TSCTFC);
5000*4882a593Smuzhiyun 
5001*4882a593Smuzhiyun 	/* Fill out the OS statistics structure */
5002*4882a593Smuzhiyun 	netdev->stats.multicast = adapter->stats.mprc;
5003*4882a593Smuzhiyun 	netdev->stats.collisions = adapter->stats.colc;
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun 	/* Rx Errors */
5006*4882a593Smuzhiyun 
5007*4882a593Smuzhiyun 	/* RLEC on some newer hardware can be incorrect so build
5008*4882a593Smuzhiyun 	 * our own version based on RUC and ROC
5009*4882a593Smuzhiyun 	 */
5010*4882a593Smuzhiyun 	netdev->stats.rx_errors = adapter->stats.rxerrc +
5011*4882a593Smuzhiyun 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
5012*4882a593Smuzhiyun 	    adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5013*4882a593Smuzhiyun 	netdev->stats.rx_length_errors = adapter->stats.ruc +
5014*4882a593Smuzhiyun 	    adapter->stats.roc;
5015*4882a593Smuzhiyun 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
5016*4882a593Smuzhiyun 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
5017*4882a593Smuzhiyun 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
5018*4882a593Smuzhiyun 
5019*4882a593Smuzhiyun 	/* Tx Errors */
5020*4882a593Smuzhiyun 	netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5021*4882a593Smuzhiyun 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
5022*4882a593Smuzhiyun 	netdev->stats.tx_window_errors = adapter->stats.latecol;
5023*4882a593Smuzhiyun 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
5024*4882a593Smuzhiyun 
5025*4882a593Smuzhiyun 	/* Tx Dropped needs to be maintained elsewhere */
5026*4882a593Smuzhiyun 
5027*4882a593Smuzhiyun 	/* Management Stats */
5028*4882a593Smuzhiyun 	adapter->stats.mgptc += er32(MGTPTC);
5029*4882a593Smuzhiyun 	adapter->stats.mgprc += er32(MGTPRC);
5030*4882a593Smuzhiyun 	adapter->stats.mgpdc += er32(MGTPDC);
5031*4882a593Smuzhiyun 
5032*4882a593Smuzhiyun 	/* Correctable ECC Errors */
5033*4882a593Smuzhiyun 	if (hw->mac.type >= e1000_pch_lpt) {
5034*4882a593Smuzhiyun 		u32 pbeccsts = er32(PBECCSTS);
5035*4882a593Smuzhiyun 
5036*4882a593Smuzhiyun 		adapter->corr_errors +=
5037*4882a593Smuzhiyun 		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
5038*4882a593Smuzhiyun 		adapter->uncorr_errors +=
5039*4882a593Smuzhiyun 		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
5040*4882a593Smuzhiyun 		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
5041*4882a593Smuzhiyun 	}
5042*4882a593Smuzhiyun }
5043*4882a593Smuzhiyun 
5044*4882a593Smuzhiyun /**
5045*4882a593Smuzhiyun  * e1000_phy_read_status - Update the PHY register status snapshot
5046*4882a593Smuzhiyun  * @adapter: board private structure
5047*4882a593Smuzhiyun  **/
e1000_phy_read_status(struct e1000_adapter * adapter)5048*4882a593Smuzhiyun static void e1000_phy_read_status(struct e1000_adapter *adapter)
5049*4882a593Smuzhiyun {
5050*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5051*4882a593Smuzhiyun 	struct e1000_phy_regs *phy = &adapter->phy_regs;
5052*4882a593Smuzhiyun 
5053*4882a593Smuzhiyun 	if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
5054*4882a593Smuzhiyun 	    (er32(STATUS) & E1000_STATUS_LU) &&
5055*4882a593Smuzhiyun 	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
5056*4882a593Smuzhiyun 		int ret_val;
5057*4882a593Smuzhiyun 
5058*4882a593Smuzhiyun 		ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
5059*4882a593Smuzhiyun 		ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
5060*4882a593Smuzhiyun 		ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
5061*4882a593Smuzhiyun 		ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
5062*4882a593Smuzhiyun 		ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
5063*4882a593Smuzhiyun 		ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
5064*4882a593Smuzhiyun 		ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
5065*4882a593Smuzhiyun 		ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
5066*4882a593Smuzhiyun 		if (ret_val)
5067*4882a593Smuzhiyun 			e_warn("Error reading PHY register\n");
5068*4882a593Smuzhiyun 	} else {
5069*4882a593Smuzhiyun 		/* Do not read PHY registers if link is not up
5070*4882a593Smuzhiyun 		 * Set values to typical power-on defaults
5071*4882a593Smuzhiyun 		 */
5072*4882a593Smuzhiyun 		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
5073*4882a593Smuzhiyun 		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
5074*4882a593Smuzhiyun 			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
5075*4882a593Smuzhiyun 			     BMSR_ERCAP);
5076*4882a593Smuzhiyun 		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
5077*4882a593Smuzhiyun 				  ADVERTISE_ALL | ADVERTISE_CSMA);
5078*4882a593Smuzhiyun 		phy->lpa = 0;
5079*4882a593Smuzhiyun 		phy->expansion = EXPANSION_ENABLENPAGE;
5080*4882a593Smuzhiyun 		phy->ctrl1000 = ADVERTISE_1000FULL;
5081*4882a593Smuzhiyun 		phy->stat1000 = 0;
5082*4882a593Smuzhiyun 		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
5083*4882a593Smuzhiyun 	}
5084*4882a593Smuzhiyun }
5085*4882a593Smuzhiyun 
e1000_print_link_info(struct e1000_adapter * adapter)5086*4882a593Smuzhiyun static void e1000_print_link_info(struct e1000_adapter *adapter)
5087*4882a593Smuzhiyun {
5088*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5089*4882a593Smuzhiyun 	u32 ctrl = er32(CTRL);
5090*4882a593Smuzhiyun 
5091*4882a593Smuzhiyun 	/* Link status message must follow this format for user tools */
5092*4882a593Smuzhiyun 	netdev_info(adapter->netdev,
5093*4882a593Smuzhiyun 		    "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5094*4882a593Smuzhiyun 		    adapter->link_speed,
5095*4882a593Smuzhiyun 		    adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
5096*4882a593Smuzhiyun 		    (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
5097*4882a593Smuzhiyun 		    (ctrl & E1000_CTRL_RFCE) ? "Rx" :
5098*4882a593Smuzhiyun 		    (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
5099*4882a593Smuzhiyun }
5100*4882a593Smuzhiyun 
e1000e_has_link(struct e1000_adapter * adapter)5101*4882a593Smuzhiyun static bool e1000e_has_link(struct e1000_adapter *adapter)
5102*4882a593Smuzhiyun {
5103*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5104*4882a593Smuzhiyun 	bool link_active = false;
5105*4882a593Smuzhiyun 	s32 ret_val = 0;
5106*4882a593Smuzhiyun 
5107*4882a593Smuzhiyun 	/* get_link_status is set on LSC (link status) interrupt or
5108*4882a593Smuzhiyun 	 * Rx sequence error interrupt.  get_link_status will stay
5109*4882a593Smuzhiyun 	 * true until the check_for_link establishes link
5110*4882a593Smuzhiyun 	 * for copper adapters ONLY
5111*4882a593Smuzhiyun 	 */
5112*4882a593Smuzhiyun 	switch (hw->phy.media_type) {
5113*4882a593Smuzhiyun 	case e1000_media_type_copper:
5114*4882a593Smuzhiyun 		if (hw->mac.get_link_status) {
5115*4882a593Smuzhiyun 			ret_val = hw->mac.ops.check_for_link(hw);
5116*4882a593Smuzhiyun 			link_active = !hw->mac.get_link_status;
5117*4882a593Smuzhiyun 		} else {
5118*4882a593Smuzhiyun 			link_active = true;
5119*4882a593Smuzhiyun 		}
5120*4882a593Smuzhiyun 		break;
5121*4882a593Smuzhiyun 	case e1000_media_type_fiber:
5122*4882a593Smuzhiyun 		ret_val = hw->mac.ops.check_for_link(hw);
5123*4882a593Smuzhiyun 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
5124*4882a593Smuzhiyun 		break;
5125*4882a593Smuzhiyun 	case e1000_media_type_internal_serdes:
5126*4882a593Smuzhiyun 		ret_val = hw->mac.ops.check_for_link(hw);
5127*4882a593Smuzhiyun 		link_active = hw->mac.serdes_has_link;
5128*4882a593Smuzhiyun 		break;
5129*4882a593Smuzhiyun 	default:
5130*4882a593Smuzhiyun 	case e1000_media_type_unknown:
5131*4882a593Smuzhiyun 		break;
5132*4882a593Smuzhiyun 	}
5133*4882a593Smuzhiyun 
5134*4882a593Smuzhiyun 	if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
5135*4882a593Smuzhiyun 	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
5136*4882a593Smuzhiyun 		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
5137*4882a593Smuzhiyun 		e_info("Gigabit has been disabled, downgrading speed\n");
5138*4882a593Smuzhiyun 	}
5139*4882a593Smuzhiyun 
5140*4882a593Smuzhiyun 	return link_active;
5141*4882a593Smuzhiyun }
5142*4882a593Smuzhiyun 
e1000e_enable_receives(struct e1000_adapter * adapter)5143*4882a593Smuzhiyun static void e1000e_enable_receives(struct e1000_adapter *adapter)
5144*4882a593Smuzhiyun {
5145*4882a593Smuzhiyun 	/* make sure the receive unit is started */
5146*4882a593Smuzhiyun 	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5147*4882a593Smuzhiyun 	    (adapter->flags & FLAG_RESTART_NOW)) {
5148*4882a593Smuzhiyun 		struct e1000_hw *hw = &adapter->hw;
5149*4882a593Smuzhiyun 		u32 rctl = er32(RCTL);
5150*4882a593Smuzhiyun 
5151*4882a593Smuzhiyun 		ew32(RCTL, rctl | E1000_RCTL_EN);
5152*4882a593Smuzhiyun 		adapter->flags &= ~FLAG_RESTART_NOW;
5153*4882a593Smuzhiyun 	}
5154*4882a593Smuzhiyun }
5155*4882a593Smuzhiyun 
e1000e_check_82574_phy_workaround(struct e1000_adapter * adapter)5156*4882a593Smuzhiyun static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
5157*4882a593Smuzhiyun {
5158*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5159*4882a593Smuzhiyun 
5160*4882a593Smuzhiyun 	/* With 82574 controllers, PHY needs to be checked periodically
5161*4882a593Smuzhiyun 	 * for hung state and reset, if two calls return true
5162*4882a593Smuzhiyun 	 */
5163*4882a593Smuzhiyun 	if (e1000_check_phy_82574(hw))
5164*4882a593Smuzhiyun 		adapter->phy_hang_count++;
5165*4882a593Smuzhiyun 	else
5166*4882a593Smuzhiyun 		adapter->phy_hang_count = 0;
5167*4882a593Smuzhiyun 
5168*4882a593Smuzhiyun 	if (adapter->phy_hang_count > 1) {
5169*4882a593Smuzhiyun 		adapter->phy_hang_count = 0;
5170*4882a593Smuzhiyun 		e_dbg("PHY appears hung - resetting\n");
5171*4882a593Smuzhiyun 		schedule_work(&adapter->reset_task);
5172*4882a593Smuzhiyun 	}
5173*4882a593Smuzhiyun }
5174*4882a593Smuzhiyun 
5175*4882a593Smuzhiyun /**
5176*4882a593Smuzhiyun  * e1000_watchdog - Timer Call-back
5177*4882a593Smuzhiyun  * @t: pointer to timer_list containing private info adapter
5178*4882a593Smuzhiyun  **/
e1000_watchdog(struct timer_list * t)5179*4882a593Smuzhiyun static void e1000_watchdog(struct timer_list *t)
5180*4882a593Smuzhiyun {
5181*4882a593Smuzhiyun 	struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5182*4882a593Smuzhiyun 
5183*4882a593Smuzhiyun 	/* Do the rest outside of interrupt context */
5184*4882a593Smuzhiyun 	schedule_work(&adapter->watchdog_task);
5185*4882a593Smuzhiyun 
5186*4882a593Smuzhiyun 	/* TODO: make this use queue_delayed_work() */
5187*4882a593Smuzhiyun }
5188*4882a593Smuzhiyun 
e1000_watchdog_task(struct work_struct * work)5189*4882a593Smuzhiyun static void e1000_watchdog_task(struct work_struct *work)
5190*4882a593Smuzhiyun {
5191*4882a593Smuzhiyun 	struct e1000_adapter *adapter = container_of(work,
5192*4882a593Smuzhiyun 						     struct e1000_adapter,
5193*4882a593Smuzhiyun 						     watchdog_task);
5194*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
5195*4882a593Smuzhiyun 	struct e1000_mac_info *mac = &adapter->hw.mac;
5196*4882a593Smuzhiyun 	struct e1000_phy_info *phy = &adapter->hw.phy;
5197*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
5198*4882a593Smuzhiyun 	u32 dmoff_exit_timeout = 100, tries = 0;
5199*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5200*4882a593Smuzhiyun 	u32 link, tctl, pcim_state;
5201*4882a593Smuzhiyun 
5202*4882a593Smuzhiyun 	if (test_bit(__E1000_DOWN, &adapter->state))
5203*4882a593Smuzhiyun 		return;
5204*4882a593Smuzhiyun 
5205*4882a593Smuzhiyun 	link = e1000e_has_link(adapter);
5206*4882a593Smuzhiyun 	if ((netif_carrier_ok(netdev)) && link) {
5207*4882a593Smuzhiyun 		/* Cancel scheduled suspend requests. */
5208*4882a593Smuzhiyun 		pm_runtime_resume(netdev->dev.parent);
5209*4882a593Smuzhiyun 
5210*4882a593Smuzhiyun 		e1000e_enable_receives(adapter);
5211*4882a593Smuzhiyun 		goto link_up;
5212*4882a593Smuzhiyun 	}
5213*4882a593Smuzhiyun 
5214*4882a593Smuzhiyun 	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
5215*4882a593Smuzhiyun 	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
5216*4882a593Smuzhiyun 		e1000_update_mng_vlan(adapter);
5217*4882a593Smuzhiyun 
5218*4882a593Smuzhiyun 	if (link) {
5219*4882a593Smuzhiyun 		if (!netif_carrier_ok(netdev)) {
5220*4882a593Smuzhiyun 			bool txb2b = true;
5221*4882a593Smuzhiyun 
5222*4882a593Smuzhiyun 			/* Cancel scheduled suspend requests. */
5223*4882a593Smuzhiyun 			pm_runtime_resume(netdev->dev.parent);
5224*4882a593Smuzhiyun 
5225*4882a593Smuzhiyun 			/* Checking if MAC is in DMoff state*/
5226*4882a593Smuzhiyun 			if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
5227*4882a593Smuzhiyun 				pcim_state = er32(STATUS);
5228*4882a593Smuzhiyun 				while (pcim_state & E1000_STATUS_PCIM_STATE) {
5229*4882a593Smuzhiyun 					if (tries++ == dmoff_exit_timeout) {
5230*4882a593Smuzhiyun 						e_dbg("Error in exiting dmoff\n");
5231*4882a593Smuzhiyun 						break;
5232*4882a593Smuzhiyun 					}
5233*4882a593Smuzhiyun 					usleep_range(10000, 20000);
5234*4882a593Smuzhiyun 					pcim_state = er32(STATUS);
5235*4882a593Smuzhiyun 
5236*4882a593Smuzhiyun 					/* Checking if MAC exited DMoff state */
5237*4882a593Smuzhiyun 					if (!(pcim_state & E1000_STATUS_PCIM_STATE))
5238*4882a593Smuzhiyun 						e1000_phy_hw_reset(&adapter->hw);
5239*4882a593Smuzhiyun 				}
5240*4882a593Smuzhiyun 			}
5241*4882a593Smuzhiyun 
5242*4882a593Smuzhiyun 			/* update snapshot of PHY registers on LSC */
5243*4882a593Smuzhiyun 			e1000_phy_read_status(adapter);
5244*4882a593Smuzhiyun 			mac->ops.get_link_up_info(&adapter->hw,
5245*4882a593Smuzhiyun 						  &adapter->link_speed,
5246*4882a593Smuzhiyun 						  &adapter->link_duplex);
5247*4882a593Smuzhiyun 			e1000_print_link_info(adapter);
5248*4882a593Smuzhiyun 
5249*4882a593Smuzhiyun 			/* check if SmartSpeed worked */
5250*4882a593Smuzhiyun 			e1000e_check_downshift(hw);
5251*4882a593Smuzhiyun 			if (phy->speed_downgraded)
5252*4882a593Smuzhiyun 				netdev_warn(netdev,
5253*4882a593Smuzhiyun 					    "Link Speed was downgraded by SmartSpeed\n");
5254*4882a593Smuzhiyun 
5255*4882a593Smuzhiyun 			/* On supported PHYs, check for duplex mismatch only
5256*4882a593Smuzhiyun 			 * if link has autonegotiated at 10/100 half
5257*4882a593Smuzhiyun 			 */
5258*4882a593Smuzhiyun 			if ((hw->phy.type == e1000_phy_igp_3 ||
5259*4882a593Smuzhiyun 			     hw->phy.type == e1000_phy_bm) &&
5260*4882a593Smuzhiyun 			    hw->mac.autoneg &&
5261*4882a593Smuzhiyun 			    (adapter->link_speed == SPEED_10 ||
5262*4882a593Smuzhiyun 			     adapter->link_speed == SPEED_100) &&
5263*4882a593Smuzhiyun 			    (adapter->link_duplex == HALF_DUPLEX)) {
5264*4882a593Smuzhiyun 				u16 autoneg_exp;
5265*4882a593Smuzhiyun 
5266*4882a593Smuzhiyun 				e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
5267*4882a593Smuzhiyun 
5268*4882a593Smuzhiyun 				if (!(autoneg_exp & EXPANSION_NWAY))
5269*4882a593Smuzhiyun 					e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");
5270*4882a593Smuzhiyun 			}
5271*4882a593Smuzhiyun 
5272*4882a593Smuzhiyun 			/* adjust timeout factor according to speed/duplex */
5273*4882a593Smuzhiyun 			adapter->tx_timeout_factor = 1;
5274*4882a593Smuzhiyun 			switch (adapter->link_speed) {
5275*4882a593Smuzhiyun 			case SPEED_10:
5276*4882a593Smuzhiyun 				txb2b = false;
5277*4882a593Smuzhiyun 				adapter->tx_timeout_factor = 16;
5278*4882a593Smuzhiyun 				break;
5279*4882a593Smuzhiyun 			case SPEED_100:
5280*4882a593Smuzhiyun 				txb2b = false;
5281*4882a593Smuzhiyun 				adapter->tx_timeout_factor = 10;
5282*4882a593Smuzhiyun 				break;
5283*4882a593Smuzhiyun 			}
5284*4882a593Smuzhiyun 
5285*4882a593Smuzhiyun 			/* workaround: re-program speed mode bit after
5286*4882a593Smuzhiyun 			 * link-up event
5287*4882a593Smuzhiyun 			 */
5288*4882a593Smuzhiyun 			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
5289*4882a593Smuzhiyun 			    !txb2b) {
5290*4882a593Smuzhiyun 				u32 tarc0;
5291*4882a593Smuzhiyun 
5292*4882a593Smuzhiyun 				tarc0 = er32(TARC(0));
5293*4882a593Smuzhiyun 				tarc0 &= ~SPEED_MODE_BIT;
5294*4882a593Smuzhiyun 				ew32(TARC(0), tarc0);
5295*4882a593Smuzhiyun 			}
5296*4882a593Smuzhiyun 
5297*4882a593Smuzhiyun 			/* disable TSO for pcie and 10/100 speeds, to avoid
5298*4882a593Smuzhiyun 			 * some hardware issues
5299*4882a593Smuzhiyun 			 */
5300*4882a593Smuzhiyun 			if (!(adapter->flags & FLAG_TSO_FORCE)) {
5301*4882a593Smuzhiyun 				switch (adapter->link_speed) {
5302*4882a593Smuzhiyun 				case SPEED_10:
5303*4882a593Smuzhiyun 				case SPEED_100:
5304*4882a593Smuzhiyun 					e_info("10/100 speed: disabling TSO\n");
5305*4882a593Smuzhiyun 					netdev->features &= ~NETIF_F_TSO;
5306*4882a593Smuzhiyun 					netdev->features &= ~NETIF_F_TSO6;
5307*4882a593Smuzhiyun 					break;
5308*4882a593Smuzhiyun 				case SPEED_1000:
5309*4882a593Smuzhiyun 					netdev->features |= NETIF_F_TSO;
5310*4882a593Smuzhiyun 					netdev->features |= NETIF_F_TSO6;
5311*4882a593Smuzhiyun 					break;
5312*4882a593Smuzhiyun 				default:
5313*4882a593Smuzhiyun 					/* oops */
5314*4882a593Smuzhiyun 					break;
5315*4882a593Smuzhiyun 				}
5316*4882a593Smuzhiyun 				if (hw->mac.type == e1000_pch_spt) {
5317*4882a593Smuzhiyun 					netdev->features &= ~NETIF_F_TSO;
5318*4882a593Smuzhiyun 					netdev->features &= ~NETIF_F_TSO6;
5319*4882a593Smuzhiyun 				}
5320*4882a593Smuzhiyun 			}
5321*4882a593Smuzhiyun 
5322*4882a593Smuzhiyun 			/* enable transmits in the hardware, need to do this
5323*4882a593Smuzhiyun 			 * after setting TARC(0)
5324*4882a593Smuzhiyun 			 */
5325*4882a593Smuzhiyun 			tctl = er32(TCTL);
5326*4882a593Smuzhiyun 			tctl |= E1000_TCTL_EN;
5327*4882a593Smuzhiyun 			ew32(TCTL, tctl);
5328*4882a593Smuzhiyun 
5329*4882a593Smuzhiyun 			/* Perform any post-link-up configuration before
5330*4882a593Smuzhiyun 			 * reporting link up.
5331*4882a593Smuzhiyun 			 */
5332*4882a593Smuzhiyun 			if (phy->ops.cfg_on_link_up)
5333*4882a593Smuzhiyun 				phy->ops.cfg_on_link_up(hw);
5334*4882a593Smuzhiyun 
5335*4882a593Smuzhiyun 			netif_wake_queue(netdev);
5336*4882a593Smuzhiyun 			netif_carrier_on(netdev);
5337*4882a593Smuzhiyun 
5338*4882a593Smuzhiyun 			if (!test_bit(__E1000_DOWN, &adapter->state))
5339*4882a593Smuzhiyun 				mod_timer(&adapter->phy_info_timer,
5340*4882a593Smuzhiyun 					  round_jiffies(jiffies + 2 * HZ));
5341*4882a593Smuzhiyun 		}
5342*4882a593Smuzhiyun 	} else {
5343*4882a593Smuzhiyun 		if (netif_carrier_ok(netdev)) {
5344*4882a593Smuzhiyun 			adapter->link_speed = 0;
5345*4882a593Smuzhiyun 			adapter->link_duplex = 0;
5346*4882a593Smuzhiyun 			/* Link status message must follow this format */
5347*4882a593Smuzhiyun 			netdev_info(netdev, "NIC Link is Down\n");
5348*4882a593Smuzhiyun 			netif_carrier_off(netdev);
5349*4882a593Smuzhiyun 			netif_stop_queue(netdev);
5350*4882a593Smuzhiyun 			if (!test_bit(__E1000_DOWN, &adapter->state))
5351*4882a593Smuzhiyun 				mod_timer(&adapter->phy_info_timer,
5352*4882a593Smuzhiyun 					  round_jiffies(jiffies + 2 * HZ));
5353*4882a593Smuzhiyun 
5354*4882a593Smuzhiyun 			/* 8000ES2LAN requires a Rx packet buffer work-around
5355*4882a593Smuzhiyun 			 * on link down event; reset the controller to flush
5356*4882a593Smuzhiyun 			 * the Rx packet buffer.
5357*4882a593Smuzhiyun 			 */
5358*4882a593Smuzhiyun 			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
5359*4882a593Smuzhiyun 				adapter->flags |= FLAG_RESTART_NOW;
5360*4882a593Smuzhiyun 			else
5361*4882a593Smuzhiyun 				pm_schedule_suspend(netdev->dev.parent,
5362*4882a593Smuzhiyun 						    LINK_TIMEOUT);
5363*4882a593Smuzhiyun 		}
5364*4882a593Smuzhiyun 	}
5365*4882a593Smuzhiyun 
5366*4882a593Smuzhiyun link_up:
5367*4882a593Smuzhiyun 	spin_lock(&adapter->stats64_lock);
5368*4882a593Smuzhiyun 	e1000e_update_stats(adapter);
5369*4882a593Smuzhiyun 
5370*4882a593Smuzhiyun 	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
5371*4882a593Smuzhiyun 	adapter->tpt_old = adapter->stats.tpt;
5372*4882a593Smuzhiyun 	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
5373*4882a593Smuzhiyun 	adapter->colc_old = adapter->stats.colc;
5374*4882a593Smuzhiyun 
5375*4882a593Smuzhiyun 	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
5376*4882a593Smuzhiyun 	adapter->gorc_old = adapter->stats.gorc;
5377*4882a593Smuzhiyun 	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
5378*4882a593Smuzhiyun 	adapter->gotc_old = adapter->stats.gotc;
5379*4882a593Smuzhiyun 	spin_unlock(&adapter->stats64_lock);
5380*4882a593Smuzhiyun 
5381*4882a593Smuzhiyun 	/* If the link is lost the controller stops DMA, but
5382*4882a593Smuzhiyun 	 * if there is queued Tx work it cannot be done.  So
5383*4882a593Smuzhiyun 	 * reset the controller to flush the Tx packet buffers.
5384*4882a593Smuzhiyun 	 */
5385*4882a593Smuzhiyun 	if (!netif_carrier_ok(netdev) &&
5386*4882a593Smuzhiyun 	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
5387*4882a593Smuzhiyun 		adapter->flags |= FLAG_RESTART_NOW;
5388*4882a593Smuzhiyun 
5389*4882a593Smuzhiyun 	/* If reset is necessary, do it outside of interrupt context. */
5390*4882a593Smuzhiyun 	if (adapter->flags & FLAG_RESTART_NOW) {
5391*4882a593Smuzhiyun 		schedule_work(&adapter->reset_task);
5392*4882a593Smuzhiyun 		/* return immediately since reset is imminent */
5393*4882a593Smuzhiyun 		return;
5394*4882a593Smuzhiyun 	}
5395*4882a593Smuzhiyun 
5396*4882a593Smuzhiyun 	e1000e_update_adaptive(&adapter->hw);
5397*4882a593Smuzhiyun 
5398*4882a593Smuzhiyun 	/* Simple mode for Interrupt Throttle Rate (ITR) */
5399*4882a593Smuzhiyun 	if (adapter->itr_setting == 4) {
5400*4882a593Smuzhiyun 		/* Symmetric Tx/Rx gets a reduced ITR=2000;
5401*4882a593Smuzhiyun 		 * Total asymmetrical Tx or Rx gets ITR=8000;
5402*4882a593Smuzhiyun 		 * everyone else is between 2000-8000.
5403*4882a593Smuzhiyun 		 */
5404*4882a593Smuzhiyun 		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5405*4882a593Smuzhiyun 		u32 dif = (adapter->gotc > adapter->gorc ?
5406*4882a593Smuzhiyun 			   adapter->gotc - adapter->gorc :
5407*4882a593Smuzhiyun 			   adapter->gorc - adapter->gotc) / 10000;
5408*4882a593Smuzhiyun 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5409*4882a593Smuzhiyun 
5410*4882a593Smuzhiyun 		e1000e_write_itr(adapter, itr);
5411*4882a593Smuzhiyun 	}
5412*4882a593Smuzhiyun 
5413*4882a593Smuzhiyun 	/* Cause software interrupt to ensure Rx ring is cleaned */
5414*4882a593Smuzhiyun 	if (adapter->msix_entries)
5415*4882a593Smuzhiyun 		ew32(ICS, adapter->rx_ring->ims_val);
5416*4882a593Smuzhiyun 	else
5417*4882a593Smuzhiyun 		ew32(ICS, E1000_ICS_RXDMT0);
5418*4882a593Smuzhiyun 
5419*4882a593Smuzhiyun 	/* flush pending descriptors to memory before detecting Tx hang */
5420*4882a593Smuzhiyun 	e1000e_flush_descriptors(adapter);
5421*4882a593Smuzhiyun 
5422*4882a593Smuzhiyun 	/* Force detection of hung controller every watchdog period */
5423*4882a593Smuzhiyun 	adapter->detect_tx_hung = true;
5424*4882a593Smuzhiyun 
5425*4882a593Smuzhiyun 	/* With 82571 controllers, LAA may be overwritten due to controller
5426*4882a593Smuzhiyun 	 * reset from the other port. Set the appropriate LAA in RAR[0]
5427*4882a593Smuzhiyun 	 */
5428*4882a593Smuzhiyun 	if (e1000e_get_laa_state_82571(hw))
5429*4882a593Smuzhiyun 		hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
5430*4882a593Smuzhiyun 
5431*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5432*4882a593Smuzhiyun 		e1000e_check_82574_phy_workaround(adapter);
5433*4882a593Smuzhiyun 
5434*4882a593Smuzhiyun 	/* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
5435*4882a593Smuzhiyun 	if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5436*4882a593Smuzhiyun 		if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5437*4882a593Smuzhiyun 		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5438*4882a593Smuzhiyun 			er32(RXSTMPH);
5439*4882a593Smuzhiyun 			adapter->rx_hwtstamp_cleared++;
5440*4882a593Smuzhiyun 		} else {
5441*4882a593Smuzhiyun 			adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5442*4882a593Smuzhiyun 		}
5443*4882a593Smuzhiyun 	}
5444*4882a593Smuzhiyun 
5445*4882a593Smuzhiyun 	/* Reset the timer */
5446*4882a593Smuzhiyun 	if (!test_bit(__E1000_DOWN, &adapter->state))
5447*4882a593Smuzhiyun 		mod_timer(&adapter->watchdog_timer,
5448*4882a593Smuzhiyun 			  round_jiffies(jiffies + 2 * HZ));
5449*4882a593Smuzhiyun }
5450*4882a593Smuzhiyun 
5451*4882a593Smuzhiyun #define E1000_TX_FLAGS_CSUM		0x00000001
5452*4882a593Smuzhiyun #define E1000_TX_FLAGS_VLAN		0x00000002
5453*4882a593Smuzhiyun #define E1000_TX_FLAGS_TSO		0x00000004
5454*4882a593Smuzhiyun #define E1000_TX_FLAGS_IPV4		0x00000008
5455*4882a593Smuzhiyun #define E1000_TX_FLAGS_NO_FCS		0x00000010
5456*4882a593Smuzhiyun #define E1000_TX_FLAGS_HWTSTAMP		0x00000020
5457*4882a593Smuzhiyun #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
5458*4882a593Smuzhiyun #define E1000_TX_FLAGS_VLAN_SHIFT	16
5459*4882a593Smuzhiyun 
e1000_tso(struct e1000_ring * tx_ring,struct sk_buff * skb,__be16 protocol)5460*4882a593Smuzhiyun static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
5461*4882a593Smuzhiyun 		     __be16 protocol)
5462*4882a593Smuzhiyun {
5463*4882a593Smuzhiyun 	struct e1000_context_desc *context_desc;
5464*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
5465*4882a593Smuzhiyun 	unsigned int i;
5466*4882a593Smuzhiyun 	u32 cmd_length = 0;
5467*4882a593Smuzhiyun 	u16 ipcse = 0, mss;
5468*4882a593Smuzhiyun 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
5469*4882a593Smuzhiyun 	int err;
5470*4882a593Smuzhiyun 
5471*4882a593Smuzhiyun 	if (!skb_is_gso(skb))
5472*4882a593Smuzhiyun 		return 0;
5473*4882a593Smuzhiyun 
5474*4882a593Smuzhiyun 	err = skb_cow_head(skb, 0);
5475*4882a593Smuzhiyun 	if (err < 0)
5476*4882a593Smuzhiyun 		return err;
5477*4882a593Smuzhiyun 
5478*4882a593Smuzhiyun 	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5479*4882a593Smuzhiyun 	mss = skb_shinfo(skb)->gso_size;
5480*4882a593Smuzhiyun 	if (protocol == htons(ETH_P_IP)) {
5481*4882a593Smuzhiyun 		struct iphdr *iph = ip_hdr(skb);
5482*4882a593Smuzhiyun 		iph->tot_len = 0;
5483*4882a593Smuzhiyun 		iph->check = 0;
5484*4882a593Smuzhiyun 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5485*4882a593Smuzhiyun 							 0, IPPROTO_TCP, 0);
5486*4882a593Smuzhiyun 		cmd_length = E1000_TXD_CMD_IP;
5487*4882a593Smuzhiyun 		ipcse = skb_transport_offset(skb) - 1;
5488*4882a593Smuzhiyun 	} else if (skb_is_gso_v6(skb)) {
5489*4882a593Smuzhiyun 		tcp_v6_gso_csum_prep(skb);
5490*4882a593Smuzhiyun 		ipcse = 0;
5491*4882a593Smuzhiyun 	}
5492*4882a593Smuzhiyun 	ipcss = skb_network_offset(skb);
5493*4882a593Smuzhiyun 	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5494*4882a593Smuzhiyun 	tucss = skb_transport_offset(skb);
5495*4882a593Smuzhiyun 	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5496*4882a593Smuzhiyun 
5497*4882a593Smuzhiyun 	cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5498*4882a593Smuzhiyun 		       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5499*4882a593Smuzhiyun 
5500*4882a593Smuzhiyun 	i = tx_ring->next_to_use;
5501*4882a593Smuzhiyun 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5502*4882a593Smuzhiyun 	buffer_info = &tx_ring->buffer_info[i];
5503*4882a593Smuzhiyun 
5504*4882a593Smuzhiyun 	context_desc->lower_setup.ip_fields.ipcss = ipcss;
5505*4882a593Smuzhiyun 	context_desc->lower_setup.ip_fields.ipcso = ipcso;
5506*4882a593Smuzhiyun 	context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5507*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucss = tucss;
5508*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucso = tucso;
5509*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucse = 0;
5510*4882a593Smuzhiyun 	context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5511*4882a593Smuzhiyun 	context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5512*4882a593Smuzhiyun 	context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5513*4882a593Smuzhiyun 
5514*4882a593Smuzhiyun 	buffer_info->time_stamp = jiffies;
5515*4882a593Smuzhiyun 	buffer_info->next_to_watch = i;
5516*4882a593Smuzhiyun 
5517*4882a593Smuzhiyun 	i++;
5518*4882a593Smuzhiyun 	if (i == tx_ring->count)
5519*4882a593Smuzhiyun 		i = 0;
5520*4882a593Smuzhiyun 	tx_ring->next_to_use = i;
5521*4882a593Smuzhiyun 
5522*4882a593Smuzhiyun 	return 1;
5523*4882a593Smuzhiyun }
5524*4882a593Smuzhiyun 
e1000_tx_csum(struct e1000_ring * tx_ring,struct sk_buff * skb,__be16 protocol)5525*4882a593Smuzhiyun static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
5526*4882a593Smuzhiyun 			  __be16 protocol)
5527*4882a593Smuzhiyun {
5528*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
5529*4882a593Smuzhiyun 	struct e1000_context_desc *context_desc;
5530*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
5531*4882a593Smuzhiyun 	unsigned int i;
5532*4882a593Smuzhiyun 	u8 css;
5533*4882a593Smuzhiyun 	u32 cmd_len = E1000_TXD_CMD_DEXT;
5534*4882a593Smuzhiyun 
5535*4882a593Smuzhiyun 	if (skb->ip_summed != CHECKSUM_PARTIAL)
5536*4882a593Smuzhiyun 		return false;
5537*4882a593Smuzhiyun 
5538*4882a593Smuzhiyun 	switch (protocol) {
5539*4882a593Smuzhiyun 	case cpu_to_be16(ETH_P_IP):
5540*4882a593Smuzhiyun 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5541*4882a593Smuzhiyun 			cmd_len |= E1000_TXD_CMD_TCP;
5542*4882a593Smuzhiyun 		break;
5543*4882a593Smuzhiyun 	case cpu_to_be16(ETH_P_IPV6):
5544*4882a593Smuzhiyun 		/* XXX not handling all IPV6 headers */
5545*4882a593Smuzhiyun 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5546*4882a593Smuzhiyun 			cmd_len |= E1000_TXD_CMD_TCP;
5547*4882a593Smuzhiyun 		break;
5548*4882a593Smuzhiyun 	default:
5549*4882a593Smuzhiyun 		if (unlikely(net_ratelimit()))
5550*4882a593Smuzhiyun 			e_warn("checksum_partial proto=%x!\n",
5551*4882a593Smuzhiyun 			       be16_to_cpu(protocol));
5552*4882a593Smuzhiyun 		break;
5553*4882a593Smuzhiyun 	}
5554*4882a593Smuzhiyun 
5555*4882a593Smuzhiyun 	css = skb_checksum_start_offset(skb);
5556*4882a593Smuzhiyun 
5557*4882a593Smuzhiyun 	i = tx_ring->next_to_use;
5558*4882a593Smuzhiyun 	buffer_info = &tx_ring->buffer_info[i];
5559*4882a593Smuzhiyun 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5560*4882a593Smuzhiyun 
5561*4882a593Smuzhiyun 	context_desc->lower_setup.ip_config = 0;
5562*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucss = css;
5563*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5564*4882a593Smuzhiyun 	context_desc->upper_setup.tcp_fields.tucse = 0;
5565*4882a593Smuzhiyun 	context_desc->tcp_seg_setup.data = 0;
5566*4882a593Smuzhiyun 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5567*4882a593Smuzhiyun 
5568*4882a593Smuzhiyun 	buffer_info->time_stamp = jiffies;
5569*4882a593Smuzhiyun 	buffer_info->next_to_watch = i;
5570*4882a593Smuzhiyun 
5571*4882a593Smuzhiyun 	i++;
5572*4882a593Smuzhiyun 	if (i == tx_ring->count)
5573*4882a593Smuzhiyun 		i = 0;
5574*4882a593Smuzhiyun 	tx_ring->next_to_use = i;
5575*4882a593Smuzhiyun 
5576*4882a593Smuzhiyun 	return true;
5577*4882a593Smuzhiyun }
5578*4882a593Smuzhiyun 
e1000_tx_map(struct e1000_ring * tx_ring,struct sk_buff * skb,unsigned int first,unsigned int max_per_txd,unsigned int nr_frags)5579*4882a593Smuzhiyun static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5580*4882a593Smuzhiyun 			unsigned int first, unsigned int max_per_txd,
5581*4882a593Smuzhiyun 			unsigned int nr_frags)
5582*4882a593Smuzhiyun {
5583*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
5584*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
5585*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
5586*4882a593Smuzhiyun 	unsigned int len = skb_headlen(skb);
5587*4882a593Smuzhiyun 	unsigned int offset = 0, size, count = 0, i;
5588*4882a593Smuzhiyun 	unsigned int f, bytecount, segs;
5589*4882a593Smuzhiyun 
5590*4882a593Smuzhiyun 	i = tx_ring->next_to_use;
5591*4882a593Smuzhiyun 
5592*4882a593Smuzhiyun 	while (len) {
5593*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
5594*4882a593Smuzhiyun 		size = min(len, max_per_txd);
5595*4882a593Smuzhiyun 
5596*4882a593Smuzhiyun 		buffer_info->length = size;
5597*4882a593Smuzhiyun 		buffer_info->time_stamp = jiffies;
5598*4882a593Smuzhiyun 		buffer_info->next_to_watch = i;
5599*4882a593Smuzhiyun 		buffer_info->dma = dma_map_single(&pdev->dev,
5600*4882a593Smuzhiyun 						  skb->data + offset,
5601*4882a593Smuzhiyun 						  size, DMA_TO_DEVICE);
5602*4882a593Smuzhiyun 		buffer_info->mapped_as_page = false;
5603*4882a593Smuzhiyun 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5604*4882a593Smuzhiyun 			goto dma_error;
5605*4882a593Smuzhiyun 
5606*4882a593Smuzhiyun 		len -= size;
5607*4882a593Smuzhiyun 		offset += size;
5608*4882a593Smuzhiyun 		count++;
5609*4882a593Smuzhiyun 
5610*4882a593Smuzhiyun 		if (len) {
5611*4882a593Smuzhiyun 			i++;
5612*4882a593Smuzhiyun 			if (i == tx_ring->count)
5613*4882a593Smuzhiyun 				i = 0;
5614*4882a593Smuzhiyun 		}
5615*4882a593Smuzhiyun 	}
5616*4882a593Smuzhiyun 
5617*4882a593Smuzhiyun 	for (f = 0; f < nr_frags; f++) {
5618*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
5619*4882a593Smuzhiyun 
5620*4882a593Smuzhiyun 		len = skb_frag_size(frag);
5621*4882a593Smuzhiyun 		offset = 0;
5622*4882a593Smuzhiyun 
5623*4882a593Smuzhiyun 		while (len) {
5624*4882a593Smuzhiyun 			i++;
5625*4882a593Smuzhiyun 			if (i == tx_ring->count)
5626*4882a593Smuzhiyun 				i = 0;
5627*4882a593Smuzhiyun 
5628*4882a593Smuzhiyun 			buffer_info = &tx_ring->buffer_info[i];
5629*4882a593Smuzhiyun 			size = min(len, max_per_txd);
5630*4882a593Smuzhiyun 
5631*4882a593Smuzhiyun 			buffer_info->length = size;
5632*4882a593Smuzhiyun 			buffer_info->time_stamp = jiffies;
5633*4882a593Smuzhiyun 			buffer_info->next_to_watch = i;
5634*4882a593Smuzhiyun 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5635*4882a593Smuzhiyun 							    offset, size,
5636*4882a593Smuzhiyun 							    DMA_TO_DEVICE);
5637*4882a593Smuzhiyun 			buffer_info->mapped_as_page = true;
5638*4882a593Smuzhiyun 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5639*4882a593Smuzhiyun 				goto dma_error;
5640*4882a593Smuzhiyun 
5641*4882a593Smuzhiyun 			len -= size;
5642*4882a593Smuzhiyun 			offset += size;
5643*4882a593Smuzhiyun 			count++;
5644*4882a593Smuzhiyun 		}
5645*4882a593Smuzhiyun 	}
5646*4882a593Smuzhiyun 
5647*4882a593Smuzhiyun 	segs = skb_shinfo(skb)->gso_segs ? : 1;
5648*4882a593Smuzhiyun 	/* multiply data chunks by size of headers */
5649*4882a593Smuzhiyun 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
5650*4882a593Smuzhiyun 
5651*4882a593Smuzhiyun 	tx_ring->buffer_info[i].skb = skb;
5652*4882a593Smuzhiyun 	tx_ring->buffer_info[i].segs = segs;
5653*4882a593Smuzhiyun 	tx_ring->buffer_info[i].bytecount = bytecount;
5654*4882a593Smuzhiyun 	tx_ring->buffer_info[first].next_to_watch = i;
5655*4882a593Smuzhiyun 
5656*4882a593Smuzhiyun 	return count;
5657*4882a593Smuzhiyun 
5658*4882a593Smuzhiyun dma_error:
5659*4882a593Smuzhiyun 	dev_err(&pdev->dev, "Tx DMA map failed\n");
5660*4882a593Smuzhiyun 	buffer_info->dma = 0;
5661*4882a593Smuzhiyun 	if (count)
5662*4882a593Smuzhiyun 		count--;
5663*4882a593Smuzhiyun 
5664*4882a593Smuzhiyun 	while (count--) {
5665*4882a593Smuzhiyun 		if (i == 0)
5666*4882a593Smuzhiyun 			i += tx_ring->count;
5667*4882a593Smuzhiyun 		i--;
5668*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
5669*4882a593Smuzhiyun 		e1000_put_txbuf(tx_ring, buffer_info, true);
5670*4882a593Smuzhiyun 	}
5671*4882a593Smuzhiyun 
5672*4882a593Smuzhiyun 	return 0;
5673*4882a593Smuzhiyun }
5674*4882a593Smuzhiyun 
e1000_tx_queue(struct e1000_ring * tx_ring,int tx_flags,int count)5675*4882a593Smuzhiyun static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5676*4882a593Smuzhiyun {
5677*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
5678*4882a593Smuzhiyun 	struct e1000_tx_desc *tx_desc = NULL;
5679*4882a593Smuzhiyun 	struct e1000_buffer *buffer_info;
5680*4882a593Smuzhiyun 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5681*4882a593Smuzhiyun 	unsigned int i;
5682*4882a593Smuzhiyun 
5683*4882a593Smuzhiyun 	if (tx_flags & E1000_TX_FLAGS_TSO) {
5684*4882a593Smuzhiyun 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5685*4882a593Smuzhiyun 		    E1000_TXD_CMD_TSE;
5686*4882a593Smuzhiyun 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5687*4882a593Smuzhiyun 
5688*4882a593Smuzhiyun 		if (tx_flags & E1000_TX_FLAGS_IPV4)
5689*4882a593Smuzhiyun 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5690*4882a593Smuzhiyun 	}
5691*4882a593Smuzhiyun 
5692*4882a593Smuzhiyun 	if (tx_flags & E1000_TX_FLAGS_CSUM) {
5693*4882a593Smuzhiyun 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5694*4882a593Smuzhiyun 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5695*4882a593Smuzhiyun 	}
5696*4882a593Smuzhiyun 
5697*4882a593Smuzhiyun 	if (tx_flags & E1000_TX_FLAGS_VLAN) {
5698*4882a593Smuzhiyun 		txd_lower |= E1000_TXD_CMD_VLE;
5699*4882a593Smuzhiyun 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5700*4882a593Smuzhiyun 	}
5701*4882a593Smuzhiyun 
5702*4882a593Smuzhiyun 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5703*4882a593Smuzhiyun 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
5704*4882a593Smuzhiyun 
5705*4882a593Smuzhiyun 	if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5706*4882a593Smuzhiyun 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5707*4882a593Smuzhiyun 		txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5708*4882a593Smuzhiyun 	}
5709*4882a593Smuzhiyun 
5710*4882a593Smuzhiyun 	i = tx_ring->next_to_use;
5711*4882a593Smuzhiyun 
5712*4882a593Smuzhiyun 	do {
5713*4882a593Smuzhiyun 		buffer_info = &tx_ring->buffer_info[i];
5714*4882a593Smuzhiyun 		tx_desc = E1000_TX_DESC(*tx_ring, i);
5715*4882a593Smuzhiyun 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5716*4882a593Smuzhiyun 		tx_desc->lower.data = cpu_to_le32(txd_lower |
5717*4882a593Smuzhiyun 						  buffer_info->length);
5718*4882a593Smuzhiyun 		tx_desc->upper.data = cpu_to_le32(txd_upper);
5719*4882a593Smuzhiyun 
5720*4882a593Smuzhiyun 		i++;
5721*4882a593Smuzhiyun 		if (i == tx_ring->count)
5722*4882a593Smuzhiyun 			i = 0;
5723*4882a593Smuzhiyun 	} while (--count > 0);
5724*4882a593Smuzhiyun 
5725*4882a593Smuzhiyun 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5726*4882a593Smuzhiyun 
5727*4882a593Smuzhiyun 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5728*4882a593Smuzhiyun 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5729*4882a593Smuzhiyun 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5730*4882a593Smuzhiyun 
5731*4882a593Smuzhiyun 	/* Force memory writes to complete before letting h/w
5732*4882a593Smuzhiyun 	 * know there are new descriptors to fetch.  (Only
5733*4882a593Smuzhiyun 	 * applicable for weak-ordered memory model archs,
5734*4882a593Smuzhiyun 	 * such as IA-64).
5735*4882a593Smuzhiyun 	 */
5736*4882a593Smuzhiyun 	wmb();
5737*4882a593Smuzhiyun 
5738*4882a593Smuzhiyun 	tx_ring->next_to_use = i;
5739*4882a593Smuzhiyun }
5740*4882a593Smuzhiyun 
5741*4882a593Smuzhiyun #define MINIMUM_DHCP_PACKET_SIZE 282
e1000_transfer_dhcp_info(struct e1000_adapter * adapter,struct sk_buff * skb)5742*4882a593Smuzhiyun static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5743*4882a593Smuzhiyun 				    struct sk_buff *skb)
5744*4882a593Smuzhiyun {
5745*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
5746*4882a593Smuzhiyun 	u16 length, offset;
5747*4882a593Smuzhiyun 
5748*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb) &&
5749*4882a593Smuzhiyun 	    !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5750*4882a593Smuzhiyun 	      (adapter->hw.mng_cookie.status &
5751*4882a593Smuzhiyun 	       E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5752*4882a593Smuzhiyun 		return 0;
5753*4882a593Smuzhiyun 
5754*4882a593Smuzhiyun 	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5755*4882a593Smuzhiyun 		return 0;
5756*4882a593Smuzhiyun 
5757*4882a593Smuzhiyun 	if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5758*4882a593Smuzhiyun 		return 0;
5759*4882a593Smuzhiyun 
5760*4882a593Smuzhiyun 	{
5761*4882a593Smuzhiyun 		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5762*4882a593Smuzhiyun 		struct udphdr *udp;
5763*4882a593Smuzhiyun 
5764*4882a593Smuzhiyun 		if (ip->protocol != IPPROTO_UDP)
5765*4882a593Smuzhiyun 			return 0;
5766*4882a593Smuzhiyun 
5767*4882a593Smuzhiyun 		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5768*4882a593Smuzhiyun 		if (ntohs(udp->dest) != 67)
5769*4882a593Smuzhiyun 			return 0;
5770*4882a593Smuzhiyun 
5771*4882a593Smuzhiyun 		offset = (u8 *)udp + 8 - skb->data;
5772*4882a593Smuzhiyun 		length = skb->len - offset;
5773*4882a593Smuzhiyun 		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5774*4882a593Smuzhiyun 	}
5775*4882a593Smuzhiyun 
5776*4882a593Smuzhiyun 	return 0;
5777*4882a593Smuzhiyun }
5778*4882a593Smuzhiyun 
__e1000_maybe_stop_tx(struct e1000_ring * tx_ring,int size)5779*4882a593Smuzhiyun static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5780*4882a593Smuzhiyun {
5781*4882a593Smuzhiyun 	struct e1000_adapter *adapter = tx_ring->adapter;
5782*4882a593Smuzhiyun 
5783*4882a593Smuzhiyun 	netif_stop_queue(adapter->netdev);
5784*4882a593Smuzhiyun 	/* Herbert's original patch had:
5785*4882a593Smuzhiyun 	 *  smp_mb__after_netif_stop_queue();
5786*4882a593Smuzhiyun 	 * but since that doesn't exist yet, just open code it.
5787*4882a593Smuzhiyun 	 */
5788*4882a593Smuzhiyun 	smp_mb();
5789*4882a593Smuzhiyun 
5790*4882a593Smuzhiyun 	/* We need to check again in a case another CPU has just
5791*4882a593Smuzhiyun 	 * made room available.
5792*4882a593Smuzhiyun 	 */
5793*4882a593Smuzhiyun 	if (e1000_desc_unused(tx_ring) < size)
5794*4882a593Smuzhiyun 		return -EBUSY;
5795*4882a593Smuzhiyun 
5796*4882a593Smuzhiyun 	/* A reprieve! */
5797*4882a593Smuzhiyun 	netif_start_queue(adapter->netdev);
5798*4882a593Smuzhiyun 	++adapter->restart_queue;
5799*4882a593Smuzhiyun 	return 0;
5800*4882a593Smuzhiyun }
5801*4882a593Smuzhiyun 
e1000_maybe_stop_tx(struct e1000_ring * tx_ring,int size)5802*4882a593Smuzhiyun static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5803*4882a593Smuzhiyun {
5804*4882a593Smuzhiyun 	BUG_ON(size > tx_ring->count);
5805*4882a593Smuzhiyun 
5806*4882a593Smuzhiyun 	if (e1000_desc_unused(tx_ring) >= size)
5807*4882a593Smuzhiyun 		return 0;
5808*4882a593Smuzhiyun 	return __e1000_maybe_stop_tx(tx_ring, size);
5809*4882a593Smuzhiyun }
5810*4882a593Smuzhiyun 
e1000_xmit_frame(struct sk_buff * skb,struct net_device * netdev)5811*4882a593Smuzhiyun static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5812*4882a593Smuzhiyun 				    struct net_device *netdev)
5813*4882a593Smuzhiyun {
5814*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5815*4882a593Smuzhiyun 	struct e1000_ring *tx_ring = adapter->tx_ring;
5816*4882a593Smuzhiyun 	unsigned int first;
5817*4882a593Smuzhiyun 	unsigned int tx_flags = 0;
5818*4882a593Smuzhiyun 	unsigned int len = skb_headlen(skb);
5819*4882a593Smuzhiyun 	unsigned int nr_frags;
5820*4882a593Smuzhiyun 	unsigned int mss;
5821*4882a593Smuzhiyun 	int count = 0;
5822*4882a593Smuzhiyun 	int tso;
5823*4882a593Smuzhiyun 	unsigned int f;
5824*4882a593Smuzhiyun 	__be16 protocol = vlan_get_protocol(skb);
5825*4882a593Smuzhiyun 
5826*4882a593Smuzhiyun 	if (test_bit(__E1000_DOWN, &adapter->state)) {
5827*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
5828*4882a593Smuzhiyun 		return NETDEV_TX_OK;
5829*4882a593Smuzhiyun 	}
5830*4882a593Smuzhiyun 
5831*4882a593Smuzhiyun 	if (skb->len <= 0) {
5832*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
5833*4882a593Smuzhiyun 		return NETDEV_TX_OK;
5834*4882a593Smuzhiyun 	}
5835*4882a593Smuzhiyun 
5836*4882a593Smuzhiyun 	/* The minimum packet size with TCTL.PSP set is 17 bytes so
5837*4882a593Smuzhiyun 	 * pad skb in order to meet this minimum size requirement
5838*4882a593Smuzhiyun 	 */
5839*4882a593Smuzhiyun 	if (skb_put_padto(skb, 17))
5840*4882a593Smuzhiyun 		return NETDEV_TX_OK;
5841*4882a593Smuzhiyun 
5842*4882a593Smuzhiyun 	mss = skb_shinfo(skb)->gso_size;
5843*4882a593Smuzhiyun 	if (mss) {
5844*4882a593Smuzhiyun 		u8 hdr_len;
5845*4882a593Smuzhiyun 
5846*4882a593Smuzhiyun 		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5847*4882a593Smuzhiyun 		 * points to just header, pull a few bytes of payload from
5848*4882a593Smuzhiyun 		 * frags into skb->data
5849*4882a593Smuzhiyun 		 */
5850*4882a593Smuzhiyun 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5851*4882a593Smuzhiyun 		/* we do this workaround for ES2LAN, but it is un-necessary,
5852*4882a593Smuzhiyun 		 * avoiding it could save a lot of cycles
5853*4882a593Smuzhiyun 		 */
5854*4882a593Smuzhiyun 		if (skb->data_len && (hdr_len == len)) {
5855*4882a593Smuzhiyun 			unsigned int pull_size;
5856*4882a593Smuzhiyun 
5857*4882a593Smuzhiyun 			pull_size = min_t(unsigned int, 4, skb->data_len);
5858*4882a593Smuzhiyun 			if (!__pskb_pull_tail(skb, pull_size)) {
5859*4882a593Smuzhiyun 				e_err("__pskb_pull_tail failed.\n");
5860*4882a593Smuzhiyun 				dev_kfree_skb_any(skb);
5861*4882a593Smuzhiyun 				return NETDEV_TX_OK;
5862*4882a593Smuzhiyun 			}
5863*4882a593Smuzhiyun 			len = skb_headlen(skb);
5864*4882a593Smuzhiyun 		}
5865*4882a593Smuzhiyun 	}
5866*4882a593Smuzhiyun 
5867*4882a593Smuzhiyun 	/* reserve a descriptor for the offload context */
5868*4882a593Smuzhiyun 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5869*4882a593Smuzhiyun 		count++;
5870*4882a593Smuzhiyun 	count++;
5871*4882a593Smuzhiyun 
5872*4882a593Smuzhiyun 	count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5873*4882a593Smuzhiyun 
5874*4882a593Smuzhiyun 	nr_frags = skb_shinfo(skb)->nr_frags;
5875*4882a593Smuzhiyun 	for (f = 0; f < nr_frags; f++)
5876*4882a593Smuzhiyun 		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5877*4882a593Smuzhiyun 				      adapter->tx_fifo_limit);
5878*4882a593Smuzhiyun 
5879*4882a593Smuzhiyun 	if (adapter->hw.mac.tx_pkt_filtering)
5880*4882a593Smuzhiyun 		e1000_transfer_dhcp_info(adapter, skb);
5881*4882a593Smuzhiyun 
5882*4882a593Smuzhiyun 	/* need: count + 2 desc gap to keep tail from touching
5883*4882a593Smuzhiyun 	 * head, otherwise try next time
5884*4882a593Smuzhiyun 	 */
5885*4882a593Smuzhiyun 	if (e1000_maybe_stop_tx(tx_ring, count + 2))
5886*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
5887*4882a593Smuzhiyun 
5888*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb)) {
5889*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_VLAN;
5890*4882a593Smuzhiyun 		tx_flags |= (skb_vlan_tag_get(skb) <<
5891*4882a593Smuzhiyun 			     E1000_TX_FLAGS_VLAN_SHIFT);
5892*4882a593Smuzhiyun 	}
5893*4882a593Smuzhiyun 
5894*4882a593Smuzhiyun 	first = tx_ring->next_to_use;
5895*4882a593Smuzhiyun 
5896*4882a593Smuzhiyun 	tso = e1000_tso(tx_ring, skb, protocol);
5897*4882a593Smuzhiyun 	if (tso < 0) {
5898*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
5899*4882a593Smuzhiyun 		return NETDEV_TX_OK;
5900*4882a593Smuzhiyun 	}
5901*4882a593Smuzhiyun 
5902*4882a593Smuzhiyun 	if (tso)
5903*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_TSO;
5904*4882a593Smuzhiyun 	else if (e1000_tx_csum(tx_ring, skb, protocol))
5905*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_CSUM;
5906*4882a593Smuzhiyun 
5907*4882a593Smuzhiyun 	/* Old method was to assume IPv4 packet by default if TSO was enabled.
5908*4882a593Smuzhiyun 	 * 82571 hardware supports TSO capabilities for IPv6 as well...
5909*4882a593Smuzhiyun 	 * no longer assume, we must.
5910*4882a593Smuzhiyun 	 */
5911*4882a593Smuzhiyun 	if (protocol == htons(ETH_P_IP))
5912*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_IPV4;
5913*4882a593Smuzhiyun 
5914*4882a593Smuzhiyun 	if (unlikely(skb->no_fcs))
5915*4882a593Smuzhiyun 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
5916*4882a593Smuzhiyun 
5917*4882a593Smuzhiyun 	/* if count is 0 then mapping error has occurred */
5918*4882a593Smuzhiyun 	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5919*4882a593Smuzhiyun 			     nr_frags);
5920*4882a593Smuzhiyun 	if (count) {
5921*4882a593Smuzhiyun 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5922*4882a593Smuzhiyun 		    (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
5923*4882a593Smuzhiyun 			if (!adapter->tx_hwtstamp_skb) {
5924*4882a593Smuzhiyun 				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5925*4882a593Smuzhiyun 				tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5926*4882a593Smuzhiyun 				adapter->tx_hwtstamp_skb = skb_get(skb);
5927*4882a593Smuzhiyun 				adapter->tx_hwtstamp_start = jiffies;
5928*4882a593Smuzhiyun 				schedule_work(&adapter->tx_hwtstamp_work);
5929*4882a593Smuzhiyun 			} else {
5930*4882a593Smuzhiyun 				adapter->tx_hwtstamp_skipped++;
5931*4882a593Smuzhiyun 			}
5932*4882a593Smuzhiyun 		}
5933*4882a593Smuzhiyun 
5934*4882a593Smuzhiyun 		skb_tx_timestamp(skb);
5935*4882a593Smuzhiyun 
5936*4882a593Smuzhiyun 		netdev_sent_queue(netdev, skb->len);
5937*4882a593Smuzhiyun 		e1000_tx_queue(tx_ring, tx_flags, count);
5938*4882a593Smuzhiyun 		/* Make sure there is space in the ring for the next send. */
5939*4882a593Smuzhiyun 		e1000_maybe_stop_tx(tx_ring,
5940*4882a593Smuzhiyun 				    ((MAX_SKB_FRAGS + 1) *
5941*4882a593Smuzhiyun 				     DIV_ROUND_UP(PAGE_SIZE,
5942*4882a593Smuzhiyun 						  adapter->tx_fifo_limit) + 4));
5943*4882a593Smuzhiyun 
5944*4882a593Smuzhiyun 		if (!netdev_xmit_more() ||
5945*4882a593Smuzhiyun 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
5946*4882a593Smuzhiyun 			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5947*4882a593Smuzhiyun 				e1000e_update_tdt_wa(tx_ring,
5948*4882a593Smuzhiyun 						     tx_ring->next_to_use);
5949*4882a593Smuzhiyun 			else
5950*4882a593Smuzhiyun 				writel(tx_ring->next_to_use, tx_ring->tail);
5951*4882a593Smuzhiyun 		}
5952*4882a593Smuzhiyun 	} else {
5953*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
5954*4882a593Smuzhiyun 		tx_ring->buffer_info[first].time_stamp = 0;
5955*4882a593Smuzhiyun 		tx_ring->next_to_use = first;
5956*4882a593Smuzhiyun 	}
5957*4882a593Smuzhiyun 
5958*4882a593Smuzhiyun 	return NETDEV_TX_OK;
5959*4882a593Smuzhiyun }
5960*4882a593Smuzhiyun 
5961*4882a593Smuzhiyun /**
5962*4882a593Smuzhiyun  * e1000_tx_timeout - Respond to a Tx Hang
5963*4882a593Smuzhiyun  * @netdev: network interface device structure
5964*4882a593Smuzhiyun  * @txqueue: index of the hung queue (unused)
5965*4882a593Smuzhiyun  **/
e1000_tx_timeout(struct net_device * netdev,unsigned int __always_unused txqueue)5966*4882a593Smuzhiyun static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
5967*4882a593Smuzhiyun {
5968*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
5969*4882a593Smuzhiyun 
5970*4882a593Smuzhiyun 	/* Do the reset outside of interrupt context */
5971*4882a593Smuzhiyun 	adapter->tx_timeout_count++;
5972*4882a593Smuzhiyun 	schedule_work(&adapter->reset_task);
5973*4882a593Smuzhiyun }
5974*4882a593Smuzhiyun 
e1000_reset_task(struct work_struct * work)5975*4882a593Smuzhiyun static void e1000_reset_task(struct work_struct *work)
5976*4882a593Smuzhiyun {
5977*4882a593Smuzhiyun 	struct e1000_adapter *adapter;
5978*4882a593Smuzhiyun 	adapter = container_of(work, struct e1000_adapter, reset_task);
5979*4882a593Smuzhiyun 
5980*4882a593Smuzhiyun 	rtnl_lock();
5981*4882a593Smuzhiyun 	/* don't run the task if already down */
5982*4882a593Smuzhiyun 	if (test_bit(__E1000_DOWN, &adapter->state)) {
5983*4882a593Smuzhiyun 		rtnl_unlock();
5984*4882a593Smuzhiyun 		return;
5985*4882a593Smuzhiyun 	}
5986*4882a593Smuzhiyun 
5987*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_RESTART_NOW)) {
5988*4882a593Smuzhiyun 		e1000e_dump(adapter);
5989*4882a593Smuzhiyun 		e_err("Reset adapter unexpectedly\n");
5990*4882a593Smuzhiyun 	}
5991*4882a593Smuzhiyun 	e1000e_reinit_locked(adapter);
5992*4882a593Smuzhiyun 	rtnl_unlock();
5993*4882a593Smuzhiyun }
5994*4882a593Smuzhiyun 
5995*4882a593Smuzhiyun /**
5996*4882a593Smuzhiyun  * e1000_get_stats64 - Get System Network Statistics
5997*4882a593Smuzhiyun  * @netdev: network interface device structure
5998*4882a593Smuzhiyun  * @stats: rtnl_link_stats64 pointer
5999*4882a593Smuzhiyun  *
6000*4882a593Smuzhiyun  * Returns the address of the device statistics structure.
6001*4882a593Smuzhiyun  **/
e1000e_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * stats)6002*4882a593Smuzhiyun void e1000e_get_stats64(struct net_device *netdev,
6003*4882a593Smuzhiyun 			struct rtnl_link_stats64 *stats)
6004*4882a593Smuzhiyun {
6005*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6006*4882a593Smuzhiyun 
6007*4882a593Smuzhiyun 	spin_lock(&adapter->stats64_lock);
6008*4882a593Smuzhiyun 	e1000e_update_stats(adapter);
6009*4882a593Smuzhiyun 	/* Fill out the OS statistics structure */
6010*4882a593Smuzhiyun 	stats->rx_bytes = adapter->stats.gorc;
6011*4882a593Smuzhiyun 	stats->rx_packets = adapter->stats.gprc;
6012*4882a593Smuzhiyun 	stats->tx_bytes = adapter->stats.gotc;
6013*4882a593Smuzhiyun 	stats->tx_packets = adapter->stats.gptc;
6014*4882a593Smuzhiyun 	stats->multicast = adapter->stats.mprc;
6015*4882a593Smuzhiyun 	stats->collisions = adapter->stats.colc;
6016*4882a593Smuzhiyun 
6017*4882a593Smuzhiyun 	/* Rx Errors */
6018*4882a593Smuzhiyun 
6019*4882a593Smuzhiyun 	/* RLEC on some newer hardware can be incorrect so build
6020*4882a593Smuzhiyun 	 * our own version based on RUC and ROC
6021*4882a593Smuzhiyun 	 */
6022*4882a593Smuzhiyun 	stats->rx_errors = adapter->stats.rxerrc +
6023*4882a593Smuzhiyun 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
6024*4882a593Smuzhiyun 	    adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
6025*4882a593Smuzhiyun 	stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
6026*4882a593Smuzhiyun 	stats->rx_crc_errors = adapter->stats.crcerrs;
6027*4882a593Smuzhiyun 	stats->rx_frame_errors = adapter->stats.algnerrc;
6028*4882a593Smuzhiyun 	stats->rx_missed_errors = adapter->stats.mpc;
6029*4882a593Smuzhiyun 
6030*4882a593Smuzhiyun 	/* Tx Errors */
6031*4882a593Smuzhiyun 	stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
6032*4882a593Smuzhiyun 	stats->tx_aborted_errors = adapter->stats.ecol;
6033*4882a593Smuzhiyun 	stats->tx_window_errors = adapter->stats.latecol;
6034*4882a593Smuzhiyun 	stats->tx_carrier_errors = adapter->stats.tncrs;
6035*4882a593Smuzhiyun 
6036*4882a593Smuzhiyun 	/* Tx Dropped needs to be maintained elsewhere */
6037*4882a593Smuzhiyun 
6038*4882a593Smuzhiyun 	spin_unlock(&adapter->stats64_lock);
6039*4882a593Smuzhiyun }
6040*4882a593Smuzhiyun 
6041*4882a593Smuzhiyun /**
6042*4882a593Smuzhiyun  * e1000_change_mtu - Change the Maximum Transfer Unit
6043*4882a593Smuzhiyun  * @netdev: network interface device structure
6044*4882a593Smuzhiyun  * @new_mtu: new value for maximum frame size
6045*4882a593Smuzhiyun  *
6046*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
6047*4882a593Smuzhiyun  **/
e1000_change_mtu(struct net_device * netdev,int new_mtu)6048*4882a593Smuzhiyun static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
6049*4882a593Smuzhiyun {
6050*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6051*4882a593Smuzhiyun 	int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
6052*4882a593Smuzhiyun 
6053*4882a593Smuzhiyun 	/* Jumbo frame support */
6054*4882a593Smuzhiyun 	if ((new_mtu > ETH_DATA_LEN) &&
6055*4882a593Smuzhiyun 	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
6056*4882a593Smuzhiyun 		e_err("Jumbo Frames not supported.\n");
6057*4882a593Smuzhiyun 		return -EINVAL;
6058*4882a593Smuzhiyun 	}
6059*4882a593Smuzhiyun 
6060*4882a593Smuzhiyun 	/* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
6061*4882a593Smuzhiyun 	if ((adapter->hw.mac.type >= e1000_pch2lan) &&
6062*4882a593Smuzhiyun 	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
6063*4882a593Smuzhiyun 	    (new_mtu > ETH_DATA_LEN)) {
6064*4882a593Smuzhiyun 		e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
6065*4882a593Smuzhiyun 		return -EINVAL;
6066*4882a593Smuzhiyun 	}
6067*4882a593Smuzhiyun 
6068*4882a593Smuzhiyun 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
6069*4882a593Smuzhiyun 		usleep_range(1000, 1100);
6070*4882a593Smuzhiyun 	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
6071*4882a593Smuzhiyun 	adapter->max_frame_size = max_frame;
6072*4882a593Smuzhiyun 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
6073*4882a593Smuzhiyun 		   netdev->mtu, new_mtu);
6074*4882a593Smuzhiyun 	netdev->mtu = new_mtu;
6075*4882a593Smuzhiyun 
6076*4882a593Smuzhiyun 	pm_runtime_get_sync(netdev->dev.parent);
6077*4882a593Smuzhiyun 
6078*4882a593Smuzhiyun 	if (netif_running(netdev))
6079*4882a593Smuzhiyun 		e1000e_down(adapter, true);
6080*4882a593Smuzhiyun 
6081*4882a593Smuzhiyun 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
6082*4882a593Smuzhiyun 	 * means we reserve 2 more, this pushes us to allocate from the next
6083*4882a593Smuzhiyun 	 * larger slab size.
6084*4882a593Smuzhiyun 	 * i.e. RXBUFFER_2048 --> size-4096 slab
6085*4882a593Smuzhiyun 	 * However with the new *_jumbo_rx* routines, jumbo receives will use
6086*4882a593Smuzhiyun 	 * fragmented skbs
6087*4882a593Smuzhiyun 	 */
6088*4882a593Smuzhiyun 
6089*4882a593Smuzhiyun 	if (max_frame <= 2048)
6090*4882a593Smuzhiyun 		adapter->rx_buffer_len = 2048;
6091*4882a593Smuzhiyun 	else
6092*4882a593Smuzhiyun 		adapter->rx_buffer_len = 4096;
6093*4882a593Smuzhiyun 
6094*4882a593Smuzhiyun 	/* adjust allocation if LPE protects us, and we aren't using SBP */
6095*4882a593Smuzhiyun 	if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
6096*4882a593Smuzhiyun 		adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
6097*4882a593Smuzhiyun 
6098*4882a593Smuzhiyun 	if (netif_running(netdev))
6099*4882a593Smuzhiyun 		e1000e_up(adapter);
6100*4882a593Smuzhiyun 	else
6101*4882a593Smuzhiyun 		e1000e_reset(adapter);
6102*4882a593Smuzhiyun 
6103*4882a593Smuzhiyun 	pm_runtime_put_sync(netdev->dev.parent);
6104*4882a593Smuzhiyun 
6105*4882a593Smuzhiyun 	clear_bit(__E1000_RESETTING, &adapter->state);
6106*4882a593Smuzhiyun 
6107*4882a593Smuzhiyun 	return 0;
6108*4882a593Smuzhiyun }
6109*4882a593Smuzhiyun 
e1000_mii_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)6110*4882a593Smuzhiyun static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
6111*4882a593Smuzhiyun 			   int cmd)
6112*4882a593Smuzhiyun {
6113*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6114*4882a593Smuzhiyun 	struct mii_ioctl_data *data = if_mii(ifr);
6115*4882a593Smuzhiyun 
6116*4882a593Smuzhiyun 	if (adapter->hw.phy.media_type != e1000_media_type_copper)
6117*4882a593Smuzhiyun 		return -EOPNOTSUPP;
6118*4882a593Smuzhiyun 
6119*4882a593Smuzhiyun 	switch (cmd) {
6120*4882a593Smuzhiyun 	case SIOCGMIIPHY:
6121*4882a593Smuzhiyun 		data->phy_id = adapter->hw.phy.addr;
6122*4882a593Smuzhiyun 		break;
6123*4882a593Smuzhiyun 	case SIOCGMIIREG:
6124*4882a593Smuzhiyun 		e1000_phy_read_status(adapter);
6125*4882a593Smuzhiyun 
6126*4882a593Smuzhiyun 		switch (data->reg_num & 0x1F) {
6127*4882a593Smuzhiyun 		case MII_BMCR:
6128*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.bmcr;
6129*4882a593Smuzhiyun 			break;
6130*4882a593Smuzhiyun 		case MII_BMSR:
6131*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.bmsr;
6132*4882a593Smuzhiyun 			break;
6133*4882a593Smuzhiyun 		case MII_PHYSID1:
6134*4882a593Smuzhiyun 			data->val_out = (adapter->hw.phy.id >> 16);
6135*4882a593Smuzhiyun 			break;
6136*4882a593Smuzhiyun 		case MII_PHYSID2:
6137*4882a593Smuzhiyun 			data->val_out = (adapter->hw.phy.id & 0xFFFF);
6138*4882a593Smuzhiyun 			break;
6139*4882a593Smuzhiyun 		case MII_ADVERTISE:
6140*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.advertise;
6141*4882a593Smuzhiyun 			break;
6142*4882a593Smuzhiyun 		case MII_LPA:
6143*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.lpa;
6144*4882a593Smuzhiyun 			break;
6145*4882a593Smuzhiyun 		case MII_EXPANSION:
6146*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.expansion;
6147*4882a593Smuzhiyun 			break;
6148*4882a593Smuzhiyun 		case MII_CTRL1000:
6149*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.ctrl1000;
6150*4882a593Smuzhiyun 			break;
6151*4882a593Smuzhiyun 		case MII_STAT1000:
6152*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.stat1000;
6153*4882a593Smuzhiyun 			break;
6154*4882a593Smuzhiyun 		case MII_ESTATUS:
6155*4882a593Smuzhiyun 			data->val_out = adapter->phy_regs.estatus;
6156*4882a593Smuzhiyun 			break;
6157*4882a593Smuzhiyun 		default:
6158*4882a593Smuzhiyun 			return -EIO;
6159*4882a593Smuzhiyun 		}
6160*4882a593Smuzhiyun 		break;
6161*4882a593Smuzhiyun 	case SIOCSMIIREG:
6162*4882a593Smuzhiyun 	default:
6163*4882a593Smuzhiyun 		return -EOPNOTSUPP;
6164*4882a593Smuzhiyun 	}
6165*4882a593Smuzhiyun 	return 0;
6166*4882a593Smuzhiyun }
6167*4882a593Smuzhiyun 
6168*4882a593Smuzhiyun /**
6169*4882a593Smuzhiyun  * e1000e_hwtstamp_ioctl - control hardware time stamping
6170*4882a593Smuzhiyun  * @netdev: network interface device structure
6171*4882a593Smuzhiyun  * @ifr: interface request
6172*4882a593Smuzhiyun  *
6173*4882a593Smuzhiyun  * Outgoing time stamping can be enabled and disabled. Play nice and
6174*4882a593Smuzhiyun  * disable it when requested, although it shouldn't cause any overhead
6175*4882a593Smuzhiyun  * when no packet needs it. At most one packet in the queue may be
6176*4882a593Smuzhiyun  * marked for time stamping, otherwise it would be impossible to tell
6177*4882a593Smuzhiyun  * for sure to which packet the hardware time stamp belongs.
6178*4882a593Smuzhiyun  *
6179*4882a593Smuzhiyun  * Incoming time stamping has to be configured via the hardware filters.
6180*4882a593Smuzhiyun  * Not all combinations are supported, in particular event type has to be
6181*4882a593Smuzhiyun  * specified. Matching the kind of event packet is not supported, with the
6182*4882a593Smuzhiyun  * exception of "all V2 events regardless of level 2 or 4".
6183*4882a593Smuzhiyun  **/
e1000e_hwtstamp_set(struct net_device * netdev,struct ifreq * ifr)6184*4882a593Smuzhiyun static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
6185*4882a593Smuzhiyun {
6186*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6187*4882a593Smuzhiyun 	struct hwtstamp_config config;
6188*4882a593Smuzhiyun 	int ret_val;
6189*4882a593Smuzhiyun 
6190*4882a593Smuzhiyun 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6191*4882a593Smuzhiyun 		return -EFAULT;
6192*4882a593Smuzhiyun 
6193*4882a593Smuzhiyun 	ret_val = e1000e_config_hwtstamp(adapter, &config);
6194*4882a593Smuzhiyun 	if (ret_val)
6195*4882a593Smuzhiyun 		return ret_val;
6196*4882a593Smuzhiyun 
6197*4882a593Smuzhiyun 	switch (config.rx_filter) {
6198*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6199*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6200*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
6201*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6202*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6203*4882a593Smuzhiyun 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
6204*4882a593Smuzhiyun 		/* With V2 type filters which specify a Sync or Delay Request,
6205*4882a593Smuzhiyun 		 * Path Delay Request/Response messages are also time stamped
6206*4882a593Smuzhiyun 		 * by hardware so notify the caller the requested packets plus
6207*4882a593Smuzhiyun 		 * some others are time stamped.
6208*4882a593Smuzhiyun 		 */
6209*4882a593Smuzhiyun 		config.rx_filter = HWTSTAMP_FILTER_SOME;
6210*4882a593Smuzhiyun 		break;
6211*4882a593Smuzhiyun 	default:
6212*4882a593Smuzhiyun 		break;
6213*4882a593Smuzhiyun 	}
6214*4882a593Smuzhiyun 
6215*4882a593Smuzhiyun 	return copy_to_user(ifr->ifr_data, &config,
6216*4882a593Smuzhiyun 			    sizeof(config)) ? -EFAULT : 0;
6217*4882a593Smuzhiyun }
6218*4882a593Smuzhiyun 
e1000e_hwtstamp_get(struct net_device * netdev,struct ifreq * ifr)6219*4882a593Smuzhiyun static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
6220*4882a593Smuzhiyun {
6221*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6222*4882a593Smuzhiyun 
6223*4882a593Smuzhiyun 	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
6224*4882a593Smuzhiyun 			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
6225*4882a593Smuzhiyun }
6226*4882a593Smuzhiyun 
e1000_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)6227*4882a593Smuzhiyun static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6228*4882a593Smuzhiyun {
6229*4882a593Smuzhiyun 	switch (cmd) {
6230*4882a593Smuzhiyun 	case SIOCGMIIPHY:
6231*4882a593Smuzhiyun 	case SIOCGMIIREG:
6232*4882a593Smuzhiyun 	case SIOCSMIIREG:
6233*4882a593Smuzhiyun 		return e1000_mii_ioctl(netdev, ifr, cmd);
6234*4882a593Smuzhiyun 	case SIOCSHWTSTAMP:
6235*4882a593Smuzhiyun 		return e1000e_hwtstamp_set(netdev, ifr);
6236*4882a593Smuzhiyun 	case SIOCGHWTSTAMP:
6237*4882a593Smuzhiyun 		return e1000e_hwtstamp_get(netdev, ifr);
6238*4882a593Smuzhiyun 	default:
6239*4882a593Smuzhiyun 		return -EOPNOTSUPP;
6240*4882a593Smuzhiyun 	}
6241*4882a593Smuzhiyun }
6242*4882a593Smuzhiyun 
e1000_init_phy_wakeup(struct e1000_adapter * adapter,u32 wufc)6243*4882a593Smuzhiyun static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
6244*4882a593Smuzhiyun {
6245*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
6246*4882a593Smuzhiyun 	u32 i, mac_reg, wuc;
6247*4882a593Smuzhiyun 	u16 phy_reg, wuc_enable;
6248*4882a593Smuzhiyun 	int retval;
6249*4882a593Smuzhiyun 
6250*4882a593Smuzhiyun 	/* copy MAC RARs to PHY RARs */
6251*4882a593Smuzhiyun 	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
6252*4882a593Smuzhiyun 
6253*4882a593Smuzhiyun 	retval = hw->phy.ops.acquire(hw);
6254*4882a593Smuzhiyun 	if (retval) {
6255*4882a593Smuzhiyun 		e_err("Could not acquire PHY\n");
6256*4882a593Smuzhiyun 		return retval;
6257*4882a593Smuzhiyun 	}
6258*4882a593Smuzhiyun 
6259*4882a593Smuzhiyun 	/* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
6260*4882a593Smuzhiyun 	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
6261*4882a593Smuzhiyun 	if (retval)
6262*4882a593Smuzhiyun 		goto release;
6263*4882a593Smuzhiyun 
6264*4882a593Smuzhiyun 	/* copy MAC MTA to PHY MTA - only needed for pchlan */
6265*4882a593Smuzhiyun 	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
6266*4882a593Smuzhiyun 		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
6267*4882a593Smuzhiyun 		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
6268*4882a593Smuzhiyun 					   (u16)(mac_reg & 0xFFFF));
6269*4882a593Smuzhiyun 		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
6270*4882a593Smuzhiyun 					   (u16)((mac_reg >> 16) & 0xFFFF));
6271*4882a593Smuzhiyun 	}
6272*4882a593Smuzhiyun 
6273*4882a593Smuzhiyun 	/* configure PHY Rx Control register */
6274*4882a593Smuzhiyun 	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
6275*4882a593Smuzhiyun 	mac_reg = er32(RCTL);
6276*4882a593Smuzhiyun 	if (mac_reg & E1000_RCTL_UPE)
6277*4882a593Smuzhiyun 		phy_reg |= BM_RCTL_UPE;
6278*4882a593Smuzhiyun 	if (mac_reg & E1000_RCTL_MPE)
6279*4882a593Smuzhiyun 		phy_reg |= BM_RCTL_MPE;
6280*4882a593Smuzhiyun 	phy_reg &= ~(BM_RCTL_MO_MASK);
6281*4882a593Smuzhiyun 	if (mac_reg & E1000_RCTL_MO_3)
6282*4882a593Smuzhiyun 		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
6283*4882a593Smuzhiyun 			    << BM_RCTL_MO_SHIFT);
6284*4882a593Smuzhiyun 	if (mac_reg & E1000_RCTL_BAM)
6285*4882a593Smuzhiyun 		phy_reg |= BM_RCTL_BAM;
6286*4882a593Smuzhiyun 	if (mac_reg & E1000_RCTL_PMCF)
6287*4882a593Smuzhiyun 		phy_reg |= BM_RCTL_PMCF;
6288*4882a593Smuzhiyun 	mac_reg = er32(CTRL);
6289*4882a593Smuzhiyun 	if (mac_reg & E1000_CTRL_RFCE)
6290*4882a593Smuzhiyun 		phy_reg |= BM_RCTL_RFCE;
6291*4882a593Smuzhiyun 	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
6292*4882a593Smuzhiyun 
6293*4882a593Smuzhiyun 	wuc = E1000_WUC_PME_EN;
6294*4882a593Smuzhiyun 	if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
6295*4882a593Smuzhiyun 		wuc |= E1000_WUC_APME;
6296*4882a593Smuzhiyun 
6297*4882a593Smuzhiyun 	/* enable PHY wakeup in MAC register */
6298*4882a593Smuzhiyun 	ew32(WUFC, wufc);
6299*4882a593Smuzhiyun 	ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
6300*4882a593Smuzhiyun 		   E1000_WUC_PME_STATUS | wuc));
6301*4882a593Smuzhiyun 
6302*4882a593Smuzhiyun 	/* configure and enable PHY wakeup in PHY registers */
6303*4882a593Smuzhiyun 	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
6304*4882a593Smuzhiyun 	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
6305*4882a593Smuzhiyun 
6306*4882a593Smuzhiyun 	/* activate PHY wakeup */
6307*4882a593Smuzhiyun 	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
6308*4882a593Smuzhiyun 	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
6309*4882a593Smuzhiyun 	if (retval)
6310*4882a593Smuzhiyun 		e_err("Could not set PHY Host Wakeup bit\n");
6311*4882a593Smuzhiyun release:
6312*4882a593Smuzhiyun 	hw->phy.ops.release(hw);
6313*4882a593Smuzhiyun 
6314*4882a593Smuzhiyun 	return retval;
6315*4882a593Smuzhiyun }
6316*4882a593Smuzhiyun 
e1000e_flush_lpic(struct pci_dev * pdev)6317*4882a593Smuzhiyun static void e1000e_flush_lpic(struct pci_dev *pdev)
6318*4882a593Smuzhiyun {
6319*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
6320*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6321*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
6322*4882a593Smuzhiyun 	u32 ret_val;
6323*4882a593Smuzhiyun 
6324*4882a593Smuzhiyun 	pm_runtime_get_sync(netdev->dev.parent);
6325*4882a593Smuzhiyun 
6326*4882a593Smuzhiyun 	ret_val = hw->phy.ops.acquire(hw);
6327*4882a593Smuzhiyun 	if (ret_val)
6328*4882a593Smuzhiyun 		goto fl_out;
6329*4882a593Smuzhiyun 
6330*4882a593Smuzhiyun 	pr_info("EEE TX LPI TIMER: %08X\n",
6331*4882a593Smuzhiyun 		er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);
6332*4882a593Smuzhiyun 
6333*4882a593Smuzhiyun 	hw->phy.ops.release(hw);
6334*4882a593Smuzhiyun 
6335*4882a593Smuzhiyun fl_out:
6336*4882a593Smuzhiyun 	pm_runtime_put_sync(netdev->dev.parent);
6337*4882a593Smuzhiyun }
6338*4882a593Smuzhiyun 
6339*4882a593Smuzhiyun /* S0ix implementation */
e1000e_s0ix_entry_flow(struct e1000_adapter * adapter)6340*4882a593Smuzhiyun static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
6341*4882a593Smuzhiyun {
6342*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
6343*4882a593Smuzhiyun 	u32 mac_data;
6344*4882a593Smuzhiyun 	u16 phy_data;
6345*4882a593Smuzhiyun 
6346*4882a593Smuzhiyun 	/* Disable the periodic inband message,
6347*4882a593Smuzhiyun 	 * don't request PCIe clock in K1 page770_17[10:9] = 10b
6348*4882a593Smuzhiyun 	 */
6349*4882a593Smuzhiyun 	e1e_rphy(hw, HV_PM_CTRL, &phy_data);
6350*4882a593Smuzhiyun 	phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
6351*4882a593Smuzhiyun 	phy_data |= BIT(10);
6352*4882a593Smuzhiyun 	e1e_wphy(hw, HV_PM_CTRL, phy_data);
6353*4882a593Smuzhiyun 
6354*4882a593Smuzhiyun 	/* Make sure we don't exit K1 every time a new packet arrives
6355*4882a593Smuzhiyun 	 * 772_29[5] = 1 CS_Mode_Stay_In_K1
6356*4882a593Smuzhiyun 	 */
6357*4882a593Smuzhiyun 	e1e_rphy(hw, I217_CGFREG, &phy_data);
6358*4882a593Smuzhiyun 	phy_data |= BIT(5);
6359*4882a593Smuzhiyun 	e1e_wphy(hw, I217_CGFREG, phy_data);
6360*4882a593Smuzhiyun 
6361*4882a593Smuzhiyun 	/* Change the MAC/PHY interface to SMBus
6362*4882a593Smuzhiyun 	 * Force the SMBus in PHY page769_23[0] = 1
6363*4882a593Smuzhiyun 	 * Force the SMBus in MAC CTRL_EXT[11] = 1
6364*4882a593Smuzhiyun 	 */
6365*4882a593Smuzhiyun 	e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
6366*4882a593Smuzhiyun 	phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
6367*4882a593Smuzhiyun 	e1e_wphy(hw, CV_SMB_CTRL, phy_data);
6368*4882a593Smuzhiyun 	mac_data = er32(CTRL_EXT);
6369*4882a593Smuzhiyun 	mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
6370*4882a593Smuzhiyun 	ew32(CTRL_EXT, mac_data);
6371*4882a593Smuzhiyun 
6372*4882a593Smuzhiyun 	/* DFT control: PHY bit: page769_20[0] = 1
6373*4882a593Smuzhiyun 	 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
6374*4882a593Smuzhiyun 	 */
6375*4882a593Smuzhiyun 	e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
6376*4882a593Smuzhiyun 	phy_data |= BIT(0);
6377*4882a593Smuzhiyun 	e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
6378*4882a593Smuzhiyun 
6379*4882a593Smuzhiyun 	mac_data = er32(EXTCNF_CTRL);
6380*4882a593Smuzhiyun 	mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
6381*4882a593Smuzhiyun 	ew32(EXTCNF_CTRL, mac_data);
6382*4882a593Smuzhiyun 
6383*4882a593Smuzhiyun 	/* Check MAC Tx/Rx packet buffer pointers.
6384*4882a593Smuzhiyun 	 * Reset MAC Tx/Rx packet buffer pointers to suppress any
6385*4882a593Smuzhiyun 	 * pending traffic indication that would prevent power gating.
6386*4882a593Smuzhiyun 	 */
6387*4882a593Smuzhiyun 	mac_data = er32(TDFH);
6388*4882a593Smuzhiyun 	if (mac_data)
6389*4882a593Smuzhiyun 		ew32(TDFH, 0);
6390*4882a593Smuzhiyun 	mac_data = er32(TDFT);
6391*4882a593Smuzhiyun 	if (mac_data)
6392*4882a593Smuzhiyun 		ew32(TDFT, 0);
6393*4882a593Smuzhiyun 	mac_data = er32(TDFHS);
6394*4882a593Smuzhiyun 	if (mac_data)
6395*4882a593Smuzhiyun 		ew32(TDFHS, 0);
6396*4882a593Smuzhiyun 	mac_data = er32(TDFTS);
6397*4882a593Smuzhiyun 	if (mac_data)
6398*4882a593Smuzhiyun 		ew32(TDFTS, 0);
6399*4882a593Smuzhiyun 	mac_data = er32(TDFPC);
6400*4882a593Smuzhiyun 	if (mac_data)
6401*4882a593Smuzhiyun 		ew32(TDFPC, 0);
6402*4882a593Smuzhiyun 	mac_data = er32(RDFH);
6403*4882a593Smuzhiyun 	if (mac_data)
6404*4882a593Smuzhiyun 		ew32(RDFH, 0);
6405*4882a593Smuzhiyun 	mac_data = er32(RDFT);
6406*4882a593Smuzhiyun 	if (mac_data)
6407*4882a593Smuzhiyun 		ew32(RDFT, 0);
6408*4882a593Smuzhiyun 	mac_data = er32(RDFHS);
6409*4882a593Smuzhiyun 	if (mac_data)
6410*4882a593Smuzhiyun 		ew32(RDFHS, 0);
6411*4882a593Smuzhiyun 	mac_data = er32(RDFTS);
6412*4882a593Smuzhiyun 	if (mac_data)
6413*4882a593Smuzhiyun 		ew32(RDFTS, 0);
6414*4882a593Smuzhiyun 	mac_data = er32(RDFPC);
6415*4882a593Smuzhiyun 	if (mac_data)
6416*4882a593Smuzhiyun 		ew32(RDFPC, 0);
6417*4882a593Smuzhiyun 
6418*4882a593Smuzhiyun 	/* Enable the Dynamic Power Gating in the MAC */
6419*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM7);
6420*4882a593Smuzhiyun 	mac_data |= BIT(22);
6421*4882a593Smuzhiyun 	ew32(FEXTNVM7, mac_data);
6422*4882a593Smuzhiyun 
6423*4882a593Smuzhiyun 	/* Disable the time synchronization clock */
6424*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM7);
6425*4882a593Smuzhiyun 	mac_data |= BIT(31);
6426*4882a593Smuzhiyun 	mac_data &= ~BIT(0);
6427*4882a593Smuzhiyun 	ew32(FEXTNVM7, mac_data);
6428*4882a593Smuzhiyun 
6429*4882a593Smuzhiyun 	/* Dynamic Power Gating Enable */
6430*4882a593Smuzhiyun 	mac_data = er32(CTRL_EXT);
6431*4882a593Smuzhiyun 	mac_data |= BIT(3);
6432*4882a593Smuzhiyun 	ew32(CTRL_EXT, mac_data);
6433*4882a593Smuzhiyun 
6434*4882a593Smuzhiyun 	/* Disable disconnected cable conditioning for Power Gating */
6435*4882a593Smuzhiyun 	mac_data = er32(DPGFR);
6436*4882a593Smuzhiyun 	mac_data |= BIT(2);
6437*4882a593Smuzhiyun 	ew32(DPGFR, mac_data);
6438*4882a593Smuzhiyun 
6439*4882a593Smuzhiyun 	/* Don't wake from dynamic Power Gating with clock request */
6440*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM12);
6441*4882a593Smuzhiyun 	mac_data |= BIT(12);
6442*4882a593Smuzhiyun 	ew32(FEXTNVM12, mac_data);
6443*4882a593Smuzhiyun 
6444*4882a593Smuzhiyun 	/* Ungate PGCB clock */
6445*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM9);
6446*4882a593Smuzhiyun 	mac_data &= ~BIT(28);
6447*4882a593Smuzhiyun 	ew32(FEXTNVM9, mac_data);
6448*4882a593Smuzhiyun 
6449*4882a593Smuzhiyun 	/* Enable K1 off to enable mPHY Power Gating */
6450*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM6);
6451*4882a593Smuzhiyun 	mac_data |= BIT(31);
6452*4882a593Smuzhiyun 	ew32(FEXTNVM6, mac_data);
6453*4882a593Smuzhiyun 
6454*4882a593Smuzhiyun 	/* Enable mPHY power gating for any link and speed */
6455*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM8);
6456*4882a593Smuzhiyun 	mac_data |= BIT(9);
6457*4882a593Smuzhiyun 	ew32(FEXTNVM8, mac_data);
6458*4882a593Smuzhiyun 
6459*4882a593Smuzhiyun 	/* Enable the Dynamic Clock Gating in the DMA and MAC */
6460*4882a593Smuzhiyun 	mac_data = er32(CTRL_EXT);
6461*4882a593Smuzhiyun 	mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
6462*4882a593Smuzhiyun 	ew32(CTRL_EXT, mac_data);
6463*4882a593Smuzhiyun 
6464*4882a593Smuzhiyun 	/* No MAC DPG gating SLP_S0 in modern standby
6465*4882a593Smuzhiyun 	 * Switch the logic of the lanphypc to use PMC counter
6466*4882a593Smuzhiyun 	 */
6467*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM5);
6468*4882a593Smuzhiyun 	mac_data |= BIT(7);
6469*4882a593Smuzhiyun 	ew32(FEXTNVM5, mac_data);
6470*4882a593Smuzhiyun }
6471*4882a593Smuzhiyun 
e1000e_s0ix_exit_flow(struct e1000_adapter * adapter)6472*4882a593Smuzhiyun static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
6473*4882a593Smuzhiyun {
6474*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
6475*4882a593Smuzhiyun 	u32 mac_data;
6476*4882a593Smuzhiyun 	u16 phy_data;
6477*4882a593Smuzhiyun 
6478*4882a593Smuzhiyun 	/* Disable the Dynamic Power Gating in the MAC */
6479*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM7);
6480*4882a593Smuzhiyun 	mac_data &= 0xFFBFFFFF;
6481*4882a593Smuzhiyun 	ew32(FEXTNVM7, mac_data);
6482*4882a593Smuzhiyun 
6483*4882a593Smuzhiyun 	/* Enable the time synchronization clock */
6484*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM7);
6485*4882a593Smuzhiyun 	mac_data |= BIT(0);
6486*4882a593Smuzhiyun 	ew32(FEXTNVM7, mac_data);
6487*4882a593Smuzhiyun 
6488*4882a593Smuzhiyun 	/* Disable mPHY power gating for any link and speed */
6489*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM8);
6490*4882a593Smuzhiyun 	mac_data &= ~BIT(9);
6491*4882a593Smuzhiyun 	ew32(FEXTNVM8, mac_data);
6492*4882a593Smuzhiyun 
6493*4882a593Smuzhiyun 	/* Disable K1 off */
6494*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM6);
6495*4882a593Smuzhiyun 	mac_data &= ~BIT(31);
6496*4882a593Smuzhiyun 	ew32(FEXTNVM6, mac_data);
6497*4882a593Smuzhiyun 
6498*4882a593Smuzhiyun 	/* Disable Ungate PGCB clock */
6499*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM9);
6500*4882a593Smuzhiyun 	mac_data |= BIT(28);
6501*4882a593Smuzhiyun 	ew32(FEXTNVM9, mac_data);
6502*4882a593Smuzhiyun 
6503*4882a593Smuzhiyun 	/* Cancel not waking from dynamic
6504*4882a593Smuzhiyun 	 * Power Gating with clock request
6505*4882a593Smuzhiyun 	 */
6506*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM12);
6507*4882a593Smuzhiyun 	mac_data &= ~BIT(12);
6508*4882a593Smuzhiyun 	ew32(FEXTNVM12, mac_data);
6509*4882a593Smuzhiyun 
6510*4882a593Smuzhiyun 	/* Cancel disable disconnected cable conditioning
6511*4882a593Smuzhiyun 	 * for Power Gating
6512*4882a593Smuzhiyun 	 */
6513*4882a593Smuzhiyun 	mac_data = er32(DPGFR);
6514*4882a593Smuzhiyun 	mac_data &= ~BIT(2);
6515*4882a593Smuzhiyun 	ew32(DPGFR, mac_data);
6516*4882a593Smuzhiyun 
6517*4882a593Smuzhiyun 	/* Disable Dynamic Power Gating */
6518*4882a593Smuzhiyun 	mac_data = er32(CTRL_EXT);
6519*4882a593Smuzhiyun 	mac_data &= 0xFFFFFFF7;
6520*4882a593Smuzhiyun 	ew32(CTRL_EXT, mac_data);
6521*4882a593Smuzhiyun 
6522*4882a593Smuzhiyun 	/* Disable the Dynamic Clock Gating in the DMA and MAC */
6523*4882a593Smuzhiyun 	mac_data = er32(CTRL_EXT);
6524*4882a593Smuzhiyun 	mac_data &= 0xFFF7FFFF;
6525*4882a593Smuzhiyun 	ew32(CTRL_EXT, mac_data);
6526*4882a593Smuzhiyun 
6527*4882a593Smuzhiyun 	/* Revert the lanphypc logic to use the internal Gbe counter
6528*4882a593Smuzhiyun 	 * and not the PMC counter
6529*4882a593Smuzhiyun 	 */
6530*4882a593Smuzhiyun 	mac_data = er32(FEXTNVM5);
6531*4882a593Smuzhiyun 	mac_data &= 0xFFFFFF7F;
6532*4882a593Smuzhiyun 	ew32(FEXTNVM5, mac_data);
6533*4882a593Smuzhiyun 
6534*4882a593Smuzhiyun 	/* Enable the periodic inband message,
6535*4882a593Smuzhiyun 	 * Request PCIe clock in K1 page770_17[10:9] =01b
6536*4882a593Smuzhiyun 	 */
6537*4882a593Smuzhiyun 	e1e_rphy(hw, HV_PM_CTRL, &phy_data);
6538*4882a593Smuzhiyun 	phy_data &= 0xFBFF;
6539*4882a593Smuzhiyun 	phy_data |= HV_PM_CTRL_K1_CLK_REQ;
6540*4882a593Smuzhiyun 	e1e_wphy(hw, HV_PM_CTRL, phy_data);
6541*4882a593Smuzhiyun 
6542*4882a593Smuzhiyun 	/* Return back configuration
6543*4882a593Smuzhiyun 	 * 772_29[5] = 0 CS_Mode_Stay_In_K1
6544*4882a593Smuzhiyun 	 */
6545*4882a593Smuzhiyun 	e1e_rphy(hw, I217_CGFREG, &phy_data);
6546*4882a593Smuzhiyun 	phy_data &= 0xFFDF;
6547*4882a593Smuzhiyun 	e1e_wphy(hw, I217_CGFREG, phy_data);
6548*4882a593Smuzhiyun 
6549*4882a593Smuzhiyun 	/* Change the MAC/PHY interface to Kumeran
6550*4882a593Smuzhiyun 	 * Unforce the SMBus in PHY page769_23[0] = 0
6551*4882a593Smuzhiyun 	 * Unforce the SMBus in MAC CTRL_EXT[11] = 0
6552*4882a593Smuzhiyun 	 */
6553*4882a593Smuzhiyun 	e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
6554*4882a593Smuzhiyun 	phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
6555*4882a593Smuzhiyun 	e1e_wphy(hw, CV_SMB_CTRL, phy_data);
6556*4882a593Smuzhiyun 	mac_data = er32(CTRL_EXT);
6557*4882a593Smuzhiyun 	mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
6558*4882a593Smuzhiyun 	ew32(CTRL_EXT, mac_data);
6559*4882a593Smuzhiyun }
6560*4882a593Smuzhiyun 
e1000e_pm_freeze(struct device * dev)6561*4882a593Smuzhiyun static int e1000e_pm_freeze(struct device *dev)
6562*4882a593Smuzhiyun {
6563*4882a593Smuzhiyun 	struct net_device *netdev = dev_get_drvdata(dev);
6564*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6565*4882a593Smuzhiyun 	bool present;
6566*4882a593Smuzhiyun 
6567*4882a593Smuzhiyun 	rtnl_lock();
6568*4882a593Smuzhiyun 
6569*4882a593Smuzhiyun 	present = netif_device_present(netdev);
6570*4882a593Smuzhiyun 	netif_device_detach(netdev);
6571*4882a593Smuzhiyun 
6572*4882a593Smuzhiyun 	if (present && netif_running(netdev)) {
6573*4882a593Smuzhiyun 		int count = E1000_CHECK_RESET_COUNT;
6574*4882a593Smuzhiyun 
6575*4882a593Smuzhiyun 		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
6576*4882a593Smuzhiyun 			usleep_range(10000, 11000);
6577*4882a593Smuzhiyun 
6578*4882a593Smuzhiyun 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
6579*4882a593Smuzhiyun 
6580*4882a593Smuzhiyun 		/* Quiesce the device without resetting the hardware */
6581*4882a593Smuzhiyun 		e1000e_down(adapter, false);
6582*4882a593Smuzhiyun 		e1000_free_irq(adapter);
6583*4882a593Smuzhiyun 	}
6584*4882a593Smuzhiyun 	rtnl_unlock();
6585*4882a593Smuzhiyun 
6586*4882a593Smuzhiyun 	e1000e_reset_interrupt_capability(adapter);
6587*4882a593Smuzhiyun 
6588*4882a593Smuzhiyun 	/* Allow time for pending master requests to run */
6589*4882a593Smuzhiyun 	e1000e_disable_pcie_master(&adapter->hw);
6590*4882a593Smuzhiyun 
6591*4882a593Smuzhiyun 	return 0;
6592*4882a593Smuzhiyun }
6593*4882a593Smuzhiyun 
__e1000_shutdown(struct pci_dev * pdev,bool runtime)6594*4882a593Smuzhiyun static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
6595*4882a593Smuzhiyun {
6596*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
6597*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6598*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
6599*4882a593Smuzhiyun 	u32 ctrl, ctrl_ext, rctl, status, wufc;
6600*4882a593Smuzhiyun 	int retval = 0;
6601*4882a593Smuzhiyun 
6602*4882a593Smuzhiyun 	/* Runtime suspend should only enable wakeup for link changes */
6603*4882a593Smuzhiyun 	if (runtime)
6604*4882a593Smuzhiyun 		wufc = E1000_WUFC_LNKC;
6605*4882a593Smuzhiyun 	else if (device_may_wakeup(&pdev->dev))
6606*4882a593Smuzhiyun 		wufc = adapter->wol;
6607*4882a593Smuzhiyun 	else
6608*4882a593Smuzhiyun 		wufc = 0;
6609*4882a593Smuzhiyun 
6610*4882a593Smuzhiyun 	status = er32(STATUS);
6611*4882a593Smuzhiyun 	if (status & E1000_STATUS_LU)
6612*4882a593Smuzhiyun 		wufc &= ~E1000_WUFC_LNKC;
6613*4882a593Smuzhiyun 
6614*4882a593Smuzhiyun 	if (wufc) {
6615*4882a593Smuzhiyun 		e1000_setup_rctl(adapter);
6616*4882a593Smuzhiyun 		e1000e_set_rx_mode(netdev);
6617*4882a593Smuzhiyun 
6618*4882a593Smuzhiyun 		/* turn on all-multi mode if wake on multicast is enabled */
6619*4882a593Smuzhiyun 		if (wufc & E1000_WUFC_MC) {
6620*4882a593Smuzhiyun 			rctl = er32(RCTL);
6621*4882a593Smuzhiyun 			rctl |= E1000_RCTL_MPE;
6622*4882a593Smuzhiyun 			ew32(RCTL, rctl);
6623*4882a593Smuzhiyun 		}
6624*4882a593Smuzhiyun 
6625*4882a593Smuzhiyun 		ctrl = er32(CTRL);
6626*4882a593Smuzhiyun 		ctrl |= E1000_CTRL_ADVD3WUC;
6627*4882a593Smuzhiyun 		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
6628*4882a593Smuzhiyun 			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
6629*4882a593Smuzhiyun 		ew32(CTRL, ctrl);
6630*4882a593Smuzhiyun 
6631*4882a593Smuzhiyun 		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
6632*4882a593Smuzhiyun 		    adapter->hw.phy.media_type ==
6633*4882a593Smuzhiyun 		    e1000_media_type_internal_serdes) {
6634*4882a593Smuzhiyun 			/* keep the laser running in D3 */
6635*4882a593Smuzhiyun 			ctrl_ext = er32(CTRL_EXT);
6636*4882a593Smuzhiyun 			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
6637*4882a593Smuzhiyun 			ew32(CTRL_EXT, ctrl_ext);
6638*4882a593Smuzhiyun 		}
6639*4882a593Smuzhiyun 
6640*4882a593Smuzhiyun 		if (!runtime)
6641*4882a593Smuzhiyun 			e1000e_power_up_phy(adapter);
6642*4882a593Smuzhiyun 
6643*4882a593Smuzhiyun 		if (adapter->flags & FLAG_IS_ICH)
6644*4882a593Smuzhiyun 			e1000_suspend_workarounds_ich8lan(&adapter->hw);
6645*4882a593Smuzhiyun 
6646*4882a593Smuzhiyun 		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6647*4882a593Smuzhiyun 			/* enable wakeup by the PHY */
6648*4882a593Smuzhiyun 			retval = e1000_init_phy_wakeup(adapter, wufc);
6649*4882a593Smuzhiyun 			if (retval)
6650*4882a593Smuzhiyun 				return retval;
6651*4882a593Smuzhiyun 		} else {
6652*4882a593Smuzhiyun 			/* enable wakeup by the MAC */
6653*4882a593Smuzhiyun 			ew32(WUFC, wufc);
6654*4882a593Smuzhiyun 			ew32(WUC, E1000_WUC_PME_EN);
6655*4882a593Smuzhiyun 		}
6656*4882a593Smuzhiyun 	} else {
6657*4882a593Smuzhiyun 		ew32(WUC, 0);
6658*4882a593Smuzhiyun 		ew32(WUFC, 0);
6659*4882a593Smuzhiyun 
6660*4882a593Smuzhiyun 		e1000_power_down_phy(adapter);
6661*4882a593Smuzhiyun 	}
6662*4882a593Smuzhiyun 
6663*4882a593Smuzhiyun 	if (adapter->hw.phy.type == e1000_phy_igp_3) {
6664*4882a593Smuzhiyun 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
6665*4882a593Smuzhiyun 	} else if (hw->mac.type >= e1000_pch_lpt) {
6666*4882a593Smuzhiyun 		if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
6667*4882a593Smuzhiyun 			/* ULP does not support wake from unicast, multicast
6668*4882a593Smuzhiyun 			 * or broadcast.
6669*4882a593Smuzhiyun 			 */
6670*4882a593Smuzhiyun 			retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
6671*4882a593Smuzhiyun 
6672*4882a593Smuzhiyun 		if (retval)
6673*4882a593Smuzhiyun 			return retval;
6674*4882a593Smuzhiyun 	}
6675*4882a593Smuzhiyun 
6676*4882a593Smuzhiyun 	/* Ensure that the appropriate bits are set in LPI_CTRL
6677*4882a593Smuzhiyun 	 * for EEE in Sx
6678*4882a593Smuzhiyun 	 */
6679*4882a593Smuzhiyun 	if ((hw->phy.type >= e1000_phy_i217) &&
6680*4882a593Smuzhiyun 	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
6681*4882a593Smuzhiyun 		u16 lpi_ctrl = 0;
6682*4882a593Smuzhiyun 
6683*4882a593Smuzhiyun 		retval = hw->phy.ops.acquire(hw);
6684*4882a593Smuzhiyun 		if (!retval) {
6685*4882a593Smuzhiyun 			retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
6686*4882a593Smuzhiyun 						 &lpi_ctrl);
6687*4882a593Smuzhiyun 			if (!retval) {
6688*4882a593Smuzhiyun 				if (adapter->eee_advert &
6689*4882a593Smuzhiyun 				    hw->dev_spec.ich8lan.eee_lp_ability &
6690*4882a593Smuzhiyun 				    I82579_EEE_100_SUPPORTED)
6691*4882a593Smuzhiyun 					lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
6692*4882a593Smuzhiyun 				if (adapter->eee_advert &
6693*4882a593Smuzhiyun 				    hw->dev_spec.ich8lan.eee_lp_ability &
6694*4882a593Smuzhiyun 				    I82579_EEE_1000_SUPPORTED)
6695*4882a593Smuzhiyun 					lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
6696*4882a593Smuzhiyun 
6697*4882a593Smuzhiyun 				retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
6698*4882a593Smuzhiyun 							 lpi_ctrl);
6699*4882a593Smuzhiyun 			}
6700*4882a593Smuzhiyun 		}
6701*4882a593Smuzhiyun 		hw->phy.ops.release(hw);
6702*4882a593Smuzhiyun 	}
6703*4882a593Smuzhiyun 
6704*4882a593Smuzhiyun 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
6705*4882a593Smuzhiyun 	 * would have already happened in close and is redundant.
6706*4882a593Smuzhiyun 	 */
6707*4882a593Smuzhiyun 	e1000e_release_hw_control(adapter);
6708*4882a593Smuzhiyun 
6709*4882a593Smuzhiyun 	pci_clear_master(pdev);
6710*4882a593Smuzhiyun 
6711*4882a593Smuzhiyun 	/* The pci-e switch on some quad port adapters will report a
6712*4882a593Smuzhiyun 	 * correctable error when the MAC transitions from D0 to D3.  To
6713*4882a593Smuzhiyun 	 * prevent this we need to mask off the correctable errors on the
6714*4882a593Smuzhiyun 	 * downstream port of the pci-e switch.
6715*4882a593Smuzhiyun 	 *
6716*4882a593Smuzhiyun 	 * We don't have the associated upstream bridge while assigning
6717*4882a593Smuzhiyun 	 * the PCI device into guest. For example, the KVM on power is
6718*4882a593Smuzhiyun 	 * one of the cases.
6719*4882a593Smuzhiyun 	 */
6720*4882a593Smuzhiyun 	if (adapter->flags & FLAG_IS_QUAD_PORT) {
6721*4882a593Smuzhiyun 		struct pci_dev *us_dev = pdev->bus->self;
6722*4882a593Smuzhiyun 		u16 devctl;
6723*4882a593Smuzhiyun 
6724*4882a593Smuzhiyun 		if (!us_dev)
6725*4882a593Smuzhiyun 			return 0;
6726*4882a593Smuzhiyun 
6727*4882a593Smuzhiyun 		pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6728*4882a593Smuzhiyun 		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6729*4882a593Smuzhiyun 					   (devctl & ~PCI_EXP_DEVCTL_CERE));
6730*4882a593Smuzhiyun 
6731*4882a593Smuzhiyun 		pci_save_state(pdev);
6732*4882a593Smuzhiyun 		pci_prepare_to_sleep(pdev);
6733*4882a593Smuzhiyun 
6734*4882a593Smuzhiyun 		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6735*4882a593Smuzhiyun 	}
6736*4882a593Smuzhiyun 
6737*4882a593Smuzhiyun 	return 0;
6738*4882a593Smuzhiyun }
6739*4882a593Smuzhiyun 
6740*4882a593Smuzhiyun /**
6741*4882a593Smuzhiyun  * __e1000e_disable_aspm - Disable ASPM states
6742*4882a593Smuzhiyun  * @pdev: pointer to PCI device struct
6743*4882a593Smuzhiyun  * @state: bit-mask of ASPM states to disable
6744*4882a593Smuzhiyun  * @locked: indication if this context holds pci_bus_sem locked.
6745*4882a593Smuzhiyun  *
6746*4882a593Smuzhiyun  * Some devices *must* have certain ASPM states disabled per hardware errata.
6747*4882a593Smuzhiyun  **/
__e1000e_disable_aspm(struct pci_dev * pdev,u16 state,int locked)6748*4882a593Smuzhiyun static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
6749*4882a593Smuzhiyun {
6750*4882a593Smuzhiyun 	struct pci_dev *parent = pdev->bus->self;
6751*4882a593Smuzhiyun 	u16 aspm_dis_mask = 0;
6752*4882a593Smuzhiyun 	u16 pdev_aspmc, parent_aspmc;
6753*4882a593Smuzhiyun 
6754*4882a593Smuzhiyun 	switch (state) {
6755*4882a593Smuzhiyun 	case PCIE_LINK_STATE_L0S:
6756*4882a593Smuzhiyun 	case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6757*4882a593Smuzhiyun 		aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
6758*4882a593Smuzhiyun 		fallthrough; /* can't have L1 without L0s */
6759*4882a593Smuzhiyun 	case PCIE_LINK_STATE_L1:
6760*4882a593Smuzhiyun 		aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6761*4882a593Smuzhiyun 		break;
6762*4882a593Smuzhiyun 	default:
6763*4882a593Smuzhiyun 		return;
6764*4882a593Smuzhiyun 	}
6765*4882a593Smuzhiyun 
6766*4882a593Smuzhiyun 	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6767*4882a593Smuzhiyun 	pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6768*4882a593Smuzhiyun 
6769*4882a593Smuzhiyun 	if (parent) {
6770*4882a593Smuzhiyun 		pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6771*4882a593Smuzhiyun 					  &parent_aspmc);
6772*4882a593Smuzhiyun 		parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6773*4882a593Smuzhiyun 	}
6774*4882a593Smuzhiyun 
6775*4882a593Smuzhiyun 	/* Nothing to do if the ASPM states to be disabled already are */
6776*4882a593Smuzhiyun 	if (!(pdev_aspmc & aspm_dis_mask) &&
6777*4882a593Smuzhiyun 	    (!parent || !(parent_aspmc & aspm_dis_mask)))
6778*4882a593Smuzhiyun 		return;
6779*4882a593Smuzhiyun 
6780*4882a593Smuzhiyun 	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6781*4882a593Smuzhiyun 		 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6782*4882a593Smuzhiyun 		 "L0s" : "",
6783*4882a593Smuzhiyun 		 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6784*4882a593Smuzhiyun 		 "L1" : "");
6785*4882a593Smuzhiyun 
6786*4882a593Smuzhiyun #ifdef CONFIG_PCIEASPM
6787*4882a593Smuzhiyun 	if (locked)
6788*4882a593Smuzhiyun 		pci_disable_link_state_locked(pdev, state);
6789*4882a593Smuzhiyun 	else
6790*4882a593Smuzhiyun 		pci_disable_link_state(pdev, state);
6791*4882a593Smuzhiyun 
6792*4882a593Smuzhiyun 	/* Double-check ASPM control.  If not disabled by the above, the
6793*4882a593Smuzhiyun 	 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
6794*4882a593Smuzhiyun 	 * not enabled); override by writing PCI config space directly.
6795*4882a593Smuzhiyun 	 */
6796*4882a593Smuzhiyun 	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6797*4882a593Smuzhiyun 	pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6798*4882a593Smuzhiyun 
6799*4882a593Smuzhiyun 	if (!(aspm_dis_mask & pdev_aspmc))
6800*4882a593Smuzhiyun 		return;
6801*4882a593Smuzhiyun #endif
6802*4882a593Smuzhiyun 
6803*4882a593Smuzhiyun 	/* Both device and parent should have the same ASPM setting.
6804*4882a593Smuzhiyun 	 * Disable ASPM in downstream component first and then upstream.
6805*4882a593Smuzhiyun 	 */
6806*4882a593Smuzhiyun 	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6807*4882a593Smuzhiyun 
6808*4882a593Smuzhiyun 	if (parent)
6809*4882a593Smuzhiyun 		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6810*4882a593Smuzhiyun 					   aspm_dis_mask);
6811*4882a593Smuzhiyun }
6812*4882a593Smuzhiyun 
6813*4882a593Smuzhiyun /**
6814*4882a593Smuzhiyun  * e1000e_disable_aspm - Disable ASPM states.
6815*4882a593Smuzhiyun  * @pdev: pointer to PCI device struct
6816*4882a593Smuzhiyun  * @state: bit-mask of ASPM states to disable
6817*4882a593Smuzhiyun  *
6818*4882a593Smuzhiyun  * This function acquires the pci_bus_sem!
6819*4882a593Smuzhiyun  * Some devices *must* have certain ASPM states disabled per hardware errata.
6820*4882a593Smuzhiyun  **/
e1000e_disable_aspm(struct pci_dev * pdev,u16 state)6821*4882a593Smuzhiyun static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6822*4882a593Smuzhiyun {
6823*4882a593Smuzhiyun 	__e1000e_disable_aspm(pdev, state, 0);
6824*4882a593Smuzhiyun }
6825*4882a593Smuzhiyun 
6826*4882a593Smuzhiyun /**
6827*4882a593Smuzhiyun  * e1000e_disable_aspm_locked   Disable ASPM states.
6828*4882a593Smuzhiyun  * @pdev: pointer to PCI device struct
6829*4882a593Smuzhiyun  * @state: bit-mask of ASPM states to disable
6830*4882a593Smuzhiyun  *
6831*4882a593Smuzhiyun  * This function must be called with pci_bus_sem acquired!
6832*4882a593Smuzhiyun  * Some devices *must* have certain ASPM states disabled per hardware errata.
6833*4882a593Smuzhiyun  **/
e1000e_disable_aspm_locked(struct pci_dev * pdev,u16 state)6834*4882a593Smuzhiyun static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
6835*4882a593Smuzhiyun {
6836*4882a593Smuzhiyun 	__e1000e_disable_aspm(pdev, state, 1);
6837*4882a593Smuzhiyun }
6838*4882a593Smuzhiyun 
e1000e_pm_thaw(struct device * dev)6839*4882a593Smuzhiyun static int e1000e_pm_thaw(struct device *dev)
6840*4882a593Smuzhiyun {
6841*4882a593Smuzhiyun 	struct net_device *netdev = dev_get_drvdata(dev);
6842*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6843*4882a593Smuzhiyun 	int rc = 0;
6844*4882a593Smuzhiyun 
6845*4882a593Smuzhiyun 	e1000e_set_interrupt_capability(adapter);
6846*4882a593Smuzhiyun 
6847*4882a593Smuzhiyun 	rtnl_lock();
6848*4882a593Smuzhiyun 	if (netif_running(netdev)) {
6849*4882a593Smuzhiyun 		rc = e1000_request_irq(adapter);
6850*4882a593Smuzhiyun 		if (rc)
6851*4882a593Smuzhiyun 			goto err_irq;
6852*4882a593Smuzhiyun 
6853*4882a593Smuzhiyun 		e1000e_up(adapter);
6854*4882a593Smuzhiyun 	}
6855*4882a593Smuzhiyun 
6856*4882a593Smuzhiyun 	netif_device_attach(netdev);
6857*4882a593Smuzhiyun err_irq:
6858*4882a593Smuzhiyun 	rtnl_unlock();
6859*4882a593Smuzhiyun 
6860*4882a593Smuzhiyun 	return rc;
6861*4882a593Smuzhiyun }
6862*4882a593Smuzhiyun 
__e1000_resume(struct pci_dev * pdev)6863*4882a593Smuzhiyun static int __e1000_resume(struct pci_dev *pdev)
6864*4882a593Smuzhiyun {
6865*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
6866*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6867*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
6868*4882a593Smuzhiyun 	u16 aspm_disable_flag = 0;
6869*4882a593Smuzhiyun 
6870*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6871*4882a593Smuzhiyun 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
6872*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6873*4882a593Smuzhiyun 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
6874*4882a593Smuzhiyun 	if (aspm_disable_flag)
6875*4882a593Smuzhiyun 		e1000e_disable_aspm(pdev, aspm_disable_flag);
6876*4882a593Smuzhiyun 
6877*4882a593Smuzhiyun 	pci_set_master(pdev);
6878*4882a593Smuzhiyun 
6879*4882a593Smuzhiyun 	if (hw->mac.type >= e1000_pch2lan)
6880*4882a593Smuzhiyun 		e1000_resume_workarounds_pchlan(&adapter->hw);
6881*4882a593Smuzhiyun 
6882*4882a593Smuzhiyun 	e1000e_power_up_phy(adapter);
6883*4882a593Smuzhiyun 
6884*4882a593Smuzhiyun 	/* report the system wakeup cause from S3/S4 */
6885*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6886*4882a593Smuzhiyun 		u16 phy_data;
6887*4882a593Smuzhiyun 
6888*4882a593Smuzhiyun 		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6889*4882a593Smuzhiyun 		if (phy_data) {
6890*4882a593Smuzhiyun 			e_info("PHY Wakeup cause - %s\n",
6891*4882a593Smuzhiyun 			       phy_data & E1000_WUS_EX ? "Unicast Packet" :
6892*4882a593Smuzhiyun 			       phy_data & E1000_WUS_MC ? "Multicast Packet" :
6893*4882a593Smuzhiyun 			       phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6894*4882a593Smuzhiyun 			       phy_data & E1000_WUS_MAG ? "Magic Packet" :
6895*4882a593Smuzhiyun 			       phy_data & E1000_WUS_LNKC ?
6896*4882a593Smuzhiyun 			       "Link Status Change" : "other");
6897*4882a593Smuzhiyun 		}
6898*4882a593Smuzhiyun 		e1e_wphy(&adapter->hw, BM_WUS, ~0);
6899*4882a593Smuzhiyun 	} else {
6900*4882a593Smuzhiyun 		u32 wus = er32(WUS);
6901*4882a593Smuzhiyun 
6902*4882a593Smuzhiyun 		if (wus) {
6903*4882a593Smuzhiyun 			e_info("MAC Wakeup cause - %s\n",
6904*4882a593Smuzhiyun 			       wus & E1000_WUS_EX ? "Unicast Packet" :
6905*4882a593Smuzhiyun 			       wus & E1000_WUS_MC ? "Multicast Packet" :
6906*4882a593Smuzhiyun 			       wus & E1000_WUS_BC ? "Broadcast Packet" :
6907*4882a593Smuzhiyun 			       wus & E1000_WUS_MAG ? "Magic Packet" :
6908*4882a593Smuzhiyun 			       wus & E1000_WUS_LNKC ? "Link Status Change" :
6909*4882a593Smuzhiyun 			       "other");
6910*4882a593Smuzhiyun 		}
6911*4882a593Smuzhiyun 		ew32(WUS, ~0);
6912*4882a593Smuzhiyun 	}
6913*4882a593Smuzhiyun 
6914*4882a593Smuzhiyun 	e1000e_reset(adapter);
6915*4882a593Smuzhiyun 
6916*4882a593Smuzhiyun 	e1000_init_manageability_pt(adapter);
6917*4882a593Smuzhiyun 
6918*4882a593Smuzhiyun 	/* If the controller has AMT, do not set DRV_LOAD until the interface
6919*4882a593Smuzhiyun 	 * is up.  For all other cases, let the f/w know that the h/w is now
6920*4882a593Smuzhiyun 	 * under the control of the driver.
6921*4882a593Smuzhiyun 	 */
6922*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_AMT))
6923*4882a593Smuzhiyun 		e1000e_get_hw_control(adapter);
6924*4882a593Smuzhiyun 
6925*4882a593Smuzhiyun 	return 0;
6926*4882a593Smuzhiyun }
6927*4882a593Smuzhiyun 
e1000e_pm_suspend(struct device * dev)6928*4882a593Smuzhiyun static __maybe_unused int e1000e_pm_suspend(struct device *dev)
6929*4882a593Smuzhiyun {
6930*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6931*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6932*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev);
6933*4882a593Smuzhiyun 	int rc;
6934*4882a593Smuzhiyun 
6935*4882a593Smuzhiyun 	e1000e_flush_lpic(pdev);
6936*4882a593Smuzhiyun 
6937*4882a593Smuzhiyun 	e1000e_pm_freeze(dev);
6938*4882a593Smuzhiyun 
6939*4882a593Smuzhiyun 	rc = __e1000_shutdown(pdev, false);
6940*4882a593Smuzhiyun 	if (rc) {
6941*4882a593Smuzhiyun 		e1000e_pm_thaw(dev);
6942*4882a593Smuzhiyun 	} else {
6943*4882a593Smuzhiyun 		/* Introduce S0ix implementation */
6944*4882a593Smuzhiyun 		if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
6945*4882a593Smuzhiyun 			e1000e_s0ix_entry_flow(adapter);
6946*4882a593Smuzhiyun 	}
6947*4882a593Smuzhiyun 
6948*4882a593Smuzhiyun 	return rc;
6949*4882a593Smuzhiyun }
6950*4882a593Smuzhiyun 
e1000e_pm_resume(struct device * dev)6951*4882a593Smuzhiyun static __maybe_unused int e1000e_pm_resume(struct device *dev)
6952*4882a593Smuzhiyun {
6953*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6954*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6955*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev);
6956*4882a593Smuzhiyun 	int rc;
6957*4882a593Smuzhiyun 
6958*4882a593Smuzhiyun 	/* Introduce S0ix implementation */
6959*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
6960*4882a593Smuzhiyun 		e1000e_s0ix_exit_flow(adapter);
6961*4882a593Smuzhiyun 
6962*4882a593Smuzhiyun 	rc = __e1000_resume(pdev);
6963*4882a593Smuzhiyun 	if (rc)
6964*4882a593Smuzhiyun 		return rc;
6965*4882a593Smuzhiyun 
6966*4882a593Smuzhiyun 	return e1000e_pm_thaw(dev);
6967*4882a593Smuzhiyun }
6968*4882a593Smuzhiyun 
e1000e_pm_runtime_idle(struct device * dev)6969*4882a593Smuzhiyun static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
6970*4882a593Smuzhiyun {
6971*4882a593Smuzhiyun 	struct net_device *netdev = dev_get_drvdata(dev);
6972*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6973*4882a593Smuzhiyun 	u16 eee_lp;
6974*4882a593Smuzhiyun 
6975*4882a593Smuzhiyun 	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;
6976*4882a593Smuzhiyun 
6977*4882a593Smuzhiyun 	if (!e1000e_has_link(adapter)) {
6978*4882a593Smuzhiyun 		adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
6979*4882a593Smuzhiyun 		pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
6980*4882a593Smuzhiyun 	}
6981*4882a593Smuzhiyun 
6982*4882a593Smuzhiyun 	return -EBUSY;
6983*4882a593Smuzhiyun }
6984*4882a593Smuzhiyun 
e1000e_pm_runtime_resume(struct device * dev)6985*4882a593Smuzhiyun static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
6986*4882a593Smuzhiyun {
6987*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev);
6988*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
6989*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
6990*4882a593Smuzhiyun 	int rc;
6991*4882a593Smuzhiyun 
6992*4882a593Smuzhiyun 	rc = __e1000_resume(pdev);
6993*4882a593Smuzhiyun 	if (rc)
6994*4882a593Smuzhiyun 		return rc;
6995*4882a593Smuzhiyun 
6996*4882a593Smuzhiyun 	if (netdev->flags & IFF_UP)
6997*4882a593Smuzhiyun 		e1000e_up(adapter);
6998*4882a593Smuzhiyun 
6999*4882a593Smuzhiyun 	return rc;
7000*4882a593Smuzhiyun }
7001*4882a593Smuzhiyun 
e1000e_pm_runtime_suspend(struct device * dev)7002*4882a593Smuzhiyun static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
7003*4882a593Smuzhiyun {
7004*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev);
7005*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
7006*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7007*4882a593Smuzhiyun 
7008*4882a593Smuzhiyun 	if (netdev->flags & IFF_UP) {
7009*4882a593Smuzhiyun 		int count = E1000_CHECK_RESET_COUNT;
7010*4882a593Smuzhiyun 
7011*4882a593Smuzhiyun 		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
7012*4882a593Smuzhiyun 			usleep_range(10000, 11000);
7013*4882a593Smuzhiyun 
7014*4882a593Smuzhiyun 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
7015*4882a593Smuzhiyun 
7016*4882a593Smuzhiyun 		/* Down the device without resetting the hardware */
7017*4882a593Smuzhiyun 		e1000e_down(adapter, false);
7018*4882a593Smuzhiyun 	}
7019*4882a593Smuzhiyun 
7020*4882a593Smuzhiyun 	if (__e1000_shutdown(pdev, true)) {
7021*4882a593Smuzhiyun 		e1000e_pm_runtime_resume(dev);
7022*4882a593Smuzhiyun 		return -EBUSY;
7023*4882a593Smuzhiyun 	}
7024*4882a593Smuzhiyun 
7025*4882a593Smuzhiyun 	return 0;
7026*4882a593Smuzhiyun }
7027*4882a593Smuzhiyun 
e1000_shutdown(struct pci_dev * pdev)7028*4882a593Smuzhiyun static void e1000_shutdown(struct pci_dev *pdev)
7029*4882a593Smuzhiyun {
7030*4882a593Smuzhiyun 	e1000e_flush_lpic(pdev);
7031*4882a593Smuzhiyun 
7032*4882a593Smuzhiyun 	e1000e_pm_freeze(&pdev->dev);
7033*4882a593Smuzhiyun 
7034*4882a593Smuzhiyun 	__e1000_shutdown(pdev, false);
7035*4882a593Smuzhiyun }
7036*4882a593Smuzhiyun 
7037*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
7038*4882a593Smuzhiyun 
e1000_intr_msix(int __always_unused irq,void * data)7039*4882a593Smuzhiyun static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
7040*4882a593Smuzhiyun {
7041*4882a593Smuzhiyun 	struct net_device *netdev = data;
7042*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7043*4882a593Smuzhiyun 
7044*4882a593Smuzhiyun 	if (adapter->msix_entries) {
7045*4882a593Smuzhiyun 		int vector, msix_irq;
7046*4882a593Smuzhiyun 
7047*4882a593Smuzhiyun 		vector = 0;
7048*4882a593Smuzhiyun 		msix_irq = adapter->msix_entries[vector].vector;
7049*4882a593Smuzhiyun 		if (disable_hardirq(msix_irq))
7050*4882a593Smuzhiyun 			e1000_intr_msix_rx(msix_irq, netdev);
7051*4882a593Smuzhiyun 		enable_irq(msix_irq);
7052*4882a593Smuzhiyun 
7053*4882a593Smuzhiyun 		vector++;
7054*4882a593Smuzhiyun 		msix_irq = adapter->msix_entries[vector].vector;
7055*4882a593Smuzhiyun 		if (disable_hardirq(msix_irq))
7056*4882a593Smuzhiyun 			e1000_intr_msix_tx(msix_irq, netdev);
7057*4882a593Smuzhiyun 		enable_irq(msix_irq);
7058*4882a593Smuzhiyun 
7059*4882a593Smuzhiyun 		vector++;
7060*4882a593Smuzhiyun 		msix_irq = adapter->msix_entries[vector].vector;
7061*4882a593Smuzhiyun 		if (disable_hardirq(msix_irq))
7062*4882a593Smuzhiyun 			e1000_msix_other(msix_irq, netdev);
7063*4882a593Smuzhiyun 		enable_irq(msix_irq);
7064*4882a593Smuzhiyun 	}
7065*4882a593Smuzhiyun 
7066*4882a593Smuzhiyun 	return IRQ_HANDLED;
7067*4882a593Smuzhiyun }
7068*4882a593Smuzhiyun 
7069*4882a593Smuzhiyun /**
7070*4882a593Smuzhiyun  * e1000_netpoll
7071*4882a593Smuzhiyun  * @netdev: network interface device structure
7072*4882a593Smuzhiyun  *
7073*4882a593Smuzhiyun  * Polling 'interrupt' - used by things like netconsole to send skbs
7074*4882a593Smuzhiyun  * without having to re-enable interrupts. It's not called while
7075*4882a593Smuzhiyun  * the interrupt routine is executing.
7076*4882a593Smuzhiyun  */
e1000_netpoll(struct net_device * netdev)7077*4882a593Smuzhiyun static void e1000_netpoll(struct net_device *netdev)
7078*4882a593Smuzhiyun {
7079*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7080*4882a593Smuzhiyun 
7081*4882a593Smuzhiyun 	switch (adapter->int_mode) {
7082*4882a593Smuzhiyun 	case E1000E_INT_MODE_MSIX:
7083*4882a593Smuzhiyun 		e1000_intr_msix(adapter->pdev->irq, netdev);
7084*4882a593Smuzhiyun 		break;
7085*4882a593Smuzhiyun 	case E1000E_INT_MODE_MSI:
7086*4882a593Smuzhiyun 		if (disable_hardirq(adapter->pdev->irq))
7087*4882a593Smuzhiyun 			e1000_intr_msi(adapter->pdev->irq, netdev);
7088*4882a593Smuzhiyun 		enable_irq(adapter->pdev->irq);
7089*4882a593Smuzhiyun 		break;
7090*4882a593Smuzhiyun 	default:		/* E1000E_INT_MODE_LEGACY */
7091*4882a593Smuzhiyun 		if (disable_hardirq(adapter->pdev->irq))
7092*4882a593Smuzhiyun 			e1000_intr(adapter->pdev->irq, netdev);
7093*4882a593Smuzhiyun 		enable_irq(adapter->pdev->irq);
7094*4882a593Smuzhiyun 		break;
7095*4882a593Smuzhiyun 	}
7096*4882a593Smuzhiyun }
7097*4882a593Smuzhiyun #endif
7098*4882a593Smuzhiyun 
7099*4882a593Smuzhiyun /**
7100*4882a593Smuzhiyun  * e1000_io_error_detected - called when PCI error is detected
7101*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
7102*4882a593Smuzhiyun  * @state: The current pci connection state
7103*4882a593Smuzhiyun  *
7104*4882a593Smuzhiyun  * This function is called after a PCI bus error affecting
7105*4882a593Smuzhiyun  * this device has been detected.
7106*4882a593Smuzhiyun  */
e1000_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)7107*4882a593Smuzhiyun static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
7108*4882a593Smuzhiyun 						pci_channel_state_t state)
7109*4882a593Smuzhiyun {
7110*4882a593Smuzhiyun 	e1000e_pm_freeze(&pdev->dev);
7111*4882a593Smuzhiyun 
7112*4882a593Smuzhiyun 	if (state == pci_channel_io_perm_failure)
7113*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
7114*4882a593Smuzhiyun 
7115*4882a593Smuzhiyun 	pci_disable_device(pdev);
7116*4882a593Smuzhiyun 
7117*4882a593Smuzhiyun 	/* Request a slot slot reset. */
7118*4882a593Smuzhiyun 	return PCI_ERS_RESULT_NEED_RESET;
7119*4882a593Smuzhiyun }
7120*4882a593Smuzhiyun 
7121*4882a593Smuzhiyun /**
7122*4882a593Smuzhiyun  * e1000_io_slot_reset - called after the pci bus has been reset.
7123*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
7124*4882a593Smuzhiyun  *
7125*4882a593Smuzhiyun  * Restart the card from scratch, as if from a cold-boot. Implementation
7126*4882a593Smuzhiyun  * resembles the first-half of the e1000e_pm_resume routine.
7127*4882a593Smuzhiyun  */
e1000_io_slot_reset(struct pci_dev * pdev)7128*4882a593Smuzhiyun static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
7129*4882a593Smuzhiyun {
7130*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
7131*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7132*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
7133*4882a593Smuzhiyun 	u16 aspm_disable_flag = 0;
7134*4882a593Smuzhiyun 	int err;
7135*4882a593Smuzhiyun 	pci_ers_result_t result;
7136*4882a593Smuzhiyun 
7137*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
7138*4882a593Smuzhiyun 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
7139*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
7140*4882a593Smuzhiyun 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
7141*4882a593Smuzhiyun 	if (aspm_disable_flag)
7142*4882a593Smuzhiyun 		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
7143*4882a593Smuzhiyun 
7144*4882a593Smuzhiyun 	err = pci_enable_device_mem(pdev);
7145*4882a593Smuzhiyun 	if (err) {
7146*4882a593Smuzhiyun 		dev_err(&pdev->dev,
7147*4882a593Smuzhiyun 			"Cannot re-enable PCI device after reset.\n");
7148*4882a593Smuzhiyun 		result = PCI_ERS_RESULT_DISCONNECT;
7149*4882a593Smuzhiyun 	} else {
7150*4882a593Smuzhiyun 		pdev->state_saved = true;
7151*4882a593Smuzhiyun 		pci_restore_state(pdev);
7152*4882a593Smuzhiyun 		pci_set_master(pdev);
7153*4882a593Smuzhiyun 
7154*4882a593Smuzhiyun 		pci_enable_wake(pdev, PCI_D3hot, 0);
7155*4882a593Smuzhiyun 		pci_enable_wake(pdev, PCI_D3cold, 0);
7156*4882a593Smuzhiyun 
7157*4882a593Smuzhiyun 		e1000e_reset(adapter);
7158*4882a593Smuzhiyun 		ew32(WUS, ~0);
7159*4882a593Smuzhiyun 		result = PCI_ERS_RESULT_RECOVERED;
7160*4882a593Smuzhiyun 	}
7161*4882a593Smuzhiyun 
7162*4882a593Smuzhiyun 	return result;
7163*4882a593Smuzhiyun }
7164*4882a593Smuzhiyun 
7165*4882a593Smuzhiyun /**
7166*4882a593Smuzhiyun  * e1000_io_resume - called when traffic can start flowing again.
7167*4882a593Smuzhiyun  * @pdev: Pointer to PCI device
7168*4882a593Smuzhiyun  *
7169*4882a593Smuzhiyun  * This callback is called when the error recovery driver tells us that
7170*4882a593Smuzhiyun  * its OK to resume normal operation. Implementation resembles the
7171*4882a593Smuzhiyun  * second-half of the e1000e_pm_resume routine.
7172*4882a593Smuzhiyun  */
e1000_io_resume(struct pci_dev * pdev)7173*4882a593Smuzhiyun static void e1000_io_resume(struct pci_dev *pdev)
7174*4882a593Smuzhiyun {
7175*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
7176*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7177*4882a593Smuzhiyun 
7178*4882a593Smuzhiyun 	e1000_init_manageability_pt(adapter);
7179*4882a593Smuzhiyun 
7180*4882a593Smuzhiyun 	e1000e_pm_thaw(&pdev->dev);
7181*4882a593Smuzhiyun 
7182*4882a593Smuzhiyun 	/* If the controller has AMT, do not set DRV_LOAD until the interface
7183*4882a593Smuzhiyun 	 * is up.  For all other cases, let the f/w know that the h/w is now
7184*4882a593Smuzhiyun 	 * under the control of the driver.
7185*4882a593Smuzhiyun 	 */
7186*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_AMT))
7187*4882a593Smuzhiyun 		e1000e_get_hw_control(adapter);
7188*4882a593Smuzhiyun }
7189*4882a593Smuzhiyun 
e1000_print_device_info(struct e1000_adapter * adapter)7190*4882a593Smuzhiyun static void e1000_print_device_info(struct e1000_adapter *adapter)
7191*4882a593Smuzhiyun {
7192*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
7193*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
7194*4882a593Smuzhiyun 	u32 ret_val;
7195*4882a593Smuzhiyun 	u8 pba_str[E1000_PBANUM_LENGTH];
7196*4882a593Smuzhiyun 
7197*4882a593Smuzhiyun 	/* print bus type/speed/width info */
7198*4882a593Smuzhiyun 	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
7199*4882a593Smuzhiyun 	       /* bus width */
7200*4882a593Smuzhiyun 	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
7201*4882a593Smuzhiyun 		"Width x1"),
7202*4882a593Smuzhiyun 	       /* MAC address */
7203*4882a593Smuzhiyun 	       netdev->dev_addr);
7204*4882a593Smuzhiyun 	e_info("Intel(R) PRO/%s Network Connection\n",
7205*4882a593Smuzhiyun 	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
7206*4882a593Smuzhiyun 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
7207*4882a593Smuzhiyun 						E1000_PBANUM_LENGTH);
7208*4882a593Smuzhiyun 	if (ret_val)
7209*4882a593Smuzhiyun 		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
7210*4882a593Smuzhiyun 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
7211*4882a593Smuzhiyun 	       hw->mac.type, hw->phy.type, pba_str);
7212*4882a593Smuzhiyun }
7213*4882a593Smuzhiyun 
e1000_eeprom_checks(struct e1000_adapter * adapter)7214*4882a593Smuzhiyun static void e1000_eeprom_checks(struct e1000_adapter *adapter)
7215*4882a593Smuzhiyun {
7216*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
7217*4882a593Smuzhiyun 	int ret_val;
7218*4882a593Smuzhiyun 	u16 buf = 0;
7219*4882a593Smuzhiyun 
7220*4882a593Smuzhiyun 	if (hw->mac.type != e1000_82573)
7221*4882a593Smuzhiyun 		return;
7222*4882a593Smuzhiyun 
7223*4882a593Smuzhiyun 	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
7224*4882a593Smuzhiyun 	le16_to_cpus(&buf);
7225*4882a593Smuzhiyun 	if (!ret_val && (!(buf & BIT(0)))) {
7226*4882a593Smuzhiyun 		/* Deep Smart Power Down (DSPD) */
7227*4882a593Smuzhiyun 		dev_warn(&adapter->pdev->dev,
7228*4882a593Smuzhiyun 			 "Warning: detected DSPD enabled in EEPROM\n");
7229*4882a593Smuzhiyun 	}
7230*4882a593Smuzhiyun }
7231*4882a593Smuzhiyun 
e1000_fix_features(struct net_device * netdev,netdev_features_t features)7232*4882a593Smuzhiyun static netdev_features_t e1000_fix_features(struct net_device *netdev,
7233*4882a593Smuzhiyun 					    netdev_features_t features)
7234*4882a593Smuzhiyun {
7235*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7236*4882a593Smuzhiyun 	struct e1000_hw *hw = &adapter->hw;
7237*4882a593Smuzhiyun 
7238*4882a593Smuzhiyun 	/* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
7239*4882a593Smuzhiyun 	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
7240*4882a593Smuzhiyun 		features &= ~NETIF_F_RXFCS;
7241*4882a593Smuzhiyun 
7242*4882a593Smuzhiyun 	/* Since there is no support for separate Rx/Tx vlan accel
7243*4882a593Smuzhiyun 	 * enable/disable make sure Tx flag is always in same state as Rx.
7244*4882a593Smuzhiyun 	 */
7245*4882a593Smuzhiyun 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
7246*4882a593Smuzhiyun 		features |= NETIF_F_HW_VLAN_CTAG_TX;
7247*4882a593Smuzhiyun 	else
7248*4882a593Smuzhiyun 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
7249*4882a593Smuzhiyun 
7250*4882a593Smuzhiyun 	return features;
7251*4882a593Smuzhiyun }
7252*4882a593Smuzhiyun 
e1000_set_features(struct net_device * netdev,netdev_features_t features)7253*4882a593Smuzhiyun static int e1000_set_features(struct net_device *netdev,
7254*4882a593Smuzhiyun 			      netdev_features_t features)
7255*4882a593Smuzhiyun {
7256*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7257*4882a593Smuzhiyun 	netdev_features_t changed = features ^ netdev->features;
7258*4882a593Smuzhiyun 
7259*4882a593Smuzhiyun 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
7260*4882a593Smuzhiyun 		adapter->flags |= FLAG_TSO_FORCE;
7261*4882a593Smuzhiyun 
7262*4882a593Smuzhiyun 	if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
7263*4882a593Smuzhiyun 			 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
7264*4882a593Smuzhiyun 			 NETIF_F_RXALL)))
7265*4882a593Smuzhiyun 		return 0;
7266*4882a593Smuzhiyun 
7267*4882a593Smuzhiyun 	if (changed & NETIF_F_RXFCS) {
7268*4882a593Smuzhiyun 		if (features & NETIF_F_RXFCS) {
7269*4882a593Smuzhiyun 			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
7270*4882a593Smuzhiyun 		} else {
7271*4882a593Smuzhiyun 			/* We need to take it back to defaults, which might mean
7272*4882a593Smuzhiyun 			 * stripping is still disabled at the adapter level.
7273*4882a593Smuzhiyun 			 */
7274*4882a593Smuzhiyun 			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
7275*4882a593Smuzhiyun 				adapter->flags2 |= FLAG2_CRC_STRIPPING;
7276*4882a593Smuzhiyun 			else
7277*4882a593Smuzhiyun 				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
7278*4882a593Smuzhiyun 		}
7279*4882a593Smuzhiyun 	}
7280*4882a593Smuzhiyun 
7281*4882a593Smuzhiyun 	netdev->features = features;
7282*4882a593Smuzhiyun 
7283*4882a593Smuzhiyun 	if (netif_running(netdev))
7284*4882a593Smuzhiyun 		e1000e_reinit_locked(adapter);
7285*4882a593Smuzhiyun 	else
7286*4882a593Smuzhiyun 		e1000e_reset(adapter);
7287*4882a593Smuzhiyun 
7288*4882a593Smuzhiyun 	return 1;
7289*4882a593Smuzhiyun }
7290*4882a593Smuzhiyun 
7291*4882a593Smuzhiyun static const struct net_device_ops e1000e_netdev_ops = {
7292*4882a593Smuzhiyun 	.ndo_open		= e1000e_open,
7293*4882a593Smuzhiyun 	.ndo_stop		= e1000e_close,
7294*4882a593Smuzhiyun 	.ndo_start_xmit		= e1000_xmit_frame,
7295*4882a593Smuzhiyun 	.ndo_get_stats64	= e1000e_get_stats64,
7296*4882a593Smuzhiyun 	.ndo_set_rx_mode	= e1000e_set_rx_mode,
7297*4882a593Smuzhiyun 	.ndo_set_mac_address	= e1000_set_mac,
7298*4882a593Smuzhiyun 	.ndo_change_mtu		= e1000_change_mtu,
7299*4882a593Smuzhiyun 	.ndo_do_ioctl		= e1000_ioctl,
7300*4882a593Smuzhiyun 	.ndo_tx_timeout		= e1000_tx_timeout,
7301*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
7302*4882a593Smuzhiyun 
7303*4882a593Smuzhiyun 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
7304*4882a593Smuzhiyun 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
7305*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
7306*4882a593Smuzhiyun 	.ndo_poll_controller	= e1000_netpoll,
7307*4882a593Smuzhiyun #endif
7308*4882a593Smuzhiyun 	.ndo_set_features = e1000_set_features,
7309*4882a593Smuzhiyun 	.ndo_fix_features = e1000_fix_features,
7310*4882a593Smuzhiyun 	.ndo_features_check	= passthru_features_check,
7311*4882a593Smuzhiyun };
7312*4882a593Smuzhiyun 
7313*4882a593Smuzhiyun /**
7314*4882a593Smuzhiyun  * e1000_probe - Device Initialization Routine
7315*4882a593Smuzhiyun  * @pdev: PCI device information struct
7316*4882a593Smuzhiyun  * @ent: entry in e1000_pci_tbl
7317*4882a593Smuzhiyun  *
7318*4882a593Smuzhiyun  * Returns 0 on success, negative on failure
7319*4882a593Smuzhiyun  *
7320*4882a593Smuzhiyun  * e1000_probe initializes an adapter identified by a pci_dev structure.
7321*4882a593Smuzhiyun  * The OS initialization, configuring of the adapter private structure,
7322*4882a593Smuzhiyun  * and a hardware reset occur.
7323*4882a593Smuzhiyun  **/
e1000_probe(struct pci_dev * pdev,const struct pci_device_id * ent)7324*4882a593Smuzhiyun static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7325*4882a593Smuzhiyun {
7326*4882a593Smuzhiyun 	struct net_device *netdev;
7327*4882a593Smuzhiyun 	struct e1000_adapter *adapter;
7328*4882a593Smuzhiyun 	struct e1000_hw *hw;
7329*4882a593Smuzhiyun 	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
7330*4882a593Smuzhiyun 	resource_size_t mmio_start, mmio_len;
7331*4882a593Smuzhiyun 	resource_size_t flash_start, flash_len;
7332*4882a593Smuzhiyun 	static int cards_found;
7333*4882a593Smuzhiyun 	u16 aspm_disable_flag = 0;
7334*4882a593Smuzhiyun 	int bars, i, err, pci_using_dac;
7335*4882a593Smuzhiyun 	u16 eeprom_data = 0;
7336*4882a593Smuzhiyun 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
7337*4882a593Smuzhiyun 	s32 ret_val = 0;
7338*4882a593Smuzhiyun 
7339*4882a593Smuzhiyun 	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
7340*4882a593Smuzhiyun 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
7341*4882a593Smuzhiyun 	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
7342*4882a593Smuzhiyun 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
7343*4882a593Smuzhiyun 	if (aspm_disable_flag)
7344*4882a593Smuzhiyun 		e1000e_disable_aspm(pdev, aspm_disable_flag);
7345*4882a593Smuzhiyun 
7346*4882a593Smuzhiyun 	err = pci_enable_device_mem(pdev);
7347*4882a593Smuzhiyun 	if (err)
7348*4882a593Smuzhiyun 		return err;
7349*4882a593Smuzhiyun 
7350*4882a593Smuzhiyun 	pci_using_dac = 0;
7351*4882a593Smuzhiyun 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7352*4882a593Smuzhiyun 	if (!err) {
7353*4882a593Smuzhiyun 		pci_using_dac = 1;
7354*4882a593Smuzhiyun 	} else {
7355*4882a593Smuzhiyun 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7356*4882a593Smuzhiyun 		if (err) {
7357*4882a593Smuzhiyun 			dev_err(&pdev->dev,
7358*4882a593Smuzhiyun 				"No usable DMA configuration, aborting\n");
7359*4882a593Smuzhiyun 			goto err_dma;
7360*4882a593Smuzhiyun 		}
7361*4882a593Smuzhiyun 	}
7362*4882a593Smuzhiyun 
7363*4882a593Smuzhiyun 	bars = pci_select_bars(pdev, IORESOURCE_MEM);
7364*4882a593Smuzhiyun 	err = pci_request_selected_regions_exclusive(pdev, bars,
7365*4882a593Smuzhiyun 						     e1000e_driver_name);
7366*4882a593Smuzhiyun 	if (err)
7367*4882a593Smuzhiyun 		goto err_pci_reg;
7368*4882a593Smuzhiyun 
7369*4882a593Smuzhiyun 	/* AER (Advanced Error Reporting) hooks */
7370*4882a593Smuzhiyun 	pci_enable_pcie_error_reporting(pdev);
7371*4882a593Smuzhiyun 
7372*4882a593Smuzhiyun 	pci_set_master(pdev);
7373*4882a593Smuzhiyun 	/* PCI config space info */
7374*4882a593Smuzhiyun 	err = pci_save_state(pdev);
7375*4882a593Smuzhiyun 	if (err)
7376*4882a593Smuzhiyun 		goto err_alloc_etherdev;
7377*4882a593Smuzhiyun 
7378*4882a593Smuzhiyun 	err = -ENOMEM;
7379*4882a593Smuzhiyun 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
7380*4882a593Smuzhiyun 	if (!netdev)
7381*4882a593Smuzhiyun 		goto err_alloc_etherdev;
7382*4882a593Smuzhiyun 
7383*4882a593Smuzhiyun 	SET_NETDEV_DEV(netdev, &pdev->dev);
7384*4882a593Smuzhiyun 
7385*4882a593Smuzhiyun 	netdev->irq = pdev->irq;
7386*4882a593Smuzhiyun 
7387*4882a593Smuzhiyun 	pci_set_drvdata(pdev, netdev);
7388*4882a593Smuzhiyun 	adapter = netdev_priv(netdev);
7389*4882a593Smuzhiyun 	hw = &adapter->hw;
7390*4882a593Smuzhiyun 	adapter->netdev = netdev;
7391*4882a593Smuzhiyun 	adapter->pdev = pdev;
7392*4882a593Smuzhiyun 	adapter->ei = ei;
7393*4882a593Smuzhiyun 	adapter->pba = ei->pba;
7394*4882a593Smuzhiyun 	adapter->flags = ei->flags;
7395*4882a593Smuzhiyun 	adapter->flags2 = ei->flags2;
7396*4882a593Smuzhiyun 	adapter->hw.adapter = adapter;
7397*4882a593Smuzhiyun 	adapter->hw.mac.type = ei->mac;
7398*4882a593Smuzhiyun 	adapter->max_hw_frame_size = ei->max_hw_frame_size;
7399*4882a593Smuzhiyun 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
7400*4882a593Smuzhiyun 
7401*4882a593Smuzhiyun 	mmio_start = pci_resource_start(pdev, 0);
7402*4882a593Smuzhiyun 	mmio_len = pci_resource_len(pdev, 0);
7403*4882a593Smuzhiyun 
7404*4882a593Smuzhiyun 	err = -EIO;
7405*4882a593Smuzhiyun 	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
7406*4882a593Smuzhiyun 	if (!adapter->hw.hw_addr)
7407*4882a593Smuzhiyun 		goto err_ioremap;
7408*4882a593Smuzhiyun 
7409*4882a593Smuzhiyun 	if ((adapter->flags & FLAG_HAS_FLASH) &&
7410*4882a593Smuzhiyun 	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
7411*4882a593Smuzhiyun 	    (hw->mac.type < e1000_pch_spt)) {
7412*4882a593Smuzhiyun 		flash_start = pci_resource_start(pdev, 1);
7413*4882a593Smuzhiyun 		flash_len = pci_resource_len(pdev, 1);
7414*4882a593Smuzhiyun 		adapter->hw.flash_address = ioremap(flash_start, flash_len);
7415*4882a593Smuzhiyun 		if (!adapter->hw.flash_address)
7416*4882a593Smuzhiyun 			goto err_flashmap;
7417*4882a593Smuzhiyun 	}
7418*4882a593Smuzhiyun 
7419*4882a593Smuzhiyun 	/* Set default EEE advertisement */
7420*4882a593Smuzhiyun 	if (adapter->flags2 & FLAG2_HAS_EEE)
7421*4882a593Smuzhiyun 		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
7422*4882a593Smuzhiyun 
7423*4882a593Smuzhiyun 	/* construct the net_device struct */
7424*4882a593Smuzhiyun 	netdev->netdev_ops = &e1000e_netdev_ops;
7425*4882a593Smuzhiyun 	e1000e_set_ethtool_ops(netdev);
7426*4882a593Smuzhiyun 	netdev->watchdog_timeo = 5 * HZ;
7427*4882a593Smuzhiyun 	netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
7428*4882a593Smuzhiyun 	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
7429*4882a593Smuzhiyun 
7430*4882a593Smuzhiyun 	netdev->mem_start = mmio_start;
7431*4882a593Smuzhiyun 	netdev->mem_end = mmio_start + mmio_len;
7432*4882a593Smuzhiyun 
7433*4882a593Smuzhiyun 	adapter->bd_number = cards_found++;
7434*4882a593Smuzhiyun 
7435*4882a593Smuzhiyun 	e1000e_check_options(adapter);
7436*4882a593Smuzhiyun 
7437*4882a593Smuzhiyun 	/* setup adapter struct */
7438*4882a593Smuzhiyun 	err = e1000_sw_init(adapter);
7439*4882a593Smuzhiyun 	if (err)
7440*4882a593Smuzhiyun 		goto err_sw_init;
7441*4882a593Smuzhiyun 
7442*4882a593Smuzhiyun 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
7443*4882a593Smuzhiyun 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
7444*4882a593Smuzhiyun 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
7445*4882a593Smuzhiyun 
7446*4882a593Smuzhiyun 	err = ei->get_variants(adapter);
7447*4882a593Smuzhiyun 	if (err)
7448*4882a593Smuzhiyun 		goto err_hw_init;
7449*4882a593Smuzhiyun 
7450*4882a593Smuzhiyun 	if ((adapter->flags & FLAG_IS_ICH) &&
7451*4882a593Smuzhiyun 	    (adapter->flags & FLAG_READ_ONLY_NVM) &&
7452*4882a593Smuzhiyun 	    (hw->mac.type < e1000_pch_spt))
7453*4882a593Smuzhiyun 		e1000e_write_protect_nvm_ich8lan(&adapter->hw);
7454*4882a593Smuzhiyun 
7455*4882a593Smuzhiyun 	hw->mac.ops.get_bus_info(&adapter->hw);
7456*4882a593Smuzhiyun 
7457*4882a593Smuzhiyun 	adapter->hw.phy.autoneg_wait_to_complete = 0;
7458*4882a593Smuzhiyun 
7459*4882a593Smuzhiyun 	/* Copper options */
7460*4882a593Smuzhiyun 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
7461*4882a593Smuzhiyun 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
7462*4882a593Smuzhiyun 		adapter->hw.phy.disable_polarity_correction = 0;
7463*4882a593Smuzhiyun 		adapter->hw.phy.ms_type = e1000_ms_hw_default;
7464*4882a593Smuzhiyun 	}
7465*4882a593Smuzhiyun 
7466*4882a593Smuzhiyun 	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
7467*4882a593Smuzhiyun 		dev_info(&pdev->dev,
7468*4882a593Smuzhiyun 			 "PHY reset is blocked due to SOL/IDER session.\n");
7469*4882a593Smuzhiyun 
7470*4882a593Smuzhiyun 	/* Set initial default active device features */
7471*4882a593Smuzhiyun 	netdev->features = (NETIF_F_SG |
7472*4882a593Smuzhiyun 			    NETIF_F_HW_VLAN_CTAG_RX |
7473*4882a593Smuzhiyun 			    NETIF_F_HW_VLAN_CTAG_TX |
7474*4882a593Smuzhiyun 			    NETIF_F_TSO |
7475*4882a593Smuzhiyun 			    NETIF_F_TSO6 |
7476*4882a593Smuzhiyun 			    NETIF_F_RXHASH |
7477*4882a593Smuzhiyun 			    NETIF_F_RXCSUM |
7478*4882a593Smuzhiyun 			    NETIF_F_HW_CSUM);
7479*4882a593Smuzhiyun 
7480*4882a593Smuzhiyun 	/* Set user-changeable features (subset of all device features) */
7481*4882a593Smuzhiyun 	netdev->hw_features = netdev->features;
7482*4882a593Smuzhiyun 	netdev->hw_features |= NETIF_F_RXFCS;
7483*4882a593Smuzhiyun 	netdev->priv_flags |= IFF_SUPP_NOFCS;
7484*4882a593Smuzhiyun 	netdev->hw_features |= NETIF_F_RXALL;
7485*4882a593Smuzhiyun 
7486*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
7487*4882a593Smuzhiyun 		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7488*4882a593Smuzhiyun 
7489*4882a593Smuzhiyun 	netdev->vlan_features |= (NETIF_F_SG |
7490*4882a593Smuzhiyun 				  NETIF_F_TSO |
7491*4882a593Smuzhiyun 				  NETIF_F_TSO6 |
7492*4882a593Smuzhiyun 				  NETIF_F_HW_CSUM);
7493*4882a593Smuzhiyun 
7494*4882a593Smuzhiyun 	netdev->priv_flags |= IFF_UNICAST_FLT;
7495*4882a593Smuzhiyun 
7496*4882a593Smuzhiyun 	if (pci_using_dac) {
7497*4882a593Smuzhiyun 		netdev->features |= NETIF_F_HIGHDMA;
7498*4882a593Smuzhiyun 		netdev->vlan_features |= NETIF_F_HIGHDMA;
7499*4882a593Smuzhiyun 	}
7500*4882a593Smuzhiyun 
7501*4882a593Smuzhiyun 	/* MTU range: 68 - max_hw_frame_size */
7502*4882a593Smuzhiyun 	netdev->min_mtu = ETH_MIN_MTU;
7503*4882a593Smuzhiyun 	netdev->max_mtu = adapter->max_hw_frame_size -
7504*4882a593Smuzhiyun 			  (VLAN_ETH_HLEN + ETH_FCS_LEN);
7505*4882a593Smuzhiyun 
7506*4882a593Smuzhiyun 	if (e1000e_enable_mng_pass_thru(&adapter->hw))
7507*4882a593Smuzhiyun 		adapter->flags |= FLAG_MNG_PT_ENABLED;
7508*4882a593Smuzhiyun 
7509*4882a593Smuzhiyun 	/* before reading the NVM, reset the controller to
7510*4882a593Smuzhiyun 	 * put the device in a known good starting state
7511*4882a593Smuzhiyun 	 */
7512*4882a593Smuzhiyun 	adapter->hw.mac.ops.reset_hw(&adapter->hw);
7513*4882a593Smuzhiyun 
7514*4882a593Smuzhiyun 	/* systems with ASPM and others may see the checksum fail on the first
7515*4882a593Smuzhiyun 	 * attempt. Let's give it a few tries
7516*4882a593Smuzhiyun 	 */
7517*4882a593Smuzhiyun 	for (i = 0;; i++) {
7518*4882a593Smuzhiyun 		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
7519*4882a593Smuzhiyun 			break;
7520*4882a593Smuzhiyun 		if (i == 2) {
7521*4882a593Smuzhiyun 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
7522*4882a593Smuzhiyun 			err = -EIO;
7523*4882a593Smuzhiyun 			goto err_eeprom;
7524*4882a593Smuzhiyun 		}
7525*4882a593Smuzhiyun 	}
7526*4882a593Smuzhiyun 
7527*4882a593Smuzhiyun 	e1000_eeprom_checks(adapter);
7528*4882a593Smuzhiyun 
7529*4882a593Smuzhiyun 	/* copy the MAC address */
7530*4882a593Smuzhiyun 	if (e1000e_read_mac_addr(&adapter->hw))
7531*4882a593Smuzhiyun 		dev_err(&pdev->dev,
7532*4882a593Smuzhiyun 			"NVM Read Error while reading MAC address\n");
7533*4882a593Smuzhiyun 
7534*4882a593Smuzhiyun 	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
7535*4882a593Smuzhiyun 
7536*4882a593Smuzhiyun 	if (!is_valid_ether_addr(netdev->dev_addr)) {
7537*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
7538*4882a593Smuzhiyun 			netdev->dev_addr);
7539*4882a593Smuzhiyun 		err = -EIO;
7540*4882a593Smuzhiyun 		goto err_eeprom;
7541*4882a593Smuzhiyun 	}
7542*4882a593Smuzhiyun 
7543*4882a593Smuzhiyun 	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
7544*4882a593Smuzhiyun 	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
7545*4882a593Smuzhiyun 
7546*4882a593Smuzhiyun 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
7547*4882a593Smuzhiyun 	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
7548*4882a593Smuzhiyun 	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
7549*4882a593Smuzhiyun 	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
7550*4882a593Smuzhiyun 	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
7551*4882a593Smuzhiyun 
7552*4882a593Smuzhiyun 	/* Initialize link parameters. User can change them with ethtool */
7553*4882a593Smuzhiyun 	adapter->hw.mac.autoneg = 1;
7554*4882a593Smuzhiyun 	adapter->fc_autoneg = true;
7555*4882a593Smuzhiyun 	adapter->hw.fc.requested_mode = e1000_fc_default;
7556*4882a593Smuzhiyun 	adapter->hw.fc.current_mode = e1000_fc_default;
7557*4882a593Smuzhiyun 	adapter->hw.phy.autoneg_advertised = 0x2f;
7558*4882a593Smuzhiyun 
7559*4882a593Smuzhiyun 	/* Initial Wake on LAN setting - If APM wake is enabled in
7560*4882a593Smuzhiyun 	 * the EEPROM, enable the ACPI Magic Packet filter
7561*4882a593Smuzhiyun 	 */
7562*4882a593Smuzhiyun 	if (adapter->flags & FLAG_APME_IN_WUC) {
7563*4882a593Smuzhiyun 		/* APME bit in EEPROM is mapped to WUC.APME */
7564*4882a593Smuzhiyun 		eeprom_data = er32(WUC);
7565*4882a593Smuzhiyun 		eeprom_apme_mask = E1000_WUC_APME;
7566*4882a593Smuzhiyun 		if ((hw->mac.type > e1000_ich10lan) &&
7567*4882a593Smuzhiyun 		    (eeprom_data & E1000_WUC_PHY_WAKE))
7568*4882a593Smuzhiyun 			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
7569*4882a593Smuzhiyun 	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
7570*4882a593Smuzhiyun 		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
7571*4882a593Smuzhiyun 		    (adapter->hw.bus.func == 1))
7572*4882a593Smuzhiyun 			ret_val = e1000_read_nvm(&adapter->hw,
7573*4882a593Smuzhiyun 					      NVM_INIT_CONTROL3_PORT_B,
7574*4882a593Smuzhiyun 					      1, &eeprom_data);
7575*4882a593Smuzhiyun 		else
7576*4882a593Smuzhiyun 			ret_val = e1000_read_nvm(&adapter->hw,
7577*4882a593Smuzhiyun 					      NVM_INIT_CONTROL3_PORT_A,
7578*4882a593Smuzhiyun 					      1, &eeprom_data);
7579*4882a593Smuzhiyun 	}
7580*4882a593Smuzhiyun 
7581*4882a593Smuzhiyun 	/* fetch WoL from EEPROM */
7582*4882a593Smuzhiyun 	if (ret_val)
7583*4882a593Smuzhiyun 		e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
7584*4882a593Smuzhiyun 	else if (eeprom_data & eeprom_apme_mask)
7585*4882a593Smuzhiyun 		adapter->eeprom_wol |= E1000_WUFC_MAG;
7586*4882a593Smuzhiyun 
7587*4882a593Smuzhiyun 	/* now that we have the eeprom settings, apply the special cases
7588*4882a593Smuzhiyun 	 * where the eeprom may be wrong or the board simply won't support
7589*4882a593Smuzhiyun 	 * wake on lan on a particular port
7590*4882a593Smuzhiyun 	 */
7591*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_WOL))
7592*4882a593Smuzhiyun 		adapter->eeprom_wol = 0;
7593*4882a593Smuzhiyun 
7594*4882a593Smuzhiyun 	/* initialize the wol settings based on the eeprom settings */
7595*4882a593Smuzhiyun 	adapter->wol = adapter->eeprom_wol;
7596*4882a593Smuzhiyun 
7597*4882a593Smuzhiyun 	/* make sure adapter isn't asleep if manageability is enabled */
7598*4882a593Smuzhiyun 	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
7599*4882a593Smuzhiyun 	    (hw->mac.ops.check_mng_mode(hw)))
7600*4882a593Smuzhiyun 		device_wakeup_enable(&pdev->dev);
7601*4882a593Smuzhiyun 
7602*4882a593Smuzhiyun 	/* save off EEPROM version number */
7603*4882a593Smuzhiyun 	ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
7604*4882a593Smuzhiyun 
7605*4882a593Smuzhiyun 	if (ret_val) {
7606*4882a593Smuzhiyun 		e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
7607*4882a593Smuzhiyun 		adapter->eeprom_vers = 0;
7608*4882a593Smuzhiyun 	}
7609*4882a593Smuzhiyun 
7610*4882a593Smuzhiyun 	/* init PTP hardware clock */
7611*4882a593Smuzhiyun 	e1000e_ptp_init(adapter);
7612*4882a593Smuzhiyun 
7613*4882a593Smuzhiyun 	/* reset the hardware with the new settings */
7614*4882a593Smuzhiyun 	e1000e_reset(adapter);
7615*4882a593Smuzhiyun 
7616*4882a593Smuzhiyun 	/* If the controller has AMT, do not set DRV_LOAD until the interface
7617*4882a593Smuzhiyun 	 * is up.  For all other cases, let the f/w know that the h/w is now
7618*4882a593Smuzhiyun 	 * under the control of the driver.
7619*4882a593Smuzhiyun 	 */
7620*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_AMT))
7621*4882a593Smuzhiyun 		e1000e_get_hw_control(adapter);
7622*4882a593Smuzhiyun 
7623*4882a593Smuzhiyun 	if (hw->mac.type >= e1000_pch_cnp)
7624*4882a593Smuzhiyun 		adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
7625*4882a593Smuzhiyun 
7626*4882a593Smuzhiyun 	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
7627*4882a593Smuzhiyun 	err = register_netdev(netdev);
7628*4882a593Smuzhiyun 	if (err)
7629*4882a593Smuzhiyun 		goto err_register;
7630*4882a593Smuzhiyun 
7631*4882a593Smuzhiyun 	/* carrier off reporting is important to ethtool even BEFORE open */
7632*4882a593Smuzhiyun 	netif_carrier_off(netdev);
7633*4882a593Smuzhiyun 
7634*4882a593Smuzhiyun 	e1000_print_device_info(adapter);
7635*4882a593Smuzhiyun 
7636*4882a593Smuzhiyun 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
7637*4882a593Smuzhiyun 
7638*4882a593Smuzhiyun 	if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
7639*4882a593Smuzhiyun 		pm_runtime_put_noidle(&pdev->dev);
7640*4882a593Smuzhiyun 
7641*4882a593Smuzhiyun 	return 0;
7642*4882a593Smuzhiyun 
7643*4882a593Smuzhiyun err_register:
7644*4882a593Smuzhiyun 	if (!(adapter->flags & FLAG_HAS_AMT))
7645*4882a593Smuzhiyun 		e1000e_release_hw_control(adapter);
7646*4882a593Smuzhiyun err_eeprom:
7647*4882a593Smuzhiyun 	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
7648*4882a593Smuzhiyun 		e1000_phy_hw_reset(&adapter->hw);
7649*4882a593Smuzhiyun err_hw_init:
7650*4882a593Smuzhiyun 	kfree(adapter->tx_ring);
7651*4882a593Smuzhiyun 	kfree(adapter->rx_ring);
7652*4882a593Smuzhiyun err_sw_init:
7653*4882a593Smuzhiyun 	if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
7654*4882a593Smuzhiyun 		iounmap(adapter->hw.flash_address);
7655*4882a593Smuzhiyun 	e1000e_reset_interrupt_capability(adapter);
7656*4882a593Smuzhiyun err_flashmap:
7657*4882a593Smuzhiyun 	iounmap(adapter->hw.hw_addr);
7658*4882a593Smuzhiyun err_ioremap:
7659*4882a593Smuzhiyun 	free_netdev(netdev);
7660*4882a593Smuzhiyun err_alloc_etherdev:
7661*4882a593Smuzhiyun 	pci_disable_pcie_error_reporting(pdev);
7662*4882a593Smuzhiyun 	pci_release_mem_regions(pdev);
7663*4882a593Smuzhiyun err_pci_reg:
7664*4882a593Smuzhiyun err_dma:
7665*4882a593Smuzhiyun 	pci_disable_device(pdev);
7666*4882a593Smuzhiyun 	return err;
7667*4882a593Smuzhiyun }
7668*4882a593Smuzhiyun 
7669*4882a593Smuzhiyun /**
7670*4882a593Smuzhiyun  * e1000_remove - Device Removal Routine
7671*4882a593Smuzhiyun  * @pdev: PCI device information struct
7672*4882a593Smuzhiyun  *
7673*4882a593Smuzhiyun  * e1000_remove is called by the PCI subsystem to alert the driver
7674*4882a593Smuzhiyun  * that it should release a PCI device.  The could be caused by a
7675*4882a593Smuzhiyun  * Hot-Plug event, or because the driver is going to be removed from
7676*4882a593Smuzhiyun  * memory.
7677*4882a593Smuzhiyun  **/
e1000_remove(struct pci_dev * pdev)7678*4882a593Smuzhiyun static void e1000_remove(struct pci_dev *pdev)
7679*4882a593Smuzhiyun {
7680*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
7681*4882a593Smuzhiyun 	struct e1000_adapter *adapter = netdev_priv(netdev);
7682*4882a593Smuzhiyun 
7683*4882a593Smuzhiyun 	e1000e_ptp_remove(adapter);
7684*4882a593Smuzhiyun 
7685*4882a593Smuzhiyun 	/* The timers may be rescheduled, so explicitly disable them
7686*4882a593Smuzhiyun 	 * from being rescheduled.
7687*4882a593Smuzhiyun 	 */
7688*4882a593Smuzhiyun 	set_bit(__E1000_DOWN, &adapter->state);
7689*4882a593Smuzhiyun 	del_timer_sync(&adapter->watchdog_timer);
7690*4882a593Smuzhiyun 	del_timer_sync(&adapter->phy_info_timer);
7691*4882a593Smuzhiyun 
7692*4882a593Smuzhiyun 	cancel_work_sync(&adapter->reset_task);
7693*4882a593Smuzhiyun 	cancel_work_sync(&adapter->watchdog_task);
7694*4882a593Smuzhiyun 	cancel_work_sync(&adapter->downshift_task);
7695*4882a593Smuzhiyun 	cancel_work_sync(&adapter->update_phy_task);
7696*4882a593Smuzhiyun 	cancel_work_sync(&adapter->print_hang_task);
7697*4882a593Smuzhiyun 
7698*4882a593Smuzhiyun 	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
7699*4882a593Smuzhiyun 		cancel_work_sync(&adapter->tx_hwtstamp_work);
7700*4882a593Smuzhiyun 		if (adapter->tx_hwtstamp_skb) {
7701*4882a593Smuzhiyun 			dev_consume_skb_any(adapter->tx_hwtstamp_skb);
7702*4882a593Smuzhiyun 			adapter->tx_hwtstamp_skb = NULL;
7703*4882a593Smuzhiyun 		}
7704*4882a593Smuzhiyun 	}
7705*4882a593Smuzhiyun 
7706*4882a593Smuzhiyun 	unregister_netdev(netdev);
7707*4882a593Smuzhiyun 
7708*4882a593Smuzhiyun 	if (pci_dev_run_wake(pdev))
7709*4882a593Smuzhiyun 		pm_runtime_get_noresume(&pdev->dev);
7710*4882a593Smuzhiyun 
7711*4882a593Smuzhiyun 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
7712*4882a593Smuzhiyun 	 * would have already happened in close and is redundant.
7713*4882a593Smuzhiyun 	 */
7714*4882a593Smuzhiyun 	e1000e_release_hw_control(adapter);
7715*4882a593Smuzhiyun 
7716*4882a593Smuzhiyun 	e1000e_reset_interrupt_capability(adapter);
7717*4882a593Smuzhiyun 	kfree(adapter->tx_ring);
7718*4882a593Smuzhiyun 	kfree(adapter->rx_ring);
7719*4882a593Smuzhiyun 
7720*4882a593Smuzhiyun 	iounmap(adapter->hw.hw_addr);
7721*4882a593Smuzhiyun 	if ((adapter->hw.flash_address) &&
7722*4882a593Smuzhiyun 	    (adapter->hw.mac.type < e1000_pch_spt))
7723*4882a593Smuzhiyun 		iounmap(adapter->hw.flash_address);
7724*4882a593Smuzhiyun 	pci_release_mem_regions(pdev);
7725*4882a593Smuzhiyun 
7726*4882a593Smuzhiyun 	free_netdev(netdev);
7727*4882a593Smuzhiyun 
7728*4882a593Smuzhiyun 	/* AER disable */
7729*4882a593Smuzhiyun 	pci_disable_pcie_error_reporting(pdev);
7730*4882a593Smuzhiyun 
7731*4882a593Smuzhiyun 	pci_disable_device(pdev);
7732*4882a593Smuzhiyun }
7733*4882a593Smuzhiyun 
7734*4882a593Smuzhiyun /* PCI Error Recovery (ERS) */
7735*4882a593Smuzhiyun static const struct pci_error_handlers e1000_err_handler = {
7736*4882a593Smuzhiyun 	.error_detected = e1000_io_error_detected,
7737*4882a593Smuzhiyun 	.slot_reset = e1000_io_slot_reset,
7738*4882a593Smuzhiyun 	.resume = e1000_io_resume,
7739*4882a593Smuzhiyun };
7740*4882a593Smuzhiyun 
7741*4882a593Smuzhiyun static const struct pci_device_id e1000_pci_tbl[] = {
7742*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7743*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7744*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
7745*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
7746*4882a593Smuzhiyun 	  board_82571 },
7747*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
7748*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
7749*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
7750*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
7751*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
7752*4882a593Smuzhiyun 
7753*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
7754*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
7755*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
7756*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
7757*4882a593Smuzhiyun 
7758*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
7759*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
7760*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
7761*4882a593Smuzhiyun 
7762*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
7763*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
7764*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
7765*4882a593Smuzhiyun 
7766*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
7767*4882a593Smuzhiyun 	  board_80003es2lan },
7768*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
7769*4882a593Smuzhiyun 	  board_80003es2lan },
7770*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
7771*4882a593Smuzhiyun 	  board_80003es2lan },
7772*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
7773*4882a593Smuzhiyun 	  board_80003es2lan },
7774*4882a593Smuzhiyun 
7775*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
7776*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
7777*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
7778*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
7779*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
7780*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
7781*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
7782*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
7783*4882a593Smuzhiyun 
7784*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
7785*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
7786*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
7787*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
7788*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
7789*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
7790*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
7791*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
7792*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
7793*4882a593Smuzhiyun 
7794*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
7795*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
7796*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
7797*4882a593Smuzhiyun 
7798*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
7799*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
7800*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
7801*4882a593Smuzhiyun 
7802*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
7803*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
7804*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
7805*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
7806*4882a593Smuzhiyun 
7807*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
7808*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
7809*4882a593Smuzhiyun 
7810*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
7811*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
7812*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
7813*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
7814*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
7815*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
7816*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
7817*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
7818*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
7819*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
7820*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
7821*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
7822*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt },
7823*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt },
7824*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt },
7825*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt },
7826*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt },
7827*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp },
7828*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp },
7829*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp },
7830*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp },
7831*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp },
7832*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
7833*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
7834*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
7835*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
7836*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
7837*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
7838*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
7839*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
7840*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
7841*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
7842*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
7843*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
7844*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
7845*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
7846*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
7847*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
7848*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
7849*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
7850*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
7851*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
7852*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
7853*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
7854*4882a593Smuzhiyun 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
7855*4882a593Smuzhiyun 
7856*4882a593Smuzhiyun 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
7857*4882a593Smuzhiyun };
7858*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
7859*4882a593Smuzhiyun 
7860*4882a593Smuzhiyun static const struct dev_pm_ops e1000_pm_ops = {
7861*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
7862*4882a593Smuzhiyun 	.suspend	= e1000e_pm_suspend,
7863*4882a593Smuzhiyun 	.resume		= e1000e_pm_resume,
7864*4882a593Smuzhiyun 	.freeze		= e1000e_pm_freeze,
7865*4882a593Smuzhiyun 	.thaw		= e1000e_pm_thaw,
7866*4882a593Smuzhiyun 	.poweroff	= e1000e_pm_suspend,
7867*4882a593Smuzhiyun 	.restore	= e1000e_pm_resume,
7868*4882a593Smuzhiyun #endif
7869*4882a593Smuzhiyun 	SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
7870*4882a593Smuzhiyun 			   e1000e_pm_runtime_idle)
7871*4882a593Smuzhiyun };
7872*4882a593Smuzhiyun 
7873*4882a593Smuzhiyun /* PCI Device API Driver */
7874*4882a593Smuzhiyun static struct pci_driver e1000_driver = {
7875*4882a593Smuzhiyun 	.name     = e1000e_driver_name,
7876*4882a593Smuzhiyun 	.id_table = e1000_pci_tbl,
7877*4882a593Smuzhiyun 	.probe    = e1000_probe,
7878*4882a593Smuzhiyun 	.remove   = e1000_remove,
7879*4882a593Smuzhiyun 	.driver   = {
7880*4882a593Smuzhiyun 		.pm = &e1000_pm_ops,
7881*4882a593Smuzhiyun 	},
7882*4882a593Smuzhiyun 	.shutdown = e1000_shutdown,
7883*4882a593Smuzhiyun 	.err_handler = &e1000_err_handler
7884*4882a593Smuzhiyun };
7885*4882a593Smuzhiyun 
7886*4882a593Smuzhiyun /**
7887*4882a593Smuzhiyun  * e1000_init_module - Driver Registration Routine
7888*4882a593Smuzhiyun  *
7889*4882a593Smuzhiyun  * e1000_init_module is the first routine called when the driver is
7890*4882a593Smuzhiyun  * loaded. All it does is register with the PCI subsystem.
7891*4882a593Smuzhiyun  **/
e1000_init_module(void)7892*4882a593Smuzhiyun static int __init e1000_init_module(void)
7893*4882a593Smuzhiyun {
7894*4882a593Smuzhiyun 	pr_info("Intel(R) PRO/1000 Network Driver\n");
7895*4882a593Smuzhiyun 	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
7896*4882a593Smuzhiyun 
7897*4882a593Smuzhiyun 	return pci_register_driver(&e1000_driver);
7898*4882a593Smuzhiyun }
7899*4882a593Smuzhiyun module_init(e1000_init_module);
7900*4882a593Smuzhiyun 
7901*4882a593Smuzhiyun /**
7902*4882a593Smuzhiyun  * e1000_exit_module - Driver Exit Cleanup Routine
7903*4882a593Smuzhiyun  *
7904*4882a593Smuzhiyun  * e1000_exit_module is called just before the driver is removed
7905*4882a593Smuzhiyun  * from memory.
7906*4882a593Smuzhiyun  **/
e1000_exit_module(void)7907*4882a593Smuzhiyun static void __exit e1000_exit_module(void)
7908*4882a593Smuzhiyun {
7909*4882a593Smuzhiyun 	pci_unregister_driver(&e1000_driver);
7910*4882a593Smuzhiyun }
7911*4882a593Smuzhiyun module_exit(e1000_exit_module);
7912*4882a593Smuzhiyun 
7913*4882a593Smuzhiyun MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7914*4882a593Smuzhiyun MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7915*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
7916*4882a593Smuzhiyun 
7917*4882a593Smuzhiyun /* netdev.c */
7918