xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/igb/igb_main.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
3 
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 
6 #include <linux/module.h>
7 #include <linux/types.h>
8 #include <linux/init.h>
9 #include <linux/bitops.h>
10 #include <linux/vmalloc.h>
11 #include <linux/pagemap.h>
12 #include <linux/netdevice.h>
13 #include <linux/ipv6.h>
14 #include <linux/slab.h>
15 #include <net/checksum.h>
16 #include <net/ip6_checksum.h>
17 #include <net/pkt_sched.h>
18 #include <net/pkt_cls.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/mii.h>
21 #include <linux/ethtool.h>
22 #include <linux/if.h>
23 #include <linux/if_vlan.h>
24 #include <linux/pci.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/ip.h>
28 #include <linux/tcp.h>
29 #include <linux/sctp.h>
30 #include <linux/if_ether.h>
31 #include <linux/aer.h>
32 #include <linux/prefetch.h>
33 #include <linux/bpf.h>
34 #include <linux/bpf_trace.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/etherdevice.h>
37 #ifdef CONFIG_IGB_DCA
38 #include <linux/dca.h>
39 #endif
40 #include <linux/i2c.h>
41 #include "igb.h"
42 
43 enum queue_mode {
44 	QUEUE_MODE_STRICT_PRIORITY,
45 	QUEUE_MODE_STREAM_RESERVATION,
46 };
47 
48 enum tx_queue_prio {
49 	TX_QUEUE_PRIO_HIGH,
50 	TX_QUEUE_PRIO_LOW,
51 };
52 
53 char igb_driver_name[] = "igb";
54 static const char igb_driver_string[] =
55 				"Intel(R) Gigabit Ethernet Network Driver";
56 static const char igb_copyright[] =
57 				"Copyright (c) 2007-2014 Intel Corporation.";
58 
59 static const struct e1000_info *igb_info_tbl[] = {
60 	[board_82575] = &e1000_82575_info,
61 };
62 
63 static const struct pci_device_id igb_pci_tbl[] = {
64 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
65 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
66 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
67 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
68 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
69 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
70 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
71 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
72 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
73 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
74 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
75 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
76 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
77 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
78 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
79 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
80 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
81 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
82 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
83 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
84 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
85 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
86 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
87 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
88 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
89 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
90 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
91 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
92 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
93 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
94 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
95 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
96 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
97 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
98 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
99 	/* required last entry */
100 	{0, }
101 };
102 
103 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
104 
105 static int igb_setup_all_tx_resources(struct igb_adapter *);
106 static int igb_setup_all_rx_resources(struct igb_adapter *);
107 static void igb_free_all_tx_resources(struct igb_adapter *);
108 static void igb_free_all_rx_resources(struct igb_adapter *);
109 static void igb_setup_mrqc(struct igb_adapter *);
110 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
111 static void igb_remove(struct pci_dev *pdev);
112 static int igb_sw_init(struct igb_adapter *);
113 int igb_open(struct net_device *);
114 int igb_close(struct net_device *);
115 static void igb_configure(struct igb_adapter *);
116 static void igb_configure_tx(struct igb_adapter *);
117 static void igb_configure_rx(struct igb_adapter *);
118 static void igb_clean_all_tx_rings(struct igb_adapter *);
119 static void igb_clean_all_rx_rings(struct igb_adapter *);
120 static void igb_clean_tx_ring(struct igb_ring *);
121 static void igb_clean_rx_ring(struct igb_ring *);
122 static void igb_set_rx_mode(struct net_device *);
123 static void igb_update_phy_info(struct timer_list *);
124 static void igb_watchdog(struct timer_list *);
125 static void igb_watchdog_task(struct work_struct *);
126 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
127 static void igb_get_stats64(struct net_device *dev,
128 			    struct rtnl_link_stats64 *stats);
129 static int igb_change_mtu(struct net_device *, int);
130 static int igb_set_mac(struct net_device *, void *);
131 static void igb_set_uta(struct igb_adapter *adapter, bool set);
132 static irqreturn_t igb_intr(int irq, void *);
133 static irqreturn_t igb_intr_msi(int irq, void *);
134 static irqreturn_t igb_msix_other(int irq, void *);
135 static irqreturn_t igb_msix_ring(int irq, void *);
136 #ifdef CONFIG_IGB_DCA
137 static void igb_update_dca(struct igb_q_vector *);
138 static void igb_setup_dca(struct igb_adapter *);
139 #endif /* CONFIG_IGB_DCA */
140 static int igb_poll(struct napi_struct *, int);
141 static bool igb_clean_tx_irq(struct igb_q_vector *, int);
142 static int igb_clean_rx_irq(struct igb_q_vector *, int);
143 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
144 static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
145 static void igb_reset_task(struct work_struct *);
146 static void igb_vlan_mode(struct net_device *netdev,
147 			  netdev_features_t features);
148 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
149 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
150 static void igb_restore_vlan(struct igb_adapter *);
151 static void igb_rar_set_index(struct igb_adapter *, u32);
152 static void igb_ping_all_vfs(struct igb_adapter *);
153 static void igb_msg_task(struct igb_adapter *);
154 static void igb_vmm_control(struct igb_adapter *);
155 static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
156 static void igb_flush_mac_table(struct igb_adapter *);
157 static int igb_available_rars(struct igb_adapter *, u8);
158 static void igb_set_default_mac_filter(struct igb_adapter *);
159 static int igb_uc_sync(struct net_device *, const unsigned char *);
160 static int igb_uc_unsync(struct net_device *, const unsigned char *);
161 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
162 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
163 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
164 			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
165 static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
166 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
167 				   bool setting);
168 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
169 				bool setting);
170 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
171 				 struct ifla_vf_info *ivi);
172 static void igb_check_vf_rate_limit(struct igb_adapter *);
173 static void igb_nfc_filter_exit(struct igb_adapter *adapter);
174 static void igb_nfc_filter_restore(struct igb_adapter *adapter);
175 
176 #ifdef CONFIG_PCI_IOV
177 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
178 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
179 static int igb_disable_sriov(struct pci_dev *dev);
180 static int igb_pci_disable_sriov(struct pci_dev *dev);
181 #endif
182 
183 static int igb_suspend(struct device *);
184 static int igb_resume(struct device *);
185 static int igb_runtime_suspend(struct device *dev);
186 static int igb_runtime_resume(struct device *dev);
187 static int igb_runtime_idle(struct device *dev);
188 static const struct dev_pm_ops igb_pm_ops = {
189 	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
190 	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
191 			igb_runtime_idle)
192 };
193 static void igb_shutdown(struct pci_dev *);
194 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
195 #ifdef CONFIG_IGB_DCA
196 static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
197 static struct notifier_block dca_notifier = {
198 	.notifier_call	= igb_notify_dca,
199 	.next		= NULL,
200 	.priority	= 0
201 };
202 #endif
203 #ifdef CONFIG_PCI_IOV
204 static unsigned int max_vfs;
205 module_param(max_vfs, uint, 0);
206 MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
207 #endif /* CONFIG_PCI_IOV */
208 
209 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
210 		     pci_channel_state_t);
211 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
212 static void igb_io_resume(struct pci_dev *);
213 
214 static const struct pci_error_handlers igb_err_handler = {
215 	.error_detected = igb_io_error_detected,
216 	.slot_reset = igb_io_slot_reset,
217 	.resume = igb_io_resume,
218 };
219 
220 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
221 
222 static struct pci_driver igb_driver = {
223 	.name     = igb_driver_name,
224 	.id_table = igb_pci_tbl,
225 	.probe    = igb_probe,
226 	.remove   = igb_remove,
227 #ifdef CONFIG_PM
228 	.driver.pm = &igb_pm_ops,
229 #endif
230 	.shutdown = igb_shutdown,
231 	.sriov_configure = igb_pci_sriov_configure,
232 	.err_handler = &igb_err_handler
233 };
234 
235 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
236 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
237 MODULE_LICENSE("GPL v2");
238 
239 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
240 static int debug = -1;
241 module_param(debug, int, 0);
242 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
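/* Note: the debug level is consumed via netif_msg_init(): a value of N
 * enables the low N NETIF_MSG_* bits, 0 silences the driver, and the
 * default of -1 (out of range) falls back to DEFAULT_MSG_ENABLE above.
 * A minimal sketch of the expected use in igb_probe() (not part of this
 * excerpt):
 *
 *	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 *
 * e.g. "modprobe igb debug=3" yields 0x7, i.e. NETIF_MSG_DRV |
 * NETIF_MSG_PROBE | NETIF_MSG_LINK.
 */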
243 
244 struct igb_reg_info {
245 	u32 ofs;
246 	char *name;
247 };
248 
249 static const struct igb_reg_info igb_reg_info_tbl[] = {
250 
251 	/* General Registers */
252 	{E1000_CTRL, "CTRL"},
253 	{E1000_STATUS, "STATUS"},
254 	{E1000_CTRL_EXT, "CTRL_EXT"},
255 
256 	/* Interrupt Registers */
257 	{E1000_ICR, "ICR"},
258 
259 	/* RX Registers */
260 	{E1000_RCTL, "RCTL"},
261 	{E1000_RDLEN(0), "RDLEN"},
262 	{E1000_RDH(0), "RDH"},
263 	{E1000_RDT(0), "RDT"},
264 	{E1000_RXDCTL(0), "RXDCTL"},
265 	{E1000_RDBAL(0), "RDBAL"},
266 	{E1000_RDBAH(0), "RDBAH"},
267 
268 	/* TX Registers */
269 	{E1000_TCTL, "TCTL"},
270 	{E1000_TDBAL(0), "TDBAL"},
271 	{E1000_TDBAH(0), "TDBAH"},
272 	{E1000_TDLEN(0), "TDLEN"},
273 	{E1000_TDH(0), "TDH"},
274 	{E1000_TDT(0), "TDT"},
275 	{E1000_TXDCTL(0), "TXDCTL"},
276 	{E1000_TDFH, "TDFH"},
277 	{E1000_TDFT, "TDFT"},
278 	{E1000_TDFHS, "TDFHS"},
279 	{E1000_TDFPC, "TDFPC"},
280 
281 	/* List Terminator */
282 	{}
283 };
284 
285 /* igb_regdump - register printout routine */
286 static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
287 {
288 	int n = 0;
289 	char rname[16];
290 	u32 regs[8];
291 
292 	switch (reginfo->ofs) {
293 	case E1000_RDLEN(0):
294 		for (n = 0; n < 4; n++)
295 			regs[n] = rd32(E1000_RDLEN(n));
296 		break;
297 	case E1000_RDH(0):
298 		for (n = 0; n < 4; n++)
299 			regs[n] = rd32(E1000_RDH(n));
300 		break;
301 	case E1000_RDT(0):
302 		for (n = 0; n < 4; n++)
303 			regs[n] = rd32(E1000_RDT(n));
304 		break;
305 	case E1000_RXDCTL(0):
306 		for (n = 0; n < 4; n++)
307 			regs[n] = rd32(E1000_RXDCTL(n));
308 		break;
309 	case E1000_RDBAL(0):
310 		for (n = 0; n < 4; n++)
311 			regs[n] = rd32(E1000_RDBAL(n));
312 		break;
313 	case E1000_RDBAH(0):
314 		for (n = 0; n < 4; n++)
315 			regs[n] = rd32(E1000_RDBAH(n));
316 		break;
317 	case E1000_TDBAL(0):
318 		for (n = 0; n < 4; n++)
319 			regs[n] = rd32(E1000_TDBAL(n));
320 		break;
321 	case E1000_TDBAH(0):
322 		for (n = 0; n < 4; n++)
323 			regs[n] = rd32(E1000_TDBAH(n));
324 		break;
325 	case E1000_TDLEN(0):
326 		for (n = 0; n < 4; n++)
327 			regs[n] = rd32(E1000_TDLEN(n));
328 		break;
329 	case E1000_TDH(0):
330 		for (n = 0; n < 4; n++)
331 			regs[n] = rd32(E1000_TDH(n));
332 		break;
333 	case E1000_TDT(0):
334 		for (n = 0; n < 4; n++)
335 			regs[n] = rd32(E1000_TDT(n));
336 		break;
337 	case E1000_TXDCTL(0):
338 		for (n = 0; n < 4; n++)
339 			regs[n] = rd32(E1000_TXDCTL(n));
340 		break;
341 	default:
342 		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
343 		return;
344 	}
345 
346 	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
347 	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
348 		regs[2], regs[3]);
349 }
350 
351 /* igb_dump - Print registers, Tx-rings and Rx-rings */
352 static void igb_dump(struct igb_adapter *adapter)
353 {
354 	struct net_device *netdev = adapter->netdev;
355 	struct e1000_hw *hw = &adapter->hw;
356 	struct igb_reg_info *reginfo;
357 	struct igb_ring *tx_ring;
358 	union e1000_adv_tx_desc *tx_desc;
359 	struct my_u0 { u64 a; u64 b; } *u0;
360 	struct igb_ring *rx_ring;
361 	union e1000_adv_rx_desc *rx_desc;
362 	u32 staterr;
363 	u16 i, n;
364 
365 	if (!netif_msg_hw(adapter))
366 		return;
367 
368 	/* Print netdevice Info */
369 	if (netdev) {
370 		dev_info(&adapter->pdev->dev, "Net device Info\n");
371 		pr_info("Device Name     state            trans_start\n");
372 		pr_info("%-15s %016lX %016lX\n", netdev->name,
373 			netdev->state, dev_trans_start(netdev));
374 	}
375 
376 	/* Print Registers */
377 	dev_info(&adapter->pdev->dev, "Register Dump\n");
378 	pr_info(" Register Name   Value\n");
379 	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
380 	     reginfo->name; reginfo++) {
381 		igb_regdump(hw, reginfo);
382 	}
383 
384 	/* Print TX Ring Summary */
385 	if (!netdev || !netif_running(netdev))
386 		goto exit;
387 
388 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
389 	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
390 	for (n = 0; n < adapter->num_tx_queues; n++) {
391 		struct igb_tx_buffer *buffer_info;
392 		tx_ring = adapter->tx_ring[n];
393 		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
394 		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
395 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
396 			(u64)dma_unmap_addr(buffer_info, dma),
397 			dma_unmap_len(buffer_info, len),
398 			buffer_info->next_to_watch,
399 			(u64)buffer_info->time_stamp);
400 	}
401 
402 	/* Print TX Rings */
403 	if (!netif_msg_tx_done(adapter))
404 		goto rx_ring_summary;
405 
406 	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
407 
408 	/* Transmit Descriptor Formats
409 	 *
410 	 * Advanced Transmit Descriptor
411 	 *   +--------------------------------------------------------------+
412 	 * 0 |         Buffer Address [63:0]                                |
413 	 *   +--------------------------------------------------------------+
414 	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
415 	 *   +--------------------------------------------------------------+
416 	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
417 	 */
418 
419 	for (n = 0; n < adapter->num_tx_queues; n++) {
420 		tx_ring = adapter->tx_ring[n];
421 		pr_info("------------------------------------\n");
422 		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
423 		pr_info("------------------------------------\n");
424 		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");
425 
426 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
427 			const char *next_desc;
428 			struct igb_tx_buffer *buffer_info;
429 			tx_desc = IGB_TX_DESC(tx_ring, i);
430 			buffer_info = &tx_ring->tx_buffer_info[i];
431 			u0 = (struct my_u0 *)tx_desc;
432 			if (i == tx_ring->next_to_use &&
433 			    i == tx_ring->next_to_clean)
434 				next_desc = " NTC/U";
435 			else if (i == tx_ring->next_to_use)
436 				next_desc = " NTU";
437 			else if (i == tx_ring->next_to_clean)
438 				next_desc = " NTC";
439 			else
440 				next_desc = "";
441 
442 			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
443 				i, le64_to_cpu(u0->a),
444 				le64_to_cpu(u0->b),
445 				(u64)dma_unmap_addr(buffer_info, dma),
446 				dma_unmap_len(buffer_info, len),
447 				buffer_info->next_to_watch,
448 				(u64)buffer_info->time_stamp,
449 				buffer_info->skb, next_desc);
450 
451 			if (netif_msg_pktdata(adapter) && buffer_info->skb)
452 				print_hex_dump(KERN_INFO, "",
453 					DUMP_PREFIX_ADDRESS,
454 					16, 1, buffer_info->skb->data,
455 					dma_unmap_len(buffer_info, len),
456 					true);
457 		}
458 	}
459 
460 	/* Print RX Rings Summary */
461 rx_ring_summary:
462 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
463 	pr_info("Queue [NTU] [NTC]\n");
464 	for (n = 0; n < adapter->num_rx_queues; n++) {
465 		rx_ring = adapter->rx_ring[n];
466 		pr_info(" %5d %5X %5X\n",
467 			n, rx_ring->next_to_use, rx_ring->next_to_clean);
468 	}
469 
470 	/* Print RX Rings */
471 	if (!netif_msg_rx_status(adapter))
472 		goto exit;
473 
474 	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
475 
476 	/* Advanced Receive Descriptor (Read) Format
477 	 *    63                                           1        0
478 	 *    +-----------------------------------------------------+
479 	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
480 	 *    +----------------------------------------------+------+
481 	 *  8 |       Header Buffer Address [63:1]           |  DD  |
482 	 *    +-----------------------------------------------------+
483 	 *
484 	 *
485 	 * Advanced Receive Descriptor (Write-Back) Format
486 	 *
487 	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
488 	 *   +------------------------------------------------------+
489 	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
490 	 *   | Checksum   Ident  |   |           |    | Type | Type |
491 	 *   +------------------------------------------------------+
492 	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
493 	 *   +------------------------------------------------------+
494 	 *   63       48 47    32 31            20 19               0
495 	 */
496 
497 	for (n = 0; n < adapter->num_rx_queues; n++) {
498 		rx_ring = adapter->rx_ring[n];
499 		pr_info("------------------------------------\n");
500 		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
501 		pr_info("------------------------------------\n");
502 		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
503 		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
504 
505 		for (i = 0; i < rx_ring->count; i++) {
506 			const char *next_desc;
507 			struct igb_rx_buffer *buffer_info;
508 			buffer_info = &rx_ring->rx_buffer_info[i];
509 			rx_desc = IGB_RX_DESC(rx_ring, i);
510 			u0 = (struct my_u0 *)rx_desc;
511 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
512 
513 			if (i == rx_ring->next_to_use)
514 				next_desc = " NTU";
515 			else if (i == rx_ring->next_to_clean)
516 				next_desc = " NTC";
517 			else
518 				next_desc = "";
519 
520 			if (staterr & E1000_RXD_STAT_DD) {
521 				/* Descriptor Done */
522 				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
523 					"RWB", i,
524 					le64_to_cpu(u0->a),
525 					le64_to_cpu(u0->b),
526 					next_desc);
527 			} else {
528 				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
529 					"R  ", i,
530 					le64_to_cpu(u0->a),
531 					le64_to_cpu(u0->b),
532 					(u64)buffer_info->dma,
533 					next_desc);
534 
535 				if (netif_msg_pktdata(adapter) &&
536 				    buffer_info->dma && buffer_info->page) {
537 					print_hex_dump(KERN_INFO, "",
538 					  DUMP_PREFIX_ADDRESS,
539 					  16, 1,
540 					  page_address(buffer_info->page) +
541 						      buffer_info->page_offset,
542 					  igb_rx_bufsz(rx_ring), true);
543 				}
544 			}
545 		}
546 	}
547 
548 exit:
549 	return;
550 }
551 
552 /**
553  *  igb_get_i2c_data - Reads the I2C SDA data bit
554  *  @data: opaque pointer to adapter struct
555  *
556  *  Returns the I2C data bit value
557  **/
558 static int igb_get_i2c_data(void *data)
559 {
560 	struct igb_adapter *adapter = (struct igb_adapter *)data;
561 	struct e1000_hw *hw = &adapter->hw;
562 	s32 i2cctl = rd32(E1000_I2CPARAMS);
563 
564 	return !!(i2cctl & E1000_I2C_DATA_IN);
565 }
566 
567 /**
568  *  igb_set_i2c_data - Sets the I2C data bit
569  *  @data: pointer to hardware structure
570  *  @state: I2C data value (0 or 1) to set
571  *
572  *  Sets the I2C data bit
573  **/
574 static void igb_set_i2c_data(void *data, int state)
575 {
576 	struct igb_adapter *adapter = (struct igb_adapter *)data;
577 	struct e1000_hw *hw = &adapter->hw;
578 	s32 i2cctl = rd32(E1000_I2CPARAMS);
579 
580 	if (state)
581 		i2cctl |= E1000_I2C_DATA_OUT;
582 	else
583 		i2cctl &= ~E1000_I2C_DATA_OUT;
584 
585 	i2cctl &= ~E1000_I2C_DATA_OE_N;
586 	i2cctl |= E1000_I2C_CLK_OE_N;
587 	wr32(E1000_I2CPARAMS, i2cctl);
588 	wrfl();
589 
590 }
591 
592 /**
593  *  igb_set_i2c_clk - Sets the I2C SCL clock
594  *  @data: pointer to hardware structure
595  *  @state: state to set clock
596  *
597  *  Sets the I2C clock line to state
598  **/
599 static void igb_set_i2c_clk(void *data, int state)
600 {
601 	struct igb_adapter *adapter = (struct igb_adapter *)data;
602 	struct e1000_hw *hw = &adapter->hw;
603 	s32 i2cctl = rd32(E1000_I2CPARAMS);
604 
605 	if (state) {
606 		i2cctl |= E1000_I2C_CLK_OUT;
607 		i2cctl &= ~E1000_I2C_CLK_OE_N;
608 	} else {
609 		i2cctl &= ~E1000_I2C_CLK_OUT;
610 		i2cctl &= ~E1000_I2C_CLK_OE_N;
611 	}
612 	wr32(E1000_I2CPARAMS, i2cctl);
613 	wrfl();
614 }
615 
616 /**
617  *  igb_get_i2c_clk - Gets the I2C SCL clock state
618  *  @data: pointer to hardware structure
619  *
620  *  Gets the I2C clock state
621  **/
622 static int igb_get_i2c_clk(void *data)
623 {
624 	struct igb_adapter *adapter = (struct igb_adapter *)data;
625 	struct e1000_hw *hw = &adapter->hw;
626 	s32 i2cctl = rd32(E1000_I2CPARAMS);
627 
628 	return !!(i2cctl & E1000_I2C_CLK_IN);
629 }
630 
631 static const struct i2c_algo_bit_data igb_i2c_algo = {
632 	.setsda		= igb_set_i2c_data,
633 	.setscl		= igb_set_i2c_clk,
634 	.getsda		= igb_get_i2c_data,
635 	.getscl		= igb_get_i2c_clk,
636 	.udelay		= 5,
637 	.timeout	= 20,
638 };
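/* Registering the bit-banged bus built from the callbacks above follows
 * the standard i2c-algo-bit pattern.  A minimal sketch of how
 * igb_init_i2c() is expected to wire this up later in the driver
 * (illustrative, assuming the i2c_adap/i2c_algo fields of igb_adapter):
 *
 *	adapter->i2c_adap.owner = THIS_MODULE;
 *	adapter->i2c_algo = igb_i2c_algo;
 *	adapter->i2c_algo.data = adapter;
 *	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
 *	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
 *	err = i2c_bit_add_bus(&adapter->i2c_adap);
 */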
639 
640 /**
641  *  igb_get_hw_dev - return device
642  *  @hw: pointer to hardware structure
643  *
644  *  used by hardware layer to print debugging information
645  **/
646 struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
647 {
648 	struct igb_adapter *adapter = hw->back;
649 	return adapter->netdev;
650 }
651 
652 /**
653  *  igb_init_module - Driver Registration Routine
654  *
655  *  igb_init_module is the first routine called when the driver is
656  *  loaded. All it does is register with the PCI subsystem.
657  **/
658 static int __init igb_init_module(void)
659 {
660 	int ret;
661 
662 	pr_info("%s\n", igb_driver_string);
663 	pr_info("%s\n", igb_copyright);
664 
665 #ifdef CONFIG_IGB_DCA
666 	dca_register_notify(&dca_notifier);
667 #endif
668 	ret = pci_register_driver(&igb_driver);
669 	return ret;
670 }
671 
672 module_init(igb_init_module);
673 
674 /**
675  *  igb_exit_module - Driver Exit Cleanup Routine
676  *
677  *  igb_exit_module is called just before the driver is removed
678  *  from memory.
679  **/
680 static void __exit igb_exit_module(void)
681 {
682 #ifdef CONFIG_IGB_DCA
683 	dca_unregister_notify(&dca_notifier);
684 #endif
685 	pci_unregister_driver(&igb_driver);
686 }
687 
688 module_exit(igb_exit_module);
689 
690 #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
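/* Expanding Q_IDX_82576(i) shows the interleaving used below: i = 0, 1,
 * 2, 3, 4, ... maps to register offsets 0, 8, 1, 9, 2, ..., i.e. even i
 * selects row i/2 of the first queue bank and odd i selects row i/2 of
 * the second (offset-8) bank, matching the VF pairing described in
 * igb_cache_ring_register().
 */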
691 /**
692  *  igb_cache_ring_register - Descriptor ring to register mapping
693  *  @adapter: board private structure to initialize
694  *
695  *  Once we know the feature-set enabled for the device, we'll cache
696  *  the register offset the descriptor ring is assigned to.
697  **/
698 static void igb_cache_ring_register(struct igb_adapter *adapter)
699 {
700 	int i = 0, j = 0;
701 	u32 rbase_offset = adapter->vfs_allocated_count;
702 
703 	switch (adapter->hw.mac.type) {
704 	case e1000_82576:
705 		/* The queues are allocated for virtualization such that VF 0
706 		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
707 		 * In order to avoid collision we start at the first free queue
708 		 * and continue consuming queues in the same sequence
709 		 */
710 		if (adapter->vfs_allocated_count) {
711 			for (; i < adapter->rss_queues; i++)
712 				adapter->rx_ring[i]->reg_idx = rbase_offset +
713 							       Q_IDX_82576(i);
714 		}
715 		fallthrough;
716 	case e1000_82575:
717 	case e1000_82580:
718 	case e1000_i350:
719 	case e1000_i354:
720 	case e1000_i210:
721 	case e1000_i211:
722 	default:
723 		for (; i < adapter->num_rx_queues; i++)
724 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
725 		for (; j < adapter->num_tx_queues; j++)
726 			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
727 		break;
728 	}
729 }
730 
731 u32 igb_rd32(struct e1000_hw *hw, u32 reg)
732 {
733 	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
734 	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
735 	u32 value = 0;
736 
737 	if (E1000_REMOVED(hw_addr))
738 		return ~value;
739 
740 	value = readl(&hw_addr[reg]);
741 
742 	/* reads should not return all F's */
743 	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
744 		struct net_device *netdev = igb->netdev;
745 		hw->hw_addr = NULL;
746 		netdev_err(netdev, "PCIe link lost\n");
747 		WARN(pci_device_is_present(igb->pdev),
748 		     "igb: Failed to read reg 0x%x!\n", reg);
749 	}
750 
751 	return value;
752 }
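/* The rd32()/wr32() shorthands used throughout this file resolve to
 * igb_rd32()/writel() and assume a local "hw" pointer is in scope;
 * their definitions (from the register headers, approximately) are:
 *
 *	#define rd32(reg)	(igb_rd32(hw, reg))
 *	#define wr32(reg, value)	(writel(value, hw->hw_addr + reg))
 *
 * The all-Fs check above is what detects surprise removal: a read from
 * a detached PCIe device returns 0xFFFFFFFF, after which hw_addr is
 * cleared so E1000_REMOVED() short-circuits subsequent accesses.
 */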
753 
754 /**
755  *  igb_write_ivar - configure ivar for given MSI-X vector
756  *  @hw: pointer to the HW structure
757  *  @msix_vector: vector number we are allocating to a given ring
758  *  @index: row index of IVAR register to write within IVAR table
759  *  @offset: column offset of in IVAR, should be multiple of 8
760  *
761  *  This function is intended to handle the writing of the IVAR register
762  *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
763  *  each containing a cause allocation for an Rx and Tx ring, and a
764  *  variable number of rows depending on the number of queues supported.
765  **/
766 static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
767 			   int index, int offset)
768 {
769 	u32 ivar = array_rd32(E1000_IVAR0, index);
770 
771 	/* clear any bits that are currently set */
772 	ivar &= ~((u32)0xFF << offset);
773 
774 	/* write vector and valid bit */
775 	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
776 
777 	array_wr32(E1000_IVAR0, index, ivar);
778 }
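/* Worked example of the (index, offset) math used by the callers below:
 *
 *   82576, rx_queue = 10:  index  = 10 & 0x7        = 2
 *                          offset = (10 & 0x8) << 1 = 16  (Rx column)
 *   82580+, tx_queue = 5:  index  = 5 >> 1              = 2
 *                          offset = ((5 & 0x1) << 4) + 8 = 24  (Tx column)
 *
 * In both cases (msix_vector | E1000_IVAR_VALID) is shifted into the
 * selected 8-bit cause-allocation field of the IVAR entry.
 */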
779 
780 #define IGB_N0_QUEUE -1
781 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
782 {
783 	struct igb_adapter *adapter = q_vector->adapter;
784 	struct e1000_hw *hw = &adapter->hw;
785 	int rx_queue = IGB_N0_QUEUE;
786 	int tx_queue = IGB_N0_QUEUE;
787 	u32 msixbm = 0;
788 
789 	if (q_vector->rx.ring)
790 		rx_queue = q_vector->rx.ring->reg_idx;
791 	if (q_vector->tx.ring)
792 		tx_queue = q_vector->tx.ring->reg_idx;
793 
794 	switch (hw->mac.type) {
795 	case e1000_82575:
796 		/* The 82575 assigns vectors using a bitmask, which matches the
797 		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
798 		 * or more queues to a vector, we write the appropriate bits
799 		 * into the MSIXBM register for that vector.
800 		 */
801 		if (rx_queue > IGB_N0_QUEUE)
802 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
803 		if (tx_queue > IGB_N0_QUEUE)
804 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
805 		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
806 			msixbm |= E1000_EIMS_OTHER;
807 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
808 		q_vector->eims_value = msixbm;
809 		break;
810 	case e1000_82576:
811 		/* 82576 uses a table that essentially consists of 2 columns
812 		 * with 8 rows.  The ordering is column-major so we use the
813 		 * lower 3 bits as the row index, and the 4th bit as the
814 		 * column offset.
815 		 */
816 		if (rx_queue > IGB_N0_QUEUE)
817 			igb_write_ivar(hw, msix_vector,
818 				       rx_queue & 0x7,
819 				       (rx_queue & 0x8) << 1);
820 		if (tx_queue > IGB_N0_QUEUE)
821 			igb_write_ivar(hw, msix_vector,
822 				       tx_queue & 0x7,
823 				       ((tx_queue & 0x8) << 1) + 8);
824 		q_vector->eims_value = BIT(msix_vector);
825 		break;
826 	case e1000_82580:
827 	case e1000_i350:
828 	case e1000_i354:
829 	case e1000_i210:
830 	case e1000_i211:
831 		/* On 82580 and newer adapters the scheme is similar to 82576
832 		 * however instead of ordering column-major we have things
833 		 * ordered row-major.  So we traverse the table by using
834 		 * bit 0 as the column offset, and the remaining bits as the
835 		 * row index.
836 		 */
837 		if (rx_queue > IGB_N0_QUEUE)
838 			igb_write_ivar(hw, msix_vector,
839 				       rx_queue >> 1,
840 				       (rx_queue & 0x1) << 4);
841 		if (tx_queue > IGB_N0_QUEUE)
842 			igb_write_ivar(hw, msix_vector,
843 				       tx_queue >> 1,
844 				       ((tx_queue & 0x1) << 4) + 8);
845 		q_vector->eims_value = BIT(msix_vector);
846 		break;
847 	default:
848 		BUG();
849 		break;
850 	}
851 
852 	/* add q_vector eims value to global eims_enable_mask */
853 	adapter->eims_enable_mask |= q_vector->eims_value;
854 
855 	/* configure q_vector to set itr on first interrupt */
856 	q_vector->set_itr = 1;
857 }
858 
859 /**
860  *  igb_configure_msix - Configure MSI-X hardware
861  *  @adapter: board private structure to initialize
862  *
863  *  igb_configure_msix sets up the hardware to properly
864  *  generate MSI-X interrupts.
865  **/
866 static void igb_configure_msix(struct igb_adapter *adapter)
867 {
868 	u32 tmp;
869 	int i, vector = 0;
870 	struct e1000_hw *hw = &adapter->hw;
871 
872 	adapter->eims_enable_mask = 0;
873 
874 	/* set vector for other causes, i.e. link changes */
875 	switch (hw->mac.type) {
876 	case e1000_82575:
877 		tmp = rd32(E1000_CTRL_EXT);
878 		/* enable MSI-X PBA support */
879 		tmp |= E1000_CTRL_EXT_PBA_CLR;
880 
881 		/* Auto-Mask interrupts upon ICR read. */
882 		tmp |= E1000_CTRL_EXT_EIAME;
883 		tmp |= E1000_CTRL_EXT_IRCA;
884 
885 		wr32(E1000_CTRL_EXT, tmp);
886 
887 		/* enable msix_other interrupt */
888 		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
889 		adapter->eims_other = E1000_EIMS_OTHER;
890 
891 		break;
892 
893 	case e1000_82576:
894 	case e1000_82580:
895 	case e1000_i350:
896 	case e1000_i354:
897 	case e1000_i210:
898 	case e1000_i211:
899 		/* Turn on MSI-X capability first, or our settings
900 		 * won't stick.  And it will take days to debug.
901 		 */
902 		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
903 		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
904 		     E1000_GPIE_NSICR);
905 
906 		/* enable msix_other interrupt */
907 		adapter->eims_other = BIT(vector);
908 		tmp = (vector++ | E1000_IVAR_VALID) << 8;
909 
910 		wr32(E1000_IVAR_MISC, tmp);
911 		break;
912 	default:
913 		/* do nothing, since nothing else supports MSI-X */
914 		break;
915 	} /* switch (hw->mac.type) */
916 
917 	adapter->eims_enable_mask |= adapter->eims_other;
918 
919 	for (i = 0; i < adapter->num_q_vectors; i++)
920 		igb_assign_vector(adapter->q_vector[i], vector++);
921 
922 	wrfl();
923 }
924 
925 /**
926  *  igb_request_msix - Initialize MSI-X interrupts
927  *  @adapter: board private structure to initialize
928  *
929  *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
930  *  kernel.
931  **/
932 static int igb_request_msix(struct igb_adapter *adapter)
933 {
934 	unsigned int num_q_vectors = adapter->num_q_vectors;
935 	struct net_device *netdev = adapter->netdev;
936 	int i, err = 0, vector = 0, free_vector = 0;
937 
938 	err = request_irq(adapter->msix_entries[vector].vector,
939 			  igb_msix_other, 0, netdev->name, adapter);
940 	if (err)
941 		goto err_out;
942 
943 	if (num_q_vectors > MAX_Q_VECTORS) {
944 		num_q_vectors = MAX_Q_VECTORS;
945 		dev_warn(&adapter->pdev->dev,
946 			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
947 			 adapter->num_q_vectors, MAX_Q_VECTORS);
948 	}
949 	for (i = 0; i < num_q_vectors; i++) {
950 		struct igb_q_vector *q_vector = adapter->q_vector[i];
951 
952 		vector++;
953 
954 		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
955 
956 		if (q_vector->rx.ring && q_vector->tx.ring)
957 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
958 				q_vector->rx.ring->queue_index);
959 		else if (q_vector->tx.ring)
960 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
961 				q_vector->tx.ring->queue_index);
962 		else if (q_vector->rx.ring)
963 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
964 				q_vector->rx.ring->queue_index);
965 		else
966 			sprintf(q_vector->name, "%s-unused", netdev->name);
967 
968 		err = request_irq(adapter->msix_entries[vector].vector,
969 				  igb_msix_ring, 0, q_vector->name,
970 				  q_vector);
971 		if (err)
972 			goto err_free;
973 	}
974 
975 	igb_configure_msix(adapter);
976 	return 0;
977 
978 err_free:
979 	/* free already assigned IRQs */
980 	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
981 
982 	vector--;
983 	for (i = 0; i < vector; i++) {
984 		free_irq(adapter->msix_entries[free_vector++].vector,
985 			 adapter->q_vector[i]);
986 	}
987 err_out:
988 	return err;
989 }
990 
991 /**
992  *  igb_free_q_vector - Free memory allocated for specific interrupt vector
993  *  @adapter: board private structure to initialize
994  *  @v_idx: Index of vector to be freed
995  *
996  *  This function frees the memory allocated to the q_vector.
997  **/
998 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
999 {
1000 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1001 
1002 	adapter->q_vector[v_idx] = NULL;
1003 
1004 	/* igb_get_stats64() might access the rings on this vector,
1005 	 * we must wait a grace period before freeing it.
1006 	 */
1007 	if (q_vector)
1008 		kfree_rcu(q_vector, rcu);
1009 }
1010 
1011 /**
1012  *  igb_reset_q_vector - Reset config for interrupt vector
1013  *  @adapter: board private structure to initialize
1014  *  @v_idx: Index of vector to be reset
1015  *
1016  *  If NAPI is enabled it will delete any references to the
1017  *  NAPI struct. This is preparation for igb_free_q_vector.
1018  **/
1019 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1020 {
1021 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1022 
1023 	/* Coming from igb_set_interrupt_capability, the vectors are not yet
1024 	 * allocated, so q_vector is NULL and we should stop here.
1025 	 */
1026 	if (!q_vector)
1027 		return;
1028 
1029 	if (q_vector->tx.ring)
1030 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1031 
1032 	if (q_vector->rx.ring)
1033 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1034 
1035 	netif_napi_del(&q_vector->napi);
1036 
1037 }
1038 
1039 static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1040 {
1041 	int v_idx = adapter->num_q_vectors;
1042 
1043 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
1044 		pci_disable_msix(adapter->pdev);
1045 	else if (adapter->flags & IGB_FLAG_HAS_MSI)
1046 		pci_disable_msi(adapter->pdev);
1047 
1048 	while (v_idx--)
1049 		igb_reset_q_vector(adapter, v_idx);
1050 }
1051 
1052 /**
1053  *  igb_free_q_vectors - Free memory allocated for interrupt vectors
1054  *  @adapter: board private structure to initialize
1055  *
1056  *  This function frees the memory allocated to the q_vectors.  In addition if
1057  *  NAPI is enabled it will delete any references to the NAPI struct prior
1058  *  to freeing the q_vector.
1059  **/
1060 static void igb_free_q_vectors(struct igb_adapter *adapter)
1061 {
1062 	int v_idx = adapter->num_q_vectors;
1063 
1064 	adapter->num_tx_queues = 0;
1065 	adapter->num_rx_queues = 0;
1066 	adapter->num_q_vectors = 0;
1067 
1068 	while (v_idx--) {
1069 		igb_reset_q_vector(adapter, v_idx);
1070 		igb_free_q_vector(adapter, v_idx);
1071 	}
1072 }
1073 
1074 /**
1075  *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1076  *  @adapter: board private structure to initialize
1077  *
1078  *  This function resets the device so that it has 0 Rx queues, Tx queues, and
1079  *  MSI-X interrupts allocated.
1080  */
1081 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1082 {
1083 	igb_free_q_vectors(adapter);
1084 	igb_reset_interrupt_capability(adapter);
1085 }
1086 
1087 /**
1088  *  igb_set_interrupt_capability - set MSI or MSI-X if supported
1089  *  @adapter: board private structure to initialize
1090  *  @msix: boolean value of MSIX capability
1091  *
1092  *  Attempt to configure interrupts using the best available
1093  *  capabilities of the hardware and kernel.
1094  **/
1095 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1096 {
1097 	int err;
1098 	int numvecs, i;
1099 
1100 	if (!msix)
1101 		goto msi_only;
1102 	adapter->flags |= IGB_FLAG_HAS_MSIX;
1103 
1104 	/* Number of supported queues. */
1105 	adapter->num_rx_queues = adapter->rss_queues;
1106 	if (adapter->vfs_allocated_count)
1107 		adapter->num_tx_queues = 1;
1108 	else
1109 		adapter->num_tx_queues = adapter->rss_queues;
1110 
1111 	/* start with one vector for every Rx queue */
1112 	numvecs = adapter->num_rx_queues;
1113 
1114 	/* if Tx handler is separate add 1 for every Tx queue */
1115 	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1116 		numvecs += adapter->num_tx_queues;
1117 
1118 	/* store the number of vectors reserved for queues */
1119 	adapter->num_q_vectors = numvecs;
1120 
1121 	/* add 1 vector for link status interrupts */
1122 	numvecs++;
1123 	for (i = 0; i < numvecs; i++)
1124 		adapter->msix_entries[i].entry = i;
1125 
1126 	err = pci_enable_msix_range(adapter->pdev,
1127 				    adapter->msix_entries,
1128 				    numvecs,
1129 				    numvecs);
1130 	if (err > 0)
1131 		return;
1132 
1133 	igb_reset_interrupt_capability(adapter);
1134 
1135 	/* If we can't do MSI-X, try MSI */
1136 msi_only:
1137 	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1138 #ifdef CONFIG_PCI_IOV
1139 	/* disable SR-IOV for non MSI-X configurations */
1140 	if (adapter->vf_data) {
1141 		struct e1000_hw *hw = &adapter->hw;
1142 		/* disable iov and allow time for transactions to clear */
1143 		pci_disable_sriov(adapter->pdev);
1144 		msleep(500);
1145 
1146 		kfree(adapter->vf_mac_list);
1147 		adapter->vf_mac_list = NULL;
1148 		kfree(adapter->vf_data);
1149 		adapter->vf_data = NULL;
1150 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1151 		wrfl();
1152 		msleep(100);
1153 		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1154 	}
1155 #endif
1156 	adapter->vfs_allocated_count = 0;
1157 	adapter->rss_queues = 1;
1158 	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1159 	adapter->num_rx_queues = 1;
1160 	adapter->num_tx_queues = 1;
1161 	adapter->num_q_vectors = 1;
1162 	if (!pci_enable_msi(adapter->pdev))
1163 		adapter->flags |= IGB_FLAG_HAS_MSI;
1164 }
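/* Worked example of the vector budget above: with rss_queues = 4, no
 * VFs and IGB_FLAG_QUEUE_PAIRS clear, the request is 4 Rx + 4 Tx + 1
 * link vector = 9 MSI-X vectors; with queue pairing in effect each
 * vector services one Rx/Tx pair, so the request drops to 4 + 1 = 5.
 */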
1165 
1166 static void igb_add_ring(struct igb_ring *ring,
1167 			 struct igb_ring_container *head)
1168 {
1169 	head->ring = ring;
1170 	head->count++;
1171 }
1172 
1173 /**
1174  *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
1175  *  @adapter: board private structure to initialize
1176  *  @v_count: q_vectors allocated on adapter, used for ring interleaving
1177  *  @v_idx: index of vector in adapter struct
1178  *  @txr_count: total number of Tx rings to allocate
1179  *  @txr_idx: index of first Tx ring to allocate
1180  *  @rxr_count: total number of Rx rings to allocate
1181  *  @rxr_idx: index of first Rx ring to allocate
1182  *
1183  *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
1184  **/
1185 static int igb_alloc_q_vector(struct igb_adapter *adapter,
1186 			      int v_count, int v_idx,
1187 			      int txr_count, int txr_idx,
1188 			      int rxr_count, int rxr_idx)
1189 {
1190 	struct igb_q_vector *q_vector;
1191 	struct igb_ring *ring;
1192 	int ring_count;
1193 	size_t size;
1194 
1195 	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
1196 	if (txr_count > 1 || rxr_count > 1)
1197 		return -ENOMEM;
1198 
1199 	ring_count = txr_count + rxr_count;
1200 	size = struct_size(q_vector, ring, ring_count);
1201 
1202 	/* allocate q_vector and rings */
1203 	q_vector = adapter->q_vector[v_idx];
1204 	if (!q_vector) {
1205 		q_vector = kzalloc(size, GFP_KERNEL);
1206 	} else if (size > ksize(q_vector)) {
1207 		kfree_rcu(q_vector, rcu);
1208 		q_vector = kzalloc(size, GFP_KERNEL);
1209 	} else {
1210 		memset(q_vector, 0, size);
1211 	}
1212 	if (!q_vector)
1213 		return -ENOMEM;
1214 
1215 	/* initialize NAPI */
1216 	netif_napi_add(adapter->netdev, &q_vector->napi,
1217 		       igb_poll, 64);
1218 
1219 	/* tie q_vector and adapter together */
1220 	adapter->q_vector[v_idx] = q_vector;
1221 	q_vector->adapter = adapter;
1222 
1223 	/* initialize work limits */
1224 	q_vector->tx.work_limit = adapter->tx_work_limit;
1225 
1226 	/* initialize ITR configuration */
1227 	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1228 	q_vector->itr_val = IGB_START_ITR;
1229 
1230 	/* initialize pointer to rings */
1231 	ring = q_vector->ring;
1232 
1233 	/* initialize ITR */
1234 	if (rxr_count) {
1235 		/* rx or rx/tx vector */
1236 		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1237 			q_vector->itr_val = adapter->rx_itr_setting;
1238 	} else {
1239 		/* tx only vector */
1240 		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1241 			q_vector->itr_val = adapter->tx_itr_setting;
1242 	}
1243 
1244 	if (txr_count) {
1245 		/* assign generic ring traits */
1246 		ring->dev = &adapter->pdev->dev;
1247 		ring->netdev = adapter->netdev;
1248 
1249 		/* configure backlink on ring */
1250 		ring->q_vector = q_vector;
1251 
1252 		/* update q_vector Tx values */
1253 		igb_add_ring(ring, &q_vector->tx);
1254 
1255 		/* For 82575, context index must be unique per ring. */
1256 		if (adapter->hw.mac.type == e1000_82575)
1257 			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1258 
1259 		/* apply Tx specific ring traits */
1260 		ring->count = adapter->tx_ring_count;
1261 		ring->queue_index = txr_idx;
1262 
1263 		ring->cbs_enable = false;
1264 		ring->idleslope = 0;
1265 		ring->sendslope = 0;
1266 		ring->hicredit = 0;
1267 		ring->locredit = 0;
1268 
1269 		u64_stats_init(&ring->tx_syncp);
1270 		u64_stats_init(&ring->tx_syncp2);
1271 
1272 		/* assign ring to adapter */
1273 		adapter->tx_ring[txr_idx] = ring;
1274 
1275 		/* push pointer to next ring */
1276 		ring++;
1277 	}
1278 
1279 	if (rxr_count) {
1280 		/* assign generic ring traits */
1281 		ring->dev = &adapter->pdev->dev;
1282 		ring->netdev = adapter->netdev;
1283 
1284 		/* configure backlink on ring */
1285 		ring->q_vector = q_vector;
1286 
1287 		/* update q_vector Rx values */
1288 		igb_add_ring(ring, &q_vector->rx);
1289 
1290 		/* set flag indicating ring supports SCTP checksum offload */
1291 		if (adapter->hw.mac.type >= e1000_82576)
1292 			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1293 
1294 		/* On i350, i354, i210, and i211, loopback VLAN packets
1295 		 * have the tag byte-swapped.
1296 		 */
1297 		if (adapter->hw.mac.type >= e1000_i350)
1298 			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1299 
1300 		/* apply Rx specific ring traits */
1301 		ring->count = adapter->rx_ring_count;
1302 		ring->queue_index = rxr_idx;
1303 
1304 		u64_stats_init(&ring->rx_syncp);
1305 
1306 		/* assign ring to adapter */
1307 		adapter->rx_ring[rxr_idx] = ring;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 
1314 /**
1315  *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
1316  *  @adapter: board private structure to initialize
1317  *
1318  *  We allocate one q_vector per queue interrupt.  If allocation fails we
1319  *  return -ENOMEM.
1320  **/
1321 static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1322 {
1323 	int q_vectors = adapter->num_q_vectors;
1324 	int rxr_remaining = adapter->num_rx_queues;
1325 	int txr_remaining = adapter->num_tx_queues;
1326 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1327 	int err;
1328 
1329 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
1330 		for (; rxr_remaining; v_idx++) {
1331 			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1332 						 0, 0, 1, rxr_idx);
1333 
1334 			if (err)
1335 				goto err_out;
1336 
1337 			/* update counts and index */
1338 			rxr_remaining--;
1339 			rxr_idx++;
1340 		}
1341 	}
1342 
1343 	for (; v_idx < q_vectors; v_idx++) {
1344 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1345 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1346 
1347 		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1348 					 tqpv, txr_idx, rqpv, rxr_idx);
1349 
1350 		if (err)
1351 			goto err_out;
1352 
1353 		/* update counts and index */
1354 		rxr_remaining -= rqpv;
1355 		txr_remaining -= tqpv;
1356 		rxr_idx++;
1357 		txr_idx++;
1358 	}
1359 
1360 	return 0;
1361 
1362 err_out:
1363 	adapter->num_tx_queues = 0;
1364 	adapter->num_rx_queues = 0;
1365 	adapter->num_q_vectors = 0;
1366 
1367 	while (v_idx--)
1368 		igb_free_q_vector(adapter, v_idx);
1369 
1370 	return -ENOMEM;
1371 }
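/* Worked example of the distribution above: with 4 q_vectors for 4 Rx
 * and 4 Tx rings (queue pairing), the first pass is skipped (4 < 8) and
 * the second loop assigns rqpv = tqpv = DIV_ROUND_UP(4, 4) = 1, so each
 * vector services one Rx/Tx pair.  With 8 q_vectors (no pairing) the
 * first pass gives every Rx ring a dedicated vector and the second loop
 * hands each remaining vector one Tx ring.
 */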
1372 
1373 /**
1374  *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1375  *  @adapter: board private structure to initialize
1376  *  @msix: boolean value of MSIX capability
1377  *
1378  *  This function initializes the interrupts and allocates all of the queues.
1379  **/
1380 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1381 {
1382 	struct pci_dev *pdev = adapter->pdev;
1383 	int err;
1384 
1385 	igb_set_interrupt_capability(adapter, msix);
1386 
1387 	err = igb_alloc_q_vectors(adapter);
1388 	if (err) {
1389 		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1390 		goto err_alloc_q_vectors;
1391 	}
1392 
1393 	igb_cache_ring_register(adapter);
1394 
1395 	return 0;
1396 
1397 err_alloc_q_vectors:
1398 	igb_reset_interrupt_capability(adapter);
1399 	return err;
1400 }
1401 
1402 /**
1403  *  igb_request_irq - initialize interrupts
1404  *  @adapter: board private structure to initialize
1405  *
1406  *  Attempts to configure interrupts using the best available
1407  *  capabilities of the hardware and kernel.
1408  **/
1409 static int igb_request_irq(struct igb_adapter *adapter)
1410 {
1411 	struct net_device *netdev = adapter->netdev;
1412 	struct pci_dev *pdev = adapter->pdev;
1413 	int err = 0;
1414 
1415 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1416 		err = igb_request_msix(adapter);
1417 		if (!err)
1418 			goto request_done;
1419 		/* fall back to MSI */
1420 		igb_free_all_tx_resources(adapter);
1421 		igb_free_all_rx_resources(adapter);
1422 
1423 		igb_clear_interrupt_scheme(adapter);
1424 		err = igb_init_interrupt_scheme(adapter, false);
1425 		if (err)
1426 			goto request_done;
1427 
1428 		igb_setup_all_tx_resources(adapter);
1429 		igb_setup_all_rx_resources(adapter);
1430 		igb_configure(adapter);
1431 	}
1432 
1433 	igb_assign_vector(adapter->q_vector[0], 0);
1434 
1435 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
1436 		err = request_irq(pdev->irq, igb_intr_msi, 0,
1437 				  netdev->name, adapter);
1438 		if (!err)
1439 			goto request_done;
1440 
1441 		/* fall back to legacy interrupts */
1442 		igb_reset_interrupt_capability(adapter);
1443 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
1444 	}
1445 
1446 	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1447 			  netdev->name, adapter);
1448 
1449 	if (err)
1450 		dev_err(&pdev->dev, "Error %d getting interrupt\n",
1451 			err);
1452 
1453 request_done:
1454 	return err;
1455 }
1456 
1457 static void igb_free_irq(struct igb_adapter *adapter)
1458 {
1459 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1460 		int vector = 0, i;
1461 
1462 		free_irq(adapter->msix_entries[vector++].vector, adapter);
1463 
1464 		for (i = 0; i < adapter->num_q_vectors; i++)
1465 			free_irq(adapter->msix_entries[vector++].vector,
1466 				 adapter->q_vector[i]);
1467 	} else {
1468 		free_irq(adapter->pdev->irq, adapter);
1469 	}
1470 }
1471 
1472 /**
1473  *  igb_irq_disable - Mask off interrupt generation on the NIC
1474  *  @adapter: board private structure
1475  **/
1476 static void igb_irq_disable(struct igb_adapter *adapter)
1477 {
1478 	struct e1000_hw *hw = &adapter->hw;
1479 
1480 	/* we need to be careful when disabling interrupts.  The VFs are also
1481 	 * mapped into these registers and so clearing the bits can cause
1482 	 * issues on the VF drivers so we only need to clear what we set
1483 	 */
1484 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1485 		u32 regval = rd32(E1000_EIAM);
1486 
1487 		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1488 		wr32(E1000_EIMC, adapter->eims_enable_mask);
1489 		regval = rd32(E1000_EIAC);
1490 		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1491 	}
1492 
1493 	wr32(E1000_IAM, 0);
1494 	wr32(E1000_IMC, ~0);
1495 	wrfl();
1496 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1497 		int i;
1498 
1499 		for (i = 0; i < adapter->num_q_vectors; i++)
1500 			synchronize_irq(adapter->msix_entries[i].vector);
1501 	} else {
1502 		synchronize_irq(adapter->pdev->irq);
1503 	}
1504 }
1505 
1506 /**
1507  *  igb_irq_enable - Enable default interrupt generation settings
1508  *  @adapter: board private structure
1509  **/
1510 static void igb_irq_enable(struct igb_adapter *adapter)
1511 {
1512 	struct e1000_hw *hw = &adapter->hw;
1513 
1514 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1515 		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1516 		u32 regval = rd32(E1000_EIAC);
1517 
1518 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1519 		regval = rd32(E1000_EIAM);
1520 		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1521 		wr32(E1000_EIMS, adapter->eims_enable_mask);
1522 		if (adapter->vfs_allocated_count) {
1523 			wr32(E1000_MBVFIMR, 0xFF);
1524 			ims |= E1000_IMS_VMMB;
1525 		}
1526 		wr32(E1000_IMS, ims);
1527 	} else {
1528 		wr32(E1000_IMS, IMS_ENABLE_MASK |
1529 				E1000_IMS_DRSTA);
1530 		wr32(E1000_IAM, IMS_ENABLE_MASK |
1531 				E1000_IMS_DRSTA);
1532 	}
1533 }
1534 
1535 static void igb_update_mng_vlan(struct igb_adapter *adapter)
1536 {
1537 	struct e1000_hw *hw = &adapter->hw;
1538 	u16 pf_id = adapter->vfs_allocated_count;
1539 	u16 vid = adapter->hw.mng_cookie.vlan_id;
1540 	u16 old_vid = adapter->mng_vlan_id;
1541 
1542 	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1543 		/* add VID to filter table */
1544 		igb_vfta_set(hw, vid, pf_id, true, true);
1545 		adapter->mng_vlan_id = vid;
1546 	} else {
1547 		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1548 	}
1549 
1550 	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1551 	    (vid != old_vid) &&
1552 	    !test_bit(old_vid, adapter->active_vlans)) {
1553 		/* remove VID from filter table */
1554 		igb_vfta_set(hw, old_vid, pf_id, false, true);
1555 	}
1556 }
1557 
1558 /**
1559  *  igb_release_hw_control - release control of the h/w to f/w
1560  *  @adapter: address of board private structure
1561  *
1562  *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1563  *  For ASF and Pass Through versions of f/w this means that the
1564  *  driver is no longer loaded.
1565  **/
1566 static void igb_release_hw_control(struct igb_adapter *adapter)
1567 {
1568 	struct e1000_hw *hw = &adapter->hw;
1569 	u32 ctrl_ext;
1570 
1571 	/* Let firmware take over control of h/w */
1572 	ctrl_ext = rd32(E1000_CTRL_EXT);
1573 	wr32(E1000_CTRL_EXT,
1574 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1575 }
1576 
1577 /**
1578  *  igb_get_hw_control - get control of the h/w from f/w
1579  *  @adapter: address of board private structure
1580  *
1581  *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1582  *  For ASF and Pass Through versions of f/w this means that
1583  *  the driver is loaded.
1584  **/
1585 static void igb_get_hw_control(struct igb_adapter *adapter)
1586 {
1587 	struct e1000_hw *hw = &adapter->hw;
1588 	u32 ctrl_ext;
1589 
1590 	/* Let firmware know the driver has taken over */
1591 	ctrl_ext = rd32(E1000_CTRL_EXT);
1592 	wr32(E1000_CTRL_EXT,
1593 			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1594 }
1595 
1596 static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1597 {
1598 	struct net_device *netdev = adapter->netdev;
1599 	struct e1000_hw *hw = &adapter->hw;
1600 
1601 	WARN_ON(hw->mac.type != e1000_i210);
1602 
1603 	if (enable)
1604 		adapter->flags |= IGB_FLAG_FQTSS;
1605 	else
1606 		adapter->flags &= ~IGB_FLAG_FQTSS;
1607 
1608 	if (netif_running(netdev))
1609 		schedule_work(&adapter->reset_task);
1610 }
1611 
1612 static bool is_fqtss_enabled(struct igb_adapter *adapter)
1613 {
1614 	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1615 }
1616 
1617 static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1618 				   enum tx_queue_prio prio)
1619 {
1620 	u32 val;
1621 
1622 	WARN_ON(hw->mac.type != e1000_i210);
1623 	WARN_ON(queue < 0 || queue > 3);
1624 
1625 	val = rd32(E1000_I210_TXDCTL(queue));
1626 
1627 	if (prio == TX_QUEUE_PRIO_HIGH)
1628 		val |= E1000_TXDCTL_PRIORITY;
1629 	else
1630 		val &= ~E1000_TXDCTL_PRIORITY;
1631 
1632 	wr32(E1000_I210_TXDCTL(queue), val);
1633 }
1634 
1635 static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1636 {
1637 	u32 val;
1638 
1639 	WARN_ON(hw->mac.type != e1000_i210);
1640 	WARN_ON(queue < 0 || queue > 1);
1641 
1642 	val = rd32(E1000_I210_TQAVCC(queue));
1643 
1644 	if (mode == QUEUE_MODE_STREAM_RESERVATION)
1645 		val |= E1000_TQAVCC_QUEUEMODE;
1646 	else
1647 		val &= ~E1000_TQAVCC_QUEUEMODE;
1648 
1649 	wr32(E1000_I210_TQAVCC(queue), val);
1650 }
1651 
1652 static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1653 {
1654 	int i;
1655 
1656 	for (i = 0; i < adapter->num_tx_queues; i++) {
1657 		if (adapter->tx_ring[i]->cbs_enable)
1658 			return true;
1659 	}
1660 
1661 	return false;
1662 }
1663 
1664 static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1665 {
1666 	int i;
1667 
1668 	for (i = 0; i < adapter->num_tx_queues; i++) {
1669 		if (adapter->tx_ring[i]->launchtime_enable)
1670 			return true;
1671 	}
1672 
1673 	return false;
1674 }
1675 
1676 /**
1677  *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
1678  *  @adapter: pointer to adapter struct
1679  *  @queue: queue number
1680  *
1681  *  Configure CBS and Launchtime for a given hardware queue.
1682  *  Parameters are retrieved from the correct Tx ring, so
1683  *  igb_save_cbs_params() and igb_save_txtime_params() should be used
1684  *  for setting those correctly prior to this function being called.
1685  **/
1686 static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1687 {
1688 	struct net_device *netdev = adapter->netdev;
1689 	struct e1000_hw *hw = &adapter->hw;
1690 	struct igb_ring *ring;
1691 	u32 tqavcc, tqavctrl;
1692 	u16 value;
1693 
1694 	WARN_ON(hw->mac.type != e1000_i210);
1695 	WARN_ON(queue < 0 || queue > 1);
1696 	ring = adapter->tx_ring[queue];
1697 
1698 	/* If any of the Qav features is enabled, configure queues as SR and
1699 	 * with HIGH PRIO. If neither is, then configure them as SP with
1700 	 * LOW PRIO.
1701 	 */
1702 	if (ring->cbs_enable || ring->launchtime_enable) {
1703 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1704 		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1705 	} else {
1706 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1707 		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1708 	}
1709 
1710 	/* If CBS is enabled, set DataTranARB and config its parameters. */
1711 	if (ring->cbs_enable || queue == 0) {
1712 		/* i210 does not allow queue 0 to be in Strict
1713 		 * Priority mode while Qav mode is enabled, so,
1714 		 * instead of disabling strict priority mode, we give
1715 		 * queue 0 the maximum of credits possible.
1716 		 *
1717 		 * See section 8.12.19 of the i210 datasheet, "Note:
1718 		 * Queue0 QueueMode must be set to 1b when
1719 		 * TransmitMode is set to Qav."
1720 		 */
1721 		if (queue == 0 && !ring->cbs_enable) {
1722 			/* max "linkspeed" idleslope in kbps */
1723 			ring->idleslope = 1000000;
1724 			ring->hicredit = ETH_FRAME_LEN;
1725 		}
1726 
1727 		/* Always set data transfer arbitration to credit-based
1728 		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
1729 		 * the queues.
1730 		 */
1731 		tqavctrl = rd32(E1000_I210_TQAVCTRL);
1732 		tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1733 		wr32(E1000_I210_TQAVCTRL, tqavctrl);
1734 
1735 		/* According to i210 datasheet section 7.2.7.7, we should set
1736 		 * the 'idleSlope' field from TQAVCC register following the
1737 		 * equation:
1738 		 *
1739 		 * For 100 Mbps link speed:
1740 		 *
1741 		 *     value = BW * 0x7735 * 0.2                          (E1)
1742 		 *
1743 		 * For 1000Mbps link speed:
1744 		 *
1745 		 *     value = BW * 0x7735 * 2                            (E2)
1746 		 *
1747 		 * E1 and E2 can be merged into one equation as shown below.
1748 		 * Note that 'link-speed' is in Mbps.
1749 		 *
1750 		 *     value = BW * 0x7735 * 2 * link-speed
1751 		 *                           --------------               (E3)
1752 		 *                                1000
1753 		 *
1754 		 * 'BW' is the percentage bandwidth out of full link speed
1755 		 * which can be found with the following equation. Note that
1756 		 * idleSlope here is the parameter from this function which
1757 		 * is in kbps.
1758 		 *
1759 		 *     BW =     idleSlope
1760 		 *          -----------------                             (E4)
1761 		 *          link-speed * 1000
1762 		 *
1763 		 * That said, we can come up with a generic equation to
1764 		 * calculate the value we should set in the TQAVCC register by
1765 		 * replacing 'BW' in E3 by E4. The resulting equation is:
1766 		 *
1767 		 * value =     idleSlope     * 0x7735 * 2 * link-speed
1768 		 *         -----------------            --------------    (E5)
1769 		 *         link-speed * 1000                 1000
1770 		 *
1771 		 * 'link-speed' is present in both sides of the fraction so
1772 		 * it is canceled out. The final equation is the following:
1773 		 *
1774 		 *     value = idleSlope * 61034
1775 		 *             -----------------                          (E6)
1776 		 *                  1000000
1777 		 *
1778 		 * NOTE: For i210, given the above, we can see that idleslope
1779 		 *       is represented in 16.38431 kbps units by the value at
1780 		 *       the TQAVCC register (1Gbps / 61034), which reduces
1781 		 *       the granularity for idleslope increments.
1782 		 *       For instance, if you want to configure a 2576kbps
1783 		 *       idleslope, the value to be written on the register
1784 		 *       would have to be 157.23. If rounded down, you end
1785 		 *       up with less bandwidth available than originally
1786 		 *       required (~2572 kbps). If rounded up, you end up
1787 		 *       with a higher bandwidth (~2589 kbps). The approach
1788 		 *       we take below is to always round up the
1789 		 *       calculated value, so the resulting bandwidth might
1790 		 *       be slightly higher for some configurations.
1791 		 */
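		/* Worked example (editorial illustration only): for the
		 * 2576 kbps idleslope discussed above, E6 gives
		 * 2576 * 61034 / 1000000 = 157223584 / 1000000 = 157.22,
		 * so the DIV_ROUND_UP_ULL() below yields 158. Mapping back,
		 * 158 * (1000000 / 61034) is roughly 2588.8 kbps of reserved
		 * bandwidth, matching the round-up behaviour described.
		 */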
1792 		value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1793 
1794 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
1795 		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1796 		tqavcc |= value;
1797 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
1798 
1799 		wr32(E1000_I210_TQAVHC(queue),
1800 		     0x80000000 + ring->hicredit * 0x7735);
1801 	} else {
1802 
1803 		/* Set idleSlope to zero. */
1804 		tqavcc = rd32(E1000_I210_TQAVCC(queue));
1805 		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1806 		wr32(E1000_I210_TQAVCC(queue), tqavcc);
1807 
1808 		/* Set hiCredit to zero. */
1809 		wr32(E1000_I210_TQAVHC(queue), 0);
1810 
1811 		/* If CBS is not enabled for any queues anymore, then return to
1812 		 * the default state of Data Transmission Arbitration on
1813 		 * TQAVCTRL.
1814 		 */
1815 		if (!is_any_cbs_enabled(adapter)) {
1816 			tqavctrl = rd32(E1000_I210_TQAVCTRL);
1817 			tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1818 			wr32(E1000_I210_TQAVCTRL, tqavctrl);
1819 		}
1820 	}
1821 
1822 	/* If LaunchTime is enabled, set DataTranTIM. */
1823 	if (ring->launchtime_enable) {
1824 		/* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
1825 		 * for any of the SR queues, and configure fetchtime delta.
1826 		 * XXX NOTE:
1827 		 *     - LaunchTime will be enabled for all SR queues.
1828 		 *     - A fixed offset can be added relative to the launch
1829 		 *       time of all packets if configured at reg LAUNCH_OS0.
1830 		 *       We are keeping it as 0 for now (default value).
1831 		 */
1832 		tqavctrl = rd32(E1000_I210_TQAVCTRL);
1833 		tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1834 		       E1000_TQAVCTRL_FETCHTIME_DELTA;
1835 		wr32(E1000_I210_TQAVCTRL, tqavctrl);
1836 	} else {
1837 		/* If Launchtime is not enabled for any SR queues anymore,
1838 		 * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
1839 		 * effectively disabling Launchtime.
1840 		 */
1841 		if (!is_any_txtime_enabled(adapter)) {
1842 			tqavctrl = rd32(E1000_I210_TQAVCTRL);
1843 			tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1844 			tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1845 			wr32(E1000_I210_TQAVCTRL, tqavctrl);
1846 		}
1847 	}
1848 
1849 	/* XXX: In i210 controller the sendSlope and loCredit parameters from
1850 	 * CBS are not configurable by software so we don't do any 'controller
1851 	 * configuration' in respect to these parameters.
1852 	 */
1853 
1854 	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1855 		   ring->cbs_enable ? "enabled" : "disabled",
1856 		   ring->launchtime_enable ? "enabled" : "disabled",
1857 		   queue,
1858 		   ring->idleslope, ring->sendslope,
1859 		   ring->hicredit, ring->locredit);
1860 }
1861 
1862 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1863 				  bool enable)
1864 {
1865 	struct igb_ring *ring;
1866 
1867 	if (queue < 0 || queue >= adapter->num_tx_queues)
1868 		return -EINVAL;
1869 
1870 	ring = adapter->tx_ring[queue];
1871 	ring->launchtime_enable = enable;
1872 
1873 	return 0;
1874 }
1875 
1876 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1877 			       bool enable, int idleslope, int sendslope,
1878 			       int hicredit, int locredit)
1879 {
1880 	struct igb_ring *ring;
1881 
1882 	if (queue < 0 || queue >= adapter->num_tx_queues)
1883 		return -EINVAL;
1884 
1885 	ring = adapter->tx_ring[queue];
1886 
1887 	ring->cbs_enable = enable;
1888 	ring->idleslope = idleslope;
1889 	ring->sendslope = sendslope;
1890 	ring->hicredit = hicredit;
1891 	ring->locredit = locredit;
1892 
1893 	return 0;
1894 }
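/* Editorial note, an assumption from the cbs qdisc offload interface rather
 * than anything stated in this file: idleslope and sendslope arrive in
 * kilobits per second and hicredit/locredit in bytes, as filled into
 * struct tc_cbs_qopt_offload by the cbs qdisc.
 */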
1895 
1896 /**
1897  *  igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
1898  *  @adapter: pointer to adapter struct
1899  *
1900  *  Configure TQAVCTRL register switching the controller's Tx mode
1901  *  if FQTSS mode is enabled or disabled. Additionally, will issue
1902  *  a call to igb_config_tx_modes() per queue so any previously saved
1903  *  Tx parameters are applied.
1904  **/
1905 static void igb_setup_tx_mode(struct igb_adapter *adapter)
1906 {
1907 	struct net_device *netdev = adapter->netdev;
1908 	struct e1000_hw *hw = &adapter->hw;
1909 	u32 val;
1910 
1911 	/* Only i210 controller supports changing the transmission mode. */
1912 	if (hw->mac.type != e1000_i210)
1913 		return;
1914 
1915 	if (is_fqtss_enabled(adapter)) {
1916 		int i, max_queue;
1917 
1918 		/* Configure TQAVCTRL register: set transmit mode to 'Qav',
1919 		 * set data fetch arbitration to 'round robin', set SP_WAIT_SR
1920 		 * so SP queues wait for SR ones.
1921 		 */
1922 		val = rd32(E1000_I210_TQAVCTRL);
1923 		val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1924 		val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1925 		wr32(E1000_I210_TQAVCTRL, val);
1926 
1927 		/* Configure Tx and Rx packet buffers sizes as described in
1928 		 * i210 datasheet section 7.2.7.7.
1929 		 */
1930 		val = rd32(E1000_TXPBS);
1931 		val &= ~I210_TXPBSIZE_MASK;
1932 		val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1933 			I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1934 		wr32(E1000_TXPBS, val);
1935 
1936 		val = rd32(E1000_RXPBS);
1937 		val &= ~I210_RXPBSIZE_MASK;
1938 		val |= I210_RXPBSIZE_PB_30KB;
1939 		wr32(E1000_RXPBS, val);
1940 
1941 		/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
1942 		 * register should not exceed the buffer size programmed in
1943 		 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
1944 		 * so according to the datasheet we should set MAX_TPKT_SIZE to
1945 		 * 4kB / 64.
1946 		 *
1947 		 * However, when we do so, no frames from queues 2 and 3 are
1948 		 * transmitted.  It seems MAX_TPKT_SIZE must not be greater than
1949 		 * or _equal_ to the buffer size programmed in TXPBS. For this
1950 		 * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64 (= 63).
1951 		 */
1952 		val = (4096 - 1) / 64;
1953 		wr32(E1000_I210_DTXMXPKTSZ, val);
1954 
1955 		/* Since FQTSS mode is enabled, apply any CBS configuration
1956 		 * previously set. If no previous CBS configuration has been
1957 		 * done, then the initial configuration is applied, which means
1958 		 * CBS is disabled.
1959 		 */
1960 		max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1961 			    adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1962 
1963 		for (i = 0; i < max_queue; i++) {
1964 			igb_config_tx_modes(adapter, i);
1965 		}
1966 	} else {
1967 		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1968 		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1969 		wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1970 
1971 		val = rd32(E1000_I210_TQAVCTRL);
1972 		/* According to Section 8.12.21, the other flags we've set when
1973 		 * enabling FQTSS are not relevant when disabling FQTSS so we
1974 		 * don't clear them here.
1975 		 */
1976 		val &= ~E1000_TQAVCTRL_XMIT_MODE;
1977 		wr32(E1000_I210_TQAVCTRL, val);
1978 	}
1979 
1980 	netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1981 		   "enabled" : "disabled");
1982 }
1983 
1984 /**
1985  *  igb_configure - configure the hardware for RX and TX
1986  *  @adapter: private board structure
1987  **/
1988 static void igb_configure(struct igb_adapter *adapter)
1989 {
1990 	struct net_device *netdev = adapter->netdev;
1991 	int i;
1992 
1993 	igb_get_hw_control(adapter);
1994 	igb_set_rx_mode(netdev);
1995 	igb_setup_tx_mode(adapter);
1996 
1997 	igb_restore_vlan(adapter);
1998 
1999 	igb_setup_tctl(adapter);
2000 	igb_setup_mrqc(adapter);
2001 	igb_setup_rctl(adapter);
2002 
2003 	igb_nfc_filter_restore(adapter);
2004 	igb_configure_tx(adapter);
2005 	igb_configure_rx(adapter);
2006 
2007 	igb_rx_fifo_flush_82575(&adapter->hw);
2008 
2009 	/* call igb_desc_unused which always leaves
2010 	 * at least 1 descriptor unused to make sure
2011 	 * next_to_use != next_to_clean
2012 	 */
2013 	for (i = 0; i < adapter->num_rx_queues; i++) {
2014 		struct igb_ring *ring = adapter->rx_ring[i];
2015 		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2016 	}
2017 }
2018 
2019 /**
2020  *  igb_power_up_link - Power up the phy/serdes link
2021  *  @adapter: address of board private structure
2022  **/
2023 void igb_power_up_link(struct igb_adapter *adapter)
2024 {
2025 	igb_reset_phy(&adapter->hw);
2026 
2027 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
2028 		igb_power_up_phy_copper(&adapter->hw);
2029 	else
2030 		igb_power_up_serdes_link_82575(&adapter->hw);
2031 
2032 	igb_setup_link(&adapter->hw);
2033 }
2034 
2035 /**
2036  *  igb_power_down_link - Power down the phy/serdes link
2037  *  @adapter: address of board private structure
2038  */
2039 static void igb_power_down_link(struct igb_adapter *adapter)
2040 {
2041 	if (adapter->hw.phy.media_type == e1000_media_type_copper)
2042 		igb_power_down_phy_copper_82575(&adapter->hw);
2043 	else
2044 		igb_shutdown_serdes_link_82575(&adapter->hw);
2045 }
2046 
2047 /**
2048  * igb_check_swap_media - Detect and switch function for Media Autosense
2049  * @adapter: address of the board private structure
2050  **/
2051 static void igb_check_swap_media(struct igb_adapter *adapter)
2052 {
2053 	struct e1000_hw *hw = &adapter->hw;
2054 	u32 ctrl_ext, connsw;
2055 	bool swap_now = false;
2056 
2057 	ctrl_ext = rd32(E1000_CTRL_EXT);
2058 	connsw = rd32(E1000_CONNSW);
2059 
2060 	/* need to live swap if current media is copper and we have fiber/serdes
2061 	 * to go to.
2062 	 */
2063 
2064 	if ((hw->phy.media_type == e1000_media_type_copper) &&
2065 	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2066 		swap_now = true;
2067 	} else if ((hw->phy.media_type != e1000_media_type_copper) &&
2068 		   !(connsw & E1000_CONNSW_SERDESD)) {
2069 		/* copper signal takes time to appear */
2070 		if (adapter->copper_tries < 4) {
2071 			adapter->copper_tries++;
2072 			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2073 			wr32(E1000_CONNSW, connsw);
2074 			return;
2075 		} else {
2076 			adapter->copper_tries = 0;
2077 			if ((connsw & E1000_CONNSW_PHYSD) &&
2078 			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
2079 				swap_now = true;
2080 				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2081 				wr32(E1000_CONNSW, connsw);
2082 			}
2083 		}
2084 	}
2085 
2086 	if (!swap_now)
2087 		return;
2088 
2089 	switch (hw->phy.media_type) {
2090 	case e1000_media_type_copper:
2091 		netdev_info(adapter->netdev,
2092 			"MAS: changing media to fiber/serdes\n");
2093 		ctrl_ext |=
2094 			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2095 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
2096 		adapter->copper_tries = 0;
2097 		break;
2098 	case e1000_media_type_internal_serdes:
2099 	case e1000_media_type_fiber:
2100 		netdev_info(adapter->netdev,
2101 			"MAS: changing media to copper\n");
2102 		ctrl_ext &=
2103 			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2104 		adapter->flags |= IGB_FLAG_MEDIA_RESET;
2105 		break;
2106 	default:
2107 		/* shouldn't get here during regular operation */
2108 		netdev_err(adapter->netdev,
2109 			"MAS: Invalid media type found, returning\n");
2110 		break;
2111 	}
2112 	wr32(E1000_CTRL_EXT, ctrl_ext);
2113 }
2114 
2115 /**
2116  *  igb_up - Open the interface and prepare it to handle traffic
2117  *  @adapter: board private structure
2118  **/
2119 int igb_up(struct igb_adapter *adapter)
2120 {
2121 	struct e1000_hw *hw = &adapter->hw;
2122 	int i;
2123 
2124 	/* hardware has been reset, we need to reload some things */
2125 	igb_configure(adapter);
2126 
2127 	clear_bit(__IGB_DOWN, &adapter->state);
2128 
2129 	for (i = 0; i < adapter->num_q_vectors; i++)
2130 		napi_enable(&(adapter->q_vector[i]->napi));
2131 
2132 	if (adapter->flags & IGB_FLAG_HAS_MSIX)
2133 		igb_configure_msix(adapter);
2134 	else
2135 		igb_assign_vector(adapter->q_vector[0], 0);
2136 
2137 	/* Clear any pending interrupts. */
2138 	rd32(E1000_TSICR);
2139 	rd32(E1000_ICR);
2140 	igb_irq_enable(adapter);
2141 
2142 	/* notify VFs that reset has been completed */
2143 	if (adapter->vfs_allocated_count) {
2144 		u32 reg_data = rd32(E1000_CTRL_EXT);
2145 
2146 		reg_data |= E1000_CTRL_EXT_PFRSTD;
2147 		wr32(E1000_CTRL_EXT, reg_data);
2148 	}
2149 
2150 	netif_tx_start_all_queues(adapter->netdev);
2151 
2152 	/* start the watchdog. */
2153 	hw->mac.get_link_status = 1;
2154 	schedule_work(&adapter->watchdog_task);
2155 
2156 	if ((adapter->flags & IGB_FLAG_EEE) &&
2157 	    (!hw->dev_spec._82575.eee_disable))
2158 		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2159 
2160 	return 0;
2161 }
2162 
2163 void igb_down(struct igb_adapter *adapter)
2164 {
2165 	struct net_device *netdev = adapter->netdev;
2166 	struct e1000_hw *hw = &adapter->hw;
2167 	u32 tctl, rctl;
2168 	int i;
2169 
2170 	/* signal that we're down so the interrupt handler does not
2171 	 * reschedule our watchdog timer
2172 	 */
2173 	set_bit(__IGB_DOWN, &adapter->state);
2174 
2175 	/* disable receives in the hardware */
2176 	rctl = rd32(E1000_RCTL);
2177 	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2178 	/* flush and sleep below */
2179 
2180 	igb_nfc_filter_exit(adapter);
2181 
2182 	netif_carrier_off(netdev);
2183 	netif_tx_stop_all_queues(netdev);
2184 
2185 	/* disable transmits in the hardware */
2186 	tctl = rd32(E1000_TCTL);
2187 	tctl &= ~E1000_TCTL_EN;
2188 	wr32(E1000_TCTL, tctl);
2189 	/* flush both disables and wait for them to finish */
2190 	wrfl();
2191 	usleep_range(10000, 11000);
2192 
2193 	igb_irq_disable(adapter);
2194 
2195 	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2196 
2197 	for (i = 0; i < adapter->num_q_vectors; i++) {
2198 		if (adapter->q_vector[i]) {
2199 			napi_synchronize(&adapter->q_vector[i]->napi);
2200 			napi_disable(&adapter->q_vector[i]->napi);
2201 		}
2202 	}
2203 
2204 	del_timer_sync(&adapter->watchdog_timer);
2205 	del_timer_sync(&adapter->phy_info_timer);
2206 
2207 	/* record the stats before reset */
2208 	spin_lock(&adapter->stats64_lock);
2209 	igb_update_stats(adapter);
2210 	spin_unlock(&adapter->stats64_lock);
2211 
2212 	adapter->link_speed = 0;
2213 	adapter->link_duplex = 0;
2214 
2215 	if (!pci_channel_offline(adapter->pdev))
2216 		igb_reset(adapter);
2217 
2218 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
2219 	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2220 
2221 	igb_clean_all_tx_rings(adapter);
2222 	igb_clean_all_rx_rings(adapter);
2223 #ifdef CONFIG_IGB_DCA
2224 
2225 	/* since we reset the hardware DCA settings were cleared */
2226 	igb_setup_dca(adapter);
2227 #endif
2228 }
2229 
2230 void igb_reinit_locked(struct igb_adapter *adapter)
2231 {
2232 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2233 		usleep_range(1000, 2000);
2234 	igb_down(adapter);
2235 	igb_up(adapter);
2236 	clear_bit(__IGB_RESETTING, &adapter->state);
2237 }
2238 
2239 /**
2240  *  igb_enable_mas - Media Autosense re-enable after swap
2241  *  @adapter: adapter struct
2242  **/
2243 static void igb_enable_mas(struct igb_adapter *adapter)
2244 {
2245 	struct e1000_hw *hw = &adapter->hw;
2246 	u32 connsw = rd32(E1000_CONNSW);
2247 
2248 	/* configure for SerDes media detect */
2249 	if ((hw->phy.media_type == e1000_media_type_copper) &&
2250 	    (!(connsw & E1000_CONNSW_SERDESD))) {
2251 		connsw |= E1000_CONNSW_ENRGSRC;
2252 		connsw |= E1000_CONNSW_AUTOSENSE_EN;
2253 		wr32(E1000_CONNSW, connsw);
2254 		wrfl();
2255 	}
2256 }
2257 
2258 void igb_reset(struct igb_adapter *adapter)
2259 {
2260 	struct pci_dev *pdev = adapter->pdev;
2261 	struct e1000_hw *hw = &adapter->hw;
2262 	struct e1000_mac_info *mac = &hw->mac;
2263 	struct e1000_fc_info *fc = &hw->fc;
2264 	u32 pba, hwm;
2265 
2266 	/* Repartition Pba for greater than 9k mtu
2267 	 * To take effect CTRL.RST is required.
2268 	 */
2269 	switch (mac->type) {
2270 	case e1000_i350:
2271 	case e1000_i354:
2272 	case e1000_82580:
2273 		pba = rd32(E1000_RXPBS);
2274 		pba = igb_rxpbs_adjust_82580(pba);
2275 		break;
2276 	case e1000_82576:
2277 		pba = rd32(E1000_RXPBS);
2278 		pba &= E1000_RXPBS_SIZE_MASK_82576;
2279 		break;
2280 	case e1000_82575:
2281 	case e1000_i210:
2282 	case e1000_i211:
2283 	default:
2284 		pba = E1000_PBA_34K;
2285 		break;
2286 	}
2287 
2288 	if (mac->type == e1000_82575) {
2289 		u32 min_rx_space, min_tx_space, needed_tx_space;
2290 
2291 		/* write Rx PBA so that hardware can report correct Tx PBA */
2292 		wr32(E1000_PBA, pba);
2293 
2294 		/* To maintain wire speed transmits, the Tx FIFO should be
2295 		 * large enough to accommodate two full transmit packets,
2296 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
2297 		 * the Rx FIFO should be large enough to accommodate at least
2298 		 * one full receive packet and is similarly rounded up and
2299 		 * expressed in KB.
2300 		 */
2301 		min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2302 
2303 		/* The Tx FIFO also stores 16 bytes of information about the Tx
2304 		 * but don't include Ethernet FCS because hardware appends it.
2305 		 * We only need to round up to the nearest 512 byte block
2306 		 * count since the value we care about is 2 frames, not 1.
2307 		 */
2308 		min_tx_space = adapter->max_frame_size;
2309 		min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2310 		min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2311 
2312 		/* upper 16 bits hold the Tx packet buffer allocation size in KB */
2313 		needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2314 
2315 		/* If current Tx allocation is less than the min Tx FIFO size,
2316 		 * and the min Tx FIFO size is less than the current Rx FIFO
2317 		 * allocation, take space away from current Rx allocation.
2318 		 */
2319 		if (needed_tx_space < pba) {
2320 			pba -= needed_tx_space;
2321 
2322 			/* if short on Rx space, Rx wins and must trump Tx
2323 			 * adjustment
2324 			 */
2325 			if (pba < min_rx_space)
2326 				pba = min_rx_space;
2327 		}
2328 
2329 		/* adjust PBA for jumbo frames */
2330 		wr32(E1000_PBA, pba);
2331 	}
2332 
2333 	/* flow control settings
2334 	 * The high water mark must be low enough to fit one full frame
2335 	 * after transmitting the pause frame.  As such we must have enough
2336 	 * space to allow for us to complete our current transmit and then
2337 	 * receive the frame that is in progress from the link partner.
2338 	 * Set it to:
2339 	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
2340 	 */
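	/* Worked example (editorial illustration; the exact constants live in
	 * igb.h): with a 34KB Rx PBA, pba << 10 = 34816 bytes. Assuming
	 * max_frame_size = 1522 and MAX_JUMBO_FRAME_SIZE = 9728, this gives
	 * hwm = 34816 - (1522 + 9728) = 23566, which the mask below rounds
	 * down to 23552 to honour the 16-byte register granularity.
	 */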
2341 	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2342 
2343 	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
2344 	fc->low_water = fc->high_water - 16;
2345 	fc->pause_time = 0xFFFF;
2346 	fc->send_xon = 1;
2347 	fc->current_mode = fc->requested_mode;
2348 
2349 	/* quiesce all VFs before the hardware reset */
2350 	if (adapter->vfs_allocated_count) {
2351 		int i;
2352 
2353 		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2354 			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2355 
2356 		/* ping all the active vfs to let them know we are going down */
2357 		igb_ping_all_vfs(adapter);
2358 
2359 		/* disable transmits and receives */
2360 		wr32(E1000_VFRE, 0);
2361 		wr32(E1000_VFTE, 0);
2362 	}
2363 
2364 	/* Allow time for pending master requests to run */
2365 	hw->mac.ops.reset_hw(hw);
2366 	wr32(E1000_WUC, 0);
2367 
2368 	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2369 		/* need to resetup here after media swap */
2370 		adapter->ei.get_invariants(hw);
2371 		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2372 	}
2373 	if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2374 	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2375 		igb_enable_mas(adapter);
2376 	}
2377 	if (hw->mac.ops.init_hw(hw))
2378 		dev_err(&pdev->dev, "Hardware Error\n");
2379 
2380 	/* RAR registers were cleared during init_hw, clear mac table */
2381 	igb_flush_mac_table(adapter);
2382 	__dev_uc_unsync(adapter->netdev, NULL);
2383 
2384 	/* Recover default RAR entry */
2385 	igb_set_default_mac_filter(adapter);
2386 
2387 	/* Flow control settings reset on hardware reset, so guarantee flow
2388 	 * control is off when forcing speed.
2389 	 */
2390 	if (!hw->mac.autoneg)
2391 		igb_force_mac_fc(hw);
2392 
2393 	igb_init_dmac(adapter, pba);
2394 #ifdef CONFIG_IGB_HWMON
2395 	/* Re-initialize the thermal sensor on i350 devices. */
2396 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
2397 		if (mac->type == e1000_i350 && hw->bus.func == 0) {
2398 			/* If present, re-initialize the external thermal sensor
2399 			 * interface.
2400 			 */
2401 			if (adapter->ets)
2402 				mac->ops.init_thermal_sensor_thresh(hw);
2403 		}
2404 	}
2405 #endif
2406 	/* Re-establish EEE setting */
2407 	if (hw->phy.media_type == e1000_media_type_copper) {
2408 		switch (mac->type) {
2409 		case e1000_i350:
2410 		case e1000_i210:
2411 		case e1000_i211:
2412 			igb_set_eee_i350(hw, true, true);
2413 			break;
2414 		case e1000_i354:
2415 			igb_set_eee_i354(hw, true, true);
2416 			break;
2417 		default:
2418 			break;
2419 		}
2420 	}
2421 	if (!netif_running(adapter->netdev))
2422 		igb_power_down_link(adapter);
2423 
2424 	igb_update_mng_vlan(adapter);
2425 
2426 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2427 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2428 
2429 	/* Re-enable PTP, where applicable. */
2430 	if (adapter->ptp_flags & IGB_PTP_ENABLED)
2431 		igb_ptp_reset(adapter);
2432 
2433 	igb_get_phy_info(hw);
2434 }
2435 
2436 static netdev_features_t igb_fix_features(struct net_device *netdev,
2437 	netdev_features_t features)
2438 {
2439 	/* Since there is no support for separate Rx/Tx vlan accel
2440 	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
2441 	 */
2442 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2443 		features |= NETIF_F_HW_VLAN_CTAG_TX;
2444 	else
2445 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2446 
2447 	return features;
2448 }
2449 
2450 static int igb_set_features(struct net_device *netdev,
2451 	netdev_features_t features)
2452 {
2453 	netdev_features_t changed = netdev->features ^ features;
2454 	struct igb_adapter *adapter = netdev_priv(netdev);
2455 
2456 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2457 		igb_vlan_mode(netdev, features);
2458 
2459 	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2460 		return 0;
2461 
2462 	if (!(features & NETIF_F_NTUPLE)) {
2463 		struct hlist_node *node2;
2464 		struct igb_nfc_filter *rule;
2465 
2466 		spin_lock(&adapter->nfc_lock);
2467 		hlist_for_each_entry_safe(rule, node2,
2468 					  &adapter->nfc_filter_list, nfc_node) {
2469 			igb_erase_filter(adapter, rule);
2470 			hlist_del(&rule->nfc_node);
2471 			kfree(rule);
2472 		}
2473 		spin_unlock(&adapter->nfc_lock);
2474 		adapter->nfc_filter_count = 0;
2475 	}
2476 
2477 	netdev->features = features;
2478 
2479 	if (netif_running(netdev))
2480 		igb_reinit_locked(adapter);
2481 	else
2482 		igb_reset(adapter);
2483 
2484 	return 1;
2485 }
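/* Editorial note: returning 1 rather than 0 tells __netdev_update_features()
 * that the driver has already written netdev->features itself, which is done
 * a few lines above.
 */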
2486 
2487 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2488 			   struct net_device *dev,
2489 			   const unsigned char *addr, u16 vid,
2490 			   u16 flags,
2491 			   struct netlink_ext_ack *extack)
2492 {
2493 	/* guarantee we can provide a unique filter for the unicast address */
2494 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2495 		struct igb_adapter *adapter = netdev_priv(dev);
2496 		int vfn = adapter->vfs_allocated_count;
2497 
2498 		if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2499 			return -ENOMEM;
2500 	}
2501 
2502 	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2503 }
2504 
2505 #define IGB_MAX_MAC_HDR_LEN	127
2506 #define IGB_MAX_NETWORK_HDR_LEN	511
2507 
2508 static netdev_features_t
2509 igb_features_check(struct sk_buff *skb, struct net_device *dev,
2510 		   netdev_features_t features)
2511 {
2512 	unsigned int network_hdr_len, mac_hdr_len;
2513 
2514 	/* Make certain the headers can be described by a context descriptor */
2515 	mac_hdr_len = skb_network_header(skb) - skb->data;
2516 	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2517 		return features & ~(NETIF_F_HW_CSUM |
2518 				    NETIF_F_SCTP_CRC |
2519 				    NETIF_F_GSO_UDP_L4 |
2520 				    NETIF_F_HW_VLAN_CTAG_TX |
2521 				    NETIF_F_TSO |
2522 				    NETIF_F_TSO6);
2523 
2524 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2525 	if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2526 		return features & ~(NETIF_F_HW_CSUM |
2527 				    NETIF_F_SCTP_CRC |
2528 				    NETIF_F_GSO_UDP_L4 |
2529 				    NETIF_F_TSO |
2530 				    NETIF_F_TSO6);
2531 
2532 	/* We can only support IPV4 TSO in tunnels if we can mangle the
2533 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2534 	 */
2535 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2536 		features &= ~NETIF_F_TSO;
2537 
2538 	return features;
2539 }
2540 
2541 static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2542 {
2543 	if (!is_fqtss_enabled(adapter)) {
2544 		enable_fqtss(adapter, true);
2545 		return;
2546 	}
2547 
2548 	igb_config_tx_modes(adapter, queue);
2549 
2550 	if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2551 		enable_fqtss(adapter, false);
2552 }
2553 
2554 static int igb_offload_cbs(struct igb_adapter *adapter,
2555 			   struct tc_cbs_qopt_offload *qopt)
2556 {
2557 	struct e1000_hw *hw = &adapter->hw;
2558 	int err;
2559 
2560 	/* CBS offloading is only supported by i210 controller. */
2561 	if (hw->mac.type != e1000_i210)
2562 		return -EOPNOTSUPP;
2563 
2564 	/* CBS offloading is only supported by queue 0 and queue 1. */
2565 	if (qopt->queue < 0 || qopt->queue > 1)
2566 		return -EINVAL;
2567 
2568 	err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2569 				  qopt->idleslope, qopt->sendslope,
2570 				  qopt->hicredit, qopt->locredit);
2571 	if (err)
2572 		return err;
2573 
2574 	igb_offload_apply(adapter, qopt->queue);
2575 
2576 	return 0;
2577 }
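/* Illustrative usage from user space (assumes the iproute2 cbs qdisc; the
 * handles and credit values are examples only):
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs idleslope 20000 \
 *       sendslope -980000 hicredit 30 locredit -1470 offload 1
 *
 * With 'offload 1' the request reaches this function through
 * TC_SETUP_QDISC_CBS in igb_setup_tc().
 */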
2578 
2579 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2580 #define VLAN_PRIO_FULL_MASK (0x07)
2581 
2582 static int igb_parse_cls_flower(struct igb_adapter *adapter,
2583 				struct flow_cls_offload *f,
2584 				int traffic_class,
2585 				struct igb_nfc_filter *input)
2586 {
2587 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2588 	struct flow_dissector *dissector = rule->match.dissector;
2589 	struct netlink_ext_ack *extack = f->common.extack;
2590 
2591 	if (dissector->used_keys &
2592 	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2593 	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2594 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2595 	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2596 		NL_SET_ERR_MSG_MOD(extack,
2597 				   "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2598 		return -EOPNOTSUPP;
2599 	}
2600 
2601 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2602 		struct flow_match_eth_addrs match;
2603 
2604 		flow_rule_match_eth_addrs(rule, &match);
2605 		if (!is_zero_ether_addr(match.mask->dst)) {
2606 			if (!is_broadcast_ether_addr(match.mask->dst)) {
2607 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2608 				return -EINVAL;
2609 			}
2610 
2611 			input->filter.match_flags |=
2612 				IGB_FILTER_FLAG_DST_MAC_ADDR;
2613 			ether_addr_copy(input->filter.dst_addr, match.key->dst);
2614 		}
2615 
2616 		if (!is_zero_ether_addr(match.mask->src)) {
2617 			if (!is_broadcast_ether_addr(match.mask->src)) {
2618 				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2619 				return -EINVAL;
2620 			}
2621 
2622 			input->filter.match_flags |=
2623 				IGB_FILTER_FLAG_SRC_MAC_ADDR;
2624 			ether_addr_copy(input->filter.src_addr, match.key->src);
2625 		}
2626 	}
2627 
2628 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2629 		struct flow_match_basic match;
2630 
2631 		flow_rule_match_basic(rule, &match);
2632 		if (match.mask->n_proto) {
2633 			if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2634 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2635 				return -EINVAL;
2636 			}
2637 
2638 			input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2639 			input->filter.etype = match.key->n_proto;
2640 		}
2641 	}
2642 
2643 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2644 		struct flow_match_vlan match;
2645 
2646 		flow_rule_match_vlan(rule, &match);
2647 		if (match.mask->vlan_priority) {
2648 			if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2649 				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2650 				return -EINVAL;
2651 			}
2652 
2653 			input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2654 			input->filter.vlan_tci =
2655 				(__force __be16)match.key->vlan_priority;
2656 		}
2657 	}
2658 
2659 	input->action = traffic_class;
2660 	input->cookie = f->cookie;
2661 
2662 	return 0;
2663 }
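/* Illustrative usage (assumes the iproute2 flower classifier; the device
 * name, priority and traffic class are examples only):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol 802.1Q flower \
 *       vlan_prio 3 hw_tc 2 skip_sw
 *
 * Only BASIC, CONTROL, ETH_ADDRS and VLAN keys with full masks pass the
 * checks above; anything else fails with -EOPNOTSUPP or -EINVAL.
 */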
2664 
2665 static int igb_configure_clsflower(struct igb_adapter *adapter,
2666 				   struct flow_cls_offload *cls_flower)
2667 {
2668 	struct netlink_ext_ack *extack = cls_flower->common.extack;
2669 	struct igb_nfc_filter *filter, *f;
2670 	int err, tc;
2671 
2672 	tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2673 	if (tc < 0) {
2674 		NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2675 		return -EINVAL;
2676 	}
2677 
2678 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2679 	if (!filter)
2680 		return -ENOMEM;
2681 
2682 	err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2683 	if (err < 0)
2684 		goto err_parse;
2685 
2686 	spin_lock(&adapter->nfc_lock);
2687 
2688 	hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2689 		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2690 			err = -EEXIST;
2691 			NL_SET_ERR_MSG_MOD(extack,
2692 					   "This filter is already set in ethtool");
2693 			goto err_locked;
2694 		}
2695 	}
2696 
2697 	hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2698 		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2699 			err = -EEXIST;
2700 			NL_SET_ERR_MSG_MOD(extack,
2701 					   "This filter is already set in cls_flower");
2702 			goto err_locked;
2703 		}
2704 	}
2705 
2706 	err = igb_add_filter(adapter, filter);
2707 	if (err < 0) {
2708 		NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2709 		goto err_locked;
2710 	}
2711 
2712 	hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2713 
2714 	spin_unlock(&adapter->nfc_lock);
2715 
2716 	return 0;
2717 
2718 err_locked:
2719 	spin_unlock(&adapter->nfc_lock);
2720 
2721 err_parse:
2722 	kfree(filter);
2723 
2724 	return err;
2725 }
2726 
2727 static int igb_delete_clsflower(struct igb_adapter *adapter,
2728 				struct flow_cls_offload *cls_flower)
2729 {
2730 	struct igb_nfc_filter *filter;
2731 	int err;
2732 
2733 	spin_lock(&adapter->nfc_lock);
2734 
2735 	hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2736 		if (filter->cookie == cls_flower->cookie)
2737 			break;
2738 
2739 	if (!filter) {
2740 		err = -ENOENT;
2741 		goto out;
2742 	}
2743 
2744 	err = igb_erase_filter(adapter, filter);
2745 	if (err < 0)
2746 		goto out;
2747 
2748 	hlist_del(&filter->nfc_node);
2749 	kfree(filter);
2750 
2751 out:
2752 	spin_unlock(&adapter->nfc_lock);
2753 
2754 	return err;
2755 }
2756 
2757 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2758 				   struct flow_cls_offload *cls_flower)
2759 {
2760 	switch (cls_flower->command) {
2761 	case FLOW_CLS_REPLACE:
2762 		return igb_configure_clsflower(adapter, cls_flower);
2763 	case FLOW_CLS_DESTROY:
2764 		return igb_delete_clsflower(adapter, cls_flower);
2765 	case FLOW_CLS_STATS:
2766 		return -EOPNOTSUPP;
2767 	default:
2768 		return -EOPNOTSUPP;
2769 	}
2770 }
2771 
2772 static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2773 				 void *cb_priv)
2774 {
2775 	struct igb_adapter *adapter = cb_priv;
2776 
2777 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2778 		return -EOPNOTSUPP;
2779 
2780 	switch (type) {
2781 	case TC_SETUP_CLSFLOWER:
2782 		return igb_setup_tc_cls_flower(adapter, type_data);
2783 
2784 	default:
2785 		return -EOPNOTSUPP;
2786 	}
2787 }
2788 
2789 static int igb_offload_txtime(struct igb_adapter *adapter,
2790 			      struct tc_etf_qopt_offload *qopt)
2791 {
2792 	struct e1000_hw *hw = &adapter->hw;
2793 	int err;
2794 
2795 	/* Launchtime offloading is only supported by i210 controller. */
2796 	if (hw->mac.type != e1000_i210)
2797 		return -EOPNOTSUPP;
2798 
2799 	/* Launchtime offloading is only supported by queues 0 and 1. */
2800 	if (qopt->queue < 0 || qopt->queue > 1)
2801 		return -EINVAL;
2802 
2803 	err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2804 	if (err)
2805 		return err;
2806 
2807 	igb_offload_apply(adapter, qopt->queue);
2808 
2809 	return 0;
2810 }
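/* Illustrative usage (assumes the iproute2 etf qdisc; the clockid and delta
 * are examples only):
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf clockid CLOCK_TAI \
 *       delta 300000 offload
 *
 * The 'offload' flag routes the request to this function through
 * TC_SETUP_QDISC_ETF in igb_setup_tc().
 */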
2811 
2812 static LIST_HEAD(igb_block_cb_list);
2813 
2814 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2815 			void *type_data)
2816 {
2817 	struct igb_adapter *adapter = netdev_priv(dev);
2818 
2819 	switch (type) {
2820 	case TC_SETUP_QDISC_CBS:
2821 		return igb_offload_cbs(adapter, type_data);
2822 	case TC_SETUP_BLOCK:
2823 		return flow_block_cb_setup_simple(type_data,
2824 						  &igb_block_cb_list,
2825 						  igb_setup_tc_block_cb,
2826 						  adapter, adapter, true);
2827 
2828 	case TC_SETUP_QDISC_ETF:
2829 		return igb_offload_txtime(adapter, type_data);
2830 
2831 	default:
2832 		return -EOPNOTSUPP;
2833 	}
2834 }
2835 
2836 static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
2837 {
2838 	int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
2839 	struct igb_adapter *adapter = netdev_priv(dev);
2840 	struct bpf_prog *prog = bpf->prog, *old_prog;
2841 	bool running = netif_running(dev);
2842 	bool need_reset;
2843 
2844 	/* verify igb ring attributes are sufficient for XDP */
2845 	for (i = 0; i < adapter->num_rx_queues; i++) {
2846 		struct igb_ring *ring = adapter->rx_ring[i];
2847 
2848 		if (frame_size > igb_rx_bufsz(ring)) {
2849 			NL_SET_ERR_MSG_MOD(bpf->extack,
2850 					   "The RX buffer size is too small for the frame size");
2851 			netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
2852 				    igb_rx_bufsz(ring), frame_size);
2853 			return -EINVAL;
2854 		}
2855 	}
2856 
2857 	old_prog = xchg(&adapter->xdp_prog, prog);
2858 	need_reset = (!!prog != !!old_prog);
2859 
2860 	/* device is up and bpf is added/removed, must setup the RX queues */
2861 	if (need_reset && running) {
2862 		igb_close(dev);
2863 	} else {
2864 		for (i = 0; i < adapter->num_rx_queues; i++)
2865 			(void)xchg(&adapter->rx_ring[i]->xdp_prog,
2866 			    adapter->xdp_prog);
2867 	}
2868 
2869 	if (old_prog)
2870 		bpf_prog_put(old_prog);
2871 
2872 	/* bpf is just replaced, RXQ and MTU are already setup */
2873 	if (!need_reset)
2874 		return 0;
2875 
2876 	if (running)
2877 		igb_open(dev);
2878 
2879 	return 0;
2880 }
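/* Editorial note (illustrative; IGB_ETH_PKT_HDR_PAD and igb_rx_bufsz() are
 * defined in igb.h): with a standard 1500-byte MTU the check above sees
 * about 1500 + 26 = 1526 bytes, which fits the default 2K Rx buffer, so
 * attaching an XDP program only fails for jumbo-sized MTUs.
 */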
2881 
2882 static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2883 {
2884 	switch (xdp->command) {
2885 	case XDP_SETUP_PROG:
2886 		return igb_xdp_setup(dev, xdp);
2887 	default:
2888 		return -EINVAL;
2889 	}
2890 }
2891 
2892 static void igb_xdp_ring_update_tail(struct igb_ring *ring)
2893 {
2894 	/* Force memory writes to complete before letting h/w know there
2895 	 * are new descriptors to fetch.
2896 	 */
2897 	wmb();
2898 	writel(ring->next_to_use, ring->tail);
2899 }
2900 
2901 static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
2902 {
2903 	unsigned int r_idx = smp_processor_id();
2904 
2905 	if (r_idx >= adapter->num_tx_queues)
2906 		r_idx = r_idx % adapter->num_tx_queues;
2907 
2908 	return adapter->tx_ring[r_idx];
2909 }
2910 
2911 static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
2912 {
2913 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2914 	int cpu = smp_processor_id();
2915 	struct igb_ring *tx_ring;
2916 	struct netdev_queue *nq;
2917 	u32 ret;
2918 
2919 	if (unlikely(!xdpf))
2920 		return IGB_XDP_CONSUMED;
2921 
2922 	/* During program transitions it's possible adapter->xdp_prog is assigned
2923 	 * but ring has not been configured yet. In this case simply abort xmit.
2924 	 */
2925 	tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2926 	if (unlikely(!tx_ring))
2927 		return IGB_XDP_CONSUMED;
2928 
2929 	nq = txring_txq(tx_ring);
2930 	__netif_tx_lock(nq, cpu);
2931 	/* Avoid transmit queue timeout since we share it with the slow path */
2932 	nq->trans_start = jiffies;
2933 	ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2934 	__netif_tx_unlock(nq);
2935 
2936 	return ret;
2937 }
2938 
2939 static int igb_xdp_xmit(struct net_device *dev, int n,
2940 			struct xdp_frame **frames, u32 flags)
2941 {
2942 	struct igb_adapter *adapter = netdev_priv(dev);
2943 	int cpu = smp_processor_id();
2944 	struct igb_ring *tx_ring;
2945 	struct netdev_queue *nq;
2946 	int drops = 0;
2947 	int i;
2948 
2949 	if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
2950 		return -ENETDOWN;
2951 
2952 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2953 		return -EINVAL;
2954 
2955 	/* During program transitions it's possible adapter->xdp_prog is assigned
2956 	 * but ring has not been configured yet. In this case simply abort xmit.
2957 	 */
2958 	tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2959 	if (unlikely(!tx_ring))
2960 		return -ENXIO;
2961 
2962 	nq = txring_txq(tx_ring);
2963 	__netif_tx_lock(nq, cpu);
2964 
2965 	/* Avoid transmit queue timeout since we share it with the slow path */
2966 	nq->trans_start = jiffies;
2967 
2968 	for (i = 0; i < n; i++) {
2969 		struct xdp_frame *xdpf = frames[i];
2970 		int err;
2971 
2972 		err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2973 		if (err != IGB_XDP_TX) {
2974 			xdp_return_frame_rx_napi(xdpf);
2975 			drops++;
2976 		}
2977 	}
2978 
2979 	__netif_tx_unlock(nq);
2980 
2981 	if (unlikely(flags & XDP_XMIT_FLUSH))
2982 		igb_xdp_ring_update_tail(tx_ring);
2983 
2984 	return n - drops;
2985 }
2986 
2987 static const struct net_device_ops igb_netdev_ops = {
2988 	.ndo_open		= igb_open,
2989 	.ndo_stop		= igb_close,
2990 	.ndo_start_xmit		= igb_xmit_frame,
2991 	.ndo_get_stats64	= igb_get_stats64,
2992 	.ndo_set_rx_mode	= igb_set_rx_mode,
2993 	.ndo_set_mac_address	= igb_set_mac,
2994 	.ndo_change_mtu		= igb_change_mtu,
2995 	.ndo_do_ioctl		= igb_ioctl,
2996 	.ndo_tx_timeout		= igb_tx_timeout,
2997 	.ndo_validate_addr	= eth_validate_addr,
2998 	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
2999 	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
3000 	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
3001 	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
3002 	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
3003 	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
3004 	.ndo_set_vf_trust	= igb_ndo_set_vf_trust,
3005 	.ndo_get_vf_config	= igb_ndo_get_vf_config,
3006 	.ndo_fix_features	= igb_fix_features,
3007 	.ndo_set_features	= igb_set_features,
3008 	.ndo_fdb_add		= igb_ndo_fdb_add,
3009 	.ndo_features_check	= igb_features_check,
3010 	.ndo_setup_tc		= igb_setup_tc,
3011 	.ndo_bpf		= igb_xdp,
3012 	.ndo_xdp_xmit		= igb_xdp_xmit,
3013 };
3014 
3015 /**
3016  * igb_set_fw_version - Configure version string for ethtool
3017  * @adapter: adapter struct
3018  **/
3019 void igb_set_fw_version(struct igb_adapter *adapter)
3020 {
3021 	struct e1000_hw *hw = &adapter->hw;
3022 	struct e1000_fw_version fw;
3023 
3024 	igb_get_fw_version(hw, &fw);
3025 
3026 	switch (hw->mac.type) {
3027 	case e1000_i210:
3028 	case e1000_i211:
3029 		if (!(igb_get_flash_presence_i210(hw))) {
3030 			snprintf(adapter->fw_version,
3031 				 sizeof(adapter->fw_version),
3032 				 "%2d.%2d-%d",
3033 				 fw.invm_major, fw.invm_minor,
3034 				 fw.invm_img_type);
3035 			break;
3036 		}
3037 		fallthrough;
3038 	default:
3039 		/* if option rom is valid, display its version too */
3040 		if (fw.or_valid) {
3041 			snprintf(adapter->fw_version,
3042 				 sizeof(adapter->fw_version),
3043 				 "%d.%d, 0x%08x, %d.%d.%d",
3044 				 fw.eep_major, fw.eep_minor, fw.etrack_id,
3045 				 fw.or_major, fw.or_build, fw.or_patch);
3046 		/* no option rom */
3047 		} else if (fw.etrack_id != 0x0000) {
3048 			snprintf(adapter->fw_version,
3049 				 sizeof(adapter->fw_version),
3050 				 "%d.%d, 0x%08x",
3051 				 fw.eep_major, fw.eep_minor, fw.etrack_id);
3052 		} else {
3053 			snprintf(adapter->fw_version,
3054 				 sizeof(adapter->fw_version),
3055 				 "%d.%d.%d",
3056 				 fw.eep_major, fw.eep_minor, fw.eep_build);
3057 		}
3058 		break;
3059 	}
3060 }
3061 
3062 /**
3063  * igb_init_mas - init Media Autosense feature if enabled in the NVM
3064  *
3065  * @adapter: adapter struct
3066  **/
3067 static void igb_init_mas(struct igb_adapter *adapter)
3068 {
3069 	struct e1000_hw *hw = &adapter->hw;
3070 	u16 eeprom_data;
3071 
3072 	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
3073 	switch (hw->bus.func) {
3074 	case E1000_FUNC_0:
3075 		if (eeprom_data & IGB_MAS_ENABLE_0) {
3076 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
3077 			netdev_info(adapter->netdev,
3078 				"MAS: Enabling Media Autosense for port %d\n",
3079 				hw->bus.func);
3080 		}
3081 		break;
3082 	case E1000_FUNC_1:
3083 		if (eeprom_data & IGB_MAS_ENABLE_1) {
3084 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
3085 			netdev_info(adapter->netdev,
3086 				"MAS: Enabling Media Autosense for port %d\n",
3087 				hw->bus.func);
3088 		}
3089 		break;
3090 	case E1000_FUNC_2:
3091 		if (eeprom_data & IGB_MAS_ENABLE_2) {
3092 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
3093 			netdev_info(adapter->netdev,
3094 				"MAS: Enabling Media Autosense for port %d\n",
3095 				hw->bus.func);
3096 		}
3097 		break;
3098 	case E1000_FUNC_3:
3099 		if (eeprom_data & IGB_MAS_ENABLE_3) {
3100 			adapter->flags |= IGB_FLAG_MAS_ENABLE;
3101 			netdev_info(adapter->netdev,
3102 				"MAS: Enabling Media Autosense for port %d\n",
3103 				hw->bus.func);
3104 		}
3105 		break;
3106 	default:
3107 		/* Shouldn't get here */
3108 		netdev_err(adapter->netdev,
3109 			"MAS: Invalid port configuration, returning\n");
3110 		break;
3111 	}
3112 }
3113 
3114 /**
3115  *  igb_init_i2c - Init I2C interface
3116  *  @adapter: pointer to adapter structure
3117  **/
3118 static s32 igb_init_i2c(struct igb_adapter *adapter)
3119 {
3120 	s32 status = 0;
3121 
3122 	/* I2C interface is only supported on i350 devices */
3123 	if (adapter->hw.mac.type != e1000_i350)
3124 		return 0;
3125 
3126 	/* Initialize the i2c bus which is controlled by the registers.
3127 	 * This bus will use the i2c_algo_bit structure that implements
3128 	 * the protocol through toggling of the 4 bits in the register.
3129 	 */
3130 	adapter->i2c_adap.owner = THIS_MODULE;
3131 	adapter->i2c_algo = igb_i2c_algo;
3132 	adapter->i2c_algo.data = adapter;
3133 	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
3134 	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
3135 	strlcpy(adapter->i2c_adap.name, "igb BB",
3136 		sizeof(adapter->i2c_adap.name));
3137 	status = i2c_bit_add_bus(&adapter->i2c_adap);
3138 	return status;
3139 }
3140 
3141 /**
3142  *  igb_probe - Device Initialization Routine
3143  *  @pdev: PCI device information struct
3144  *  @ent: entry in igb_pci_tbl
3145  *
3146  *  Returns 0 on success, negative on failure
3147  *
3148  *  igb_probe initializes an adapter identified by a pci_dev structure.
3149  *  The OS initialization, configuring of the adapter private structure,
3150  *  and a hardware reset occur.
3151  **/
3152 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3153 {
3154 	struct net_device *netdev;
3155 	struct igb_adapter *adapter;
3156 	struct e1000_hw *hw;
3157 	u16 eeprom_data = 0;
3158 	s32 ret_val;
3159 	static int global_quad_port_a; /* global quad port a indication */
3160 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3161 	int err, pci_using_dac;
3162 	u8 part_str[E1000_PBANUM_LENGTH];
3163 
3164 	/* Catch broken hardware that put the wrong VF device ID in
3165 	 * the PCIe SR-IOV capability.
3166 	 */
3167 	if (pdev->is_virtfn) {
3168 		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
3169 			pci_name(pdev), pdev->vendor, pdev->device);
3170 		return -EINVAL;
3171 	}
3172 
3173 	err = pci_enable_device_mem(pdev);
3174 	if (err)
3175 		return err;
3176 
3177 	pci_using_dac = 0;
3178 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3179 	if (!err) {
3180 		pci_using_dac = 1;
3181 	} else {
3182 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3183 		if (err) {
3184 			dev_err(&pdev->dev,
3185 				"No usable DMA configuration, aborting\n");
3186 			goto err_dma;
3187 		}
3188 	}
3189 
3190 	err = pci_request_mem_regions(pdev, igb_driver_name);
3191 	if (err)
3192 		goto err_pci_reg;
3193 
3194 	pci_enable_pcie_error_reporting(pdev);
3195 
3196 	pci_set_master(pdev);
3197 	pci_save_state(pdev);
3198 
3199 	err = -ENOMEM;
3200 	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3201 				   IGB_MAX_TX_QUEUES);
3202 	if (!netdev)
3203 		goto err_alloc_etherdev;
3204 
3205 	SET_NETDEV_DEV(netdev, &pdev->dev);
3206 
3207 	pci_set_drvdata(pdev, netdev);
3208 	adapter = netdev_priv(netdev);
3209 	adapter->netdev = netdev;
3210 	adapter->pdev = pdev;
3211 	hw = &adapter->hw;
3212 	hw->back = adapter;
3213 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3214 
3215 	err = -EIO;
3216 	adapter->io_addr = pci_iomap(pdev, 0, 0);
3217 	if (!adapter->io_addr)
3218 		goto err_ioremap;
3219 	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
3220 	hw->hw_addr = adapter->io_addr;
3221 
3222 	netdev->netdev_ops = &igb_netdev_ops;
3223 	igb_set_ethtool_ops(netdev);
3224 	netdev->watchdog_timeo = 5 * HZ;
3225 
3226 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3227 
3228 	netdev->mem_start = pci_resource_start(pdev, 0);
3229 	netdev->mem_end = pci_resource_end(pdev, 0);
3230 
3231 	/* PCI config space info */
3232 	hw->vendor_id = pdev->vendor;
3233 	hw->device_id = pdev->device;
3234 	hw->revision_id = pdev->revision;
3235 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
3236 	hw->subsystem_device_id = pdev->subsystem_device;
3237 
3238 	/* Copy the default MAC, PHY and NVM function pointers */
3239 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3240 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3241 	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3242 	/* Initialize skew-specific constants */
3243 	err = ei->get_invariants(hw);
3244 	if (err)
3245 		goto err_sw_init;
3246 
3247 	/* setup the private structure */
3248 	err = igb_sw_init(adapter);
3249 	if (err)
3250 		goto err_sw_init;
3251 
3252 	igb_get_bus_info_pcie(hw);
3253 
3254 	hw->phy.autoneg_wait_to_complete = false;
3255 
3256 	/* Copper options */
3257 	if (hw->phy.media_type == e1000_media_type_copper) {
3258 		hw->phy.mdix = AUTO_ALL_MODES;
3259 		hw->phy.disable_polarity_correction = false;
3260 		hw->phy.ms_type = e1000_ms_hw_default;
3261 	}
3262 
3263 	if (igb_check_reset_block(hw))
3264 		dev_info(&pdev->dev,
3265 			"PHY reset is blocked due to SOL/IDER session.\n");
3266 
3267 	/* features is initialized to 0 in allocation; it might have bits
3268 	 * set by igb_sw_init so we should use an or instead of an
3269 	 * assignment.
3270 	 */
3271 	netdev->features |= NETIF_F_SG |
3272 			    NETIF_F_TSO |
3273 			    NETIF_F_TSO6 |
3274 			    NETIF_F_RXHASH |
3275 			    NETIF_F_RXCSUM |
3276 			    NETIF_F_HW_CSUM;
3277 
3278 	if (hw->mac.type >= e1000_82576)
3279 		netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3280 
3281 	if (hw->mac.type >= e1000_i350)
3282 		netdev->features |= NETIF_F_HW_TC;
3283 
3284 #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3285 				  NETIF_F_GSO_GRE_CSUM | \
3286 				  NETIF_F_GSO_IPXIP4 | \
3287 				  NETIF_F_GSO_IPXIP6 | \
3288 				  NETIF_F_GSO_UDP_TUNNEL | \
3289 				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
3290 
3291 	netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3292 	netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3293 
3294 	/* copy netdev features into list of user selectable features */
3295 	netdev->hw_features |= netdev->features |
3296 			       NETIF_F_HW_VLAN_CTAG_RX |
3297 			       NETIF_F_HW_VLAN_CTAG_TX |
3298 			       NETIF_F_RXALL;
3299 
3300 	if (hw->mac.type >= e1000_i350)
3301 		netdev->hw_features |= NETIF_F_NTUPLE;
3302 
3303 	if (pci_using_dac)
3304 		netdev->features |= NETIF_F_HIGHDMA;
3305 
3306 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3307 	netdev->mpls_features |= NETIF_F_HW_CSUM;
3308 	netdev->hw_enc_features |= netdev->vlan_features;
3309 
3310 	/* set this bit last since it cannot be part of vlan_features */
3311 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3312 			    NETIF_F_HW_VLAN_CTAG_RX |
3313 			    NETIF_F_HW_VLAN_CTAG_TX;
3314 
3315 	netdev->priv_flags |= IFF_SUPP_NOFCS;
3316 
3317 	netdev->priv_flags |= IFF_UNICAST_FLT;
3318 
3319 	/* MTU range: 68 - 9216 */
3320 	netdev->min_mtu = ETH_MIN_MTU;
3321 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3322 
3323 	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3324 
3325 	/* before reading the NVM, reset the controller to put the device in a
3326 	 * known good starting state
3327 	 */
3328 	hw->mac.ops.reset_hw(hw);
3329 
3330 	/* make sure the NVM is good; i211/i210 parts can have special NVM
3331 	 * that doesn't contain a checksum
3332 	 */
3333 	switch (hw->mac.type) {
3334 	case e1000_i210:
3335 	case e1000_i211:
3336 		if (igb_get_flash_presence_i210(hw)) {
3337 			if (hw->nvm.ops.validate(hw) < 0) {
3338 				dev_err(&pdev->dev,
3339 					"The NVM Checksum Is Not Valid\n");
3340 				err = -EIO;
3341 				goto err_eeprom;
3342 			}
3343 		}
3344 		break;
3345 	default:
3346 		if (hw->nvm.ops.validate(hw) < 0) {
3347 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3348 			err = -EIO;
3349 			goto err_eeprom;
3350 		}
3351 		break;
3352 	}
3353 
3354 	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3355 		/* copy the MAC address out of the NVM */
3356 		if (hw->mac.ops.read_mac_addr(hw))
3357 			dev_err(&pdev->dev, "NVM Read Error\n");
3358 	}
3359 
3360 	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3361 
3362 	if (!is_valid_ether_addr(netdev->dev_addr)) {
3363 		dev_err(&pdev->dev, "Invalid MAC Address\n");
3364 		err = -EIO;
3365 		goto err_eeprom;
3366 	}
3367 
3368 	igb_set_default_mac_filter(adapter);
3369 
3370 	/* get firmware version for ethtool -i */
3371 	igb_set_fw_version(adapter);
3372 
3373 	/* configure RXPBSIZE and TXPBSIZE */
3374 	if (hw->mac.type == e1000_i210) {
3375 		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3376 		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3377 	}
3378 
3379 	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3380 	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3381 
3382 	INIT_WORK(&adapter->reset_task, igb_reset_task);
3383 	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3384 
3385 	/* Initialize link properties that are user-changeable */
3386 	adapter->fc_autoneg = true;
3387 	hw->mac.autoneg = true;
3388 	hw->phy.autoneg_advertised = 0x2f;
3389 
3390 	hw->fc.requested_mode = e1000_fc_default;
3391 	hw->fc.current_mode = e1000_fc_default;
3392 
3393 	igb_validate_mdi_setting(hw);
3394 
3395 	/* By default, support wake on port A */
3396 	if (hw->bus.func == 0)
3397 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3398 
3399 	/* Check the NVM for wake support on non-port A ports */
3400 	if (hw->mac.type >= e1000_82580)
3401 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3402 				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3403 				 &eeprom_data);
3404 	else if (hw->bus.func == 1)
3405 		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3406 
3407 	if (eeprom_data & IGB_EEPROM_APME)
3408 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3409 
3410 	/* now that we have the eeprom settings, apply the special cases where
3411 	 * the eeprom may be wrong or the board simply won't support wake on
3412 	 * lan on a particular port
3413 	 */
3414 	switch (pdev->device) {
3415 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
3416 		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3417 		break;
3418 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
3419 	case E1000_DEV_ID_82576_FIBER:
3420 	case E1000_DEV_ID_82576_SERDES:
3421 		/* Wake events only supported on port A for dual fiber
3422 		 * regardless of eeprom setting
3423 		 */
3424 		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3425 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3426 		break;
3427 	case E1000_DEV_ID_82576_QUAD_COPPER:
3428 	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3429 		/* if quad port adapter, disable WoL on all but port A */
3430 		if (global_quad_port_a != 0)
3431 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3432 		else
3433 			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3434 		/* Reset for multiple quad port adapters */
3435 		if (++global_quad_port_a == 4)
3436 			global_quad_port_a = 0;
3437 		break;
3438 	default:
3439 		/* If the device can't wake, don't set software support */
3440 		if (!device_can_wakeup(&adapter->pdev->dev))
3441 			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3442 	}
3443 
3444 	/* initialize the wol settings based on the eeprom settings */
3445 	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3446 		adapter->wol |= E1000_WUFC_MAG;
3447 
3448 	/* Some vendors want WoL disabled by default, but still supported */
3449 	if ((hw->mac.type == e1000_i350) &&
3450 	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3451 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3452 		adapter->wol = 0;
3453 	}
3454 
3455 	/* Some vendors want the ability to use the EEPROM setting as
3456 	 * enable/disable only, and not for capability
3457 	 */
3458 	if (((hw->mac.type == e1000_i350) ||
3459 	     (hw->mac.type == e1000_i354)) &&
3460 	    (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3461 		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3462 		adapter->wol = 0;
3463 	}
3464 	if (hw->mac.type == e1000_i350) {
3465 		if (((pdev->subsystem_device == 0x5001) ||
3466 		     (pdev->subsystem_device == 0x5002)) &&
3467 				(hw->bus.func == 0)) {
3468 			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3469 			adapter->wol = 0;
3470 		}
3471 		if (pdev->subsystem_device == 0x1F52)
3472 			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3473 	}
3474 
3475 	device_set_wakeup_enable(&adapter->pdev->dev,
3476 				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3477 
3478 	/* reset the hardware with the new settings */
3479 	igb_reset(adapter);
3480 
3481 	/* Init the I2C interface */
3482 	err = igb_init_i2c(adapter);
3483 	if (err) {
3484 		dev_err(&pdev->dev, "failed to init i2c interface\n");
3485 		goto err_eeprom;
3486 	}
3487 
3488 	/* let the f/w know that the h/w is now under the control of the
3489 	 * driver.
3490 	 */
3491 	igb_get_hw_control(adapter);
3492 
3493 	strcpy(netdev->name, "eth%d");
3494 	err = register_netdev(netdev);
3495 	if (err)
3496 		goto err_register;
3497 
3498 	/* carrier off reporting is important to ethtool even BEFORE open */
3499 	netif_carrier_off(netdev);
3500 
3501 #ifdef CONFIG_IGB_DCA
3502 	if (dca_add_requester(&pdev->dev) == 0) {
3503 		adapter->flags |= IGB_FLAG_DCA_ENABLED;
3504 		dev_info(&pdev->dev, "DCA enabled\n");
3505 		igb_setup_dca(adapter);
3506 	}
3507 
3508 #endif
3509 #ifdef CONFIG_IGB_HWMON
3510 	/* Initialize the thermal sensor on i350 devices. */
3511 	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3512 		u16 ets_word;
3513 
3514 		/* Read the NVM to determine if this i350 device supports an
3515 		 * external thermal sensor.
3516 		 */
3517 		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3518 		if (ets_word != 0x0000 && ets_word != 0xFFFF)
3519 			adapter->ets = true;
3520 		else
3521 			adapter->ets = false;
3522 		if (igb_sysfs_init(adapter))
3523 			dev_err(&pdev->dev,
3524 				"failed to allocate sysfs resources\n");
3525 	} else {
3526 		adapter->ets = false;
3527 	}
3528 #endif
3529 	/* Check if Media Autosense is enabled */
3530 	adapter->ei = *ei;
3531 	if (hw->dev_spec._82575.mas_capable)
3532 		igb_init_mas(adapter);
3533 
3534 	/* do hw tstamp init after resetting */
3535 	igb_ptp_init(adapter);
3536 
3537 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3538 	/* print bus type/speed/width info, not applicable to i354 */
3539 	if (hw->mac.type != e1000_i354) {
3540 		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3541 			 netdev->name,
3542 			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3543 			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3544 			   "unknown"),
3545 			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3546 			  "Width x4" :
3547 			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
3548 			  "Width x2" :
3549 			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
3550 			  "Width x1" : "unknown"), netdev->dev_addr);
3551 	}
3552 
3553 	if ((hw->mac.type == e1000_82576 &&
3554 	     rd32(E1000_EECD) & E1000_EECD_PRES) ||
3555 	    (hw->mac.type >= e1000_i210 ||
3556 	     igb_get_flash_presence_i210(hw))) {
3557 		ret_val = igb_read_part_string(hw, part_str,
3558 					       E1000_PBANUM_LENGTH);
3559 	} else {
3560 		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3561 	}
3562 
3563 	if (ret_val)
3564 		strcpy(part_str, "Unknown");
3565 	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3566 	dev_info(&pdev->dev,
3567 		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3568 		(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3569 		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3570 		adapter->num_rx_queues, adapter->num_tx_queues);
3571 	if (hw->phy.media_type == e1000_media_type_copper) {
3572 		switch (hw->mac.type) {
3573 		case e1000_i350:
3574 		case e1000_i210:
3575 		case e1000_i211:
3576 			/* Enable EEE for internal copper PHY devices */
3577 			err = igb_set_eee_i350(hw, true, true);
3578 			if ((!err) &&
3579 			    (!hw->dev_spec._82575.eee_disable)) {
3580 				adapter->eee_advert =
3581 					MDIO_EEE_100TX | MDIO_EEE_1000T;
3582 				adapter->flags |= IGB_FLAG_EEE;
3583 			}
3584 			break;
3585 		case e1000_i354:
3586 			if ((rd32(E1000_CTRL_EXT) &
3587 			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3588 				err = igb_set_eee_i354(hw, true, true);
3589 				if ((!err) &&
3590 					(!hw->dev_spec._82575.eee_disable)) {
3591 					adapter->eee_advert =
3592 					   MDIO_EEE_100TX | MDIO_EEE_1000T;
3593 					adapter->flags |= IGB_FLAG_EEE;
3594 				}
3595 			}
3596 			break;
3597 		default:
3598 			break;
3599 		}
3600 	}
3601 
3602 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3603 
3604 	pm_runtime_put_noidle(&pdev->dev);
3605 	return 0;
3606 
3607 err_register:
3608 	igb_release_hw_control(adapter);
3609 	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3610 err_eeprom:
3611 	if (!igb_check_reset_block(hw))
3612 		igb_reset_phy(hw);
3613 
3614 	if (hw->flash_address)
3615 		iounmap(hw->flash_address);
3616 err_sw_init:
3617 	kfree(adapter->mac_table);
3618 	kfree(adapter->shadow_vfta);
3619 	igb_clear_interrupt_scheme(adapter);
3620 #ifdef CONFIG_PCI_IOV
3621 	igb_disable_sriov(pdev);
3622 #endif
3623 	pci_iounmap(pdev, adapter->io_addr);
3624 err_ioremap:
3625 	free_netdev(netdev);
3626 err_alloc_etherdev:
3627 	pci_disable_pcie_error_reporting(pdev);
3628 	pci_release_mem_regions(pdev);
3629 err_pci_reg:
3630 err_dma:
3631 	pci_disable_device(pdev);
3632 	return err;
3633 }
3634 
3635 #ifdef CONFIG_PCI_IOV
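/**
 *  igb_disable_sriov - disable SR-IOV and reclaim VF resources
 *  @pdev: PCI device information struct
 *
 *  Frees the per-VF data and MAC filter list unless the VFs are still
 *  assigned to guests, in which case -EPERM is returned and the VFs
 *  stay allocated.  Re-enables DMA coalescing once IOV is off.
 **/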
3636 static int igb_disable_sriov(struct pci_dev *pdev)
3637 {
3638 	struct net_device *netdev = pci_get_drvdata(pdev);
3639 	struct igb_adapter *adapter = netdev_priv(netdev);
3640 	struct e1000_hw *hw = &adapter->hw;
3641 	unsigned long flags;
3642 
3643 	/* reclaim resources allocated to VFs */
3644 	if (adapter->vf_data) {
3645 		/* disable iov and allow time for transactions to clear */
3646 		if (pci_vfs_assigned(pdev)) {
3647 			dev_warn(&pdev->dev,
3648 				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3649 			return -EPERM;
3650 		} else {
3651 			pci_disable_sriov(pdev);
3652 			msleep(500);
3653 		}
3654 		spin_lock_irqsave(&adapter->vfs_lock, flags);
3655 		kfree(adapter->vf_mac_list);
3656 		adapter->vf_mac_list = NULL;
3657 		kfree(adapter->vf_data);
3658 		adapter->vf_data = NULL;
3659 		adapter->vfs_allocated_count = 0;
3660 		spin_unlock_irqrestore(&adapter->vfs_lock, flags);
3661 		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3662 		wrfl();
3663 		msleep(100);
3664 		dev_info(&pdev->dev, "IOV Disabled\n");
3665 
3666 		/* Re-enable DMA Coalescing flag since IOV is turned off */
3667 		adapter->flags |= IGB_FLAG_DMAC;
3668 	}
3669 
3670 	return 0;
3671 }
3672 
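/**
 *  igb_enable_sriov - provision VFs and allocate their data storage
 *  @pdev: PCI device information struct
 *  @num_vfs: number of VFs requested
 *
 *  Allocates the per-VF data and the VF MAC filter list, then calls
 *  pci_enable_sriov() unless VFs were already provisioned.  Requires
 *  MSI-X and supports at most 7 VFs per PF.
 **/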
3673 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3674 {
3675 	struct net_device *netdev = pci_get_drvdata(pdev);
3676 	struct igb_adapter *adapter = netdev_priv(netdev);
3677 	int old_vfs = pci_num_vf(pdev);
3678 	struct vf_mac_filter *mac_list;
3679 	int err = 0;
3680 	int num_vf_mac_filters, i;
3681 
3682 	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3683 		err = -EPERM;
3684 		goto out;
3685 	}
3686 	if (!num_vfs)
3687 		goto out;
3688 
3689 	if (old_vfs) {
3690 		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3691 			 old_vfs, max_vfs);
3692 		adapter->vfs_allocated_count = old_vfs;
3693 	} else
3694 		adapter->vfs_allocated_count = num_vfs;
3695 
3696 	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3697 				sizeof(struct vf_data_storage), GFP_KERNEL);
3698 
3699 	/* if allocation failed then we do not support SR-IOV */
3700 	if (!adapter->vf_data) {
3701 		adapter->vfs_allocated_count = 0;
3702 		err = -ENOMEM;
3703 		goto out;
3704 	}
3705 
3706 	/* Due to the limited number of RAR entries calculate potential
3707 	 * number of MAC filters available for the VFs. Reserve entries
3708 	 * for PF default MAC, PF MAC filters and at least one RAR entry
3709 	 * for each VF for VF MAC.
3710 	 */
3711 	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3712 			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
3713 			      adapter->vfs_allocated_count);
3714 
3715 	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3716 				       sizeof(struct vf_mac_filter),
3717 				       GFP_KERNEL);
3718 
3719 	mac_list = adapter->vf_mac_list;
3720 	INIT_LIST_HEAD(&adapter->vf_macs.l);
3721 
3722 	if (adapter->vf_mac_list) {
3723 		/* Initialize list of VF MAC filters */
3724 		for (i = 0; i < num_vf_mac_filters; i++) {
3725 			mac_list->vf = -1;
3726 			mac_list->free = true;
3727 			list_add(&mac_list->l, &adapter->vf_macs.l);
3728 			mac_list++;
3729 		}
3730 	} else {
3731 		/* If we could not allocate memory for the VF MAC filters
3732 		 * we can continue without this feature but warn user.
3733 		 */
3734 		dev_err(&pdev->dev,
3735 			"Unable to allocate memory for VF MAC filter list\n");
3736 	}
3737 
3738 	/* only call pci_enable_sriov() if no VFs are allocated already */
3739 	if (!old_vfs) {
3740 		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3741 		if (err)
3742 			goto err_out;
3743 	}
3744 	dev_info(&pdev->dev, "%d VFs allocated\n",
3745 		 adapter->vfs_allocated_count);
3746 	for (i = 0; i < adapter->vfs_allocated_count; i++)
3747 		igb_vf_configure(adapter, i);
3748 
3749 	/* DMA Coalescing is not supported in IOV mode. */
3750 	adapter->flags &= ~IGB_FLAG_DMAC;
3751 	goto out;
3752 
3753 err_out:
3754 	kfree(adapter->vf_mac_list);
3755 	adapter->vf_mac_list = NULL;
3756 	kfree(adapter->vf_data);
3757 	adapter->vf_data = NULL;
3758 	adapter->vfs_allocated_count = 0;
3759 out:
3760 	return err;
3761 }
3762 
3763 #endif
3764 /**
3765  *  igb_remove_i2c - Cleanup I2C interface
3766  *  @adapter: pointer to adapter structure
3767  **/
3768 static void igb_remove_i2c(struct igb_adapter *adapter)
3769 {
3770 	/* free the adapter bus structure */
3771 	i2c_del_adapter(&adapter->i2c_adap);
3772 }
3773 
3774 /**
3775  *  igb_remove - Device Removal Routine
3776  *  @pdev: PCI device information struct
3777  *
3778  *  igb_remove is called by the PCI subsystem to alert the driver
3779  *  that it should release a PCI device.  This could be caused by a
3780  *  Hot-Plug event, or because the driver is going to be removed from
3781  *  memory.
3782  **/
3783 static void igb_remove(struct pci_dev *pdev)
3784 {
3785 	struct net_device *netdev = pci_get_drvdata(pdev);
3786 	struct igb_adapter *adapter = netdev_priv(netdev);
3787 	struct e1000_hw *hw = &adapter->hw;
3788 
3789 	pm_runtime_get_noresume(&pdev->dev);
3790 #ifdef CONFIG_IGB_HWMON
3791 	igb_sysfs_exit(adapter);
3792 #endif
3793 	igb_remove_i2c(adapter);
3794 	igb_ptp_stop(adapter);
3795 	/* The watchdog timer may be rescheduled, so explicitly
3796 	 * disable watchdog from being rescheduled.
3797 	 */
3798 	set_bit(__IGB_DOWN, &adapter->state);
3799 	del_timer_sync(&adapter->watchdog_timer);
3800 	del_timer_sync(&adapter->phy_info_timer);
3801 
3802 	cancel_work_sync(&adapter->reset_task);
3803 	cancel_work_sync(&adapter->watchdog_task);
3804 
3805 #ifdef CONFIG_IGB_DCA
3806 	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3807 		dev_info(&pdev->dev, "DCA disabled\n");
3808 		dca_remove_requester(&pdev->dev);
3809 		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3810 		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3811 	}
3812 #endif
3813 
3814 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
3815 	 * would have already happened in close and is redundant.
3816 	 */
3817 	igb_release_hw_control(adapter);
3818 
3819 #ifdef CONFIG_PCI_IOV
3820 	rtnl_lock();
3821 	igb_disable_sriov(pdev);
3822 	rtnl_unlock();
3823 #endif
3824 
3825 	unregister_netdev(netdev);
3826 
3827 	igb_clear_interrupt_scheme(adapter);
3828 
3829 	pci_iounmap(pdev, adapter->io_addr);
3830 	if (hw->flash_address)
3831 		iounmap(hw->flash_address);
3832 	pci_release_mem_regions(pdev);
3833 
3834 	kfree(adapter->mac_table);
3835 	kfree(adapter->shadow_vfta);
3836 	free_netdev(netdev);
3837 
3838 	pci_disable_pcie_error_reporting(pdev);
3839 
3840 	pci_disable_device(pdev);
3841 }
3842 
3843 /**
3844  *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3845  *  @adapter: board private structure to initialize
3846  *
3847  *  This function initializes the vf specific data storage and then attempts to
3848  *  allocate the VFs.  The reason for ordering it this way is that it is much
3849  *  more expensive time-wise to disable SR-IOV than it is to allocate and free
3850  *  the memory for the VFs.
3851  **/
3852 static void igb_probe_vfs(struct igb_adapter *adapter)
3853 {
3854 #ifdef CONFIG_PCI_IOV
3855 	struct pci_dev *pdev = adapter->pdev;
3856 	struct e1000_hw *hw = &adapter->hw;
3857 
3858 	/* Virtualization features not supported on i210 family. */
3859 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3860 		return;
3861 
3862 	/* Of the two calls below we really only want the effect of getting
3863 	 * IGB_FLAG_HAS_MSIX set (if available), without which
3864 	 * igb_enable_sriov() has no effect.
3865 	 */
3866 	igb_set_interrupt_capability(adapter, true);
3867 	igb_reset_interrupt_capability(adapter);
3868 
3869 	pci_sriov_set_totalvfs(pdev, 7);
3870 	igb_enable_sriov(pdev, max_vfs);
3871 
3872 #endif /* CONFIG_PCI_IOV */
3873 }
3874 
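/**
 *  igb_get_max_rss_queues - report the maximum number of RSS queues
 *  @adapter: board private structure
 *
 *  The limit depends on the MAC type and on whether SR-IOV VFs are
 *  allocated, since some parts cannot do RSS and SR-IOV concurrently.
 **/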
3875 unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3876 {
3877 	struct e1000_hw *hw = &adapter->hw;
3878 	unsigned int max_rss_queues;
3879 
3880 	/* Determine the maximum number of RSS queues supported. */
3881 	switch (hw->mac.type) {
3882 	case e1000_i211:
3883 		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3884 		break;
3885 	case e1000_82575:
3886 	case e1000_i210:
3887 		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3888 		break;
3889 	case e1000_i350:
3890 		/* I350 cannot do RSS and SR-IOV at the same time */
3891 		if (!!adapter->vfs_allocated_count) {
3892 			max_rss_queues = 1;
3893 			break;
3894 		}
3895 		fallthrough;
3896 	case e1000_82576:
3897 		if (!!adapter->vfs_allocated_count) {
3898 			max_rss_queues = 2;
3899 			break;
3900 		}
3901 		fallthrough;
3902 	case e1000_82580:
3903 	case e1000_i354:
3904 	default:
3905 		max_rss_queues = IGB_MAX_RX_QUEUES;
3906 		break;
3907 	}
3908 
3909 	return max_rss_queues;
3910 }
3911 
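/**
 *  igb_init_queue_configuration - set the default RSS queue count
 *  @adapter: board private structure
 *
 *  Caps the RSS queue count at the hardware maximum and the number of
 *  online CPUs, then decides whether queue pairing is required.
 **/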
3912 static void igb_init_queue_configuration(struct igb_adapter *adapter)
3913 {
3914 	u32 max_rss_queues;
3915 
3916 	max_rss_queues = igb_get_max_rss_queues(adapter);
3917 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3918 
3919 	igb_set_flag_queue_pairs(adapter, max_rss_queues);
3920 }
3921 
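/**
 *  igb_set_flag_queue_pairs - decide whether Tx/Rx queues share vectors
 *  @adapter: board private structure
 *  @max_rss_queues: maximum RSS queues supported by this MAC type
 *
 *  On MAC types with a limited supply of interrupt vectors, pair the
 *  queues whenever more than half of the maximum is in use.
 **/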
3922 void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3923 			      const u32 max_rss_queues)
3924 {
3925 	struct e1000_hw *hw = &adapter->hw;
3926 
3927 	/* Determine if we need to pair queues. */
3928 	switch (hw->mac.type) {
3929 	case e1000_82575:
3930 	case e1000_i211:
3931 		/* Device supports enough interrupts without queue pairing. */
3932 		break;
3933 	case e1000_82576:
3934 	case e1000_82580:
3935 	case e1000_i350:
3936 	case e1000_i354:
3937 	case e1000_i210:
3938 	default:
3939 		/* If rss_queues > half of max_rss_queues, pair the queues in
3940 		 * order to conserve interrupts due to limited supply.
3941 		 */
3942 		if (adapter->rss_queues > (max_rss_queues / 2))
3943 			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3944 		else
3945 			adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3946 		break;
3947 	}
3948 }
3949 
3950 /**
3951  *  igb_sw_init - Initialize general software structures (struct igb_adapter)
3952  *  @adapter: board private structure to initialize
3953  *
3954  *  igb_sw_init initializes the Adapter private data structure.
3955  *  Fields are initialized based on PCI device information and
3956  *  OS network device settings (MTU size).
3957  **/
3958 static int igb_sw_init(struct igb_adapter *adapter)
3959 {
3960 	struct e1000_hw *hw = &adapter->hw;
3961 	struct net_device *netdev = adapter->netdev;
3962 	struct pci_dev *pdev = adapter->pdev;
3963 
3964 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3965 
3966 	/* set default ring sizes */
3967 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
3968 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
3969 
3970 	/* set default ITR values */
3971 	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3972 	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3973 
3974 	/* set default work limits */
3975 	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3976 
3977 	adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
3978 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3979 
3980 	spin_lock_init(&adapter->nfc_lock);
3981 	spin_lock_init(&adapter->stats64_lock);
3982 
3983 	/* init spinlock to avoid concurrency of VF resources */
3984 	spin_lock_init(&adapter->vfs_lock);
3985 #ifdef CONFIG_PCI_IOV
3986 	switch (hw->mac.type) {
3987 	case e1000_82576:
3988 	case e1000_i350:
3989 		if (max_vfs > 7) {
3990 			dev_warn(&pdev->dev,
3991 				 "Maximum of 7 VFs per PF, using max\n");
3992 			max_vfs = adapter->vfs_allocated_count = 7;
3993 		} else
3994 			adapter->vfs_allocated_count = max_vfs;
3995 		if (adapter->vfs_allocated_count)
3996 			dev_warn(&pdev->dev,
3997 				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3998 		break;
3999 	default:
4000 		break;
4001 	}
4002 #endif /* CONFIG_PCI_IOV */
4003 
4004 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
4005 	adapter->flags |= IGB_FLAG_HAS_MSIX;
4006 
4007 	adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
4008 				     sizeof(struct igb_mac_addr),
4009 				     GFP_KERNEL);
4010 	if (!adapter->mac_table)
4011 		return -ENOMEM;
4012 
4013 	igb_probe_vfs(adapter);
4014 
4015 	igb_init_queue_configuration(adapter);
4016 
4017 	/* Setup and initialize a copy of the hw vlan table array */
4018 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
4019 				       GFP_KERNEL);
4020 	if (!adapter->shadow_vfta)
4021 		return -ENOMEM;
4022 
4023 	/* This call may decrease the number of queues */
4024 	if (igb_init_interrupt_scheme(adapter, true)) {
4025 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4026 		return -ENOMEM;
4027 	}
4028 
4029 	/* Explicitly disable IRQ since the NIC can be in any state. */
4030 	igb_irq_disable(adapter);
4031 
4032 	if (hw->mac.type >= e1000_i350)
4033 		adapter->flags &= ~IGB_FLAG_DMAC;
4034 
4035 	set_bit(__IGB_DOWN, &adapter->state);
4036 	return 0;
4037 }
4038 
4039 /**
4040  *  igb_open - Called when a network interface is made active
4041  *  @netdev: network interface device structure
4042  *  @resuming: indicates whether we are in a resume call
4043  *
4044  *  Returns 0 on success, negative value on failure
4045  *
4046  *  The open entry point is called when a network interface is made
4047  *  active by the system (IFF_UP).  At this point all resources needed
4048  *  for transmit and receive operations are allocated, the interrupt
4049  *  handler is registered with the OS, the watchdog timer is started,
4050  *  and the stack is notified that the interface is ready.
4051  **/
4052 static int __igb_open(struct net_device *netdev, bool resuming)
4053 {
4054 	struct igb_adapter *adapter = netdev_priv(netdev);
4055 	struct e1000_hw *hw = &adapter->hw;
4056 	struct pci_dev *pdev = adapter->pdev;
4057 	int err;
4058 	int i;
4059 
4060 	/* disallow open during test */
4061 	if (test_bit(__IGB_TESTING, &adapter->state)) {
4062 		WARN_ON(resuming);
4063 		return -EBUSY;
4064 	}
4065 
4066 	if (!resuming)
4067 		pm_runtime_get_sync(&pdev->dev);
4068 
4069 	netif_carrier_off(netdev);
4070 
4071 	/* allocate transmit descriptors */
4072 	err = igb_setup_all_tx_resources(adapter);
4073 	if (err)
4074 		goto err_setup_tx;
4075 
4076 	/* allocate receive descriptors */
4077 	err = igb_setup_all_rx_resources(adapter);
4078 	if (err)
4079 		goto err_setup_rx;
4080 
4081 	igb_power_up_link(adapter);
4082 
4083 	/* before we allocate an interrupt, we must be ready to handle it.
4084 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4085 	 * as soon as we call pci_request_irq, so we have to setup our
4086 	 * clean_rx handler before we do so.
4087 	 */
4088 	igb_configure(adapter);
4089 
4090 	err = igb_request_irq(adapter);
4091 	if (err)
4092 		goto err_req_irq;
4093 
4094 	/* Notify the stack of the actual queue counts. */
4095 	err = netif_set_real_num_tx_queues(adapter->netdev,
4096 					   adapter->num_tx_queues);
4097 	if (err)
4098 		goto err_set_queues;
4099 
4100 	err = netif_set_real_num_rx_queues(adapter->netdev,
4101 					   adapter->num_rx_queues);
4102 	if (err)
4103 		goto err_set_queues;
4104 
4105 	/* From here on the code is the same as igb_up() */
4106 	clear_bit(__IGB_DOWN, &adapter->state);
4107 
4108 	for (i = 0; i < adapter->num_q_vectors; i++)
4109 		napi_enable(&(adapter->q_vector[i]->napi));
4110 
4111 	/* Clear any pending interrupts. */
4112 	rd32(E1000_TSICR);
4113 	rd32(E1000_ICR);
4114 
4115 	igb_irq_enable(adapter);
4116 
4117 	/* notify VFs that reset has been completed */
4118 	if (adapter->vfs_allocated_count) {
4119 		u32 reg_data = rd32(E1000_CTRL_EXT);
4120 
4121 		reg_data |= E1000_CTRL_EXT_PFRSTD;
4122 		wr32(E1000_CTRL_EXT, reg_data);
4123 	}
4124 
4125 	netif_tx_start_all_queues(netdev);
4126 
4127 	if (!resuming)
4128 		pm_runtime_put(&pdev->dev);
4129 
4130 	/* start the watchdog. */
4131 	hw->mac.get_link_status = 1;
4132 	schedule_work(&adapter->watchdog_task);
4133 
4134 	return 0;
4135 
4136 err_set_queues:
4137 	igb_free_irq(adapter);
4138 err_req_irq:
4139 	igb_release_hw_control(adapter);
4140 	igb_power_down_link(adapter);
4141 	igb_free_all_rx_resources(adapter);
4142 err_setup_rx:
4143 	igb_free_all_tx_resources(adapter);
4144 err_setup_tx:
4145 	igb_reset(adapter);
4146 	if (!resuming)
4147 		pm_runtime_put(&pdev->dev);
4148 
4149 	return err;
4150 }
4151 
4152 int igb_open(struct net_device *netdev)
4153 {
4154 	return __igb_open(netdev, false);
4155 }
4156 
4157 /**
4158  *  igb_close - Disables a network interface
4159  *  @netdev: network interface device structure
4160  *  @suspending: indicates we are in a suspend call
4161  *
4162  *  Returns 0, this is not allowed to fail
4163  *
4164  *  The close entry point is called when an interface is de-activated
4165  *  by the OS.  The hardware is still under the driver's control, but
4166  *  needs to be disabled.  A global MAC reset is issued to stop the
4167  *  hardware, and all transmit and receive resources are freed.
4168  **/
4169 static int __igb_close(struct net_device *netdev, bool suspending)
4170 {
4171 	struct igb_adapter *adapter = netdev_priv(netdev);
4172 	struct pci_dev *pdev = adapter->pdev;
4173 
4174 	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4175 
4176 	if (!suspending)
4177 		pm_runtime_get_sync(&pdev->dev);
4178 
4179 	igb_down(adapter);
4180 	igb_free_irq(adapter);
4181 
4182 	igb_free_all_tx_resources(adapter);
4183 	igb_free_all_rx_resources(adapter);
4184 
4185 	if (!suspending)
4186 		pm_runtime_put_sync(&pdev->dev);
4187 	return 0;
4188 }
4189 
4190 int igb_close(struct net_device *netdev)
4191 {
4192 	if (netif_device_present(netdev) || netdev->dismantle)
4193 		return __igb_close(netdev, false);
4194 	return 0;
4195 }
4196 
4197 /**
4198  *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
4199  *  @tx_ring: tx descriptor ring (for a specific queue) to setup
4200  *
4201  *  Return 0 on success, negative on failure
4202  **/
4203 int igb_setup_tx_resources(struct igb_ring *tx_ring)
4204 {
4205 	struct device *dev = tx_ring->dev;
4206 	int size;
4207 
4208 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4209 
4210 	tx_ring->tx_buffer_info = vmalloc(size);
4211 	if (!tx_ring->tx_buffer_info)
4212 		goto err;
4213 
4214 	/* round up to nearest 4K */
4215 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4216 	tx_ring->size = ALIGN(tx_ring->size, 4096);
4217 
4218 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4219 					   &tx_ring->dma, GFP_KERNEL);
4220 	if (!tx_ring->desc)
4221 		goto err;
4222 
4223 	tx_ring->next_to_use = 0;
4224 	tx_ring->next_to_clean = 0;
4225 
4226 	return 0;
4227 
4228 err:
4229 	vfree(tx_ring->tx_buffer_info);
4230 	tx_ring->tx_buffer_info = NULL;
4231 	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4232 	return -ENOMEM;
4233 }
4234 
4235 /**
4236  *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
4237  *				 (Descriptors) for all queues
4238  *  @adapter: board private structure
4239  *
4240  *  Return 0 on success, negative on failure
4241  **/
4242 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4243 {
4244 	struct pci_dev *pdev = adapter->pdev;
4245 	int i, err = 0;
4246 
4247 	for (i = 0; i < adapter->num_tx_queues; i++) {
4248 		err = igb_setup_tx_resources(adapter->tx_ring[i]);
4249 		if (err) {
4250 			dev_err(&pdev->dev,
4251 				"Allocation for Tx Queue %u failed\n", i);
4252 			for (i--; i >= 0; i--)
4253 				igb_free_tx_resources(adapter->tx_ring[i]);
4254 			break;
4255 		}
4256 	}
4257 
4258 	return err;
4259 }
4260 
4261 /**
4262  *  igb_setup_tctl - configure the transmit control registers
4263  *  @adapter: Board private structure
4264  **/
4265 void igb_setup_tctl(struct igb_adapter *adapter)
4266 {
4267 	struct e1000_hw *hw = &adapter->hw;
4268 	u32 tctl;
4269 
4270 	/* disable queue 0 which is enabled by default on 82575 and 82576 */
4271 	wr32(E1000_TXDCTL(0), 0);
4272 
4273 	/* Program the Transmit Control Register */
4274 	tctl = rd32(E1000_TCTL);
4275 	tctl &= ~E1000_TCTL_CT;
4276 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4277 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4278 
4279 	igb_config_collision_dist(hw);
4280 
4281 	/* Enable transmits */
4282 	tctl |= E1000_TCTL_EN;
4283 
4284 	wr32(E1000_TCTL, tctl);
4285 }
4286 
4287 /**
4288  *  igb_configure_tx_ring - Configure transmit ring after Reset
4289  *  @adapter: board private structure
4290  *  @ring: tx ring to configure
4291  *
4292  *  Configure a transmit ring after a reset.
4293  **/
4294 void igb_configure_tx_ring(struct igb_adapter *adapter,
4295 			   struct igb_ring *ring)
4296 {
4297 	struct e1000_hw *hw = &adapter->hw;
4298 	u32 txdctl = 0;
4299 	u64 tdba = ring->dma;
4300 	int reg_idx = ring->reg_idx;
4301 
4302 	wr32(E1000_TDLEN(reg_idx),
4303 	     ring->count * sizeof(union e1000_adv_tx_desc));
4304 	wr32(E1000_TDBAL(reg_idx),
4305 	     tdba & 0x00000000ffffffffULL);
4306 	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4307 
4308 	ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4309 	wr32(E1000_TDH(reg_idx), 0);
4310 	writel(0, ring->tail);
4311 
4312 	txdctl |= IGB_TX_PTHRESH;
4313 	txdctl |= IGB_TX_HTHRESH << 8;
4314 	txdctl |= IGB_TX_WTHRESH << 16;
4315 
4316 	/* reinitialize tx_buffer_info */
4317 	memset(ring->tx_buffer_info, 0,
4318 	       sizeof(struct igb_tx_buffer) * ring->count);
4319 
4320 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4321 	wr32(E1000_TXDCTL(reg_idx), txdctl);
4322 }
4323 
4324 /**
4325  *  igb_configure_tx - Configure transmit Unit after Reset
4326  *  @adapter: board private structure
4327  *
4328  *  Configure the Tx unit of the MAC after a reset.
4329  **/
4330 static void igb_configure_tx(struct igb_adapter *adapter)
4331 {
4332 	struct e1000_hw *hw = &adapter->hw;
4333 	int i;
4334 
4335 	/* disable the queues */
4336 	for (i = 0; i < adapter->num_tx_queues; i++)
4337 		wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4338 
4339 	wrfl();
4340 	usleep_range(10000, 20000);
4341 
4342 	for (i = 0; i < adapter->num_tx_queues; i++)
4343 		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4344 }
4345 
4346 /**
4347  *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
4348  *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
4349  *
4350  *  Returns 0 on success, negative on failure
4351  **/
4352 int igb_setup_rx_resources(struct igb_ring *rx_ring)
4353 {
4354 	struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
4355 	struct device *dev = rx_ring->dev;
4356 	int size;
4357 
4358 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4359 
4360 	rx_ring->rx_buffer_info = vmalloc(size);
4361 	if (!rx_ring->rx_buffer_info)
4362 		goto err;
4363 
4364 	/* Round up to nearest 4K */
4365 	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4366 	rx_ring->size = ALIGN(rx_ring->size, 4096);
4367 
4368 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4369 					   &rx_ring->dma, GFP_KERNEL);
4370 	if (!rx_ring->desc)
4371 		goto err;
4372 
4373 	rx_ring->next_to_alloc = 0;
4374 	rx_ring->next_to_clean = 0;
4375 	rx_ring->next_to_use = 0;
4376 
4377 	rx_ring->xdp_prog = adapter->xdp_prog;
4378 
4379 	/* XDP RX-queue info */
4380 	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
4381 			     rx_ring->queue_index) < 0)
4382 		goto err;
4383 
4384 	return 0;
4385 
4386 err:
4387 	vfree(rx_ring->rx_buffer_info);
4388 	rx_ring->rx_buffer_info = NULL;
4389 	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4390 	return -ENOMEM;
4391 }
4392 
4393 /**
4394  *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
4395  *				 (Descriptors) for all queues
4396  *  @adapter: board private structure
4397  *
4398  *  Return 0 on success, negative on failure
4399  **/
4400 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4401 {
4402 	struct pci_dev *pdev = adapter->pdev;
4403 	int i, err = 0;
4404 
4405 	for (i = 0; i < adapter->num_rx_queues; i++) {
4406 		err = igb_setup_rx_resources(adapter->rx_ring[i]);
4407 		if (err) {
4408 			dev_err(&pdev->dev,
4409 				"Allocation for Rx Queue %u failed\n", i);
4410 			for (i--; i >= 0; i--)
4411 				igb_free_rx_resources(adapter->rx_ring[i]);
4412 			break;
4413 		}
4414 	}
4415 
4416 	return err;
4417 }
4418 
4419 /**
4420  *  igb_setup_mrqc - configure the multiple receive queue control registers
4421  *  @adapter: Board private structure
4422  **/
4423 static void igb_setup_mrqc(struct igb_adapter *adapter)
4424 {
4425 	struct e1000_hw *hw = &adapter->hw;
4426 	u32 mrqc, rxcsum;
4427 	u32 j, num_rx_queues;
4428 	u32 rss_key[10];
4429 
4430 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
4431 	for (j = 0; j < 10; j++)
4432 		wr32(E1000_RSSRK(j), rss_key[j]);
4433 
4434 	num_rx_queues = adapter->rss_queues;
4435 
4436 	switch (hw->mac.type) {
4437 	case e1000_82576:
4438 		/* 82576 supports 2 RSS queues for SR-IOV */
4439 		if (adapter->vfs_allocated_count)
4440 			num_rx_queues = 2;
4441 		break;
4442 	default:
4443 		break;
4444 	}
4445 
4446 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
4447 		for (j = 0; j < IGB_RETA_SIZE; j++)
4448 			adapter->rss_indir_tbl[j] =
4449 			(j * num_rx_queues) / IGB_RETA_SIZE;
4450 		adapter->rss_indir_tbl_init = num_rx_queues;
4451 	}
4452 	igb_write_rss_indir_tbl(adapter);
4453 
4454 	/* Disable raw packet checksumming so that RSS hash is placed in
4455 	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
4456 	 * offloads as they are enabled by default
4457 	 */
4458 	rxcsum = rd32(E1000_RXCSUM);
4459 	rxcsum |= E1000_RXCSUM_PCSD;
4460 
4461 	if (adapter->hw.mac.type >= e1000_82576)
4462 		/* Enable Receive Checksum Offload for SCTP */
4463 		rxcsum |= E1000_RXCSUM_CRCOFL;
4464 
4465 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
4466 	wr32(E1000_RXCSUM, rxcsum);
4467 
4468 	/* Generate RSS hash based on packet types, TCP/UDP
4469 	 * port numbers and/or IPv4/v6 src and dst addresses
4470 	 */
4471 	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4472 	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
4473 	       E1000_MRQC_RSS_FIELD_IPV6 |
4474 	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
4475 	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4476 
4477 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4478 		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4479 	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4480 		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4481 
4482 	/* If VMDq is enabled then we set the appropriate mode for that, else
4483 	 * we default to RSS so that an RSS hash is calculated per packet even
4484 	 * if we are only using one queue
4485 	 */
4486 	if (adapter->vfs_allocated_count) {
4487 		if (hw->mac.type > e1000_82575) {
4488 			/* Set the default pool for the PF's first queue */
4489 			u32 vtctl = rd32(E1000_VT_CTL);
4490 
4491 			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4492 				   E1000_VT_CTL_DISABLE_DEF_POOL);
4493 			vtctl |= adapter->vfs_allocated_count <<
4494 				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4495 			wr32(E1000_VT_CTL, vtctl);
4496 		}
4497 		if (adapter->rss_queues > 1)
4498 			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4499 		else
4500 			mrqc |= E1000_MRQC_ENABLE_VMDQ;
4501 	} else {
4502 		mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4503 	}
4504 	igb_vmm_control(adapter);
4505 
4506 	wr32(E1000_MRQC, mrqc);
4507 }
4508 
4509 /**
4510  *  igb_setup_rctl - configure the receive control registers
4511  *  @adapter: Board private structure
4512  **/
4513 void igb_setup_rctl(struct igb_adapter *adapter)
4514 {
4515 	struct e1000_hw *hw = &adapter->hw;
4516 	u32 rctl;
4517 
4518 	rctl = rd32(E1000_RCTL);
4519 
4520 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4521 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4522 
4523 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4524 		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4525 
4526 	/* enable stripping of CRC. It's unlikely this will break BMC
4527 	 * redirection as it did with e1000. Newer features require
4528 	 * that the HW strips the CRC.
4529 	 */
4530 	rctl |= E1000_RCTL_SECRC;
4531 
4532 	/* disable store bad packets and clear size bits. */
4533 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4534 
4535 	/* enable LPE to allow for reception of jumbo frames */
4536 	rctl |= E1000_RCTL_LPE;
4537 
4538 	/* disable queue 0 to prevent tail write w/o re-config */
4539 	wr32(E1000_RXDCTL(0), 0);
4540 
4541 	/* Attention!!!  For SR-IOV PF driver operations you must enable
4542 	 * queue drop for all VF and PF queues to prevent head of line blocking
4543 	 * if an un-trusted VF does not provide descriptors to hardware.
4544 	 */
4545 	if (adapter->vfs_allocated_count) {
4546 		/* set all queue drop enable bits */
4547 		wr32(E1000_QDE, ALL_QUEUES);
4548 	}
4549 
4550 	/* This is useful for sniffing bad packets. */
4551 	if (adapter->netdev->features & NETIF_F_RXALL) {
4552 		/* UPE and MPE will be handled by normal PROMISC logic
4553 		 * in e1000e_set_rx_mode
4554 		 */
4555 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
4556 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
4557 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
4558 
4559 		rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
4560 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
4561 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
4562 		 * and that breaks VLANs.
4563 		 */
4564 	}
4565 
4566 	wr32(E1000_RCTL, rctl);
4567 }
4568 
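/**
 *  igb_set_vf_rlpml - set the maximum receive packet length for a pool
 *  @adapter: board private structure
 *  @size: maximum frame size to accept, capped at MAX_JUMBO_FRAME_SIZE
 *  @vfn: pool/VF number being configured
 **/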
4569 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4570 				   int vfn)
4571 {
4572 	struct e1000_hw *hw = &adapter->hw;
4573 	u32 vmolr;
4574 
4575 	if (size > MAX_JUMBO_FRAME_SIZE)
4576 		size = MAX_JUMBO_FRAME_SIZE;
4577 
4578 	vmolr = rd32(E1000_VMOLR(vfn));
4579 	vmolr &= ~E1000_VMOLR_RLPML_MASK;
4580 	vmolr |= size | E1000_VMOLR_LPE;
4581 	wr32(E1000_VMOLR(vfn), vmolr);
4582 
4583 	return 0;
4584 }
4585 
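/**
 *  igb_set_vf_vlan_strip - toggle VLAN tag stripping for a pool
 *  @adapter: board private structure
 *  @vfn: pool/VF number being configured
 *  @enable: true to strip VLAN tags from received packets
 **/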
4586 static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4587 					 int vfn, bool enable)
4588 {
4589 	struct e1000_hw *hw = &adapter->hw;
4590 	u32 val, reg;
4591 
4592 	if (hw->mac.type < e1000_82576)
4593 		return;
4594 
4595 	if (hw->mac.type == e1000_i350)
4596 		reg = E1000_DVMOLR(vfn);
4597 	else
4598 		reg = E1000_VMOLR(vfn);
4599 
4600 	val = rd32(reg);
4601 	if (enable)
4602 		val |= E1000_VMOLR_STRVLAN;
4603 	else
4604 		val &= ~(E1000_VMOLR_STRVLAN);
4605 	wr32(reg, val);
4606 }
4607 
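/**
 *  igb_set_vmolr - configure the VM offload register for a pool
 *  @adapter: board private structure
 *  @vfn: pool/VF number being configured
 *  @aupe: true to accept untagged packets in this pool
 **/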
4608 static inline void igb_set_vmolr(struct igb_adapter *adapter,
4609 				 int vfn, bool aupe)
4610 {
4611 	struct e1000_hw *hw = &adapter->hw;
4612 	u32 vmolr;
4613 
4614 	/* This register exists only on 82576 and newer, so if we are older
4615 	 * we should exit and do nothing
4616 	 */
4617 	if (hw->mac.type < e1000_82576)
4618 		return;
4619 
4620 	vmolr = rd32(E1000_VMOLR(vfn));
4621 	if (aupe)
4622 		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
4623 	else
4624 		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
4625 
4626 	/* clear all bits that might not be set */
4627 	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4628 
4629 	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4630 		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
4631 	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
4632 	 * multicast packets
4633 	 */
4634 	if (vfn <= adapter->vfs_allocated_count)
4635 		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
4636 
4637 	wr32(E1000_VMOLR(vfn), vmolr);
4638 }
4639 
4640 /**
4641  *  igb_setup_srrctl - configure the split and replication receive control
4642  *                     registers
4643  *  @adapter: Board private structure
4644  *  @ring: receive ring to be configured
4645  **/
4646 void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4647 {
4648 	struct e1000_hw *hw = &adapter->hw;
4649 	int reg_idx = ring->reg_idx;
4650 	u32 srrctl = 0;
4651 
4652 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4653 	if (ring_uses_large_buffer(ring))
4654 		srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4655 	else
4656 		srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4657 	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4658 	if (hw->mac.type >= e1000_82580)
4659 		srrctl |= E1000_SRRCTL_TIMESTAMP;
4660 	/* Only set Drop Enable if VFs allocated, or we are supporting multiple
4661 	 * queues and rx flow control is disabled
4662 	 */
4663 	if (adapter->vfs_allocated_count ||
4664 	    (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4665 	     adapter->num_rx_queues > 1))
4666 		srrctl |= E1000_SRRCTL_DROP_EN;
4667 
4668 	wr32(E1000_SRRCTL(reg_idx), srrctl);
4669 }
4670 
4671 /**
4672  *  igb_configure_rx_ring - Configure a receive ring after Reset
4673  *  @adapter: board private structure
4674  *  @ring: receive ring to be configured
4675  *
4676  *  Configure the Rx unit of the MAC after a reset.
4677  **/
4678 void igb_configure_rx_ring(struct igb_adapter *adapter,
4679 			   struct igb_ring *ring)
4680 {
4681 	struct e1000_hw *hw = &adapter->hw;
4682 	union e1000_adv_rx_desc *rx_desc;
4683 	u64 rdba = ring->dma;
4684 	int reg_idx = ring->reg_idx;
4685 	u32 rxdctl = 0;
4686 
4687 	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4688 	WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4689 					   MEM_TYPE_PAGE_SHARED, NULL));
4690 
4691 	/* disable the queue */
4692 	wr32(E1000_RXDCTL(reg_idx), 0);
4693 
4694 	/* Set DMA base address registers */
4695 	wr32(E1000_RDBAL(reg_idx),
4696 	     rdba & 0x00000000ffffffffULL);
4697 	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4698 	wr32(E1000_RDLEN(reg_idx),
4699 	     ring->count * sizeof(union e1000_adv_rx_desc));
4700 
4701 	/* initialize head and tail */
4702 	ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4703 	wr32(E1000_RDH(reg_idx), 0);
4704 	writel(0, ring->tail);
4705 
4706 	/* set descriptor configuration */
4707 	igb_setup_srrctl(adapter, ring);
4708 
4709 	/* set filtering for VMDQ pools */
4710 	igb_set_vmolr(adapter, reg_idx & 0x7, true);
4711 
4712 	rxdctl |= IGB_RX_PTHRESH;
4713 	rxdctl |= IGB_RX_HTHRESH << 8;
4714 	rxdctl |= IGB_RX_WTHRESH << 16;
4715 
4716 	/* initialize rx_buffer_info */
4717 	memset(ring->rx_buffer_info, 0,
4718 	       sizeof(struct igb_rx_buffer) * ring->count);
4719 
4720 	/* initialize Rx descriptor 0 */
4721 	rx_desc = IGB_RX_DESC(ring, 0);
4722 	rx_desc->wb.upper.length = 0;
4723 
4724 	/* enable receive descriptor fetching */
4725 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4726 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
4727 }
4728 
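/**
 *  igb_set_rx_buffer_len - select the Rx buffer strategy for a ring
 *  @adapter: board private structure
 *  @rx_ring: receive ring being configured
 *
 *  Chooses between legacy receive, build_skb, and larger Rx buffers
 *  for jumbo frames on small-page systems.
 **/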
4729 static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4730 				  struct igb_ring *rx_ring)
4731 {
4732 	/* set build_skb and buffer size flags */
4733 	clear_ring_build_skb_enabled(rx_ring);
4734 	clear_ring_uses_large_buffer(rx_ring);
4735 
4736 	if (adapter->flags & IGB_FLAG_RX_LEGACY)
4737 		return;
4738 
4739 	set_ring_build_skb_enabled(rx_ring);
4740 
4741 #if (PAGE_SIZE < 8192)
4742 	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4743 		return;
4744 
4745 	set_ring_uses_large_buffer(rx_ring);
4746 #endif
4747 }
4748 
4749 /**
4750  *  igb_configure_rx - Configure receive Unit after Reset
4751  *  @adapter: board private structure
4752  *
4753  *  Configure the Rx unit of the MAC after a reset.
4754  **/
4755 static void igb_configure_rx(struct igb_adapter *adapter)
4756 {
4757 	int i;
4758 
4759 	/* set the correct pool for the PF default MAC address in entry 0 */
4760 	igb_set_default_mac_filter(adapter);
4761 
4762 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
4763 	 * the Base and Length of the Rx Descriptor Ring
4764 	 */
4765 	for (i = 0; i < adapter->num_rx_queues; i++) {
4766 		struct igb_ring *rx_ring = adapter->rx_ring[i];
4767 
4768 		igb_set_rx_buffer_len(adapter, rx_ring);
4769 		igb_configure_rx_ring(adapter, rx_ring);
4770 	}
4771 }
4772 
4773 /**
4774  *  igb_free_tx_resources - Free Tx Resources per Queue
4775  *  @tx_ring: Tx descriptor ring for a specific queue
4776  *
4777  *  Free all transmit software resources
4778  **/
4779 void igb_free_tx_resources(struct igb_ring *tx_ring)
4780 {
4781 	igb_clean_tx_ring(tx_ring);
4782 
4783 	vfree(tx_ring->tx_buffer_info);
4784 	tx_ring->tx_buffer_info = NULL;
4785 
4786 	/* if not set, then don't free */
4787 	if (!tx_ring->desc)
4788 		return;
4789 
4790 	dma_free_coherent(tx_ring->dev, tx_ring->size,
4791 			  tx_ring->desc, tx_ring->dma);
4792 
4793 	tx_ring->desc = NULL;
4794 }
4795 
4796 /**
4797  *  igb_free_all_tx_resources - Free Tx Resources for All Queues
4798  *  @adapter: board private structure
4799  *
4800  *  Free all transmit software resources
4801  **/
4802 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4803 {
4804 	int i;
4805 
4806 	for (i = 0; i < adapter->num_tx_queues; i++)
4807 		if (adapter->tx_ring[i])
4808 			igb_free_tx_resources(adapter->tx_ring[i]);
4809 }
4810 
4811 /**
4812  *  igb_clean_tx_ring - Free Tx Buffers
4813  *  @tx_ring: ring to be cleaned
4814  **/
4815 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4816 {
4817 	u16 i = tx_ring->next_to_clean;
4818 	struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4819 
4820 	while (i != tx_ring->next_to_use) {
4821 		union e1000_adv_tx_desc *eop_desc, *tx_desc;
4822 
4823 		/* Free all the Tx ring sk_buffs or xdp frames */
4824 		if (tx_buffer->type == IGB_TYPE_SKB)
4825 			dev_kfree_skb_any(tx_buffer->skb);
4826 		else
4827 			xdp_return_frame(tx_buffer->xdpf);
4828 
4829 		/* unmap skb header data */
4830 		dma_unmap_single(tx_ring->dev,
4831 				 dma_unmap_addr(tx_buffer, dma),
4832 				 dma_unmap_len(tx_buffer, len),
4833 				 DMA_TO_DEVICE);
4834 
4835 		/* check for eop_desc to determine the end of the packet */
4836 		eop_desc = tx_buffer->next_to_watch;
4837 		tx_desc = IGB_TX_DESC(tx_ring, i);
4838 
4839 		/* unmap remaining buffers */
4840 		while (tx_desc != eop_desc) {
4841 			tx_buffer++;
4842 			tx_desc++;
4843 			i++;
4844 			if (unlikely(i == tx_ring->count)) {
4845 				i = 0;
4846 				tx_buffer = tx_ring->tx_buffer_info;
4847 				tx_desc = IGB_TX_DESC(tx_ring, 0);
4848 			}
4849 
4850 			/* unmap any remaining paged data */
4851 			if (dma_unmap_len(tx_buffer, len))
4852 				dma_unmap_page(tx_ring->dev,
4853 					       dma_unmap_addr(tx_buffer, dma),
4854 					       dma_unmap_len(tx_buffer, len),
4855 					       DMA_TO_DEVICE);
4856 		}
4857 
4858 		tx_buffer->next_to_watch = NULL;
4859 
4860 		/* move us one more past the eop_desc for start of next pkt */
4861 		tx_buffer++;
4862 		i++;
4863 		if (unlikely(i == tx_ring->count)) {
4864 			i = 0;
4865 			tx_buffer = tx_ring->tx_buffer_info;
4866 		}
4867 	}
4868 
4869 	/* reset BQL for queue */
4870 	netdev_tx_reset_queue(txring_txq(tx_ring));
4871 
4872 	/* reset next_to_use and next_to_clean */
4873 	tx_ring->next_to_use = 0;
4874 	tx_ring->next_to_clean = 0;
4875 }
4876 
4877 /**
4878  *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
4879  *  @adapter: board private structure
4880  **/
4881 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4882 {
4883 	int i;
4884 
4885 	for (i = 0; i < adapter->num_tx_queues; i++)
4886 		if (adapter->tx_ring[i])
4887 			igb_clean_tx_ring(adapter->tx_ring[i]);
4888 }
4889 
4890 /**
4891  *  igb_free_rx_resources - Free Rx Resources
4892  *  @rx_ring: ring to clean the resources from
4893  *
4894  *  Free all receive software resources
4895  **/
4896 void igb_free_rx_resources(struct igb_ring *rx_ring)
4897 {
4898 	igb_clean_rx_ring(rx_ring);
4899 
4900 	rx_ring->xdp_prog = NULL;
4901 	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4902 	vfree(rx_ring->rx_buffer_info);
4903 	rx_ring->rx_buffer_info = NULL;
4904 
4905 	/* if not set, then don't free */
4906 	if (!rx_ring->desc)
4907 		return;
4908 
4909 	dma_free_coherent(rx_ring->dev, rx_ring->size,
4910 			  rx_ring->desc, rx_ring->dma);
4911 
4912 	rx_ring->desc = NULL;
4913 }
4914 
4915 /**
4916  *  igb_free_all_rx_resources - Free Rx Resources for All Queues
4917  *  @adapter: board private structure
4918  *
4919  *  Free all receive software resources
4920  **/
4921 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4922 {
4923 	int i;
4924 
4925 	for (i = 0; i < adapter->num_rx_queues; i++)
4926 		if (adapter->rx_ring[i])
4927 			igb_free_rx_resources(adapter->rx_ring[i]);
4928 }
4929 
4930 /**
4931  *  igb_clean_rx_ring - Free Rx Buffers per Queue
4932  *  @rx_ring: ring to free buffers from
4933  **/
4934 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4935 {
4936 	u16 i = rx_ring->next_to_clean;
4937 
4938 	dev_kfree_skb(rx_ring->skb);
4939 	rx_ring->skb = NULL;
4940 
4941 	/* Free all the Rx ring sk_buffs */
4942 	while (i != rx_ring->next_to_alloc) {
4943 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4944 
4945 		/* Invalidate cache lines that may have been written to by
4946 		 * device so that we avoid corrupting memory.
4947 		 */
4948 		dma_sync_single_range_for_cpu(rx_ring->dev,
4949 					      buffer_info->dma,
4950 					      buffer_info->page_offset,
4951 					      igb_rx_bufsz(rx_ring),
4952 					      DMA_FROM_DEVICE);
4953 
4954 		/* free resources associated with mapping */
4955 		dma_unmap_page_attrs(rx_ring->dev,
4956 				     buffer_info->dma,
4957 				     igb_rx_pg_size(rx_ring),
4958 				     DMA_FROM_DEVICE,
4959 				     IGB_RX_DMA_ATTR);
4960 		__page_frag_cache_drain(buffer_info->page,
4961 					buffer_info->pagecnt_bias);
4962 
4963 		i++;
4964 		if (i == rx_ring->count)
4965 			i = 0;
4966 	}
4967 
4968 	rx_ring->next_to_alloc = 0;
4969 	rx_ring->next_to_clean = 0;
4970 	rx_ring->next_to_use = 0;
4971 }
4972 
4973 /**
4974  *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
4975  *  @adapter: board private structure
4976  **/
4977 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4978 {
4979 	int i;
4980 
4981 	for (i = 0; i < adapter->num_rx_queues; i++)
4982 		if (adapter->rx_ring[i])
4983 			igb_clean_rx_ring(adapter->rx_ring[i]);
4984 }
4985 
4986 /**
4987  *  igb_set_mac - Change the Ethernet Address of the NIC
4988  *  @netdev: network interface device structure
4989  *  @p: pointer to an address structure
4990  *
4991  *  Returns 0 on success, negative on failure
4992  **/
4993 static int igb_set_mac(struct net_device *netdev, void *p)
4994 {
4995 	struct igb_adapter *adapter = netdev_priv(netdev);
4996 	struct e1000_hw *hw = &adapter->hw;
4997 	struct sockaddr *addr = p;
4998 
4999 	if (!is_valid_ether_addr(addr->sa_data))
5000 		return -EADDRNOTAVAIL;
5001 
5002 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5003 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5004 
5005 	/* set the correct pool for the new PF MAC address in entry 0 */
5006 	igb_set_default_mac_filter(adapter);
5007 
5008 	return 0;
5009 }
5010 
5011 /**
5012  *  igb_write_mc_addr_list - write multicast addresses to MTA
5013  *  @netdev: network interface device structure
5014  *
5015  *  Writes multicast address list to the MTA hash table.
5016  *  Returns: -ENOMEM on failure
5017  *           0 on no addresses written
5018  *           X on writing X addresses to MTA
5019  **/
5020 static int igb_write_mc_addr_list(struct net_device *netdev)
5021 {
5022 	struct igb_adapter *adapter = netdev_priv(netdev);
5023 	struct e1000_hw *hw = &adapter->hw;
5024 	struct netdev_hw_addr *ha;
5025 	u8  *mta_list;
5026 	int i;
5027 
5028 	if (netdev_mc_empty(netdev)) {
5029 		/* nothing to program, so clear mc list */
5030 		igb_update_mc_addr_list(hw, NULL, 0);
5031 		igb_restore_vf_multicasts(adapter);
5032 		return 0;
5033 	}
5034 
5035 	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
5036 	if (!mta_list)
5037 		return -ENOMEM;
5038 
5039 	/* The shared function expects a packed array of only addresses. */
5040 	i = 0;
5041 	netdev_for_each_mc_addr(ha, netdev)
5042 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
5043 
5044 	igb_update_mc_addr_list(hw, mta_list, i);
5045 	kfree(mta_list);
5046 
5047 	return netdev_mc_count(netdev);
5048 }
5049 
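/* [Editor's note] Illustrative sketch, not part of the driver: mta_list is a
 * flat, packed array of 6-byte addresses with no padding between entries:
 *
 *	| addr0 (6 B) | addr1 (6 B) | ... | addrN-1 (6 B) |
 *
 * Entry i therefore starts at mta_list + i * ETH_ALEN, which is exactly the
 * indexing used by the netdev_for_each_mc_addr() copy loop above.
 */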
5050 static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
5051 {
5052 	struct e1000_hw *hw = &adapter->hw;
5053 	u32 i, pf_id;
5054 
5055 	switch (hw->mac.type) {
5056 	case e1000_i210:
5057 	case e1000_i211:
5058 	case e1000_i350:
5059 		/* VLAN filtering needed for VLAN prio filter */
5060 		if (adapter->netdev->features & NETIF_F_NTUPLE)
5061 			break;
5062 		fallthrough;
5063 	case e1000_82576:
5064 	case e1000_82580:
5065 	case e1000_i354:
5066 		/* VLAN filtering needed for pool filtering */
5067 		if (adapter->vfs_allocated_count)
5068 			break;
5069 		fallthrough;
5070 	default:
5071 		return 1;
5072 	}
5073 
5074 	/* We are already in VLAN promisc, nothing to do */
5075 	if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
5076 		return 0;
5077 
5078 	if (!adapter->vfs_allocated_count)
5079 		goto set_vfta;
5080 
5081 	/* Add PF to all active pools */
5082 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5083 
5084 	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5085 		u32 vlvf = rd32(E1000_VLVF(i));
5086 
5087 		vlvf |= BIT(pf_id);
5088 		wr32(E1000_VLVF(i), vlvf);
5089 	}
5090 
5091 set_vfta:
5092 	/* Set all bits in the VLAN filter table array */
5093 	for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
5094 		hw->mac.ops.write_vfta(hw, i, ~0U);
5095 
5096 	/* Set flag so we don't redo unnecessary work */
5097 	adapter->flags |= IGB_FLAG_VLAN_PROMISC;
5098 
5099 	return 0;
5100 }
5101 
5102 #define VFTA_BLOCK_SIZE 8
5103 static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
5104 {
5105 	struct e1000_hw *hw = &adapter->hw;
5106 	u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
5107 	u32 vid_start = vfta_offset * 32;
5108 	u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
5109 	u32 i, vid, word, bits, pf_id;
5110 
5111 	/* guarantee that we don't scrub out management VLAN */
5112 	vid = adapter->mng_vlan_id;
5113 	if (vid >= vid_start && vid < vid_end)
5114 		vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5115 
5116 	if (!adapter->vfs_allocated_count)
5117 		goto set_vfta;
5118 
5119 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5120 
5121 	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5122 		u32 vlvf = rd32(E1000_VLVF(i));
5123 
5124 		/* pull VLAN ID from VLVF */
5125 		vid = vlvf & VLAN_VID_MASK;
5126 
5127 		/* only concern ourselves with a certain range */
5128 		if (vid < vid_start || vid >= vid_end)
5129 			continue;
5130 
5131 		if (vlvf & E1000_VLVF_VLANID_ENABLE) {
5132 			/* record VLAN ID in VFTA */
5133 			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5134 
5135 			/* if PF is part of this then continue */
5136 			if (test_bit(vid, adapter->active_vlans))
5137 				continue;
5138 		}
5139 
5140 		/* remove PF from the pool */
5141 		bits = ~BIT(pf_id);
5142 		bits &= rd32(E1000_VLVF(i));
5143 		wr32(E1000_VLVF(i), bits);
5144 	}
5145 
5146 set_vfta:
5147 	/* extract values from active_vlans and write back to VFTA */
5148 	for (i = VFTA_BLOCK_SIZE; i--;) {
5149 		vid = (vfta_offset + i) * 32;
5150 		word = vid / BITS_PER_LONG;
5151 		bits = vid % BITS_PER_LONG;
5152 
5153 		vfta[i] |= adapter->active_vlans[word] >> bits;
5154 
5155 		hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
5156 	}
5157 }
5158 
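/* [Editor's note] Illustrative sketch, not part of the driver: each 32-bit
 * VFTA word covers 32 consecutive VLAN IDs, which is why the scrub above
 * indexes with (vid - vid_start) / 32 and sets BIT(vid % 32). For example:
 *
 *	vid = 100  ->  word 100 / 32 = 3, bit 100 % 32 = 4
 *
 * so VLAN 100 lives in bit 4 of VFTA register 3, and one block of
 * VFTA_BLOCK_SIZE (8) words rewrites 256 VLAN IDs at a time.
 */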
5159 static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
5160 {
5161 	u32 i;
5162 
5163 	/* We are not in VLAN promisc, nothing to do */
5164 	if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
5165 		return;
5166 
5167 	/* Set flag so we don't redo unnecessary work */
5168 	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
5169 
5170 	for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
5171 		igb_scrub_vfta(adapter, i);
5172 }
5173 
5174 /**
5175  *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
5176  *  @netdev: network interface device structure
5177  *
5178  *  The set_rx_mode entry point is called whenever the unicast or multicast
5179  *  address lists or the network interface flags are updated.  This routine is
5180  *  responsible for configuring the hardware for proper unicast, multicast,
5181  *  promiscuous mode, and all-multi behavior.
5182  **/
5183 static void igb_set_rx_mode(struct net_device *netdev)
5184 {
5185 	struct igb_adapter *adapter = netdev_priv(netdev);
5186 	struct e1000_hw *hw = &adapter->hw;
5187 	unsigned int vfn = adapter->vfs_allocated_count;
5188 	u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5189 	int count;
5190 
5191 	/* Check for Promiscuous and All Multicast modes */
5192 	if (netdev->flags & IFF_PROMISC) {
5193 		rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5194 		vmolr |= E1000_VMOLR_MPME;
5195 
5196 		/* enable use of UTA filter to force packets to default pool */
5197 		if (hw->mac.type == e1000_82576)
5198 			vmolr |= E1000_VMOLR_ROPE;
5199 	} else {
5200 		if (netdev->flags & IFF_ALLMULTI) {
5201 			rctl |= E1000_RCTL_MPE;
5202 			vmolr |= E1000_VMOLR_MPME;
5203 		} else {
5204 			/* Write addresses to the MTA; if the attempt fails
5205 			 * then we should just turn on promiscuous mode so
5206 			 * that we can at least receive multicast traffic
5207 			 */
5208 			count = igb_write_mc_addr_list(netdev);
5209 			if (count < 0) {
5210 				rctl |= E1000_RCTL_MPE;
5211 				vmolr |= E1000_VMOLR_MPME;
5212 			} else if (count) {
5213 				vmolr |= E1000_VMOLR_ROMPE;
5214 			}
5215 		}
5216 	}
5217 
5218 	/* Write addresses to available RAR registers; if there is not
5219 	 * sufficient space to store all the addresses then enable
5220 	 * unicast promiscuous mode
5221 	 */
5222 	if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5223 		rctl |= E1000_RCTL_UPE;
5224 		vmolr |= E1000_VMOLR_ROPE;
5225 	}
5226 
5227 	/* enable VLAN filtering by default */
5228 	rctl |= E1000_RCTL_VFE;
5229 
5230 	/* disable VLAN filtering for modes that require it */
5231 	if ((netdev->flags & IFF_PROMISC) ||
5232 	    (netdev->features & NETIF_F_RXALL)) {
5233 		/* if we fail to set all rules then just clear VFE */
5234 		if (igb_vlan_promisc_enable(adapter))
5235 			rctl &= ~E1000_RCTL_VFE;
5236 	} else {
5237 		igb_vlan_promisc_disable(adapter);
5238 	}
5239 
5240 	/* update state of unicast, multicast, and VLAN filtering modes */
5241 	rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5242 				     E1000_RCTL_VFE);
5243 	wr32(E1000_RCTL, rctl);
5244 
5245 #if (PAGE_SIZE < 8192)
5246 	if (!adapter->vfs_allocated_count) {
5247 		if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5248 			rlpml = IGB_MAX_FRAME_BUILD_SKB;
5249 	}
5250 #endif
5251 	wr32(E1000_RLPML, rlpml);
5252 
5253 	/* In order to support SR-IOV and eventually VMDq it is necessary to set
5254 	 * the VMOLR to enable the appropriate modes.  Without this workaround
5255 	 * we will have issues with VLAN tag stripping not being done for frames
5256 	 * that are only arriving because we are the default pool
5257 	 */
5258 	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5259 		return;
5260 
5261 	/* set UTA to appropriate mode */
5262 	igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5263 
5264 	vmolr |= rd32(E1000_VMOLR(vfn)) &
5265 		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5266 
5267 	/* enable Rx jumbo frames, restrict as needed to support build_skb */
5268 	vmolr &= ~E1000_VMOLR_RLPML_MASK;
5269 #if (PAGE_SIZE < 8192)
5270 	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5271 		vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5272 	else
5273 #endif
5274 		vmolr |= MAX_JUMBO_FRAME_SIZE;
5275 	vmolr |= E1000_VMOLR_LPE;
5276 
5277 	wr32(E1000_VMOLR(vfn), vmolr);
5278 
5279 	igb_restore_vf_multicasts(adapter);
5280 }
5281 
5282 static void igb_check_wvbr(struct igb_adapter *adapter)
5283 {
5284 	struct e1000_hw *hw = &adapter->hw;
5285 	u32 wvbr = 0;
5286 
5287 	switch (hw->mac.type) {
5288 	case e1000_82576:
5289 	case e1000_i350:
5290 		wvbr = rd32(E1000_WVBR);
5291 		if (!wvbr)
5292 			return;
5293 		break;
5294 	default:
5295 		break;
5296 	}
5297 
5298 	adapter->wvbr |= wvbr;
5299 }
5300 
5301 #define IGB_STAGGERED_QUEUE_OFFSET 8
5302 
5303 static void igb_spoof_check(struct igb_adapter *adapter)
5304 {
5305 	int j;
5306 
5307 	if (!adapter->wvbr)
5308 		return;
5309 
5310 	for (j = 0; j < adapter->vfs_allocated_count; j++) {
5311 		if (adapter->wvbr & BIT(j) ||
5312 		    adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5313 			dev_warn(&adapter->pdev->dev,
5314 				"Spoof event(s) detected on VF %d\n", j);
5315 			adapter->wvbr &=
5316 				~(BIT(j) |
5317 				  BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5318 		}
5319 	}
5320 }
5321 
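/* [Editor's note] Illustrative sketch, not part of the driver: the loop above
 * treats bits j and j + IGB_STAGGERED_QUEUE_OFFSET of WVBR as both belonging
 * to VF j. For VF 2 the combined mask tested and cleared is:
 *
 *	BIT(2) | BIT(2 + 8)  ==  0x00000404
 *
 * so a spoof event recorded in either bit position is warned about (and
 * acknowledged) exactly once per watchdog pass.
 */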
5322 /* Need to wait a few seconds after link up to get diagnostic information from
5323  * the phy
5324  */
5325 static void igb_update_phy_info(struct timer_list *t)
5326 {
5327 	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5328 	igb_get_phy_info(&adapter->hw);
5329 }
5330 
5331 /**
5332  *  igb_has_link - check shared code for link and determine up/down
5333  *  @adapter: pointer to driver private info
5334  **/
5335 bool igb_has_link(struct igb_adapter *adapter)
5336 {
5337 	struct e1000_hw *hw = &adapter->hw;
5338 	bool link_active = false;
5339 
5340 	/* get_link_status is set on LSC (link status) interrupt or
5341 	 * rx sequence error interrupt.  get_link_status will stay
5342 	 * set until the e1000_check_for_link establishes link
5343 	 * for copper adapters ONLY
5344 	 */
5345 	switch (hw->phy.media_type) {
5346 	case e1000_media_type_copper:
5347 		if (!hw->mac.get_link_status)
5348 			return true;
5349 		fallthrough;
5350 	case e1000_media_type_internal_serdes:
5351 		hw->mac.ops.check_for_link(hw);
5352 		link_active = !hw->mac.get_link_status;
5353 		break;
5354 	default:
5355 	case e1000_media_type_unknown:
5356 		break;
5357 	}
5358 
5359 	if (((hw->mac.type == e1000_i210) ||
5360 	     (hw->mac.type == e1000_i211)) &&
5361 	     (hw->phy.id == I210_I_PHY_ID)) {
5362 		if (!netif_carrier_ok(adapter->netdev)) {
5363 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5364 		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5365 			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5366 			adapter->link_check_timeout = jiffies;
5367 		}
5368 	}
5369 
5370 	return link_active;
5371 }
5372 
5373 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5374 {
5375 	bool ret = false;
5376 	u32 ctrl_ext, thstat;
5377 
5378 	/* check for thermal sensor event on i350 copper only */
5379 	if (hw->mac.type == e1000_i350) {
5380 		thstat = rd32(E1000_THSTAT);
5381 		ctrl_ext = rd32(E1000_CTRL_EXT);
5382 
5383 		if ((hw->phy.media_type == e1000_media_type_copper) &&
5384 		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5385 			ret = !!(thstat & event);
5386 	}
5387 
5388 	return ret;
5389 }
5390 
5391 /**
5392  *  igb_check_lvmmc - check for malformed packets received
5393  *  and indicated in LVMMC register
5394  *  @adapter: pointer to adapter
5395  **/
5396 static void igb_check_lvmmc(struct igb_adapter *adapter)
5397 {
5398 	struct e1000_hw *hw = &adapter->hw;
5399 	u32 lvmmc;
5400 
5401 	lvmmc = rd32(E1000_LVMMC);
5402 	if (lvmmc) {
5403 		if (unlikely(net_ratelimit())) {
5404 			netdev_warn(adapter->netdev,
5405 				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5406 				    lvmmc);
5407 		}
5408 	}
5409 }
5410 
5411 /**
5412  *  igb_watchdog - Timer Call-back
5413  *  @t: pointer to timer_list containing our private info pointer
5414  **/
5415 static void igb_watchdog(struct timer_list *t)
5416 {
5417 	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5418 	/* Do the rest outside of interrupt context */
5419 	schedule_work(&adapter->watchdog_task);
5420 }
5421 
5422 static void igb_watchdog_task(struct work_struct *work)
5423 {
5424 	struct igb_adapter *adapter = container_of(work,
5425 						   struct igb_adapter,
5426 						   watchdog_task);
5427 	struct e1000_hw *hw = &adapter->hw;
5428 	struct e1000_phy_info *phy = &hw->phy;
5429 	struct net_device *netdev = adapter->netdev;
5430 	u32 link;
5431 	int i;
5432 	u32 connsw;
5433 	u16 phy_data, retry_count = 20;
5434 
5435 	link = igb_has_link(adapter);
5436 
5437 	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5438 		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5439 			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5440 		else
5441 			link = false;
5442 	}
5443 
5444 	/* Force link down if we have fiber to swap to */
5445 	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5446 		if (hw->phy.media_type == e1000_media_type_copper) {
5447 			connsw = rd32(E1000_CONNSW);
5448 			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5449 				link = 0;
5450 		}
5451 	}
5452 	if (link) {
5453 		/* Perform a reset if the media type changed. */
5454 		if (hw->dev_spec._82575.media_changed) {
5455 			hw->dev_spec._82575.media_changed = false;
5456 			adapter->flags |= IGB_FLAG_MEDIA_RESET;
5457 			igb_reset(adapter);
5458 		}
5459 		/* Cancel scheduled suspend requests. */
5460 		pm_runtime_resume(netdev->dev.parent);
5461 
5462 		if (!netif_carrier_ok(netdev)) {
5463 			u32 ctrl;
5464 
5465 			hw->mac.ops.get_speed_and_duplex(hw,
5466 							 &adapter->link_speed,
5467 							 &adapter->link_duplex);
5468 
5469 			ctrl = rd32(E1000_CTRL);
5470 			/* Link status message must follow this format */
5471 			netdev_info(netdev,
5472 			       "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5473 			       netdev->name,
5474 			       adapter->link_speed,
5475 			       adapter->link_duplex == FULL_DUPLEX ?
5476 			       "Full" : "Half",
5477 			       (ctrl & E1000_CTRL_TFCE) &&
5478 			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5479 			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
5480 			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
5481 
5482 			/* disable EEE if enabled */
5483 			if ((adapter->flags & IGB_FLAG_EEE) &&
5484 				(adapter->link_duplex == HALF_DUPLEX)) {
5485 				dev_info(&adapter->pdev->dev,
5486 				"EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5487 				adapter->hw.dev_spec._82575.eee_disable = true;
5488 				adapter->flags &= ~IGB_FLAG_EEE;
5489 			}
5490 
5491 			/* check if SmartSpeed worked */
5492 			igb_check_downshift(hw);
5493 			if (phy->speed_downgraded)
5494 				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5495 
5496 			/* check for thermal sensor event */
5497 			if (igb_thermal_sensor_event(hw,
5498 			    E1000_THSTAT_LINK_THROTTLE))
5499 				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5500 
5501 			/* adjust timeout factor according to speed/duplex */
5502 			adapter->tx_timeout_factor = 1;
5503 			switch (adapter->link_speed) {
5504 			case SPEED_10:
5505 				adapter->tx_timeout_factor = 14;
5506 				break;
5507 			case SPEED_100:
5508 				/* maybe add some timeout factor ? */
5509 				break;
5510 			}
5511 
5512 			if (adapter->link_speed != SPEED_1000 ||
5513 			    !hw->phy.ops.read_reg)
5514 				goto no_wait;
5515 
5516 			/* wait for Remote receiver status OK */
5517 retry_read_status:
5518 			if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5519 					      &phy_data)) {
5520 				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5521 				    retry_count) {
5522 					msleep(100);
5523 					retry_count--;
5524 					goto retry_read_status;
5525 				} else if (!retry_count) {
5526 					dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote receiver status\n");
5527 				}
5528 			} else {
5529 				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status Reg\n");
5530 			}
5531 no_wait:
5532 			netif_carrier_on(netdev);
5533 
5534 			igb_ping_all_vfs(adapter);
5535 			igb_check_vf_rate_limit(adapter);
5536 
5537 			/* link state has changed, schedule phy info update */
5538 			if (!test_bit(__IGB_DOWN, &adapter->state))
5539 				mod_timer(&adapter->phy_info_timer,
5540 					  round_jiffies(jiffies + 2 * HZ));
5541 		}
5542 	} else {
5543 		if (netif_carrier_ok(netdev)) {
5544 			adapter->link_speed = 0;
5545 			adapter->link_duplex = 0;
5546 
5547 			/* check for thermal sensor event */
5548 			if (igb_thermal_sensor_event(hw,
5549 			    E1000_THSTAT_PWR_DOWN)) {
5550 				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5551 			}
5552 
5553 			/* Link status message must follow this format */
5554 			netdev_info(netdev, "igb: %s NIC Link is Down\n",
5555 			       netdev->name);
5556 			netif_carrier_off(netdev);
5557 
5558 			igb_ping_all_vfs(adapter);
5559 
5560 			/* link state has changed, schedule phy info update */
5561 			if (!test_bit(__IGB_DOWN, &adapter->state))
5562 				mod_timer(&adapter->phy_info_timer,
5563 					  round_jiffies(jiffies + 2 * HZ));
5564 
5565 			/* link is down, time to check for alternate media */
5566 			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5567 				igb_check_swap_media(adapter);
5568 				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5569 					schedule_work(&adapter->reset_task);
5570 					/* return immediately */
5571 					return;
5572 				}
5573 			}
5574 			pm_schedule_suspend(netdev->dev.parent,
5575 					    MSEC_PER_SEC * 5);
5576 
5577 		/* also check for alternate media here */
5578 		} else if (!netif_carrier_ok(netdev) &&
5579 			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5580 			igb_check_swap_media(adapter);
5581 			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5582 				schedule_work(&adapter->reset_task);
5583 				/* return immediately */
5584 				return;
5585 			}
5586 		}
5587 	}
5588 
5589 	spin_lock(&adapter->stats64_lock);
5590 	igb_update_stats(adapter);
5591 	spin_unlock(&adapter->stats64_lock);
5592 
5593 	for (i = 0; i < adapter->num_tx_queues; i++) {
5594 		struct igb_ring *tx_ring = adapter->tx_ring[i];
5595 		if (!netif_carrier_ok(netdev)) {
5596 			/* We've lost link, so the controller stops DMA,
5597 			 * but we've got queued Tx work that's never going
5598 			 * to get done, so reset controller to flush Tx.
5599 			 * (Do the reset outside of interrupt context).
5600 			 */
5601 			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5602 				adapter->tx_timeout_count++;
5603 				schedule_work(&adapter->reset_task);
5604 				/* return immediately since reset is imminent */
5605 				return;
5606 			}
5607 		}
5608 
5609 		/* Force detection of hung controller every watchdog period */
5610 		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5611 	}
5612 
5613 	/* Cause software interrupt to ensure Rx ring is cleaned */
5614 	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5615 		u32 eics = 0;
5616 
5617 		for (i = 0; i < adapter->num_q_vectors; i++)
5618 			eics |= adapter->q_vector[i]->eims_value;
5619 		wr32(E1000_EICS, eics);
5620 	} else {
5621 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
5622 	}
5623 
5624 	igb_spoof_check(adapter);
5625 	igb_ptp_rx_hang(adapter);
5626 	igb_ptp_tx_hang(adapter);
5627 
5628 	/* Check LVMMC register on i350/i354 only */
5629 	if ((adapter->hw.mac.type == e1000_i350) ||
5630 	    (adapter->hw.mac.type == e1000_i354))
5631 		igb_check_lvmmc(adapter);
5632 
5633 	/* Reset the timer */
5634 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
5635 		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5636 			mod_timer(&adapter->watchdog_timer,
5637 				  round_jiffies(jiffies +  HZ));
5638 		else
5639 			mod_timer(&adapter->watchdog_timer,
5640 				  round_jiffies(jiffies + 2 * HZ));
5641 	}
5642 }
5643 
5644 enum latency_range {
5645 	lowest_latency = 0,
5646 	low_latency = 1,
5647 	bulk_latency = 2,
5648 	latency_invalid = 255
5649 };
5650 
5651 /**
5652  *  igb_update_ring_itr - update the dynamic ITR value based on packet size
5653  *  @q_vector: pointer to q_vector
5654  *
5655  *  Stores a new ITR value based on strictly on packet size.  This
5656  *  algorithm is less sophisticated than that used in igb_update_itr,
5657  *  due to the difficulty of synchronizing statistics across multiple
5658  *  receive rings.  The divisors and thresholds used by this function
5659  *  were determined based on theoretical maximum wire speed and testing
5660  *  data, in order to minimize response time while increasing bulk
5661  *  throughput.
5662  *  This functionality is controlled by ethtool's coalescing settings.
5663  *  NOTE:  This function is called only when operating in a multiqueue
5664  *         receive environment.
5665  **/
5666 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5667 {
5668 	int new_val = q_vector->itr_val;
5669 	int avg_wire_size = 0;
5670 	struct igb_adapter *adapter = q_vector->adapter;
5671 	unsigned int packets;
5672 
5673 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
5674 	 * ints/sec - ITR timer value of 120 ticks.
5675 	 */
5676 	if (adapter->link_speed != SPEED_1000) {
5677 		new_val = IGB_4K_ITR;
5678 		goto set_itr_val;
5679 	}
5680 
5681 	packets = q_vector->rx.total_packets;
5682 	if (packets)
5683 		avg_wire_size = q_vector->rx.total_bytes / packets;
5684 
5685 	packets = q_vector->tx.total_packets;
5686 	if (packets)
5687 		avg_wire_size = max_t(u32, avg_wire_size,
5688 				      q_vector->tx.total_bytes / packets);
5689 
5690 	/* if avg_wire_size isn't set no work was done */
5691 	if (!avg_wire_size)
5692 		goto clear_counts;
5693 
5694 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5695 	avg_wire_size += 24;
5696 
5697 	/* Don't starve jumbo frames */
5698 	avg_wire_size = min(avg_wire_size, 3000);
5699 
5700 	/* Give a little boost to mid-size frames */
5701 	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5702 		new_val = avg_wire_size / 3;
5703 	else
5704 		new_val = avg_wire_size / 2;
5705 
5706 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
5707 	if (new_val < IGB_20K_ITR &&
5708 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5709 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5710 		new_val = IGB_20K_ITR;
5711 
5712 set_itr_val:
5713 	if (new_val != q_vector->itr_val) {
5714 		q_vector->itr_val = new_val;
5715 		q_vector->set_itr = 1;
5716 	}
5717 clear_counts:
5718 	q_vector->rx.total_bytes = 0;
5719 	q_vector->rx.total_packets = 0;
5720 	q_vector->tx.total_bytes = 0;
5721 	q_vector->tx.total_packets = 0;
5722 }
5723 
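/* [Editor's note] Worked example (editor's addition): with 1000 Rx packets
 * totalling 600000 bytes and an idle Tx side, avg_wire_size = 600; adding
 * the 24 bytes of CRC/preamble/gap overhead gives 624. That lands in the
 * mid-size boost range (300..1200), so new_val = 624 / 3 = 208. Assuming
 * the igb.h value IGB_20K_ITR = 196, 208 is above the conservative-mode
 * floor and is written as the new ITR on the next interrupt.
 */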
5724 /**
5725  *  igb_update_itr - update the dynamic ITR value based on statistics
5726  *  @q_vector: pointer to q_vector
5727  *  @ring_container: ring info to update the itr for
5728  *
5729  *  Stores a new ITR value based on packets and byte
5730  *  counts during the last interrupt.  The advantage of per interrupt
5731  *  computation is faster updates and more accurate ITR for the current
5732  *  traffic pattern.  Constants in this function were computed
5733  *  based on theoretical maximum wire speed and thresholds were set based
5734  *  on testing data as well as attempting to minimize response time
5735  *  while increasing bulk throughput.
5736  *  This functionality is controlled by ethtool's coalescing settings.
5737  *  NOTE:  These calculations are only valid when operating in a single-
5738  *         queue environment.
5739  **/
5740 static void igb_update_itr(struct igb_q_vector *q_vector,
5741 			   struct igb_ring_container *ring_container)
5742 {
5743 	unsigned int packets = ring_container->total_packets;
5744 	unsigned int bytes = ring_container->total_bytes;
5745 	u8 itrval = ring_container->itr;
5746 
5747 	/* no packets, exit with status unchanged */
5748 	if (packets == 0)
5749 		return;
5750 
5751 	switch (itrval) {
5752 	case lowest_latency:
5753 		/* handle TSO and jumbo frames */
5754 		if (bytes/packets > 8000)
5755 			itrval = bulk_latency;
5756 		else if ((packets < 5) && (bytes > 512))
5757 			itrval = low_latency;
5758 		break;
5759 	case low_latency:  /* 50 usec aka 20000 ints/s */
5760 		if (bytes > 10000) {
5761 			/* this if handles the TSO accounting */
5762 			if (bytes/packets > 8000)
5763 				itrval = bulk_latency;
5764 			else if ((packets < 10) || ((bytes/packets) > 1200))
5765 				itrval = bulk_latency;
5766 			else if ((packets > 35))
5767 				itrval = lowest_latency;
5768 		} else if (bytes/packets > 2000) {
5769 			itrval = bulk_latency;
5770 		} else if (packets <= 2 && bytes < 512) {
5771 			itrval = lowest_latency;
5772 		}
5773 		break;
5774 	case bulk_latency: /* 250 usec aka 4000 ints/s */
5775 		if (bytes > 25000) {
5776 			if (packets > 35)
5777 				itrval = low_latency;
5778 		} else if (bytes < 1500) {
5779 			itrval = low_latency;
5780 		}
5781 		break;
5782 	}
5783 
5784 	/* clear work counters since we have the values we need */
5785 	ring_container->total_bytes = 0;
5786 	ring_container->total_packets = 0;
5787 
5788 	/* write updated itr to ring container */
5789 	ring_container->itr = itrval;
5790 }
5791 
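/* [Editor's note] Worked example (editor's addition): suppose a container in
 * the low_latency state saw 40 packets / 48000 bytes since the last update.
 * bytes > 10000 and bytes/packets = 1200, which is neither > 8000 (the TSO
 * case) nor > 1200 (the bulk case); packets > 35, so itrval moves up to
 * lowest_latency. Had the same 48000 bytes arrived in only 10 packets,
 * bytes/packets = 4800 > 1200 and the container would drop to bulk_latency
 * instead.
 */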
5792 static void igb_set_itr(struct igb_q_vector *q_vector)
5793 {
5794 	struct igb_adapter *adapter = q_vector->adapter;
5795 	u32 new_itr = q_vector->itr_val;
5796 	u8 current_itr = 0;
5797 
5798 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
5799 	if (adapter->link_speed != SPEED_1000) {
5800 		current_itr = 0;
5801 		new_itr = IGB_4K_ITR;
5802 		goto set_itr_now;
5803 	}
5804 
5805 	igb_update_itr(q_vector, &q_vector->tx);
5806 	igb_update_itr(q_vector, &q_vector->rx);
5807 
5808 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5809 
5810 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
5811 	if (current_itr == lowest_latency &&
5812 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5813 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5814 		current_itr = low_latency;
5815 
5816 	switch (current_itr) {
5817 	/* counts and packets in update_itr are dependent on these numbers */
5818 	case lowest_latency:
5819 		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
5820 		break;
5821 	case low_latency:
5822 		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
5823 		break;
5824 	case bulk_latency:
5825 		new_itr = IGB_4K_ITR;  /* 4,000 ints/sec */
5826 		break;
5827 	default:
5828 		break;
5829 	}
5830 
5831 set_itr_now:
5832 	if (new_itr != q_vector->itr_val) {
5833 		/* this attempts to bias the interrupt rate towards Bulk
5834 		 * by adding intermediate steps when interrupt rate is
5835 		 * increasing
5836 		 */
5837 		new_itr = new_itr > q_vector->itr_val ?
5838 			  max((new_itr * q_vector->itr_val) /
5839 			  (new_itr + (q_vector->itr_val >> 2)),
5840 			  new_itr) : new_itr;
5841 		/* Don't write the value here; it resets the adapter's
5842 		 * internal timer, and causes us to delay far longer than
5843 		 * we should between interrupts.  Instead, we write the ITR
5844 		 * value at the beginning of the next interrupt so the timing
5845 		 * ends up being correct.
5846 		 */
5847 		q_vector->itr_val = new_itr;
5848 		q_vector->set_itr = 1;
5849 	}
5850 }
5851 
5852 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5853 			    struct igb_tx_buffer *first,
5854 			    u32 vlan_macip_lens, u32 type_tucmd,
5855 			    u32 mss_l4len_idx)
5856 {
5857 	struct e1000_adv_tx_context_desc *context_desc;
5858 	u16 i = tx_ring->next_to_use;
5859 	struct timespec64 ts;
5860 
5861 	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5862 
5863 	i++;
5864 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5865 
5866 	/* set bits to identify this as an advanced context descriptor */
5867 	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5868 
5869 	/* For 82575, context index must be unique per ring. */
5870 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5871 		mss_l4len_idx |= tx_ring->reg_idx << 4;
5872 
5873 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
5874 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
5875 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
5876 
5877 	/* We assume there is always a valid tx time available. Invalid times
5878 	 * should have been handled by the upper layers.
5879 	 */
5880 	if (tx_ring->launchtime_enable) {
5881 		ts = ktime_to_timespec64(first->skb->tstamp);
5882 		first->skb->tstamp = ktime_set(0, 0);
5883 		context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5884 	} else {
5885 		context_desc->seqnum_seed = 0;
5886 	}
5887 }
5888 
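/* [Editor's note] Worked example (editor's addition): the launch-time path
 * above keeps only the nanosecond part of the requested transmit time and
 * scales it by 32, matching what appears to be the 32 ns granularity of the
 * i210 LaunchTime field. A tstamp of 1.000000640 s has ts.tv_nsec = 640, so
 *
 *	context_desc->seqnum_seed = cpu_to_le32(640 / 32);	// 20 ticks
 *
 * i.e. 20 units of 32 ns past the second boundary.
 */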
5889 static int igb_tso(struct igb_ring *tx_ring,
5890 		   struct igb_tx_buffer *first,
5891 		   u8 *hdr_len)
5892 {
5893 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5894 	struct sk_buff *skb = first->skb;
5895 	union {
5896 		struct iphdr *v4;
5897 		struct ipv6hdr *v6;
5898 		unsigned char *hdr;
5899 	} ip;
5900 	union {
5901 		struct tcphdr *tcp;
5902 		struct udphdr *udp;
5903 		unsigned char *hdr;
5904 	} l4;
5905 	u32 paylen, l4_offset;
5906 	int err;
5907 
5908 	if (skb->ip_summed != CHECKSUM_PARTIAL)
5909 		return 0;
5910 
5911 	if (!skb_is_gso(skb))
5912 		return 0;
5913 
5914 	err = skb_cow_head(skb, 0);
5915 	if (err < 0)
5916 		return err;
5917 
5918 	ip.hdr = skb_network_header(skb);
5919 	l4.hdr = skb_checksum_start(skb);
5920 
5921 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5922 	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5923 		      E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
5924 
5925 	/* initialize outer IP header fields */
5926 	if (ip.v4->version == 4) {
5927 		unsigned char *csum_start = skb_checksum_start(skb);
5928 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5929 
5930 		/* IP header will have to cancel out any data that
5931 		 * is not a part of the outer IP header
5932 		 */
5933 		ip.v4->check = csum_fold(csum_partial(trans_start,
5934 						      csum_start - trans_start,
5935 						      0));
5936 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5937 
5938 		ip.v4->tot_len = 0;
5939 		first->tx_flags |= IGB_TX_FLAGS_TSO |
5940 				   IGB_TX_FLAGS_CSUM |
5941 				   IGB_TX_FLAGS_IPV4;
5942 	} else {
5943 		ip.v6->payload_len = 0;
5944 		first->tx_flags |= IGB_TX_FLAGS_TSO |
5945 				   IGB_TX_FLAGS_CSUM;
5946 	}
5947 
5948 	/* determine offset of inner transport header */
5949 	l4_offset = l4.hdr - skb->data;
5950 
5951 	/* remove payload length from inner checksum */
5952 	paylen = skb->len - l4_offset;
5953 	if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
5954 		/* compute length of segmentation header */
5955 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
5956 		csum_replace_by_diff(&l4.tcp->check,
5957 			(__force __wsum)htonl(paylen));
5958 	} else {
5959 		/* compute length of segmentation header */
5960 		*hdr_len = sizeof(*l4.udp) + l4_offset;
5961 		csum_replace_by_diff(&l4.udp->check,
5962 				     (__force __wsum)htonl(paylen));
5963 	}
5964 
5965 	/* update gso size and bytecount with header size */
5966 	first->gso_segs = skb_shinfo(skb)->gso_segs;
5967 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
5968 
5969 	/* MSS L4LEN IDX */
5970 	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5971 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5972 
5973 	/* VLAN MACLEN IPLEN */
5974 	vlan_macip_lens = l4.hdr - ip.hdr;
5975 	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5976 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5977 
5978 	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5979 			type_tucmd, mss_l4len_idx);
5980 
5981 	return 1;
5982 }
5983 
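/* [Editor's note] Worked example (editor's addition): for a plain IPv4/TCP
 * TSO frame with no TCP options, l4_offset = 14 (Ethernet) + 20 (IP) = 34
 * and *hdr_len = 34 + 20 (TCP) = 54, so the L4 header length is 20 bytes.
 * With gso_size (MSS) = 1460 and the usual shift values (assumed here:
 * E1000_ADVTXD_L4LEN_SHIFT = 8, E1000_ADVTXD_MSS_SHIFT = 16) the packed
 * field is:
 *
 *	mss_l4len_idx = (20 << 8) | (1460 << 16);
 */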
5984 static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5985 {
5986 	unsigned int offset = 0;
5987 
5988 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5989 
5990 	return offset == skb_checksum_start_offset(skb);
5991 }
5992 
5993 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5994 {
5995 	struct sk_buff *skb = first->skb;
5996 	u32 vlan_macip_lens = 0;
5997 	u32 type_tucmd = 0;
5998 
5999 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
6000 csum_failed:
6001 		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
6002 		    !tx_ring->launchtime_enable)
6003 			return;
6004 		goto no_csum;
6005 	}
6006 
6007 	switch (skb->csum_offset) {
6008 	case offsetof(struct tcphdr, check):
6009 		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
6010 		fallthrough;
6011 	case offsetof(struct udphdr, check):
6012 		break;
6013 	case offsetof(struct sctphdr, checksum):
6014 		/* validate that this is actually an SCTP request */
6015 		if (((first->protocol == htons(ETH_P_IP)) &&
6016 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
6017 		    ((first->protocol == htons(ETH_P_IPV6)) &&
6018 		     igb_ipv6_csum_is_sctp(skb))) {
6019 			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
6020 			break;
6021 		}
6022 		fallthrough;
6023 	default:
6024 		skb_checksum_help(skb);
6025 		goto csum_failed;
6026 	}
6027 
6028 	/* update TX checksum flag */
6029 	first->tx_flags |= IGB_TX_FLAGS_CSUM;
6030 	vlan_macip_lens = skb_checksum_start_offset(skb) -
6031 			  skb_network_offset(skb);
6032 no_csum:
6033 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
6034 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
6035 
6036 	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
6037 }
6038 
6039 #define IGB_SET_FLAG(_input, _flag, _result) \
6040 	((_flag <= _result) ? \
6041 	 ((u32)(_input & _flag) * (_result / _flag)) : \
6042 	 ((u32)(_input & _flag) / (_flag / _result)))
6043 
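/* [Editor's note] Illustrative sketch, not part of the driver: IGB_SET_FLAG
 * maps a single-bit software flag onto a single-bit hardware flag without a
 * branch. Both masks are powers of two, so the scale factor is as well and
 * the multiply or divide compiles down to a constant shift. Assuming, for
 * example, IGB_TX_FLAGS_VLAN = BIT(0) and E1000_ADVTXD_DCMD_VLE = BIT(30):
 *
 *	IGB_SET_FLAG(tx_flags, BIT(0), BIT(30))
 *		== (tx_flags & BIT(0)) * BIT(30)	// a 30-bit left shift
 *
 * which evaluates to BIT(30) when the flag is set and 0 otherwise.
 */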
6044 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6045 {
6046 	/* set type for advanced descriptor with frame checksum insertion */
6047 	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
6048 		       E1000_ADVTXD_DCMD_DEXT |
6049 		       E1000_ADVTXD_DCMD_IFCS;
6050 
6051 	/* set HW vlan bit if vlan is present */
6052 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
6053 				 (E1000_ADVTXD_DCMD_VLE));
6054 
6055 	/* set segmentation bits for TSO */
6056 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
6057 				 (E1000_ADVTXD_DCMD_TSE));
6058 
6059 	/* set timestamp bit if present */
6060 	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
6061 				 (E1000_ADVTXD_MAC_TSTAMP));
6062 
6063 	/* insert frame checksum */
6064 	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
6065 
6066 	return cmd_type;
6067 }
6068 
6069 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
6070 				 union e1000_adv_tx_desc *tx_desc,
6071 				 u32 tx_flags, unsigned int paylen)
6072 {
6073 	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
6074 
6075 	/* 82575 requires a unique index per ring */
6076 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6077 		olinfo_status |= tx_ring->reg_idx << 4;
6078 
6079 	/* insert L4 checksum */
6080 	olinfo_status |= IGB_SET_FLAG(tx_flags,
6081 				      IGB_TX_FLAGS_CSUM,
6082 				      (E1000_TXD_POPTS_TXSM << 8));
6083 
6084 	/* insert IPv4 checksum */
6085 	olinfo_status |= IGB_SET_FLAG(tx_flags,
6086 				      IGB_TX_FLAGS_IPV4,
6087 				      (E1000_TXD_POPTS_IXSM << 8));
6088 
6089 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6090 }
6091 
6092 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6093 {
6094 	struct net_device *netdev = tx_ring->netdev;
6095 
6096 	netif_stop_subqueue(netdev, tx_ring->queue_index);
6097 
6098 	/* Herbert's original patch had:
6099 	 *  smp_mb__after_netif_stop_queue();
6100 	 * but since that doesn't exist yet, just open code it.
6101 	 */
6102 	smp_mb();
6103 
6104 	/* We need to check again in case another CPU has just
6105 	 * made room available.
6106 	 */
6107 	if (igb_desc_unused(tx_ring) < size)
6108 		return -EBUSY;
6109 
6110 	/* A reprieve! */
6111 	netif_wake_subqueue(netdev, tx_ring->queue_index);
6112 
6113 	u64_stats_update_begin(&tx_ring->tx_syncp2);
6114 	tx_ring->tx_stats.restart_queue2++;
6115 	u64_stats_update_end(&tx_ring->tx_syncp2);
6116 
6117 	return 0;
6118 }
6119 
6120 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6121 {
6122 	if (igb_desc_unused(tx_ring) >= size)
6123 		return 0;
6124 	return __igb_maybe_stop_tx(tx_ring, size);
6125 }
6126 
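/* [Editor's note] Editor's sketch of the idiom used above, not part of the
 * driver: __igb_maybe_stop_tx() is the classic "stop, barrier, re-check"
 * flow-control pattern:
 *
 *	netif_stop_subqueue(netdev, idx);	// stop before giving up
 *	smp_mb();				// order stop vs. cleanup
 *	if (igb_desc_unused(tx_ring) < size)
 *		return -EBUSY;			// still full, stay stopped
 *	netif_wake_subqueue(netdev, idx);	// cleanup freed room already
 *
 * Without the barrier, Tx cleanup on another CPU could free descriptors
 * between the producer's space check and the stop, and never issue a wake,
 * leaving the queue stalled until the watchdog fires.
 */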
6127 static int igb_tx_map(struct igb_ring *tx_ring,
6128 		      struct igb_tx_buffer *first,
6129 		      const u8 hdr_len)
6130 {
6131 	struct sk_buff *skb = first->skb;
6132 	struct igb_tx_buffer *tx_buffer;
6133 	union e1000_adv_tx_desc *tx_desc;
6134 	skb_frag_t *frag;
6135 	dma_addr_t dma;
6136 	unsigned int data_len, size;
6137 	u32 tx_flags = first->tx_flags;
6138 	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
6139 	u16 i = tx_ring->next_to_use;
6140 
6141 	tx_desc = IGB_TX_DESC(tx_ring, i);
6142 
6143 	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
6144 
6145 	size = skb_headlen(skb);
6146 	data_len = skb->data_len;
6147 
6148 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6149 
6150 	tx_buffer = first;
6151 
6152 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6153 		if (dma_mapping_error(tx_ring->dev, dma))
6154 			goto dma_error;
6155 
6156 		/* record length, and DMA address */
6157 		dma_unmap_len_set(tx_buffer, len, size);
6158 		dma_unmap_addr_set(tx_buffer, dma, dma);
6159 
6160 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
6161 
6162 		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
6163 			tx_desc->read.cmd_type_len =
6164 				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
6165 
6166 			i++;
6167 			tx_desc++;
6168 			if (i == tx_ring->count) {
6169 				tx_desc = IGB_TX_DESC(tx_ring, 0);
6170 				i = 0;
6171 			}
6172 			tx_desc->read.olinfo_status = 0;
6173 
6174 			dma += IGB_MAX_DATA_PER_TXD;
6175 			size -= IGB_MAX_DATA_PER_TXD;
6176 
6177 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
6178 		}
6179 
6180 		if (likely(!data_len))
6181 			break;
6182 
6183 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6184 
6185 		i++;
6186 		tx_desc++;
6187 		if (i == tx_ring->count) {
6188 			tx_desc = IGB_TX_DESC(tx_ring, 0);
6189 			i = 0;
6190 		}
6191 		tx_desc->read.olinfo_status = 0;
6192 
6193 		size = skb_frag_size(frag);
6194 		data_len -= size;
6195 
6196 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6197 				       size, DMA_TO_DEVICE);
6198 
6199 		tx_buffer = &tx_ring->tx_buffer_info[i];
6200 	}
6201 
6202 	/* write last descriptor with RS and EOP bits */
6203 	cmd_type |= size | IGB_TXD_DCMD;
6204 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6205 
6206 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6207 
6208 	/* set the timestamp */
6209 	first->time_stamp = jiffies;
6210 
6211 	skb_tx_timestamp(skb);
6212 
6213 	/* Force memory writes to complete before letting h/w know there
6214 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
6215 	 * memory model archs, such as IA-64).
6216 	 *
6217 	 * We also need this memory barrier to make certain all of the
6218 	 * status bits have been updated before next_to_watch is written.
6219 	 */
6220 	dma_wmb();
6221 
6222 	/* set next_to_watch value indicating a packet is present */
6223 	first->next_to_watch = tx_desc;
6224 
6225 	i++;
6226 	if (i == tx_ring->count)
6227 		i = 0;
6228 
6229 	tx_ring->next_to_use = i;
6230 
6231 	/* Make sure there is space in the ring for the next send. */
6232 	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6233 
6234 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6235 		writel(i, tx_ring->tail);
6236 	}
6237 	return 0;
6238 
6239 dma_error:
6240 	dev_err(tx_ring->dev, "TX DMA map failed\n");
6241 	tx_buffer = &tx_ring->tx_buffer_info[i];
6242 
6243 	/* clear dma mappings for failed tx_buffer_info map */
6244 	while (tx_buffer != first) {
6245 		if (dma_unmap_len(tx_buffer, len))
6246 			dma_unmap_page(tx_ring->dev,
6247 				       dma_unmap_addr(tx_buffer, dma),
6248 				       dma_unmap_len(tx_buffer, len),
6249 				       DMA_TO_DEVICE);
6250 		dma_unmap_len_set(tx_buffer, len, 0);
6251 
6252 		if (i-- == 0)
6253 			i += tx_ring->count;
6254 		tx_buffer = &tx_ring->tx_buffer_info[i];
6255 	}
6256 
6257 	if (dma_unmap_len(tx_buffer, len))
6258 		dma_unmap_single(tx_ring->dev,
6259 				 dma_unmap_addr(tx_buffer, dma),
6260 				 dma_unmap_len(tx_buffer, len),
6261 				 DMA_TO_DEVICE);
6262 	dma_unmap_len_set(tx_buffer, len, 0);
6263 
6264 	dev_kfree_skb_any(tx_buffer->skb);
6265 	tx_buffer->skb = NULL;
6266 
6267 	tx_ring->next_to_use = i;
6268 
6269 	return -1;
6270 }
6271 
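/* [Editor's note] Worked example (editor's addition): igb_tx_map() splits any
 * buffer larger than IGB_MAX_DATA_PER_TXD across several descriptors.
 * Assuming the usual 32 KB limit (1 << IGB_MAX_TXD_PWR), an 80 KB fragment
 * becomes 32 KB + 32 KB + 16 KB descriptors, all pointing into one DMA
 * mapping at increasing offsets. Note the cmd_type ^ size idiom above: the
 * length bits of cmd_type are zero, so the XOR is simply a cheap OR of the
 * buffer length into the descriptor's cmd_type_len word.
 */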
6272 int igb_xmit_xdp_ring(struct igb_adapter *adapter,
6273 		      struct igb_ring *tx_ring,
6274 		      struct xdp_frame *xdpf)
6275 {
6276 	union e1000_adv_tx_desc *tx_desc;
6277 	u32 len, cmd_type, olinfo_status;
6278 	struct igb_tx_buffer *tx_buffer;
6279 	dma_addr_t dma;
6280 	u16 i;
6281 
6282 	len = xdpf->len;
6283 
6284 	if (unlikely(!igb_desc_unused(tx_ring)))
6285 		return IGB_XDP_CONSUMED;
6286 
6287 	dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
6288 	if (dma_mapping_error(tx_ring->dev, dma))
6289 		return IGB_XDP_CONSUMED;
6290 
6291 	/* record the location of the first descriptor for this packet */
6292 	tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6293 	tx_buffer->bytecount = len;
6294 	tx_buffer->gso_segs = 1;
6295 	tx_buffer->protocol = 0;
6296 
6297 	i = tx_ring->next_to_use;
6298 	tx_desc = IGB_TX_DESC(tx_ring, i);
6299 
6300 	dma_unmap_len_set(tx_buffer, len, len);
6301 	dma_unmap_addr_set(tx_buffer, dma, dma);
6302 	tx_buffer->type = IGB_TYPE_XDP;
6303 	tx_buffer->xdpf = xdpf;
6304 
6305 	tx_desc->read.buffer_addr = cpu_to_le64(dma);
6306 
6307 	/* put descriptor type bits */
6308 	cmd_type = E1000_ADVTXD_DTYP_DATA |
6309 		   E1000_ADVTXD_DCMD_DEXT |
6310 		   E1000_ADVTXD_DCMD_IFCS;
6311 	cmd_type |= len | IGB_TXD_DCMD;
6312 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6313 
6314 	olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
6315 	/* 82575 requires a unique index per ring */
6316 	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6317 		olinfo_status |= tx_ring->reg_idx << 4;
6318 
6319 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6320 
6321 	netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
6322 
6323 	/* set the timestamp */
6324 	tx_buffer->time_stamp = jiffies;
6325 
6326 	/* Avoid any potential race with xdp_xmit and cleanup */
6327 	smp_wmb();
6328 
6329 	/* set next_to_watch value indicating a packet is present */
6330 	i++;
6331 	if (i == tx_ring->count)
6332 		i = 0;
6333 
6334 	tx_buffer->next_to_watch = tx_desc;
6335 	tx_ring->next_to_use = i;
6336 
6337 	/* Make sure there is space in the ring for the next send. */
6338 	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6339 
6340 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6341 		writel(i, tx_ring->tail);
6342 
6343 	return IGB_XDP_TX;
6344 }
6345 
6346 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6347 				struct igb_ring *tx_ring)
6348 {
6349 	struct igb_tx_buffer *first;
6350 	int tso;
6351 	u32 tx_flags = 0;
6352 	unsigned short f;
6353 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
6354 	__be16 protocol = vlan_get_protocol(skb);
6355 	u8 hdr_len = 0;
6356 
6357 	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
6358 	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
6359 	 *       + 2 desc gap to keep tail from touching head,
6360 	 *       + 1 desc for context descriptor,
6361 	 * otherwise try next time
6362 	 */
6363 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6364 		count += TXD_USE_COUNT(skb_frag_size(
6365 						&skb_shinfo(skb)->frags[f]));
6366 
6367 	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6368 		/* this is a hard error */
6369 		return NETDEV_TX_BUSY;
6370 	}
6371 
6372 	/* record the location of the first descriptor for this packet */
6373 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6374 	first->type = IGB_TYPE_SKB;
6375 	first->skb = skb;
6376 	first->bytecount = skb->len;
6377 	first->gso_segs = 1;
6378 
6379 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6380 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6381 
6382 		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6383 		    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6384 					   &adapter->state)) {
6385 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6386 			tx_flags |= IGB_TX_FLAGS_TSTAMP;
6387 
6388 			adapter->ptp_tx_skb = skb_get(skb);
6389 			adapter->ptp_tx_start = jiffies;
6390 			if (adapter->hw.mac.type == e1000_82576)
6391 				schedule_work(&adapter->ptp_tx_work);
6392 		} else {
6393 			adapter->tx_hwtstamp_skipped++;
6394 		}
6395 	}
6396 
6397 	if (skb_vlan_tag_present(skb)) {
6398 		tx_flags |= IGB_TX_FLAGS_VLAN;
6399 		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6400 	}
6401 
6402 	/* record initial flags and protocol */
6403 	first->tx_flags = tx_flags;
6404 	first->protocol = protocol;
6405 
6406 	tso = igb_tso(tx_ring, first, &hdr_len);
6407 	if (tso < 0)
6408 		goto out_drop;
6409 	else if (!tso)
6410 		igb_tx_csum(tx_ring, first);
6411 
6412 	if (igb_tx_map(tx_ring, first, hdr_len))
6413 		goto cleanup_tx_tstamp;
6414 
6415 	return NETDEV_TX_OK;
6416 
6417 out_drop:
6418 	dev_kfree_skb_any(first->skb);
6419 	first->skb = NULL;
6420 cleanup_tx_tstamp:
6421 	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6422 		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6423 
6424 		dev_kfree_skb_any(adapter->ptp_tx_skb);
6425 		adapter->ptp_tx_skb = NULL;
6426 		if (adapter->hw.mac.type == e1000_82576)
6427 			cancel_work_sync(&adapter->ptp_tx_work);
6428 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6429 	}
6430 
6431 	return NETDEV_TX_OK;
6432 }
6433 
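/* [Editor's note] Worked example (editor's addition): for an skb with a
 * 1500-byte linear head and three small page fragments, TXD_USE_COUNT()
 * charges one descriptor each, so count = 4. The
 * igb_maybe_stop_tx(tx_ring, count + 3) check above therefore reserves
 * 7 slots: 4 data descriptors, 1 for a possible context descriptor, and a
 * 2-descriptor gap to keep tail from touching head.
 */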
6434 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6435 						    struct sk_buff *skb)
6436 {
6437 	unsigned int r_idx = skb->queue_mapping;
6438 
6439 	if (r_idx >= adapter->num_tx_queues)
6440 		r_idx = r_idx % adapter->num_tx_queues;
6441 
6442 	return adapter->tx_ring[r_idx];
6443 }
6444 
6445 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6446 				  struct net_device *netdev)
6447 {
6448 	struct igb_adapter *adapter = netdev_priv(netdev);
6449 
6450 	/* The minimum packet size with TCTL.PSP set is 17 bytes, so pad the skb
6451 	 * in order to meet this minimum size requirement.
6452 	 */
6453 	if (skb_put_padto(skb, 17))
6454 		return NETDEV_TX_OK;
6455 
6456 	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6457 }
6458 
6459 /**
6460  *  igb_tx_timeout - Respond to a Tx Hang
6461  *  @netdev: network interface device structure
6462  *  @txqueue: number of the Tx queue that hung (unused)
6463  **/
6464 static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6465 {
6466 	struct igb_adapter *adapter = netdev_priv(netdev);
6467 	struct e1000_hw *hw = &adapter->hw;
6468 
6469 	/* Do the reset outside of interrupt context */
6470 	adapter->tx_timeout_count++;
6471 
6472 	if (hw->mac.type >= e1000_82580)
6473 		hw->dev_spec._82575.global_device_reset = true;
6474 
6475 	schedule_work(&adapter->reset_task);
6476 	wr32(E1000_EICS,
6477 	     (adapter->eims_enable_mask & ~adapter->eims_other));
6478 }
6479 
6480 static void igb_reset_task(struct work_struct *work)
6481 {
6482 	struct igb_adapter *adapter;
6483 	adapter = container_of(work, struct igb_adapter, reset_task);
6484 
6485 	rtnl_lock();
6486 	/* If we're already down or resetting, just bail */
6487 	if (test_bit(__IGB_DOWN, &adapter->state) ||
6488 	    test_bit(__IGB_RESETTING, &adapter->state)) {
6489 		rtnl_unlock();
6490 		return;
6491 	}
6492 
6493 	igb_dump(adapter);
6494 	netdev_err(adapter->netdev, "Reset adapter\n");
6495 	igb_reinit_locked(adapter);
6496 	rtnl_unlock();
6497 }
6498 
6499 /**
6500  *  igb_get_stats64 - Get System Network Statistics
6501  *  @netdev: network interface device structure
6502  *  @stats: rtnl_link_stats64 pointer
6503  **/
6504 static void igb_get_stats64(struct net_device *netdev,
6505 			    struct rtnl_link_stats64 *stats)
6506 {
6507 	struct igb_adapter *adapter = netdev_priv(netdev);
6508 
6509 	spin_lock(&adapter->stats64_lock);
6510 	igb_update_stats(adapter);
6511 	memcpy(stats, &adapter->stats64, sizeof(*stats));
6512 	spin_unlock(&adapter->stats64_lock);
6513 }
6514 
6515 /**
6516  *  igb_change_mtu - Change the Maximum Transfer Unit
6517  *  @netdev: network interface device structure
6518  *  @new_mtu: new value for maximum frame size
6519  *
6520  *  Returns 0 on success, negative on failure
6521  **/
6522 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6523 {
6524 	struct igb_adapter *adapter = netdev_priv(netdev);
6525 	int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
6526 
6527 	if (adapter->xdp_prog) {
6528 		int i;
6529 
6530 		for (i = 0; i < adapter->num_rx_queues; i++) {
6531 			struct igb_ring *ring = adapter->rx_ring[i];
6532 
6533 			if (max_frame > igb_rx_bufsz(ring)) {
6534 				netdev_warn(adapter->netdev,
6535 					    "Requested MTU size is not supported with XDP. Max frame size is %d\n",
6536 					    max_frame);
6537 				return -EINVAL;
6538 			}
6539 		}
6540 	}
6541 
6542 	/* adjust max frame to be at least the size of a standard frame */
6543 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6544 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6545 
6546 	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6547 		usleep_range(1000, 2000);
6548 
6549 	/* igb_down has a dependency on max_frame_size */
6550 	adapter->max_frame_size = max_frame;
6551 
6552 	if (netif_running(netdev))
6553 		igb_down(adapter);
6554 
6555 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
6556 		   netdev->mtu, new_mtu);
6557 	netdev->mtu = new_mtu;
6558 
6559 	if (netif_running(netdev))
6560 		igb_up(adapter);
6561 	else
6562 		igb_reset(adapter);
6563 
6564 	clear_bit(__IGB_RESETTING, &adapter->state);
6565 
6566 	return 0;
6567 }
6568 
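/* [Editor's note] Worked example (editor's addition), assuming igb.h defines
 * IGB_ETH_PKT_HDR_PAD as ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2), i.e.
 * 14 + 4 + 8 = 26 bytes: a requested MTU of 1500 gives
 *
 *	max_frame = 1500 + 26 = 1526
 *
 * which already exceeds ETH_FRAME_LEN + ETH_FCS_LEN (1518), so the
 * standard-frame floor above only matters for very small MTUs.
 */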
6569 /**
6570  *  igb_update_stats - Update the board statistics counters
6571  *  @adapter: board private structure
6572  **/
6573 void igb_update_stats(struct igb_adapter *adapter)
6574 {
6575 	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6576 	struct e1000_hw *hw = &adapter->hw;
6577 	struct pci_dev *pdev = adapter->pdev;
6578 	u32 reg, mpc;
6579 	int i;
6580 	u64 bytes, packets;
6581 	unsigned int start;
6582 	u64 _bytes, _packets;
6583 
6584 	/* Prevent stats update while adapter is being reset, or if the pci
6585 	 * connection is down.
6586 	 */
6587 	if (adapter->link_speed == 0)
6588 		return;
6589 	if (pci_channel_offline(pdev))
6590 		return;
6591 
6592 	bytes = 0;
6593 	packets = 0;
6594 
6595 	rcu_read_lock();
6596 	for (i = 0; i < adapter->num_rx_queues; i++) {
6597 		struct igb_ring *ring = adapter->rx_ring[i];
6598 		u32 rqdpc = rd32(E1000_RQDPC(i));
6599 		if (hw->mac.type >= e1000_i210)
6600 			wr32(E1000_RQDPC(i), 0);
6601 
6602 		if (rqdpc) {
6603 			ring->rx_stats.drops += rqdpc;
6604 			net_stats->rx_fifo_errors += rqdpc;
6605 		}
6606 
6607 		do {
6608 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6609 			_bytes = ring->rx_stats.bytes;
6610 			_packets = ring->rx_stats.packets;
6611 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6612 		bytes += _bytes;
6613 		packets += _packets;
6614 	}
6615 
6616 	net_stats->rx_bytes = bytes;
6617 	net_stats->rx_packets = packets;
6618 
6619 	bytes = 0;
6620 	packets = 0;
6621 	for (i = 0; i < adapter->num_tx_queues; i++) {
6622 		struct igb_ring *ring = adapter->tx_ring[i];
6623 		do {
6624 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6625 			_bytes = ring->tx_stats.bytes;
6626 			_packets = ring->tx_stats.packets;
6627 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6628 		bytes += _bytes;
6629 		packets += _packets;
6630 	}
6631 	net_stats->tx_bytes = bytes;
6632 	net_stats->tx_packets = packets;
6633 	rcu_read_unlock();
6634 
6635 	/* read stats registers */
6636 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6637 	adapter->stats.gprc += rd32(E1000_GPRC);
6638 	adapter->stats.gorc += rd32(E1000_GORCL);
6639 	rd32(E1000_GORCH); /* clear GORCL */
6640 	adapter->stats.bprc += rd32(E1000_BPRC);
6641 	adapter->stats.mprc += rd32(E1000_MPRC);
6642 	adapter->stats.roc += rd32(E1000_ROC);
6643 
6644 	adapter->stats.prc64 += rd32(E1000_PRC64);
6645 	adapter->stats.prc127 += rd32(E1000_PRC127);
6646 	adapter->stats.prc255 += rd32(E1000_PRC255);
6647 	adapter->stats.prc511 += rd32(E1000_PRC511);
6648 	adapter->stats.prc1023 += rd32(E1000_PRC1023);
6649 	adapter->stats.prc1522 += rd32(E1000_PRC1522);
6650 	adapter->stats.symerrs += rd32(E1000_SYMERRS);
6651 	adapter->stats.sec += rd32(E1000_SEC);
6652 
6653 	mpc = rd32(E1000_MPC);
6654 	adapter->stats.mpc += mpc;
6655 	net_stats->rx_fifo_errors += mpc;
6656 	adapter->stats.scc += rd32(E1000_SCC);
6657 	adapter->stats.ecol += rd32(E1000_ECOL);
6658 	adapter->stats.mcc += rd32(E1000_MCC);
6659 	adapter->stats.latecol += rd32(E1000_LATECOL);
6660 	adapter->stats.dc += rd32(E1000_DC);
6661 	adapter->stats.rlec += rd32(E1000_RLEC);
6662 	adapter->stats.xonrxc += rd32(E1000_XONRXC);
6663 	adapter->stats.xontxc += rd32(E1000_XONTXC);
6664 	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6665 	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6666 	adapter->stats.fcruc += rd32(E1000_FCRUC);
6667 	adapter->stats.gptc += rd32(E1000_GPTC);
6668 	adapter->stats.gotc += rd32(E1000_GOTCL);
6669 	rd32(E1000_GOTCH); /* clear GOTCL */
6670 	adapter->stats.rnbc += rd32(E1000_RNBC);
6671 	adapter->stats.ruc += rd32(E1000_RUC);
6672 	adapter->stats.rfc += rd32(E1000_RFC);
6673 	adapter->stats.rjc += rd32(E1000_RJC);
6674 	adapter->stats.tor += rd32(E1000_TORH);
6675 	adapter->stats.tot += rd32(E1000_TOTH);
6676 	adapter->stats.tpr += rd32(E1000_TPR);
6677 
6678 	adapter->stats.ptc64 += rd32(E1000_PTC64);
6679 	adapter->stats.ptc127 += rd32(E1000_PTC127);
6680 	adapter->stats.ptc255 += rd32(E1000_PTC255);
6681 	adapter->stats.ptc511 += rd32(E1000_PTC511);
6682 	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6683 	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6684 
6685 	adapter->stats.mptc += rd32(E1000_MPTC);
6686 	adapter->stats.bptc += rd32(E1000_BPTC);
6687 
6688 	adapter->stats.tpt += rd32(E1000_TPT);
6689 	adapter->stats.colc += rd32(E1000_COLC);
6690 
6691 	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6692 	/* read internal phy specific stats */
6693 	reg = rd32(E1000_CTRL_EXT);
6694 	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6695 		adapter->stats.rxerrc += rd32(E1000_RXERRC);
6696 
6697 		/* this stat has invalid values on i210/i211 */
6698 		if ((hw->mac.type != e1000_i210) &&
6699 		    (hw->mac.type != e1000_i211))
6700 			adapter->stats.tncrs += rd32(E1000_TNCRS);
6701 	}
6702 
6703 	adapter->stats.tsctc += rd32(E1000_TSCTC);
6704 	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6705 
6706 	adapter->stats.iac += rd32(E1000_IAC);
6707 	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6708 	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6709 	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6710 	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6711 	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6712 	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6713 	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6714 	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6715 
6716 	/* Fill out the OS statistics structure */
6717 	net_stats->multicast = adapter->stats.mprc;
6718 	net_stats->collisions = adapter->stats.colc;
6719 
6720 	/* Rx Errors */
6721 
6722 	/* RLEC on some newer hardware can be incorrect so build
6723 	 * our own version based on RUC and ROC
6724 	 */
6725 	net_stats->rx_errors = adapter->stats.rxerrc +
6726 		adapter->stats.crcerrs + adapter->stats.algnerrc +
6727 		adapter->stats.ruc + adapter->stats.roc +
6728 		adapter->stats.cexterr;
6729 	net_stats->rx_length_errors = adapter->stats.ruc +
6730 				      adapter->stats.roc;
6731 	net_stats->rx_crc_errors = adapter->stats.crcerrs;
6732 	net_stats->rx_frame_errors = adapter->stats.algnerrc;
6733 	net_stats->rx_missed_errors = adapter->stats.mpc;
6734 
6735 	/* Tx Errors */
6736 	net_stats->tx_errors = adapter->stats.ecol +
6737 			       adapter->stats.latecol;
6738 	net_stats->tx_aborted_errors = adapter->stats.ecol;
6739 	net_stats->tx_window_errors = adapter->stats.latecol;
6740 	net_stats->tx_carrier_errors = adapter->stats.tncrs;
6741 
6742 	/* Tx Dropped needs to be maintained elsewhere */
6743 
6744 	/* Management Stats */
6745 	adapter->stats.mgptc += rd32(E1000_MGTPTC);
6746 	adapter->stats.mgprc += rd32(E1000_MGTPRC);
6747 	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6748 
6749 	/* OS2BMC Stats */
6750 	reg = rd32(E1000_MANC);
6751 	if (reg & E1000_MANC_EN_BMC2OS) {
6752 		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6753 		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6754 		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6755 		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6756 	}
6757 }
6758 
6759 static void igb_tsync_interrupt(struct igb_adapter *adapter)
6760 {
6761 	struct e1000_hw *hw = &adapter->hw;
6762 	struct ptp_clock_event event;
6763 	struct timespec64 ts;
6764 	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6765 
6766 	if (tsicr & TSINTR_SYS_WRAP) {
6767 		event.type = PTP_CLOCK_PPS;
6768 		if (adapter->ptp_caps.pps)
6769 			ptp_clock_event(adapter->ptp_clock, &event);
6770 		ack |= TSINTR_SYS_WRAP;
6771 	}
6772 
6773 	if (tsicr & E1000_TSICR_TXTS) {
6774 		/* retrieve hardware timestamp */
6775 		schedule_work(&adapter->ptp_tx_work);
6776 		ack |= E1000_TSICR_TXTS;
6777 	}
6778 
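	/* A target-time interrupt re-arms the periodic output: advance the
	 * target by one period and re-enable it so the pulse train continues.
	 */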
6779 	if (tsicr & TSINTR_TT0) {
6780 		spin_lock(&adapter->tmreg_lock);
6781 		ts = timespec64_add(adapter->perout[0].start,
6782 				    adapter->perout[0].period);
6783 		/* u32 conversion of tv_sec is safe until y2106 */
6784 		wr32(E1000_TRGTTIML0, ts.tv_nsec);
6785 		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6786 		tsauxc = rd32(E1000_TSAUXC);
6787 		tsauxc |= TSAUXC_EN_TT0;
6788 		wr32(E1000_TSAUXC, tsauxc);
6789 		adapter->perout[0].start = ts;
6790 		spin_unlock(&adapter->tmreg_lock);
6791 		ack |= TSINTR_TT0;
6792 	}
6793 
6794 	if (tsicr & TSINTR_TT1) {
6795 		spin_lock(&adapter->tmreg_lock);
6796 		ts = timespec64_add(adapter->perout[1].start,
6797 				    adapter->perout[1].period);
6798 		wr32(E1000_TRGTTIML1, ts.tv_nsec);
6799 		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6800 		tsauxc = rd32(E1000_TSAUXC);
6801 		tsauxc |= TSAUXC_EN_TT1;
6802 		wr32(E1000_TSAUXC, tsauxc);
6803 		adapter->perout[1].start = ts;
6804 		spin_unlock(&adapter->tmreg_lock);
6805 		ack |= TSINTR_TT1;
6806 	}
6807 
6808 	if (tsicr & TSINTR_AUTT0) {
6809 		nsec = rd32(E1000_AUXSTMPL0);
6810 		sec  = rd32(E1000_AUXSTMPH0);
6811 		event.type = PTP_CLOCK_EXTTS;
6812 		event.index = 0;
6813 		event.timestamp = sec * 1000000000ULL + nsec;
6814 		ptp_clock_event(adapter->ptp_clock, &event);
6815 		ack |= TSINTR_AUTT0;
6816 	}
6817 
6818 	if (tsicr & TSINTR_AUTT1) {
6819 		nsec = rd32(E1000_AUXSTMPL1);
6820 		sec  = rd32(E1000_AUXSTMPH1);
6821 		event.type = PTP_CLOCK_EXTTS;
6822 		event.index = 1;
6823 		event.timestamp = sec * 1000000000ULL + nsec;
6824 		ptp_clock_event(adapter->ptp_clock, &event);
6825 		ack |= TSINTR_AUTT1;
6826 	}
6827 
6828 	/* acknowledge the interrupts */
6829 	wr32(E1000_TSICR, ack);
6830 }
6831 
6832 static irqreturn_t igb_msix_other(int irq, void *data)
6833 {
6834 	struct igb_adapter *adapter = data;
6835 	struct e1000_hw *hw = &adapter->hw;
6836 	u32 icr = rd32(E1000_ICR);
6837 	/* reading ICR causes bit 31 of EICR to be cleared */
6838 
6839 	if (icr & E1000_ICR_DRSTA)
6840 		schedule_work(&adapter->reset_task);
6841 
6842 	if (icr & E1000_ICR_DOUTSYNC) {
6843 		/* HW is reporting DMA is out of sync */
6844 		adapter->stats.doosync++;
6845 		/* The DMA Out of Sync is also an indication of a spoof event
6846 		 * in IOV mode. Check the Wrong VM Behavior register to
6847 		 * see if it is really a spoof event.
6848 		 */
6849 		igb_check_wvbr(adapter);
6850 	}
6851 
6852 	/* Check for a mailbox event */
6853 	if (icr & E1000_ICR_VMMB)
6854 		igb_msg_task(adapter);
6855 
6856 	if (icr & E1000_ICR_LSC) {
6857 		hw->mac.get_link_status = 1;
6858 		/* guard against interrupt when we're going down */
6859 		if (!test_bit(__IGB_DOWN, &adapter->state))
6860 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
6861 	}
6862 
6863 	if (icr & E1000_ICR_TS)
6864 		igb_tsync_interrupt(adapter);
6865 
6866 	wr32(E1000_EIMS, adapter->eims_other);
6867 
6868 	return IRQ_HANDLED;
6869 }
6870 
6871 static void igb_write_itr(struct igb_q_vector *q_vector)
6872 {
6873 	struct igb_adapter *adapter = q_vector->adapter;
6874 	u32 itr_val = q_vector->itr_val & 0x7FFC;
6875 
6876 	if (!q_vector->set_itr)
6877 		return;
6878 
6879 	if (!itr_val)
6880 		itr_val = 0x4;
6881 
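	/* 82575 has no EITR counter-ignore bit; the duplicated write below
	 * presumably keeps both halves of the register in step instead.
	 */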
6882 	if (adapter->hw.mac.type == e1000_82575)
6883 		itr_val |= itr_val << 16;
6884 	else
6885 		itr_val |= E1000_EITR_CNT_IGNR;
6886 
6887 	writel(itr_val, q_vector->itr_register);
6888 	q_vector->set_itr = 0;
6889 }
6890 
6891 static irqreturn_t igb_msix_ring(int irq, void *data)
6892 {
6893 	struct igb_q_vector *q_vector = data;
6894 
6895 	/* Write the ITR value calculated from the previous interrupt. */
6896 	igb_write_itr(q_vector);
6897 
6898 	napi_schedule(&q_vector->napi);
6899 
6900 	return IRQ_HANDLED;
6901 }
6902 
6903 #ifdef CONFIG_IGB_DCA
6904 static void igb_update_tx_dca(struct igb_adapter *adapter,
6905 			      struct igb_ring *tx_ring,
6906 			      int cpu)
6907 {
6908 	struct e1000_hw *hw = &adapter->hw;
6909 	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6910 
6911 	if (hw->mac.type != e1000_82575)
6912 		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6913 
6914 	/* We can enable relaxed ordering for reads, but not writes when
6915 	 * DCA is enabled.  This is due to a known issue in some chipsets
6916 	 * which will cause the DCA tag to be cleared.
6917 	 */
6918 	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6919 		  E1000_DCA_TXCTRL_DATA_RRO_EN |
6920 		  E1000_DCA_TXCTRL_DESC_DCA_EN;
6921 
6922 	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6923 }
6924 
6925 static void igb_update_rx_dca(struct igb_adapter *adapter,
6926 			      struct igb_ring *rx_ring,
6927 			      int cpu)
6928 {
6929 	struct e1000_hw *hw = &adapter->hw;
6930 	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6931 
6932 	if (hw->mac.type != e1000_82575)
6933 		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6934 
6935 	/* We can enable relaxed ordering for reads, but not writes when
6936 	 * DCA is enabled.  This is due to a known issue in some chipsets
6937 	 * which will cause the DCA tag to be cleared.
6938 	 */
6939 	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6940 		  E1000_DCA_RXCTRL_DESC_DCA_EN;
6941 
6942 	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6943 }
6944 
6945 static void igb_update_dca(struct igb_q_vector *q_vector)
6946 {
6947 	struct igb_adapter *adapter = q_vector->adapter;
6948 	int cpu = get_cpu();
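	/* get_cpu() also disables preemption, so the comparison below stays
	 * valid until the matching put_cpu()
	 */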
6949 
6950 	if (q_vector->cpu == cpu)
6951 		goto out_no_update;
6952 
6953 	if (q_vector->tx.ring)
6954 		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6955 
6956 	if (q_vector->rx.ring)
6957 		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6958 
6959 	q_vector->cpu = cpu;
6960 out_no_update:
6961 	put_cpu();
6962 }
6963 
6964 static void igb_setup_dca(struct igb_adapter *adapter)
6965 {
6966 	struct e1000_hw *hw = &adapter->hw;
6967 	int i;
6968 
6969 	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6970 		return;
6971 
6972 	/* Always use CB2 mode, difference is masked in the CB driver. */
6973 	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6974 
6975 	for (i = 0; i < adapter->num_q_vectors; i++) {
6976 		adapter->q_vector[i]->cpu = -1;
6977 		igb_update_dca(adapter->q_vector[i]);
6978 	}
6979 }
6980 
6981 static int __igb_notify_dca(struct device *dev, void *data)
6982 {
6983 	struct net_device *netdev = dev_get_drvdata(dev);
6984 	struct igb_adapter *adapter = netdev_priv(netdev);
6985 	struct pci_dev *pdev = adapter->pdev;
6986 	struct e1000_hw *hw = &adapter->hw;
6987 	unsigned long event = *(unsigned long *)data;
6988 
6989 	switch (event) {
6990 	case DCA_PROVIDER_ADD:
6991 		/* if already enabled, don't do it again */
6992 		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6993 			break;
6994 		if (dca_add_requester(dev) == 0) {
6995 			adapter->flags |= IGB_FLAG_DCA_ENABLED;
6996 			dev_info(&pdev->dev, "DCA enabled\n");
6997 			igb_setup_dca(adapter);
6998 			break;
6999 		}
7000 		fallthrough; /* since DCA is disabled. */
7001 	case DCA_PROVIDER_REMOVE:
7002 		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
7003 			/* without this a class_device is left
7004 			 * hanging around in the sysfs model
7005 			 */
7006 			dca_remove_requester(dev);
7007 			dev_info(&pdev->dev, "DCA disabled\n");
7008 			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
7009 			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
7010 		}
7011 		break;
7012 	}
7013 
7014 	return 0;
7015 }
7016 
7017 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
7018 			  void *p)
7019 {
7020 	int ret_val;
7021 
7022 	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
7023 					 __igb_notify_dca);
7024 
7025 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7026 }
7027 #endif /* CONFIG_IGB_DCA */
7028 
7029 #ifdef CONFIG_PCI_IOV
7030 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
7031 {
7032 	unsigned char mac_addr[ETH_ALEN];
7033 
7034 	eth_zero_addr(mac_addr);
7035 	igb_set_vf_mac(adapter, vf, mac_addr);
7036 
7037 	/* By default spoof check is enabled for all VFs */
7038 	adapter->vf_data[vf].spoofchk_enabled = true;
7039 
7040 	/* By default VFs are not trusted */
7041 	adapter->vf_data[vf].trusted = false;
7042 
7043 	return 0;
7044 }
7045 
7046 #endif
7047 static void igb_ping_all_vfs(struct igb_adapter *adapter)
7048 {
7049 	struct e1000_hw *hw = &adapter->hw;
7050 	u32 ping;
7051 	int i;
7052 
7053 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
7054 		ping = E1000_PF_CONTROL_MSG;
7055 		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
7056 			ping |= E1000_VT_MSGTYPE_CTS;
7057 		igb_write_mbx(hw, &ping, 1, i);
7058 	}
7059 }
7060 
7061 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7062 {
7063 	struct e1000_hw *hw = &adapter->hw;
7064 	u32 vmolr = rd32(E1000_VMOLR(vf));
7065 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7066 
7067 	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7068 			    IGB_VF_FLAG_MULTI_PROMISC);
7069 	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7070 
7071 	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
7072 		vmolr |= E1000_VMOLR_MPME;
7073 		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7074 		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
7075 	} else {
7076 		/* if we have hashes and we are clearing a multicast promisc
7077 		 * flag, we need to write the hashes to the MTA, as this step
7078 		 * was previously skipped
7079 		 */
7080 		if (vf_data->num_vf_mc_hashes > 30) {
7081 			vmolr |= E1000_VMOLR_MPME;
7082 		} else if (vf_data->num_vf_mc_hashes) {
7083 			int j;
7084 
7085 			vmolr |= E1000_VMOLR_ROMPE;
7086 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7087 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7088 		}
7089 	}
7090 
7091 	wr32(E1000_VMOLR(vf), vmolr);
7092 
7093 	/* if any flags are left unprocessed, the request is likely unsupported */
7094 	if (*msgbuf & E1000_VT_MSGINFO_MASK)
7095 		return -EINVAL;
7096 
7097 	return 0;
7098 }
7099 
7100 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
7101 				  u32 *msgbuf, u32 vf)
7102 {
7103 	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7104 	u16 *hash_list = (u16 *)&msgbuf[1];
7105 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7106 	int i;
7107 
7108 	/* salt away the number of multicast addresses assigned
7109 	 * to this VF so it can be restored when the PF multicast
7110 	 * list changes
7111 	 */
7112 	vf_data->num_vf_mc_hashes = n;
7113 
7114 	/* only up to 30 hash values supported */
7115 	if (n > 30)
7116 		n = 30;
7117 
7118 	/* store the hashes for later use */
7119 	for (i = 0; i < n; i++)
7120 		vf_data->vf_mc_hashes[i] = hash_list[i];
7121 
7122 	/* Flush and reset the mta with the new values */
7123 	igb_set_rx_mode(adapter->netdev);
7124 
7125 	return 0;
7126 }
7127 
7128 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
7129 {
7130 	struct e1000_hw *hw = &adapter->hw;
7131 	struct vf_data_storage *vf_data;
7132 	int i, j;
7133 
7134 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
7135 		u32 vmolr = rd32(E1000_VMOLR(i));
7136 
7137 		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7138 
7139 		vf_data = &adapter->vf_data[i];
7140 
7141 		if ((vf_data->num_vf_mc_hashes > 30) ||
7142 		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
7143 			vmolr |= E1000_VMOLR_MPME;
7144 		} else if (vf_data->num_vf_mc_hashes) {
7145 			vmolr |= E1000_VMOLR_ROMPE;
7146 			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7147 				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7148 		}
7149 		wr32(E1000_VMOLR(i), vmolr);
7150 	}
7151 }
7152 
7153 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
7154 {
7155 	struct e1000_hw *hw = &adapter->hw;
7156 	u32 pool_mask, vlvf_mask, i;
7157 
7158 	/* create mask for VF and other pools */
7159 	pool_mask = E1000_VLVF_POOLSEL_MASK;
7160 	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
7161 
7162 	/* drop PF from pool bits */
7163 	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
7164 			     adapter->vfs_allocated_count);
7165 
7166 	/* Find the vlan filter for this id */
7167 	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
7168 		u32 vlvf = rd32(E1000_VLVF(i));
7169 		u32 vfta_mask, vid, vfta;
7170 
7171 		/* remove the vf from the pool */
7172 		if (!(vlvf & vlvf_mask))
7173 			continue;
7174 
7175 		/* clear out bit from VLVF */
7176 		vlvf ^= vlvf_mask;
7177 
7178 		/* if other pools are present, just remove ourselves */
7179 		if (vlvf & pool_mask)
7180 			goto update_vlvfb;
7181 
7182 		/* if PF is present, leave VFTA */
7183 		if (vlvf & E1000_VLVF_POOLSEL_MASK)
7184 			goto update_vlvf;
7185 
7186 		vid = vlvf & E1000_VLVF_VLANID_MASK;
7187 		vfta_mask = BIT(vid % 32);
7188 
7189 		/* clear bit from VFTA */
7190 		vfta = adapter->shadow_vfta[vid / 32];
7191 		if (vfta & vfta_mask)
7192 			hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
7193 update_vlvf:
7194 		/* clear pool selection enable */
7195 		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7196 			vlvf &= E1000_VLVF_POOLSEL_MASK;
7197 		else
7198 			vlvf = 0;
7199 update_vlvfb:
7200 		/* clear pool bits */
7201 		wr32(E1000_VLVF(i), vlvf);
7202 	}
7203 }
7204 
7205 static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
7206 {
7207 	u32 vlvf;
7208 	int idx;
7209 
7210 	/* shortcut the special case */
7211 	if (vlan == 0)
7212 		return 0;
7213 
7214 	/* Search for the VLAN id in the VLVF entries */
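	/* Note: the loop condition stops before entry 0, which doubles as
	 * the "VLAN 0 / not found" return value above.
	 */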
7215 	for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
7216 		vlvf = rd32(E1000_VLVF(idx));
7217 		if ((vlvf & VLAN_VID_MASK) == vlan)
7218 			break;
7219 	}
7220 
7221 	return idx;
7222 }
7223 
7224 static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
7225 {
7226 	struct e1000_hw *hw = &adapter->hw;
7227 	u32 bits, pf_id;
7228 	int idx;
7229 
7230 	idx = igb_find_vlvf_entry(hw, vid);
7231 	if (!idx)
7232 		return;
7233 
7234 	/* See if any other pools are set for this VLAN filter
7235 	 * entry other than the PF.
7236 	 */
7237 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
7238 	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
7239 	bits &= rd32(E1000_VLVF(idx));
7240 
7241 	/* Disable the filter so this falls into the default pool. */
7242 	if (!bits) {
7243 		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7244 			wr32(E1000_VLVF(idx), BIT(pf_id));
7245 		else
7246 			wr32(E1000_VLVF(idx), 0);
7247 	}
7248 }
7249 
7250 static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
7251 			   bool add, u32 vf)
7252 {
7253 	int pf_id = adapter->vfs_allocated_count;
7254 	struct e1000_hw *hw = &adapter->hw;
7255 	int err;
7256 
7257 	/* If the VLAN overlaps with one the PF is currently monitoring, make
7258 	 * sure that we are able to allocate a VLVF entry.  This may be
7259 	 * redundant but it guarantees PF will maintain visibility to
7260 	 * the VLAN.
7261 	 */
7262 	if (add && test_bit(vid, adapter->active_vlans)) {
7263 		err = igb_vfta_set(hw, vid, pf_id, true, false);
7264 		if (err)
7265 			return err;
7266 	}
7267 
7268 	err = igb_vfta_set(hw, vid, vf, add, false);
7269 
7270 	if (add && !err)
7271 		return err;
7272 
7273 	/* If we failed to add the VF VLAN or we are removing the VF VLAN
7274 	 * we may need to drop the PF pool bit in order to allow us to free
7275 	 * up the VLVF resources.
7276 	 */
7277 	if (test_bit(vid, adapter->active_vlans) ||
7278 	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
7279 		igb_update_pf_vlvf(adapter, vid);
7280 
7281 	return err;
7282 }
7283 
7284 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
7285 {
7286 	struct e1000_hw *hw = &adapter->hw;
7287 
7288 	if (vid)
7289 		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
7290 	else
7291 		wr32(E1000_VMVIR(vf), 0);
7292 }
7293 
7294 static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
7295 				u16 vlan, u8 qos)
7296 {
7297 	int err;
7298 
7299 	err = igb_set_vf_vlan(adapter, vlan, true, vf);
7300 	if (err)
7301 		return err;
7302 
7303 	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7304 	igb_set_vmolr(adapter, vf, !vlan);
7305 
7306 	/* revoke access to previous VLAN */
7307 	if (vlan != adapter->vf_data[vf].pf_vlan)
7308 		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7309 				false, vf);
7310 
7311 	adapter->vf_data[vf].pf_vlan = vlan;
7312 	adapter->vf_data[vf].pf_qos = qos;
7313 	igb_set_vf_vlan_strip(adapter, vf, true);
7314 	dev_info(&adapter->pdev->dev,
7315 		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7316 	if (test_bit(__IGB_DOWN, &adapter->state)) {
7317 		dev_warn(&adapter->pdev->dev,
7318 			 "The VF VLAN has been set, but the PF device is not up.\n");
7319 		dev_warn(&adapter->pdev->dev,
7320 			 "Bring the PF device up before attempting to use the VF device.\n");
7321 	}
7322 
7323 	return err;
7324 }
7325 
7326 static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7327 {
7328 	/* Restore tagless access via VLAN 0 */
7329 	igb_set_vf_vlan(adapter, 0, true, vf);
7330 
7331 	igb_set_vmvir(adapter, 0, vf);
7332 	igb_set_vmolr(adapter, vf, true);
7333 
7334 	/* Remove any PF assigned VLAN */
7335 	if (adapter->vf_data[vf].pf_vlan)
7336 		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7337 				false, vf);
7338 
7339 	adapter->vf_data[vf].pf_vlan = 0;
7340 	adapter->vf_data[vf].pf_qos = 0;
7341 	igb_set_vf_vlan_strip(adapter, vf, false);
7342 
7343 	return 0;
7344 }
7345 
7346 static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7347 			       u16 vlan, u8 qos, __be16 vlan_proto)
7348 {
7349 	struct igb_adapter *adapter = netdev_priv(netdev);
7350 
7351 	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7352 		return -EINVAL;
7353 
7354 	if (vlan_proto != htons(ETH_P_8021Q))
7355 		return -EPROTONOSUPPORT;
7356 
7357 	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7358 			       igb_disable_port_vlan(adapter, vf);
7359 }
7360 
7361 static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7362 {
7363 	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7364 	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7365 	int ret;
7366 
7367 	if (adapter->vf_data[vf].pf_vlan)
7368 		return -1;
7369 
7370 	/* VLAN 0 is a special case, don't allow it to be removed */
7371 	if (!vid && !add)
7372 		return 0;
7373 
7374 	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7375 	if (!ret)
7376 		igb_set_vf_vlan_strip(adapter, vf, !!vid);
7377 	return ret;
7378 }
7379 
7380 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7381 {
7382 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7383 
7384 	/* clear flags - except flag that indicates PF has set the MAC */
7385 	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7386 	vf_data->last_nack = jiffies;
7387 
7388 	/* reset vlans for device */
7389 	igb_clear_vf_vfta(adapter, vf);
7390 	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7391 	igb_set_vmvir(adapter, vf_data->pf_vlan |
7392 			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7393 	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7394 	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7395 
7396 	/* reset multicast table array for vf */
7397 	adapter->vf_data[vf].num_vf_mc_hashes = 0;
7398 
7399 	/* Flush and reset the mta with the new values */
7400 	igb_set_rx_mode(adapter->netdev);
7401 }
7402 
7403 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7404 {
7405 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7406 
7407 	/* clear mac address as we were hotplug removed/added */
7408 	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7409 		eth_zero_addr(vf_mac);
7410 
7411 	/* process remaining reset events */
7412 	igb_vf_reset(adapter, vf);
7413 }
7414 
7415 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7416 {
7417 	struct e1000_hw *hw = &adapter->hw;
7418 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7419 	u32 reg, msgbuf[3] = {};
7420 	u8 *addr = (u8 *)(&msgbuf[1]);
7421 
7422 	/* process all the same items cleared in a function level reset */
7423 	igb_vf_reset(adapter, vf);
7424 
7425 	/* set vf mac address */
7426 	igb_set_vf_mac(adapter, vf, vf_mac);
7427 
7428 	/* enable transmit and receive for vf */
7429 	reg = rd32(E1000_VFTE);
7430 	wr32(E1000_VFTE, reg | BIT(vf));
7431 	reg = rd32(E1000_VFRE);
7432 	wr32(E1000_VFRE, reg | BIT(vf));
7433 
7434 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7435 
7436 	/* reply to reset with ack and vf mac address */
7437 	if (!is_zero_ether_addr(vf_mac)) {
7438 		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7439 		memcpy(addr, vf_mac, ETH_ALEN);
7440 	} else {
7441 		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7442 	}
7443 	igb_write_mbx(hw, msgbuf, 3, vf);
7444 }
7445 
7446 static void igb_flush_mac_table(struct igb_adapter *adapter)
7447 {
7448 	struct e1000_hw *hw = &adapter->hw;
7449 	int i;
7450 
7451 	for (i = 0; i < hw->mac.rar_entry_count; i++) {
7452 		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7453 		eth_zero_addr(adapter->mac_table[i].addr);
7454 		adapter->mac_table[i].queue = 0;
7455 		igb_rar_set_index(adapter, i);
7456 	}
7457 }
7458 
7459 static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7460 {
7461 	struct e1000_hw *hw = &adapter->hw;
7462 	/* do not count RAR entries reserved for VF MAC addresses */
7463 	int rar_entries = hw->mac.rar_entry_count -
7464 			  adapter->vfs_allocated_count;
7465 	int i, count = 0;
7466 
7467 	for (i = 0; i < rar_entries; i++) {
7468 		/* do not count default entries */
7469 		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7470 			continue;
7471 
7472 		/* do not count "in use" entries for different queues */
7473 		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7474 		    (adapter->mac_table[i].queue != queue))
7475 			continue;
7476 
7477 		count++;
7478 	}
7479 
7480 	return count;
7481 }
7482 
7483 /* Set default MAC address for the PF in the first RAR entry */
7484 static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7485 {
7486 	struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7487 
7488 	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7489 	mac_table->queue = adapter->vfs_allocated_count;
7490 	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7491 
7492 	igb_rar_set_index(adapter, 0);
7493 }
7494 
7495 /* If the filter to be added and an already existing filter express
7496  * the same address and address type, it should be possible to update
7497  * only the other configuration fields, for example the queue used to
7498  * steer traffic.
7499  */
7500 static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7501 				      const u8 *addr, const u8 flags)
7502 {
7503 	if (!(entry->state & IGB_MAC_STATE_IN_USE))
7504 		return true;
7505 
7506 	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7507 	    (flags & IGB_MAC_STATE_SRC_ADDR))
7508 		return false;
7509 
7510 	if (!ether_addr_equal(addr, entry->addr))
7511 		return false;
7512 
7513 	return true;
7514 }
7515 
7516 /* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
7517  * 'flags' indicates what kind of match is made: matching is by default
7518  * on the destination address; if matching by source address is desired,
7519  * the flag IGB_MAC_STATE_SRC_ADDR can be used.
7520  */
7521 static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7522 				    const u8 *addr, const u8 queue,
7523 				    const u8 flags)
7524 {
7525 	struct e1000_hw *hw = &adapter->hw;
7526 	int rar_entries = hw->mac.rar_entry_count -
7527 			  adapter->vfs_allocated_count;
7528 	int i;
7529 
7530 	if (is_zero_ether_addr(addr))
7531 		return -EINVAL;
7532 
7533 	/* Search for the first empty entry in the MAC table.
7534 	 * Do not touch entries at the end of the table reserved for the VF MAC
7535 	 * addresses.
7536 	 */
7537 	for (i = 0; i < rar_entries; i++) {
7538 		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7539 					       addr, flags))
7540 			continue;
7541 
7542 		ether_addr_copy(adapter->mac_table[i].addr, addr);
7543 		adapter->mac_table[i].queue = queue;
7544 		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7545 
7546 		igb_rar_set_index(adapter, i);
7547 		return i;
7548 	}
7549 
7550 	return -ENOSPC;
7551 }
7552 
7553 static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7554 			      const u8 queue)
7555 {
7556 	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7557 }
7558 
7559 /* Remove a MAC filter for 'addr' directing matching traffic to
7560  * 'queue'. 'flags' indicates what kind of match needs to be removed:
7561  * matching is by default on the destination address; if a match by
7562  * source address is to be removed, the flag
7563  * IGB_MAC_STATE_SRC_ADDR can be used.
7564  */
7565 static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7566 				    const u8 *addr, const u8 queue,
7567 				    const u8 flags)
7568 {
7569 	struct e1000_hw *hw = &adapter->hw;
7570 	int rar_entries = hw->mac.rar_entry_count -
7571 			  adapter->vfs_allocated_count;
7572 	int i;
7573 
7574 	if (is_zero_ether_addr(addr))
7575 		return -EINVAL;
7576 
7577 	/* Search for matching entry in the MAC table based on given address
7578 	 * and queue. Do not touch entries at the end of the table reserved
7579 	 * for the VF MAC addresses.
7580 	 */
7581 	for (i = 0; i < rar_entries; i++) {
7582 		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7583 			continue;
7584 		if ((adapter->mac_table[i].state & flags) != flags)
7585 			continue;
7586 		if (adapter->mac_table[i].queue != queue)
7587 			continue;
7588 		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7589 			continue;
7590 
7591 		/* When a filter for the default address is "deleted",
7592 		 * we return it to its initial configuration
7593 		 */
7594 		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7595 			adapter->mac_table[i].state =
7596 				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7597 			adapter->mac_table[i].queue =
7598 				adapter->vfs_allocated_count;
7599 		} else {
7600 			adapter->mac_table[i].state = 0;
7601 			adapter->mac_table[i].queue = 0;
7602 			eth_zero_addr(adapter->mac_table[i].addr);
7603 		}
7604 
7605 		igb_rar_set_index(adapter, i);
7606 		return 0;
7607 	}
7608 
7609 	return -ENOENT;
7610 }
7611 
7612 static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7613 			      const u8 queue)
7614 {
7615 	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7616 }
7617 
7618 int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7619 				const u8 *addr, u8 queue, u8 flags)
7620 {
7621 	struct e1000_hw *hw = &adapter->hw;
7622 
7623 	/* In theory, this should be supported on 82575 as well, but
7624 	 * that part wasn't easily accessible during development.
7625 	 */
7626 	if (hw->mac.type != e1000_i210)
7627 		return -EOPNOTSUPP;
7628 
7629 	return igb_add_mac_filter_flags(adapter, addr, queue,
7630 					IGB_MAC_STATE_QUEUE_STEERING | flags);
7631 }
7632 
7633 int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7634 				const u8 *addr, u8 queue, u8 flags)
7635 {
7636 	return igb_del_mac_filter_flags(adapter, addr, queue,
7637 					IGB_MAC_STATE_QUEUE_STEERING | flags);
7638 }
7639 
7640 static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7641 {
7642 	struct igb_adapter *adapter = netdev_priv(netdev);
7643 	int ret;
7644 
7645 	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7646 
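	/* igb_add_mac_filter() returns the RAR index on success; the address
	 * sync API expects 0, so clamp positive return values.
	 */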
7647 	return min_t(int, ret, 0);
7648 }
7649 
7650 static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7651 {
7652 	struct igb_adapter *adapter = netdev_priv(netdev);
7653 
7654 	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7655 
7656 	return 0;
7657 }
7658 
7659 static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7660 				 const u32 info, const u8 *addr)
7661 {
7662 	struct pci_dev *pdev = adapter->pdev;
7663 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7664 	struct list_head *pos;
7665 	struct vf_mac_filter *entry = NULL;
7666 	int ret = 0;
7667 
7668 	if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7669 	    !vf_data->trusted) {
7670 		dev_warn(&pdev->dev,
7671 			 "VF %d requested MAC filter but is administratively denied\n",
7672 			  vf);
7673 		return -EINVAL;
7674 	}
7675 	if (!is_valid_ether_addr(addr)) {
7676 		dev_warn(&pdev->dev,
7677 			 "VF %d attempted to set invalid MAC filter\n",
7678 			  vf);
7679 		return -EINVAL;
7680 	}
7681 
7682 	switch (info) {
7683 	case E1000_VF_MAC_FILTER_CLR:
7684 		/* remove all unicast MAC filters related to the current VF */
7685 		list_for_each(pos, &adapter->vf_macs.l) {
7686 			entry = list_entry(pos, struct vf_mac_filter, l);
7687 			if (entry->vf == vf) {
7688 				entry->vf = -1;
7689 				entry->free = true;
7690 				igb_del_mac_filter(adapter, entry->vf_mac, vf);
7691 			}
7692 		}
7693 		break;
7694 	case E1000_VF_MAC_FILTER_ADD:
7695 		/* try to find empty slot in the list */
7696 		list_for_each(pos, &adapter->vf_macs.l) {
7697 			entry = list_entry(pos, struct vf_mac_filter, l);
7698 			if (entry->free)
7699 				break;
7700 		}
7701 
7702 		if (entry && entry->free) {
7703 			entry->free = false;
7704 			entry->vf = vf;
7705 			ether_addr_copy(entry->vf_mac, addr);
7706 
7707 			ret = igb_add_mac_filter(adapter, addr, vf);
7708 			ret = min_t(int, ret, 0);
7709 		} else {
7710 			ret = -ENOSPC;
7711 		}
7712 
7713 		if (ret == -ENOSPC)
7714 			dev_warn(&pdev->dev,
7715 				 "VF %d has requested MAC filter but there is no space for it\n",
7716 				 vf);
7717 		break;
7718 	default:
7719 		ret = -EINVAL;
7720 		break;
7721 	}
7722 
7723 	return ret;
7724 }
7725 
7726 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7727 {
7728 	struct pci_dev *pdev = adapter->pdev;
7729 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7730 	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7731 
7732 	/* The VF MAC Address is stored in a packed array of bytes
7733 	 * starting at the second 32 bit word of the msg array
7734 	 */
7735 	unsigned char *addr = (unsigned char *)&msg[1];
7736 	int ret = 0;
7737 
7738 	if (!info) {
7739 		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7740 		    !vf_data->trusted) {
7741 			dev_warn(&pdev->dev,
7742 				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7743 				 vf);
7744 			return -EINVAL;
7745 		}
7746 
7747 		if (!is_valid_ether_addr(addr)) {
7748 			dev_warn(&pdev->dev,
7749 				 "VF %d attempted to set invalid MAC\n",
7750 				 vf);
7751 			return -EINVAL;
7752 		}
7753 
7754 		ret = igb_set_vf_mac(adapter, vf, addr);
7755 	} else {
7756 		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7757 	}
7758 
7759 	return ret;
7760 }
7761 
7762 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7763 {
7764 	struct e1000_hw *hw = &adapter->hw;
7765 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7766 	u32 msg = E1000_VT_MSGTYPE_NACK;
7767 
7768 	/* if device isn't clear to send it shouldn't be reading either */
7769 	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7770 	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7771 		igb_write_mbx(hw, &msg, 1, vf);
7772 		vf_data->last_nack = jiffies;
7773 	}
7774 }
7775 
7776 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7777 {
7778 	struct pci_dev *pdev = adapter->pdev;
7779 	u32 msgbuf[E1000_VFMAILBOX_SIZE];
7780 	struct e1000_hw *hw = &adapter->hw;
7781 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7782 	s32 retval;
7783 
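	/* Read without releasing the mailbox: every path below either
	 * replies via igb_write_mbx() (which unlocks) or jumps to the
	 * explicit igb_unlock_mbx() at the end.
	 */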
7784 	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7785 
7786 	if (retval) {
7787 		/* if receive failed revoke VF CTS stats and restart init */
7788 		dev_err(&pdev->dev, "Error receiving message from VF\n");
7789 		vf_data->flags &= ~IGB_VF_FLAG_CTS;
7790 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7791 			goto unlock;
7792 		goto out;
7793 	}
7794 
7795 	/* this is a message we already processed, do nothing */
7796 	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7797 		goto unlock;
7798 
7799 	/* until the vf completes a reset it should not be
7800 	 * allowed to start any configuration.
7801 	 */
7802 	if (msgbuf[0] == E1000_VF_RESET) {
7803 		/* unlocks mailbox */
7804 		igb_vf_reset_msg(adapter, vf);
7805 		return;
7806 	}
7807 
7808 	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7809 		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7810 			goto unlock;
7811 		retval = -1;
7812 		goto out;
7813 	}
7814 
7815 	switch ((msgbuf[0] & 0xFFFF)) {
7816 	case E1000_VF_SET_MAC_ADDR:
7817 		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7818 		break;
7819 	case E1000_VF_SET_PROMISC:
7820 		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7821 		break;
7822 	case E1000_VF_SET_MULTICAST:
7823 		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7824 		break;
7825 	case E1000_VF_SET_LPE:
7826 		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7827 		break;
7828 	case E1000_VF_SET_VLAN:
7829 		retval = -1;
7830 		if (vf_data->pf_vlan)
7831 			dev_warn(&pdev->dev,
7832 				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7833 				 vf);
7834 		else
7835 			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7836 		break;
7837 	default:
7838 		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7839 		retval = -1;
7840 		break;
7841 	}
7842 
7843 	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7844 out:
7845 	/* notify the VF of the results of what it sent us */
7846 	if (retval)
7847 		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7848 	else
7849 		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7850 
7851 	/* unlocks mailbox */
7852 	igb_write_mbx(hw, msgbuf, 1, vf);
7853 	return;
7854 
7855 unlock:
7856 	igb_unlock_mbx(hw, vf);
7857 }
7858 
7859 static void igb_msg_task(struct igb_adapter *adapter)
7860 {
7861 	struct e1000_hw *hw = &adapter->hw;
7862 	unsigned long flags;
7863 	u32 vf;
7864 
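	/* Hold vfs_lock across the scan so the VF data cannot be torn down
	 * underneath us (e.g. by an SR-IOV disable).
	 */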
7865 	spin_lock_irqsave(&adapter->vfs_lock, flags);
7866 	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7867 		/* process any reset requests */
7868 		if (!igb_check_for_rst(hw, vf))
7869 			igb_vf_reset_event(adapter, vf);
7870 
7871 		/* process any messages pending */
7872 		if (!igb_check_for_msg(hw, vf))
7873 			igb_rcv_msg_from_vf(adapter, vf);
7874 
7875 		/* process any acks */
7876 		if (!igb_check_for_ack(hw, vf))
7877 			igb_rcv_ack_from_vf(adapter, vf);
7878 	}
7879 	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
7880 }
7881 
7882 /**
7883  *  igb_set_uta - Set unicast filter table address
7884  *  @adapter: board private structure
7885  *  @set: boolean indicating if we are setting or clearing bits
7886  *
7887  *  The unicast table address is a register array of 32-bit registers.
7888  *  The table is meant to be used in a way similar to the MTA; however,
7889  *  due to certain limitations in the hardware, it is necessary to set all
7890  *  the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous enable
7891  *  bit to allow VLAN tag stripping when promiscuous mode is enabled.
7892  **/
7893 static void igb_set_uta(struct igb_adapter *adapter, bool set)
7894 {
7895 	struct e1000_hw *hw = &adapter->hw;
7896 	u32 uta = set ? ~0 : 0;
7897 	int i;
7898 
7899 	/* we only need to do this if VMDq is enabled */
7900 	if (!adapter->vfs_allocated_count)
7901 		return;
7902 
7903 	for (i = hw->mac.uta_reg_count; i--;)
7904 		array_wr32(E1000_UTA, i, uta);
7905 }
7906 
7907 /**
7908  *  igb_intr_msi - Interrupt Handler
7909  *  @irq: interrupt number
7910  *  @data: pointer to a network interface device structure
7911  **/
7912 static irqreturn_t igb_intr_msi(int irq, void *data)
7913 {
7914 	struct igb_adapter *adapter = data;
7915 	struct igb_q_vector *q_vector = adapter->q_vector[0];
7916 	struct e1000_hw *hw = &adapter->hw;
7917 	/* read ICR disables interrupts using IAM */
7918 	u32 icr = rd32(E1000_ICR);
7919 
7920 	igb_write_itr(q_vector);
7921 
7922 	if (icr & E1000_ICR_DRSTA)
7923 		schedule_work(&adapter->reset_task);
7924 
7925 	if (icr & E1000_ICR_DOUTSYNC) {
7926 		/* HW is reporting DMA is out of sync */
7927 		adapter->stats.doosync++;
7928 	}
7929 
7930 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7931 		hw->mac.get_link_status = 1;
7932 		if (!test_bit(__IGB_DOWN, &adapter->state))
7933 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
7934 	}
7935 
7936 	if (icr & E1000_ICR_TS)
7937 		igb_tsync_interrupt(adapter);
7938 
7939 	napi_schedule(&q_vector->napi);
7940 
7941 	return IRQ_HANDLED;
7942 }
7943 
7944 /**
7945  *  igb_intr - Legacy Interrupt Handler
7946  *  @irq: interrupt number
7947  *  @data: pointer to a network interface device structure
7948  **/
7949 static irqreturn_t igb_intr(int irq, void *data)
7950 {
7951 	struct igb_adapter *adapter = data;
7952 	struct igb_q_vector *q_vector = adapter->q_vector[0];
7953 	struct e1000_hw *hw = &adapter->hw;
7954 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
7955 	 * need for the IMC write
7956 	 */
7957 	u32 icr = rd32(E1000_ICR);
7958 
7959 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
7960 	 * not set, then the adapter didn't send an interrupt
7961 	 */
7962 	if (!(icr & E1000_ICR_INT_ASSERTED))
7963 		return IRQ_NONE;
7964 
7965 	igb_write_itr(q_vector);
7966 
7967 	if (icr & E1000_ICR_DRSTA)
7968 		schedule_work(&adapter->reset_task);
7969 
7970 	if (icr & E1000_ICR_DOUTSYNC) {
7971 		/* HW is reporting DMA is out of sync */
7972 		adapter->stats.doosync++;
7973 	}
7974 
7975 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7976 		hw->mac.get_link_status = 1;
7977 		/* guard against interrupt when we're going down */
7978 		if (!test_bit(__IGB_DOWN, &adapter->state))
7979 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
7980 	}
7981 
7982 	if (icr & E1000_ICR_TS)
7983 		igb_tsync_interrupt(adapter);
7984 
7985 	napi_schedule(&q_vector->napi);
7986 
7987 	return IRQ_HANDLED;
7988 }
7989 
7990 static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7991 {
7992 	struct igb_adapter *adapter = q_vector->adapter;
7993 	struct e1000_hw *hw = &adapter->hw;
7994 
7995 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7996 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7997 		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7998 			igb_set_itr(q_vector);
7999 		else
8000 			igb_update_ring_itr(q_vector);
8001 	}
8002 
8003 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
8004 		if (adapter->flags & IGB_FLAG_HAS_MSIX)
8005 			wr32(E1000_EIMS, q_vector->eims_value);
8006 		else
8007 			igb_irq_enable(adapter);
8008 	}
8009 }
8010 
8011 /**
8012  *  igb_poll - NAPI Rx polling callback
8013  *  @napi: napi polling structure
8014  *  @budget: count of how many packets we should handle
8015  **/
8016 static int igb_poll(struct napi_struct *napi, int budget)
8017 {
8018 	struct igb_q_vector *q_vector = container_of(napi,
8019 						     struct igb_q_vector,
8020 						     napi);
8021 	bool clean_complete = true;
8022 	int work_done = 0;
8023 
8024 #ifdef CONFIG_IGB_DCA
8025 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
8026 		igb_update_dca(q_vector);
8027 #endif
8028 	if (q_vector->tx.ring)
8029 		clean_complete = igb_clean_tx_irq(q_vector, budget);
8030 
8031 	if (q_vector->rx.ring) {
8032 		int cleaned = igb_clean_rx_irq(q_vector, budget);
8033 
8034 		work_done += cleaned;
8035 		if (cleaned >= budget)
8036 			clean_complete = false;
8037 	}
8038 
8039 	/* If all work not completed, return budget and keep polling */
8040 	if (!clean_complete)
8041 		return budget;
8042 
8043 	/* Exit the polling mode, but don't re-enable interrupts if stack might
8044 	 * poll us due to busy-polling
8045 	 */
8046 	if (likely(napi_complete_done(napi, work_done)))
8047 		igb_ring_irq_enable(q_vector);
8048 
8049 	return work_done;
8050 }
8051 
8052 /**
8053  *  igb_clean_tx_irq - Reclaim resources after transmit completes
8054  *  @q_vector: pointer to q_vector containing needed info
8055  *  @napi_budget: Used to determine if we are in netpoll
8056  *
8057  *  returns true if ring is completely cleaned
8058  **/
8059 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
8060 {
8061 	struct igb_adapter *adapter = q_vector->adapter;
8062 	struct igb_ring *tx_ring = q_vector->tx.ring;
8063 	struct igb_tx_buffer *tx_buffer;
8064 	union e1000_adv_tx_desc *tx_desc;
8065 	unsigned int total_bytes = 0, total_packets = 0;
8066 	unsigned int budget = q_vector->tx.work_limit;
8067 	unsigned int i = tx_ring->next_to_clean;
8068 
8069 	if (test_bit(__IGB_DOWN, &adapter->state))
8070 		return true;
8071 
8072 	tx_buffer = &tx_ring->tx_buffer_info[i];
8073 	tx_desc = IGB_TX_DESC(tx_ring, i);
8074 	i -= tx_ring->count;
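	/* i is kept biased by -count; when it increments back to zero the
	 * ring has wrapped and the pointers are reset without a modulo
	 */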
8075 
8076 	do {
8077 		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
8078 
8079 		/* if next_to_watch is not set then there is no work pending */
8080 		if (!eop_desc)
8081 			break;
8082 
8083 		/* prevent any other reads prior to eop_desc */
8084 		smp_rmb();
8085 
8086 		/* if DD is not set pending work has not been completed */
8087 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
8088 			break;
8089 
8090 		/* clear next_to_watch to prevent false hangs */
8091 		tx_buffer->next_to_watch = NULL;
8092 
8093 		/* update the statistics for this packet */
8094 		total_bytes += tx_buffer->bytecount;
8095 		total_packets += tx_buffer->gso_segs;
8096 
8097 		/* free the skb */
8098 		if (tx_buffer->type == IGB_TYPE_SKB)
8099 			napi_consume_skb(tx_buffer->skb, napi_budget);
8100 		else
8101 			xdp_return_frame(tx_buffer->xdpf);
8102 
8103 		/* unmap skb header data */
8104 		dma_unmap_single(tx_ring->dev,
8105 				 dma_unmap_addr(tx_buffer, dma),
8106 				 dma_unmap_len(tx_buffer, len),
8107 				 DMA_TO_DEVICE);
8108 
8109 		/* clear tx_buffer data */
8110 		dma_unmap_len_set(tx_buffer, len, 0);
8111 
8112 		/* clear last DMA location and unmap remaining buffers */
8113 		while (tx_desc != eop_desc) {
8114 			tx_buffer++;
8115 			tx_desc++;
8116 			i++;
8117 			if (unlikely(!i)) {
8118 				i -= tx_ring->count;
8119 				tx_buffer = tx_ring->tx_buffer_info;
8120 				tx_desc = IGB_TX_DESC(tx_ring, 0);
8121 			}
8122 
8123 			/* unmap any remaining paged data */
8124 			if (dma_unmap_len(tx_buffer, len)) {
8125 				dma_unmap_page(tx_ring->dev,
8126 					       dma_unmap_addr(tx_buffer, dma),
8127 					       dma_unmap_len(tx_buffer, len),
8128 					       DMA_TO_DEVICE);
8129 				dma_unmap_len_set(tx_buffer, len, 0);
8130 			}
8131 		}
8132 
8133 		/* move us one more past the eop_desc for start of next pkt */
8134 		tx_buffer++;
8135 		tx_desc++;
8136 		i++;
8137 		if (unlikely(!i)) {
8138 			i -= tx_ring->count;
8139 			tx_buffer = tx_ring->tx_buffer_info;
8140 			tx_desc = IGB_TX_DESC(tx_ring, 0);
8141 		}
8142 
8143 		/* issue prefetch for next Tx descriptor */
8144 		prefetch(tx_desc);
8145 
8146 		/* update budget accounting */
8147 		budget--;
8148 	} while (likely(budget));
8149 
8150 	netdev_tx_completed_queue(txring_txq(tx_ring),
8151 				  total_packets, total_bytes);
8152 	i += tx_ring->count;
8153 	tx_ring->next_to_clean = i;
8154 	u64_stats_update_begin(&tx_ring->tx_syncp);
8155 	tx_ring->tx_stats.bytes += total_bytes;
8156 	tx_ring->tx_stats.packets += total_packets;
8157 	u64_stats_update_end(&tx_ring->tx_syncp);
8158 	q_vector->tx.total_bytes += total_bytes;
8159 	q_vector->tx.total_packets += total_packets;
8160 
8161 	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
8162 		struct e1000_hw *hw = &adapter->hw;
8163 
8164 		/* Detect a transmit hang in hardware; this serializes the
8165 		 * check with the clearing of time_stamp and movement of i
8166 		 */
8167 		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
8168 		if (tx_buffer->next_to_watch &&
8169 		    time_after(jiffies, tx_buffer->time_stamp +
8170 			       (adapter->tx_timeout_factor * HZ)) &&
8171 		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
8172 
8173 			/* detected Tx unit hang */
8174 			dev_err(tx_ring->dev,
8175 				"Detected Tx Unit Hang\n"
8176 				"  Tx Queue             <%d>\n"
8177 				"  TDH                  <%x>\n"
8178 				"  TDT                  <%x>\n"
8179 				"  next_to_use          <%x>\n"
8180 				"  next_to_clean        <%x>\n"
8181 				"buffer_info[next_to_clean]\n"
8182 				"  time_stamp           <%lx>\n"
8183 				"  next_to_watch        <%p>\n"
8184 				"  jiffies              <%lx>\n"
8185 				"  desc.status          <%x>\n",
8186 				tx_ring->queue_index,
8187 				rd32(E1000_TDH(tx_ring->reg_idx)),
8188 				readl(tx_ring->tail),
8189 				tx_ring->next_to_use,
8190 				tx_ring->next_to_clean,
8191 				tx_buffer->time_stamp,
8192 				tx_buffer->next_to_watch,
8193 				jiffies,
8194 				tx_buffer->next_to_watch->wb.status);
8195 			netif_stop_subqueue(tx_ring->netdev,
8196 					    tx_ring->queue_index);
8197 
8198 			/* we are about to reset, no point in enabling stuff */
8199 			return true;
8200 		}
8201 	}
8202 
8203 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
8204 	if (unlikely(total_packets &&
8205 	    netif_carrier_ok(tx_ring->netdev) &&
8206 	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
8207 		/* Make sure that anybody stopping the queue after this
8208 		 * sees the new next_to_clean.
8209 		 */
8210 		smp_mb();
8211 		if (__netif_subqueue_stopped(tx_ring->netdev,
8212 					     tx_ring->queue_index) &&
8213 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
8214 			netif_wake_subqueue(tx_ring->netdev,
8215 					    tx_ring->queue_index);
8216 
8217 			u64_stats_update_begin(&tx_ring->tx_syncp);
8218 			tx_ring->tx_stats.restart_queue++;
8219 			u64_stats_update_end(&tx_ring->tx_syncp);
8220 		}
8221 	}
8222 
8223 	return !!budget;
8224 }
8225 
8226 /**
8227  *  igb_reuse_rx_page - page flip buffer and store it back on the ring
8228  *  @rx_ring: rx descriptor ring to store buffers on
8229  *  @old_buff: donor buffer to have page reused
8230  *
8231  *  Synchronizes page for reuse by the adapter
8232  **/
8233 static void igb_reuse_rx_page(struct igb_ring *rx_ring,
8234 			      struct igb_rx_buffer *old_buff)
8235 {
8236 	struct igb_rx_buffer *new_buff;
8237 	u16 nta = rx_ring->next_to_alloc;
8238 
8239 	new_buff = &rx_ring->rx_buffer_info[nta];
8240 
8241 	/* update, and store next to alloc */
8242 	nta++;
8243 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
8244 
8245 	/* Transfer page from old buffer to new buffer.
8246 	 * Move each member individually to avoid possible store
8247 	 * forwarding stalls.
8248 	 */
8249 	new_buff->dma		= old_buff->dma;
8250 	new_buff->page		= old_buff->page;
8251 	new_buff->page_offset	= old_buff->page_offset;
8252 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
8253 }
8254 
8255 static inline bool igb_page_is_reserved(struct page *page)
8256 {
8257 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
8258 }
8259 
8260 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
8261 				  int rx_buf_pgcnt)
8262 {
8263 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
8264 	struct page *page = rx_buffer->page;
8265 
8266 	/* avoid re-using remote pages */
8267 	if (unlikely(igb_page_is_reserved(page)))
8268 		return false;
8269 
8270 #if (PAGE_SIZE < 8192)
8271 	/* if we are the only owner of the page we can reuse it */
8272 	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
8273 		return false;
8274 #else
8275 #define IGB_LAST_OFFSET \
8276 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
8277 
8278 	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
8279 		return false;
8280 #endif
8281 
8282 	/* If we have drained the page fragment pool we need to update
8283 	 * the pagecnt_bias and page count so that we fully restock the
8284 	 * number of references the driver holds.
8285 	 */
8286 	if (unlikely(pagecnt_bias == 1)) {
8287 		page_ref_add(page, USHRT_MAX - 1);
8288 		rx_buffer->pagecnt_bias = USHRT_MAX;
8289 	}
8290 
8291 	return true;
8292 }
8293 
8294 /**
8295  *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
8296  *  @rx_ring: rx descriptor ring to transact packets on
8297  *  @rx_buffer: buffer containing page to add
8298  *  @skb: sk_buff to place the data into
8299  *  @size: size of buffer to be added
8300  *
8301  *  This function will add the data contained in rx_buffer->page to the skb.
8302  **/
8303 static void igb_add_rx_frag(struct igb_ring *rx_ring,
8304 			    struct igb_rx_buffer *rx_buffer,
8305 			    struct sk_buff *skb,
8306 			    unsigned int size)
8307 {
8308 #if (PAGE_SIZE < 8192)
8309 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8310 #else
8311 	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8312 				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8313 				SKB_DATA_ALIGN(size);
8314 #endif
8315 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8316 			rx_buffer->page_offset, size, truesize);
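	/* on small-page systems flip between the two half-page buffers;
	 * with larger pages simply advance the offset
	 */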
8317 #if (PAGE_SIZE < 8192)
8318 	rx_buffer->page_offset ^= truesize;
8319 #else
8320 	rx_buffer->page_offset += truesize;
8321 #endif
8322 }
8323 
8324 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8325 					 struct igb_rx_buffer *rx_buffer,
8326 					 struct xdp_buff *xdp,
8327 					 union e1000_adv_rx_desc *rx_desc)
8328 {
8329 #if (PAGE_SIZE < 8192)
8330 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8331 #else
8332 	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
8333 					       xdp->data_hard_start);
8334 #endif
8335 	unsigned int size = xdp->data_end - xdp->data;
8336 	unsigned int headlen;
8337 	struct sk_buff *skb;
8338 
8339 	/* prefetch first cache line of first page */
8340 	net_prefetch(xdp->data);
8341 
8342 	/* allocate a skb to store the frags */
8343 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8344 	if (unlikely(!skb))
8345 		return NULL;
8346 
8347 	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
8348 		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
8349 			xdp->data += IGB_TS_HDR_LEN;
8350 			size -= IGB_TS_HDR_LEN;
8351 		}
8352 	}
8353 
8354 	/* Determine available headroom for copy */
8355 	headlen = size;
8356 	if (headlen > IGB_RX_HDR_LEN)
8357 		headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
8358 
8359 	/* align pull length to size of long to optimize memcpy performance */
8360 	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
8361 
8362 	/* update all of the pointers */
8363 	size -= headlen;
8364 	if (size) {
8365 		skb_add_rx_frag(skb, 0, rx_buffer->page,
8366 				(xdp->data + headlen) - page_address(rx_buffer->page),
8367 				size, truesize);
8368 #if (PAGE_SIZE < 8192)
8369 		rx_buffer->page_offset ^= truesize;
8370 #else
8371 		rx_buffer->page_offset += truesize;
8372 #endif
8373 	} else {
8374 		rx_buffer->pagecnt_bias++;
8375 	}
8376 
8377 	return skb;
8378 }
8379 
8380 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8381 				     struct igb_rx_buffer *rx_buffer,
8382 				     struct xdp_buff *xdp,
8383 				     union e1000_adv_rx_desc *rx_desc)
8384 {
8385 #if (PAGE_SIZE < 8192)
8386 	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8387 #else
8388 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8389 				SKB_DATA_ALIGN(xdp->data_end -
8390 					       xdp->data_hard_start);
8391 #endif
8392 	unsigned int metasize = xdp->data - xdp->data_meta;
8393 	struct sk_buff *skb;
8394 
8395 	/* prefetch first cache line of first page */
8396 	net_prefetch(xdp->data_meta);
8397 
8398 	/* build an skb around the page buffer */
8399 	skb = build_skb(xdp->data_hard_start, truesize);
8400 	if (unlikely(!skb))
8401 		return NULL;
8402 
8403 	/* update pointers within the skb to store the data */
8404 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
8405 	__skb_put(skb, xdp->data_end - xdp->data);
8406 
8407 	if (metasize)
8408 		skb_metadata_set(skb, metasize);
8409 
8410 	/* pull timestamp out of packet data */
8411 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8412 		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
8413 			__skb_pull(skb, IGB_TS_HDR_LEN);
8414 	}
8415 
8416 	/* update buffer offset */
8417 #if (PAGE_SIZE < 8192)
8418 	rx_buffer->page_offset ^= truesize;
8419 #else
8420 	rx_buffer->page_offset += truesize;
8421 #endif
8422 
8423 	return skb;
8424 }
8425 
8426 static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
8427 				   struct igb_ring *rx_ring,
8428 				   struct xdp_buff *xdp)
8429 {
8430 	int err, result = IGB_XDP_PASS;
8431 	struct bpf_prog *xdp_prog;
8432 	u32 act;
8433 
8434 	rcu_read_lock();
8435 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
8436 
8437 	if (!xdp_prog)
8438 		goto xdp_out;
8439 
8440 	prefetchw(xdp->data_hard_start); /* xdp_frame write */
8441 
8442 	act = bpf_prog_run_xdp(xdp_prog, xdp);
8443 	switch (act) {
8444 	case XDP_PASS:
8445 		break;
8446 	case XDP_TX:
8447 		result = igb_xdp_xmit_back(adapter, xdp);
8448 		if (result == IGB_XDP_CONSUMED)
8449 			goto out_failure;
8450 		break;
8451 	case XDP_REDIRECT:
8452 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
8453 		if (err)
8454 			goto out_failure;
8455 		result = IGB_XDP_REDIR;
8456 		break;
8457 	default:
8458 		bpf_warn_invalid_xdp_action(act);
8459 		fallthrough;
8460 	case XDP_ABORTED:
8461 out_failure:
8462 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
8463 		fallthrough;
8464 	case XDP_DROP:
8465 		result = IGB_XDP_CONSUMED;
8466 		break;
8467 	}
8468 xdp_out:
8469 	rcu_read_unlock();
8470 	return ERR_PTR(-result);
8471 }
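/* Illustrative decode of the ERR_PTR convention used above: the XDP
 * verdict is negated into an error pointer so callers can tell a
 * consumed buffer from a real skb, then recovered with -PTR_ERR() as
 * igb_clean_rx_irq() does below.
 */
#if 0
	struct sk_buff *skb = igb_run_xdp(adapter, rx_ring, &xdp);

	if (IS_ERR(skb)) {
		unsigned int xdp_res = -PTR_ERR(skb);
		/* xdp_res is IGB_XDP_PASS, _TX, _REDIR or _CONSUMED */
	}
#endif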
8472 
8473 static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
8474 					  unsigned int size)
8475 {
8476 	unsigned int truesize;
8477 
8478 #if (PAGE_SIZE < 8192)
8479 	truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
8480 #else
8481 	truesize = ring_uses_build_skb(rx_ring) ?
8482 		SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
8483 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
8484 		SKB_DATA_ALIGN(size);
8485 #endif
8486 	return truesize;
8487 }
8488 
8489 static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
8490 			       struct igb_rx_buffer *rx_buffer,
8491 			       unsigned int size)
8492 {
8493 	unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
8494 #if (PAGE_SIZE < 8192)
8495 	rx_buffer->page_offset ^= truesize;
8496 #else
8497 	rx_buffer->page_offset += truesize;
8498 #endif
8499 }
8500 
8501 static inline void igb_rx_checksum(struct igb_ring *ring,
8502 				   union e1000_adv_rx_desc *rx_desc,
8503 				   struct sk_buff *skb)
8504 {
8505 	skb_checksum_none_assert(skb);
8506 
8507 	/* Ignore Checksum bit is set */
8508 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8509 		return;
8510 
8511 	/* Rx checksum disabled via ethtool */
8512 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
8513 		return;
8514 
8515 	/* TCP/UDP checksum error bit is set */
8516 	if (igb_test_staterr(rx_desc,
8517 			     E1000_RXDEXT_STATERR_TCPE |
8518 			     E1000_RXDEXT_STATERR_IPE)) {
8519 		/* work around errata with sctp packets where the TCPE aka
8520 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
8521 		 * packets, i.e. let the stack verify the crc32c instead
8522 		 */
8523 		if (!((skb->len == 60) &&
8524 		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8525 			u64_stats_update_begin(&ring->rx_syncp);
8526 			ring->rx_stats.csum_err++;
8527 			u64_stats_update_end(&ring->rx_syncp);
8528 		}
8529 		/* let the stack verify checksum errors */
8530 		return;
8531 	}
8532 	/* It must be a TCP or UDP packet with a valid checksum */
8533 	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8534 				      E1000_RXD_STAT_UDPCS))
8535 		skb->ip_summed = CHECKSUM_UNNECESSARY;
8536 
8537 	dev_dbg(ring->dev, "cksum success: bits %08X\n",
8538 		le32_to_cpu(rx_desc->wb.upper.status_error));
8539 }
8540 
8541 static inline void igb_rx_hash(struct igb_ring *ring,
8542 			       union e1000_adv_rx_desc *rx_desc,
8543 			       struct sk_buff *skb)
8544 {
8545 	if (ring->netdev->features & NETIF_F_RXHASH)
8546 		skb_set_hash(skb,
8547 			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8548 			     PKT_HASH_TYPE_L3);
8549 }
8550 
8551 /**
8552  *  igb_is_non_eop - process handling of non-EOP buffers
8553  *  @rx_ring: Rx ring being processed
8554  *  @rx_desc: Rx descriptor for current buffer
8555  *
8556  *  This function updates next to clean.  If the buffer is an EOP buffer
8557  *  this function exits returning false, otherwise it will place the
8558  *  sk_buff in the next buffer to be chained and return true indicating
8559  *  that this is in fact a non-EOP buffer.
8560  **/
8561 static bool igb_is_non_eop(struct igb_ring *rx_ring,
8562 			   union e1000_adv_rx_desc *rx_desc)
8563 {
8564 	u32 ntc = rx_ring->next_to_clean + 1;
8565 
8566 	/* fetch, update, and store next to clean */
8567 	ntc = (ntc < rx_ring->count) ? ntc : 0;
8568 	rx_ring->next_to_clean = ntc;
8569 
8570 	prefetch(IGB_RX_DESC(rx_ring, ntc));
8571 
8572 	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8573 		return false;
8574 
8575 	return true;
8576 }
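/* Illustrative: the wrap above is equivalent to
 *
 *	ntc = (ntc + 1) % rx_ring->count;
 *
 * but trades the division for a compare, which is cheaper in this
 * hot path.
 */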
8577 
8578 /**
8579  *  igb_cleanup_headers - Correct corrupted or empty headers
8580  *  @rx_ring: rx descriptor ring packet is being transacted on
8581  *  @rx_desc: pointer to the EOP Rx descriptor
8582  *  @skb: pointer to current skb being fixed
8583  *
8584  *  Address the case where we are pulling data in on pages only
8585  *  and as such no data is present in the skb header.
8586  *
8587  *  In addition if skb is not at least 60 bytes we need to pad it so that
8588  *  it is large enough to qualify as a valid Ethernet frame.
8589  *
8590  *  Returns true if an error was encountered and skb was freed.
8591  **/
8592 static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8593 				union e1000_adv_rx_desc *rx_desc,
8594 				struct sk_buff *skb)
8595 {
8596 	/* XDP packets use error pointer so abort at this point */
8597 	if (IS_ERR(skb))
8598 		return true;
8599 
8600 	if (unlikely((igb_test_staterr(rx_desc,
8601 				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8602 		struct net_device *netdev = rx_ring->netdev;
8603 		if (!(netdev->features & NETIF_F_RXALL)) {
8604 			dev_kfree_skb_any(skb);
8605 			return true;
8606 		}
8607 	}
8608 
8609 	/* if eth_skb_pad returns an error the skb was freed */
8610 	if (eth_skb_pad(skb))
8611 		return true;
8612 
8613 	return false;
8614 }
8615 
8616 /**
8617  *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
8618  *  @rx_ring: rx descriptor ring packet is being transacted on
8619  *  @rx_desc: pointer to the EOP Rx descriptor
8620  *  @skb: pointer to current skb being populated
8621  *
8622  *  This function checks the ring, descriptor, and packet information in
8623  *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
8624  *  other fields within the skb.
8625  **/
8626 static void igb_process_skb_fields(struct igb_ring *rx_ring,
8627 				   union e1000_adv_rx_desc *rx_desc,
8628 				   struct sk_buff *skb)
8629 {
8630 	struct net_device *dev = rx_ring->netdev;
8631 
8632 	igb_rx_hash(rx_ring, rx_desc, skb);
8633 
8634 	igb_rx_checksum(rx_ring, rx_desc, skb);
8635 
8636 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8637 	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8638 		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8639 
8640 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8641 	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8642 		u16 vid;
8643 
8644 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8645 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8646 			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
8647 		else
8648 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8649 
8650 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8651 	}
8652 
8653 	skb_record_rx_queue(skb, rx_ring->queue_index);
8654 
8655 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8656 }
8657 
8658 static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8659 {
8660 	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8661 }
8662 
8663 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8664 					       const unsigned int size, int *rx_buf_pgcnt)
8665 {
8666 	struct igb_rx_buffer *rx_buffer;
8667 
8668 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8669 	*rx_buf_pgcnt =
8670 #if (PAGE_SIZE < 8192)
8671 		page_count(rx_buffer->page);
8672 #else
8673 		0;
8674 #endif
8675 	prefetchw(rx_buffer->page);
8676 
8677 	/* we are reusing so sync this buffer for CPU use */
8678 	dma_sync_single_range_for_cpu(rx_ring->dev,
8679 				      rx_buffer->dma,
8680 				      rx_buffer->page_offset,
8681 				      size,
8682 				      DMA_FROM_DEVICE);
8683 
8684 	rx_buffer->pagecnt_bias--;
8685 
8686 	return rx_buffer;
8687 }
8688 
8689 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8690 			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
8691 {
8692 	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
8693 		/* hand second half of page back to the ring */
8694 		igb_reuse_rx_page(rx_ring, rx_buffer);
8695 	} else {
8696 		/* We are not reusing the buffer so unmap it and free
8697 		 * any references we are holding to it
8698 		 */
8699 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8700 				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8701 				     IGB_RX_DMA_ATTR);
8702 		__page_frag_cache_drain(rx_buffer->page,
8703 					rx_buffer->pagecnt_bias);
8704 	}
8705 
8706 	/* clear contents of rx_buffer */
8707 	rx_buffer->page = NULL;
8708 }
8709 
8710 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8711 {
8712 	struct igb_adapter *adapter = q_vector->adapter;
8713 	struct igb_ring *rx_ring = q_vector->rx.ring;
8714 	struct sk_buff *skb = rx_ring->skb;
8715 	unsigned int total_bytes = 0, total_packets = 0;
8716 	u16 cleaned_count = igb_desc_unused(rx_ring);
8717 	unsigned int xdp_xmit = 0;
8718 	struct xdp_buff xdp;
8719 	int rx_buf_pgcnt;
8720 
8721 	xdp.rxq = &rx_ring->xdp_rxq;
8722 
8723 	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
8724 #if (PAGE_SIZE < 8192)
8725 	xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
8726 #endif
8727 
8728 	while (likely(total_packets < budget)) {
8729 		union e1000_adv_rx_desc *rx_desc;
8730 		struct igb_rx_buffer *rx_buffer;
8731 		unsigned int size;
8732 
8733 		/* return some buffers to hardware, one at a time is too slow */
8734 		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8735 			igb_alloc_rx_buffers(rx_ring, cleaned_count);
8736 			cleaned_count = 0;
8737 		}
8738 
8739 		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8740 		size = le16_to_cpu(rx_desc->wb.upper.length);
8741 		if (!size)
8742 			break;
8743 
8744 		/* This memory barrier is needed to keep us from reading
8745 		 * any other fields out of the rx_desc until we know the
8746 		 * descriptor has been written back
8747 		 */
8748 		dma_rmb();
8749 
8750 		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
8751 
8752 		/* retrieve a buffer from the ring */
8753 		if (!skb) {
8754 			xdp.data = page_address(rx_buffer->page) +
8755 				   rx_buffer->page_offset;
8756 			xdp.data_meta = xdp.data;
8757 			xdp.data_hard_start = xdp.data -
8758 					      igb_rx_offset(rx_ring);
8759 			xdp.data_end = xdp.data + size;
8760 #if (PAGE_SIZE > 4096)
8761 			/* At larger PAGE_SIZE, frame_sz depends on the frame length */
8762 			xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
8763 #endif
8764 			skb = igb_run_xdp(adapter, rx_ring, &xdp);
8765 		}
8766 
8767 		if (IS_ERR(skb)) {
8768 			unsigned int xdp_res = -PTR_ERR(skb);
8769 
8770 			if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
8771 				xdp_xmit |= xdp_res;
8772 				igb_rx_buffer_flip(rx_ring, rx_buffer, size);
8773 			} else {
8774 				rx_buffer->pagecnt_bias++;
8775 			}
8776 			total_packets++;
8777 			total_bytes += size;
8778 		} else if (skb)
8779 			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8780 		else if (ring_uses_build_skb(rx_ring))
8781 			skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
8782 		else
8783 			skb = igb_construct_skb(rx_ring, rx_buffer,
8784 						&xdp, rx_desc);
8785 
8786 		/* exit if we failed to retrieve a buffer */
8787 		if (!skb) {
8788 			rx_ring->rx_stats.alloc_failed++;
8789 			rx_buffer->pagecnt_bias++;
8790 			break;
8791 		}
8792 
8793 		igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
8794 		cleaned_count++;
8795 
8796 		/* fetch next buffer in frame if non-eop */
8797 		if (igb_is_non_eop(rx_ring, rx_desc))
8798 			continue;
8799 
8800 		/* verify the packet layout is correct */
8801 		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8802 			skb = NULL;
8803 			continue;
8804 		}
8805 
8806 		/* probably a little skewed due to removing CRC */
8807 		total_bytes += skb->len;
8808 
8809 		/* populate checksum, timestamp, VLAN, and protocol */
8810 		igb_process_skb_fields(rx_ring, rx_desc, skb);
8811 
8812 		napi_gro_receive(&q_vector->napi, skb);
8813 
8814 		/* reset skb pointer */
8815 		skb = NULL;
8816 
8817 		/* update budget accounting */
8818 		total_packets++;
8819 	}
8820 
8821 	/* place incomplete frames back on ring for completion */
8822 	rx_ring->skb = skb;
8823 
8824 	if (xdp_xmit & IGB_XDP_REDIR)
8825 		xdp_do_flush();
8826 
8827 	if (xdp_xmit & IGB_XDP_TX) {
8828 		struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
8829 
8830 		igb_xdp_ring_update_tail(tx_ring);
8831 	}
8832 
8833 	u64_stats_update_begin(&rx_ring->rx_syncp);
8834 	rx_ring->rx_stats.packets += total_packets;
8835 	rx_ring->rx_stats.bytes += total_bytes;
8836 	u64_stats_update_end(&rx_ring->rx_syncp);
8837 	q_vector->rx.total_packets += total_packets;
8838 	q_vector->rx.total_bytes += total_bytes;
8839 
8840 	if (cleaned_count)
8841 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
8842 
8843 	return total_packets;
8844 }
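/* Illustrative sketch of how a NAPI poll routine consumes the return
 * value above, assuming the usual budget contract: processing fewer
 * packets than the budget means the ring is drained and polling may
 * complete.
 */
#if 0
	int cleaned = igb_clean_rx_irq(q_vector, budget);

	clean_complete = (cleaned < budget);
#endif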
8845 
8846 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8847 				  struct igb_rx_buffer *bi)
8848 {
8849 	struct page *page = bi->page;
8850 	dma_addr_t dma;
8851 
8852 	/* since we are recycling buffers we should seldom need to alloc */
8853 	if (likely(page))
8854 		return true;
8855 
8856 	/* alloc new page for storage */
8857 	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8858 	if (unlikely(!page)) {
8859 		rx_ring->rx_stats.alloc_failed++;
8860 		return false;
8861 	}
8862 
8863 	/* map page for use */
8864 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8865 				 igb_rx_pg_size(rx_ring),
8866 				 DMA_FROM_DEVICE,
8867 				 IGB_RX_DMA_ATTR);
8868 
8869 	/* if mapping failed free memory back to system since
8870 	 * there isn't much point in holding memory we can't use
8871 	 */
8872 	if (dma_mapping_error(rx_ring->dev, dma)) {
8873 		__free_pages(page, igb_rx_pg_order(rx_ring));
8874 
8875 		rx_ring->rx_stats.alloc_failed++;
8876 		return false;
8877 	}
8878 
8879 	bi->dma = dma;
8880 	bi->page = page;
8881 	bi->page_offset = igb_rx_offset(rx_ring);
8882 	page_ref_add(page, USHRT_MAX - 1);
8883 	bi->pagecnt_bias = USHRT_MAX;
8884 
8885 	return true;
8886 }
8887 
8888 /**
8889  *  igb_alloc_rx_buffers - Replace used receive buffers
8890  *  @rx_ring: rx descriptor ring to allocate new receive buffers
8891  *  @cleaned_count: count of buffers to allocate
8892  **/
8893 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8894 {
8895 	union e1000_adv_rx_desc *rx_desc;
8896 	struct igb_rx_buffer *bi;
8897 	u16 i = rx_ring->next_to_use;
8898 	u16 bufsz;
8899 
8900 	/* nothing to do */
8901 	if (!cleaned_count)
8902 		return;
8903 
8904 	rx_desc = IGB_RX_DESC(rx_ring, i);
8905 	bi = &rx_ring->rx_buffer_info[i];
8906 	i -= rx_ring->count;
8907 
8908 	bufsz = igb_rx_bufsz(rx_ring);
8909 
8910 	do {
8911 		if (!igb_alloc_mapped_page(rx_ring, bi))
8912 			break;
8913 
8914 		/* sync the buffer for use by the device */
8915 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8916 						 bi->page_offset, bufsz,
8917 						 DMA_FROM_DEVICE);
8918 
8919 		/* Refresh the desc even if buffer_addrs didn't change
8920 		 * because each write-back erases this info.
8921 		 */
8922 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8923 
8924 		rx_desc++;
8925 		bi++;
8926 		i++;
8927 		if (unlikely(!i)) {
8928 			rx_desc = IGB_RX_DESC(rx_ring, 0);
8929 			bi = rx_ring->rx_buffer_info;
8930 			i -= rx_ring->count;
8931 		}
8932 
8933 		/* clear the length for the next_to_use descriptor */
8934 		rx_desc->wb.upper.length = 0;
8935 
8936 		cleaned_count--;
8937 	} while (cleaned_count);
8938 
8939 	i += rx_ring->count;
8940 
8941 	if (rx_ring->next_to_use != i) {
8942 		/* record the next descriptor to use */
8943 		rx_ring->next_to_use = i;
8944 
8945 		/* update next to alloc since we have filled the ring */
8946 		rx_ring->next_to_alloc = i;
8947 
8948 		/* Force memory writes to complete before letting h/w
8949 		 * know there are new descriptors to fetch.  (Only
8950 		 * applicable for weak-ordered memory model archs,
8951 		 * such as IA-64).
8952 		 */
8953 		dma_wmb();
8954 		writel(i, rx_ring->tail);
8955 	}
8956 }
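/* Illustrative u16 arithmetic, not driver code: biasing the index by
 * -count turns the wrap test into a cheap "!i".  With count = 4 and
 * next_to_use = 2:
 *
 *	u16 count = 4, i = 2;
 *	i -= count;	// i == 0xfffe
 *	i++;		// i == 0xffff
 *	i++;		// i == 0, "!i" fires and the pointers rewind
 *
 * and the final "i += rx_ring->count" restores the real ring index.
 */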
8957 
8958 /**
8959  * igb_mii_ioctl - handle MII ioctl requests
8960  * @netdev: pointer to netdev struct
8961  * @ifr: interface structure
8962  * @cmd: ioctl command to execute
8963  **/
8964 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8965 {
8966 	struct igb_adapter *adapter = netdev_priv(netdev);
8967 	struct mii_ioctl_data *data = if_mii(ifr);
8968 
8969 	if (adapter->hw.phy.media_type != e1000_media_type_copper)
8970 		return -EOPNOTSUPP;
8971 
8972 	switch (cmd) {
8973 	case SIOCGMIIPHY:
8974 		data->phy_id = adapter->hw.phy.addr;
8975 		break;
8976 	case SIOCGMIIREG:
8977 		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8978 				     &data->val_out))
8979 			return -EIO;
8980 		break;
8981 	case SIOCSMIIREG:
8982 	default:
8983 		return -EOPNOTSUPP;
8984 	}
8985 	return 0;
8986 }
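/* Illustrative userspace sketch, kept out of the kernel build: driving
 * the SIOCGMIIPHY/SIOCGMIIREG path handled above.  The socket fd and
 * the "eth0" interface name are placeholder assumptions.
 */
#if 0
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_phy_reg(int sock, unsigned int reg, unsigned short *val)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;
	mii->reg_num = reg;
	if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)	/* fills mii->val_out */
		return -1;
	*val = mii->val_out;
	return 0;
}
#endif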
8987 
8988 /**
8989  * igb_ioctl - dispatch device-specific ioctl requests
8990  * @netdev: pointer to netdev struct
8991  * @ifr: interface structure
8992  * @cmd: ioctl command to execute
8993  **/
8994 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8995 {
8996 	switch (cmd) {
8997 	case SIOCGMIIPHY:
8998 	case SIOCGMIIREG:
8999 	case SIOCSMIIREG:
9000 		return igb_mii_ioctl(netdev, ifr, cmd);
9001 	case SIOCGHWTSTAMP:
9002 		return igb_ptp_get_ts_config(netdev, ifr);
9003 	case SIOCSHWTSTAMP:
9004 		return igb_ptp_set_ts_config(netdev, ifr);
9005 	default:
9006 		return -EOPNOTSUPP;
9007 	}
9008 }
9009 
9010 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
9011 {
9012 	struct igb_adapter *adapter = hw->back;
9013 
9014 	pci_read_config_word(adapter->pdev, reg, value);
9015 }
9016 
9017 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
9018 {
9019 	struct igb_adapter *adapter = hw->back;
9020 
9021 	pci_write_config_word(adapter->pdev, reg, *value);
9022 }
9023 
9024 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
9025 {
9026 	struct igb_adapter *adapter = hw->back;
9027 
9028 	if (pcie_capability_read_word(adapter->pdev, reg, value))
9029 		return -E1000_ERR_CONFIG;
9030 
9031 	return 0;
9032 }
9033 
9034 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
9035 {
9036 	struct igb_adapter *adapter = hw->back;
9037 
9038 	if (pcie_capability_write_word(adapter->pdev, reg, *value))
9039 		return -E1000_ERR_CONFIG;
9040 
9041 	return 0;
9042 }
9043 
9044 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
9045 {
9046 	struct igb_adapter *adapter = netdev_priv(netdev);
9047 	struct e1000_hw *hw = &adapter->hw;
9048 	u32 ctrl, rctl;
9049 	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
9050 
9051 	if (enable) {
9052 		/* enable VLAN tag insert/strip */
9053 		ctrl = rd32(E1000_CTRL);
9054 		ctrl |= E1000_CTRL_VME;
9055 		wr32(E1000_CTRL, ctrl);
9056 
9057 		/* Disable CFI check */
9058 		rctl = rd32(E1000_RCTL);
9059 		rctl &= ~E1000_RCTL_CFIEN;
9060 		wr32(E1000_RCTL, rctl);
9061 	} else {
9062 		/* disable VLAN tag insert/strip */
9063 		ctrl = rd32(E1000_CTRL);
9064 		ctrl &= ~E1000_CTRL_VME;
9065 		wr32(E1000_CTRL, ctrl);
9066 	}
9067 
9068 	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
9069 }
9070 
9071 static int igb_vlan_rx_add_vid(struct net_device *netdev,
9072 			       __be16 proto, u16 vid)
9073 {
9074 	struct igb_adapter *adapter = netdev_priv(netdev);
9075 	struct e1000_hw *hw = &adapter->hw;
9076 	int pf_id = adapter->vfs_allocated_count;
9077 
9078 	/* add the filter since PF can receive vlans w/o entry in vlvf */
9079 	if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9080 		igb_vfta_set(hw, vid, pf_id, true, !!vid);
9081 
9082 	set_bit(vid, adapter->active_vlans);
9083 
9084 	return 0;
9085 }
9086 
9087 static int igb_vlan_rx_kill_vid(struct net_device *netdev,
9088 				__be16 proto, u16 vid)
9089 {
9090 	struct igb_adapter *adapter = netdev_priv(netdev);
9091 	int pf_id = adapter->vfs_allocated_count;
9092 	struct e1000_hw *hw = &adapter->hw;
9093 
9094 	/* remove VID from filter table */
9095 	if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9096 		igb_vfta_set(hw, vid, pf_id, false, true);
9097 
9098 	clear_bit(vid, adapter->active_vlans);
9099 
9100 	return 0;
9101 }
9102 
9103 static void igb_restore_vlan(struct igb_adapter *adapter)
9104 {
9105 	u16 vid = 1;
9106 
9107 	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
9108 	igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
9109 
9110 	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
9111 		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
9112 }
9113 
9114 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9115 {
9116 	struct pci_dev *pdev = adapter->pdev;
9117 	struct e1000_mac_info *mac = &adapter->hw.mac;
9118 
9119 	mac->autoneg = 0;
9120 
9121 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
9122 	 * for the switch() below to work
9123 	 */
9124 	if ((spd & 1) || (dplx & ~1))
9125 		goto err_inval;
9126 
9127 	/* Fiber NICs only allow 1000 Mbps full duplex,
9128 	 * and 100 Mbps full duplex for 100BaseFX SFPs
9129 	 */
9130 	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
9131 		switch (spd + dplx) {
9132 		case SPEED_10 + DUPLEX_HALF:
9133 		case SPEED_10 + DUPLEX_FULL:
9134 		case SPEED_100 + DUPLEX_HALF:
9135 			goto err_inval;
9136 		default:
9137 			break;
9138 		}
9139 	}
9140 
9141 	switch (spd + dplx) {
9142 	case SPEED_10 + DUPLEX_HALF:
9143 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
9144 		break;
9145 	case SPEED_10 + DUPLEX_FULL:
9146 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
9147 		break;
9148 	case SPEED_100 + DUPLEX_HALF:
9149 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
9150 		break;
9151 	case SPEED_100 + DUPLEX_FULL:
9152 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
9153 		break;
9154 	case SPEED_1000 + DUPLEX_FULL:
9155 		mac->autoneg = 1;
9156 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
9157 		break;
9158 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
9159 	default:
9160 		goto err_inval;
9161 	}
9162 
9163 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
9164 	adapter->hw.phy.mdix = AUTO_ALL_MODES;
9165 
9166 	return 0;
9167 
9168 err_inval:
9169 	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
9170 	return -EINVAL;
9171 }
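/* Illustrative: the switch above works because every SPEED_* value is
 * even while DUPLEX_HALF/DUPLEX_FULL are 0/1, so "spd + dplx" encodes
 * each pair without collisions, e.g.
 *
 *	SPEED_100 + DUPLEX_FULL   ->  101
 *	SPEED_1000 + DUPLEX_FULL  -> 1001
 *
 * which is exactly what the "(spd & 1) || (dplx & ~1)" guard enforces.
 */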
9172 
9173 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
9174 			  bool runtime)
9175 {
9176 	struct net_device *netdev = pci_get_drvdata(pdev);
9177 	struct igb_adapter *adapter = netdev_priv(netdev);
9178 	struct e1000_hw *hw = &adapter->hw;
9179 	u32 ctrl, rctl, status;
9180 	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
9181 	bool wake;
9182 
9183 	rtnl_lock();
9184 	netif_device_detach(netdev);
9185 
9186 	if (netif_running(netdev))
9187 		__igb_close(netdev, true);
9188 
9189 	igb_ptp_suspend(adapter);
9190 
9191 	igb_clear_interrupt_scheme(adapter);
9192 	rtnl_unlock();
9193 
9194 	status = rd32(E1000_STATUS);
9195 	if (status & E1000_STATUS_LU)
9196 		wufc &= ~E1000_WUFC_LNKC;
9197 
9198 	if (wufc) {
9199 		igb_setup_rctl(adapter);
9200 		igb_set_rx_mode(netdev);
9201 
9202 		/* turn on all-multi mode if wake on multicast is enabled */
9203 		if (wufc & E1000_WUFC_MC) {
9204 			rctl = rd32(E1000_RCTL);
9205 			rctl |= E1000_RCTL_MPE;
9206 			wr32(E1000_RCTL, rctl);
9207 		}
9208 
9209 		ctrl = rd32(E1000_CTRL);
9210 		ctrl |= E1000_CTRL_ADVD3WUC;
9211 		wr32(E1000_CTRL, ctrl);
9212 
9213 		/* Allow time for pending master requests to run */
9214 		igb_disable_pcie_master(hw);
9215 
9216 		wr32(E1000_WUC, E1000_WUC_PME_EN);
9217 		wr32(E1000_WUFC, wufc);
9218 	} else {
9219 		wr32(E1000_WUC, 0);
9220 		wr32(E1000_WUFC, 0);
9221 	}
9222 
9223 	wake = wufc || adapter->en_mng_pt;
9224 	if (!wake)
9225 		igb_power_down_link(adapter);
9226 	else
9227 		igb_power_up_link(adapter);
9228 
9229 	if (enable_wake)
9230 		*enable_wake = wake;
9231 
9232 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
9233 	 * would have already happened in close and is redundant.
9234 	 */
9235 	igb_release_hw_control(adapter);
9236 
9237 	pci_disable_device(pdev);
9238 
9239 	return 0;
9240 }
9241 
9242 static void igb_deliver_wake_packet(struct net_device *netdev)
9243 {
9244 	struct igb_adapter *adapter = netdev_priv(netdev);
9245 	struct e1000_hw *hw = &adapter->hw;
9246 	struct sk_buff *skb;
9247 	u32 wupl;
9248 
9249 	wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
9250 
9251 	/* WUPM stores only the first 128 bytes of the wake packet.
9252 	 * Read the packet only if we have the whole thing.
9253 	 */
9254 	if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
9255 		return;
9256 
9257 	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
9258 	if (!skb)
9259 		return;
9260 
9261 	skb_put(skb, wupl);
9262 
9263 	/* Ensure reads are 32-bit aligned */
9264 	wupl = roundup(wupl, 4);
9265 
9266 	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
9267 
9268 	skb->protocol = eth_type_trans(skb, netdev);
9269 	netif_rx(skb);
9270 }
9271 
9272 static int __maybe_unused igb_suspend(struct device *dev)
9273 {
9274 	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
9275 }
9276 
9277 static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
9278 {
9279 	struct pci_dev *pdev = to_pci_dev(dev);
9280 	struct net_device *netdev = pci_get_drvdata(pdev);
9281 	struct igb_adapter *adapter = netdev_priv(netdev);
9282 	struct e1000_hw *hw = &adapter->hw;
9283 	u32 err, val;
9284 
9285 	pci_set_power_state(pdev, PCI_D0);
9286 	pci_restore_state(pdev);
9287 	pci_save_state(pdev);
9288 
9289 	if (!pci_device_is_present(pdev))
9290 		return -ENODEV;
9291 	err = pci_enable_device_mem(pdev);
9292 	if (err) {
9293 		dev_err(&pdev->dev,
9294 			"igb: Cannot enable PCI device from suspend\n");
9295 		return err;
9296 	}
9297 	pci_set_master(pdev);
9298 
9299 	pci_enable_wake(pdev, PCI_D3hot, 0);
9300 	pci_enable_wake(pdev, PCI_D3cold, 0);
9301 
9302 	if (igb_init_interrupt_scheme(adapter, true)) {
9303 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9304 		return -ENOMEM;
9305 	}
9306 
9307 	igb_reset(adapter);
9308 
9309 	/* let the f/w know that the h/w is now under the control of the
9310 	 * driver.
9311 	 */
9312 	igb_get_hw_control(adapter);
9313 
9314 	val = rd32(E1000_WUS);
9315 	if (val & WAKE_PKT_WUS)
9316 		igb_deliver_wake_packet(netdev);
9317 
9318 	wr32(E1000_WUS, ~0);
9319 
9320 	if (!rpm)
9321 		rtnl_lock();
9322 	if (!err && netif_running(netdev))
9323 		err = __igb_open(netdev, true);
9324 
9325 	if (!err)
9326 		netif_device_attach(netdev);
9327 	if (!rpm)
9328 		rtnl_unlock();
9329 
9330 	return err;
9331 }
9332 
9333 static int __maybe_unused igb_resume(struct device *dev)
9334 {
9335 	return __igb_resume(dev, false);
9336 }
9337 
9338 static int __maybe_unused igb_runtime_idle(struct device *dev)
9339 {
9340 	struct net_device *netdev = dev_get_drvdata(dev);
9341 	struct igb_adapter *adapter = netdev_priv(netdev);
9342 
9343 	if (!igb_has_link(adapter))
9344 		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
9345 
9346 	return -EBUSY;
9347 }
9348 
9349 static int __maybe_unused igb_runtime_suspend(struct device *dev)
9350 {
9351 	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
9352 }
9353 
9354 static int __maybe_unused igb_runtime_resume(struct device *dev)
9355 {
9356 	return __igb_resume(dev, true);
9357 }
9358 
9359 static void igb_shutdown(struct pci_dev *pdev)
9360 {
9361 	bool wake;
9362 
9363 	__igb_shutdown(pdev, &wake, 0);
9364 
9365 	if (system_state == SYSTEM_POWER_OFF) {
9366 		pci_wake_from_d3(pdev, wake);
9367 		pci_set_power_state(pdev, PCI_D3hot);
9368 	}
9369 }
9370 
9371 #ifdef CONFIG_PCI_IOV
9372 static int igb_sriov_reinit(struct pci_dev *dev)
9373 {
9374 	struct net_device *netdev = pci_get_drvdata(dev);
9375 	struct igb_adapter *adapter = netdev_priv(netdev);
9376 	struct pci_dev *pdev = adapter->pdev;
9377 
9378 	rtnl_lock();
9379 
9380 	if (netif_running(netdev))
9381 		igb_close(netdev);
9382 	else
9383 		igb_reset(adapter);
9384 
9385 	igb_clear_interrupt_scheme(adapter);
9386 
9387 	igb_init_queue_configuration(adapter);
9388 
9389 	if (igb_init_interrupt_scheme(adapter, true)) {
9390 		rtnl_unlock();
9391 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9392 		return -ENOMEM;
9393 	}
9394 
9395 	if (netif_running(netdev))
9396 		igb_open(netdev);
9397 
9398 	rtnl_unlock();
9399 
9400 	return 0;
9401 }
9402 
9403 static int igb_pci_disable_sriov(struct pci_dev *dev)
9404 {
9405 	int err = igb_disable_sriov(dev);
9406 
9407 	if (!err)
9408 		err = igb_sriov_reinit(dev);
9409 
9410 	return err;
9411 }
9412 
9413 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
9414 {
9415 	int err = igb_enable_sriov(dev, num_vfs);
9416 
9417 	if (err)
9418 		goto out;
9419 
9420 	err = igb_sriov_reinit(dev);
9421 	if (!err)
9422 		return num_vfs;
9423 
9424 out:
9425 	return err;
9426 }
9427 
9428 #endif
9429 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9430 {
9431 #ifdef CONFIG_PCI_IOV
9432 	if (num_vfs == 0)
9433 		return igb_pci_disable_sriov(dev);
9434 	else
9435 		return igb_pci_enable_sriov(dev, num_vfs);
9436 #endif
9437 	return 0;
9438 }
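/* Usage note, illustrative: the PCI core invokes this callback when
 * userspace writes the sriov_numvfs sysfs attribute, e.g. (the device
 * address below is a placeholder)
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * and writing 0 tears the VFs down again via igb_pci_disable_sriov().
 */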
9439 
9440 /**
9441  *  igb_io_error_detected - called when PCI error is detected
9442  *  @pdev: Pointer to PCI device
9443  *  @state: The current pci connection state
9444  *
9445  *  This function is called after a PCI bus error affecting
9446  *  this device has been detected.
9447  **/
9448 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9449 					      pci_channel_state_t state)
9450 {
9451 	struct net_device *netdev = pci_get_drvdata(pdev);
9452 	struct igb_adapter *adapter = netdev_priv(netdev);
9453 
9454 	netif_device_detach(netdev);
9455 
9456 	if (state == pci_channel_io_perm_failure)
9457 		return PCI_ERS_RESULT_DISCONNECT;
9458 
9459 	if (netif_running(netdev))
9460 		igb_down(adapter);
9461 	pci_disable_device(pdev);
9462 
9463 	/* Request a slot reset. */
9464 	return PCI_ERS_RESULT_NEED_RESET;
9465 }
9466 
9467 /**
9468  *  igb_io_slot_reset - called after the pci bus has been reset.
9469  *  @pdev: Pointer to PCI device
9470  *
9471  *  Restart the card from scratch, as if from a cold-boot. Implementation
9472  *  resembles the first-half of the __igb_resume routine.
9473  **/
9474 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9475 {
9476 	struct net_device *netdev = pci_get_drvdata(pdev);
9477 	struct igb_adapter *adapter = netdev_priv(netdev);
9478 	struct e1000_hw *hw = &adapter->hw;
9479 	pci_ers_result_t result;
9480 
9481 	if (pci_enable_device_mem(pdev)) {
9482 		dev_err(&pdev->dev,
9483 			"Cannot re-enable PCI device after reset.\n");
9484 		result = PCI_ERS_RESULT_DISCONNECT;
9485 	} else {
9486 		pci_set_master(pdev);
9487 		pci_restore_state(pdev);
9488 		pci_save_state(pdev);
9489 
9490 		pci_enable_wake(pdev, PCI_D3hot, 0);
9491 		pci_enable_wake(pdev, PCI_D3cold, 0);
9492 
9493 		/* In case of PCI error, the adapter loses its HW address,
9494 		 * so we should re-assign it here.
9495 		 */
9496 		hw->hw_addr = adapter->io_addr;
9497 
9498 		igb_reset(adapter);
9499 		wr32(E1000_WUS, ~0);
9500 		result = PCI_ERS_RESULT_RECOVERED;
9501 	}
9502 
9503 	return result;
9504 }
9505 
9506 /**
9507  *  igb_io_resume - called when traffic can start flowing again.
9508  *  @pdev: Pointer to PCI device
9509  *
9510  *  This callback is called when the error recovery driver tells us that
9511  *  it's OK to resume normal operation. Implementation resembles the
9512  *  second-half of the __igb_resume routine.
9513  */
9514 static void igb_io_resume(struct pci_dev *pdev)
9515 {
9516 	struct net_device *netdev = pci_get_drvdata(pdev);
9517 	struct igb_adapter *adapter = netdev_priv(netdev);
9518 
9519 	if (netif_running(netdev)) {
9520 		if (igb_up(adapter)) {
9521 			dev_err(&pdev->dev, "igb_up failed after reset\n");
9522 			return;
9523 		}
9524 	}
9525 
9526 	netif_device_attach(netdev);
9527 
9528 	/* let the f/w know that the h/w is now under the control of the
9529 	 * driver.
9530 	 */
9531 	igb_get_hw_control(adapter);
9532 }
9533 
9534 /**
9535  *  igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
9536  *  @adapter: Pointer to adapter structure
9537  *  @index: Index of the RAR entry which needs to be synced with MAC table
9538  **/
9539 static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9540 {
9541 	struct e1000_hw *hw = &adapter->hw;
9542 	u32 rar_low, rar_high;
9543 	u8 *addr = adapter->mac_table[index].addr;
9544 
9545 	/* HW expects these to be in network order when they are plugged
9546 	 * into the registers, which are little endian.  To guarantee that
9547 	 * ordering we do an leXX_to_cpup here so the value is ready for
9548 	 * the byteswap that occurs with writel
9549 	 */
9550 	rar_low = le32_to_cpup((__le32 *)(addr));
9551 	rar_high = le16_to_cpup((__le16 *)(addr + 4));
9552 
9553 	/* Indicate to hardware the Address is Valid. */
9554 	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9555 		if (is_valid_ether_addr(addr))
9556 			rar_high |= E1000_RAH_AV;
9557 
9558 		if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9559 			rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9560 
9561 		switch (hw->mac.type) {
9562 		case e1000_82575:
9563 		case e1000_i210:
9564 			if (adapter->mac_table[index].state &
9565 			    IGB_MAC_STATE_QUEUE_STEERING)
9566 				rar_high |= E1000_RAH_QSEL_ENABLE;
9567 
9568 			rar_high |= E1000_RAH_POOL_1 *
9569 				    adapter->mac_table[index].queue;
9570 			break;
9571 		default:
9572 			rar_high |= E1000_RAH_POOL_1 <<
9573 				    adapter->mac_table[index].queue;
9574 			break;
9575 		}
9576 	}
9577 
9578 	wr32(E1000_RAL(index), rar_low);
9579 	wrfl();
9580 	wr32(E1000_RAH(index), rar_high);
9581 	wrfl();
9582 }
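/* Illustrative byte-order walk-through: for MAC 00:11:22:33:44:55 the
 * loads above produce
 *
 *	rar_low  = 0x33221100
 *	rar_high = 0x00005544	(before AV/pool bits are OR-ed in)
 *
 * and writel() stores them little-endian, so the bytes land in RAL/RAH
 * in network order as the hardware expects.
 */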
9583 
9584 static int igb_set_vf_mac(struct igb_adapter *adapter,
9585 			  int vf, unsigned char *mac_addr)
9586 {
9587 	struct e1000_hw *hw = &adapter->hw;
9588 	/* VF MAC addresses start at the end of the receive addresses and move
9589 	 * towards the first; as a result a collision should not be possible
9590 	 */
9591 	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9592 	unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9593 
9594 	ether_addr_copy(vf_mac_addr, mac_addr);
9595 	ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9596 	adapter->mac_table[rar_entry].queue = vf;
9597 	adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9598 	igb_rar_set_index(adapter, rar_entry);
9599 
9600 	return 0;
9601 }
9602 
9603 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9604 {
9605 	struct igb_adapter *adapter = netdev_priv(netdev);
9606 
9607 	if (vf >= adapter->vfs_allocated_count)
9608 		return -EINVAL;
9609 
9610 	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
9611 	 * flag and allows overwriting the MAC via the VF netdev.  This
9612 	 * is necessary to allow libvirt a way to restore the original
9613 	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
9614 	 * down a VM.
9615 	 */
9616 	if (is_zero_ether_addr(mac)) {
9617 		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9618 		dev_info(&adapter->pdev->dev,
9619 			 "remove administratively set MAC on VF %d\n",
9620 			 vf);
9621 	} else if (is_valid_ether_addr(mac)) {
9622 		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9623 		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9624 			 mac, vf);
9625 		dev_info(&adapter->pdev->dev,
9626 			 "Reload the VF driver to make this change effective.\n");
9627 		/* Generate additional warning if PF is down */
9628 		if (test_bit(__IGB_DOWN, &adapter->state)) {
9629 			dev_warn(&adapter->pdev->dev,
9630 				 "The VF MAC address has been set, but the PF device is not up.\n");
9631 			dev_warn(&adapter->pdev->dev,
9632 				 "Bring the PF device up before attempting to use the VF device.\n");
9633 		}
9634 	} else {
9635 		return -EINVAL;
9636 	}
9637 	return igb_set_vf_mac(adapter, vf, mac);
9638 }
9639 
9640 static int igb_link_mbps(int internal_link_speed)
9641 {
9642 	switch (internal_link_speed) {
9643 	case SPEED_100:
9644 		return 100;
9645 	case SPEED_1000:
9646 		return 1000;
9647 	default:
9648 		return 0;
9649 	}
9650 }
9651 
9652 static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9653 				  int link_speed)
9654 {
9655 	int rf_dec, rf_int;
9656 	u32 bcnrc_val;
9657 
9658 	if (tx_rate != 0) {
9659 		/* Calculate the rate factor values to set */
9660 		rf_int = link_speed / tx_rate;
9661 		rf_dec = (link_speed - (rf_int * tx_rate));
9662 		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9663 			 tx_rate;
9664 
9665 		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9666 		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9667 			      E1000_RTTBCNRC_RF_INT_MASK);
9668 		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9669 	} else {
9670 		bcnrc_val = 0;
9671 	}
9672 
9673 	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
9674 	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
9675 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
9676 	 */
9677 	wr32(E1000_RTTBCNRM, 0x14);
9678 	wr32(E1000_RTTBCNRC, bcnrc_val);
9679 }
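/* Worked example, illustrative and assuming the usual 14-bit fractional
 * shift for E1000_RTTBCNRC_RF_INT_SHIFT: with link_speed = 1000 and
 * tx_rate = 300,
 *
 *	rf_int = 1000 / 300                         = 3
 *	rf_dec = (1000 - 3 * 300) * (1 << 14) / 300 = 5461
 *
 * so the programmed rate factor is ~3.333 = link_speed / tx_rate and
 * the VF queue is throttled to roughly 300 Mbps.
 */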
9680 
9681 static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9682 {
9683 	int actual_link_speed, i;
9684 	bool reset_rate = false;
9685 
9686 	/* VF TX rate limit was not set or not supported */
9687 	if ((adapter->vf_rate_link_speed == 0) ||
9688 	    (adapter->hw.mac.type != e1000_82576))
9689 		return;
9690 
9691 	actual_link_speed = igb_link_mbps(adapter->link_speed);
9692 	if (actual_link_speed != adapter->vf_rate_link_speed) {
9693 		reset_rate = true;
9694 		adapter->vf_rate_link_speed = 0;
9695 		dev_info(&adapter->pdev->dev,
9696 			 "Link speed has been changed. VF Transmit rate is disabled\n");
9697 	}
9698 
9699 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
9700 		if (reset_rate)
9701 			adapter->vf_data[i].tx_rate = 0;
9702 
9703 		igb_set_vf_rate_limit(&adapter->hw, i,
9704 				      adapter->vf_data[i].tx_rate,
9705 				      actual_link_speed);
9706 	}
9707 }
9708 
9709 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9710 			     int min_tx_rate, int max_tx_rate)
9711 {
9712 	struct igb_adapter *adapter = netdev_priv(netdev);
9713 	struct e1000_hw *hw = &adapter->hw;
9714 	int actual_link_speed;
9715 
9716 	if (hw->mac.type != e1000_82576)
9717 		return -EOPNOTSUPP;
9718 
9719 	if (min_tx_rate)
9720 		return -EINVAL;
9721 
9722 	actual_link_speed = igb_link_mbps(adapter->link_speed);
9723 	if ((vf >= adapter->vfs_allocated_count) ||
9724 	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9725 	    (max_tx_rate < 0) ||
9726 	    (max_tx_rate > actual_link_speed))
9727 		return -EINVAL;
9728 
9729 	adapter->vf_rate_link_speed = actual_link_speed;
9730 	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9731 	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9732 
9733 	return 0;
9734 }
9735 
9736 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9737 				   bool setting)
9738 {
9739 	struct igb_adapter *adapter = netdev_priv(netdev);
9740 	struct e1000_hw *hw = &adapter->hw;
9741 	u32 reg_val, reg_offset;
9742 
9743 	if (!adapter->vfs_allocated_count)
9744 		return -EOPNOTSUPP;
9745 
9746 	if (vf >= adapter->vfs_allocated_count)
9747 		return -EINVAL;
9748 
9749 	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9750 	reg_val = rd32(reg_offset);
9751 	if (setting)
9752 		reg_val |= (BIT(vf) |
9753 			    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9754 	else
9755 		reg_val &= ~(BIT(vf) |
9756 			     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9757 	wr32(reg_offset, reg_val);
9758 
9759 	adapter->vf_data[vf].spoofchk_enabled = setting;
9760 	return 0;
9761 }
9762 
9763 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9764 {
9765 	struct igb_adapter *adapter = netdev_priv(netdev);
9766 
9767 	if (vf >= adapter->vfs_allocated_count)
9768 		return -EINVAL;
9769 	if (adapter->vf_data[vf].trusted == setting)
9770 		return 0;
9771 
9772 	adapter->vf_data[vf].trusted = setting;
9773 
9774 	dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9775 		 vf, setting ? "" : "not ");
9776 	return 0;
9777 }
9778 
9779 static int igb_ndo_get_vf_config(struct net_device *netdev,
9780 				 int vf, struct ifla_vf_info *ivi)
9781 {
9782 	struct igb_adapter *adapter = netdev_priv(netdev);
9783 	if (vf >= adapter->vfs_allocated_count)
9784 		return -EINVAL;
9785 	ivi->vf = vf;
9786 	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9787 	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9788 	ivi->min_tx_rate = 0;
9789 	ivi->vlan = adapter->vf_data[vf].pf_vlan;
9790 	ivi->qos = adapter->vf_data[vf].pf_qos;
9791 	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9792 	ivi->trusted = adapter->vf_data[vf].trusted;
9793 	return 0;
9794 }
9795 
9796 static void igb_vmm_control(struct igb_adapter *adapter)
9797 {
9798 	struct e1000_hw *hw = &adapter->hw;
9799 	u32 reg;
9800 
9801 	switch (hw->mac.type) {
9802 	case e1000_82575:
9803 	case e1000_i210:
9804 	case e1000_i211:
9805 	case e1000_i354:
9806 	default:
9807 		/* replication is not supported for 82575 */
9808 		return;
9809 	case e1000_82576:
9810 		/* notify HW that the MAC is adding vlan tags */
9811 		reg = rd32(E1000_DTXCTL);
9812 		reg |= E1000_DTXCTL_VLAN_ADDED;
9813 		wr32(E1000_DTXCTL, reg);
9814 		fallthrough;
9815 	case e1000_82580:
9816 		/* enable replication vlan tag stripping */
9817 		reg = rd32(E1000_RPLOLR);
9818 		reg |= E1000_RPLOLR_STRVLAN;
9819 		wr32(E1000_RPLOLR, reg);
9820 		fallthrough;
9821 	case e1000_i350:
9822 		/* none of the above registers are supported by i350 */
9823 		break;
9824 	}
9825 
9826 	if (adapter->vfs_allocated_count) {
9827 		igb_vmdq_set_loopback_pf(hw, true);
9828 		igb_vmdq_set_replication_pf(hw, true);
9829 		igb_vmdq_set_anti_spoofing_pf(hw, true,
9830 					      adapter->vfs_allocated_count);
9831 	} else {
9832 		igb_vmdq_set_loopback_pf(hw, false);
9833 		igb_vmdq_set_replication_pf(hw, false);
9834 	}
9835 }
9836 
9837 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9838 {
9839 	struct e1000_hw *hw = &adapter->hw;
9840 	u32 dmac_thr;
9841 	u16 hwm;
9842 	u32 reg;
9843 
9844 	if (hw->mac.type > e1000_82580) {
9845 		if (adapter->flags & IGB_FLAG_DMAC) {
9846 			/* force threshold to 0. */
9847 			wr32(E1000_DMCTXTH, 0);
9848 
9849 			/* DMA Coalescing high water mark needs to be greater
9850 			 * than the Rx threshold. Set hwm to PBA - max frame
9851 			 * size in 16B units, capping it at PBA - 6KB.
9852 			 */
9853 			hwm = 64 * (pba - 6);
9854 			reg = rd32(E1000_FCRTC);
9855 			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9856 			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9857 				& E1000_FCRTC_RTH_COAL_MASK);
9858 			wr32(E1000_FCRTC, reg);
9859 
9860 			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
9861 			 * frame size, capping it at PBA - 10KB.
9862 			 */
9863 			dmac_thr = pba - 10;
9864 			reg = rd32(E1000_DMACR);
9865 			reg &= ~E1000_DMACR_DMACTHR_MASK;
9866 			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9867 				& E1000_DMACR_DMACTHR_MASK);
9868 
9869 			/* transition to L0s or L1 if available */
9870 			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9871 
9872 			/* watchdog timer = 1000 usec, in 32 usec intervals */
9873 			reg |= (1000 >> 5);
9874 
9875 			/* clear the BMC-to-OS Watchdog Enable bit */
9876 			if (hw->mac.type != e1000_i354)
9877 				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9878 			wr32(E1000_DMACR, reg);
9879 
9880 			/* no lower threshold to disable
9881 			 * coalescing (smart fifo) - UTRESH=0
9882 			 */
9883 			wr32(E1000_DMCRTRH, 0);
9884 
9885 			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9886 
9887 			wr32(E1000_DMCTLX, reg);
9888 
9889 			/* free space in tx packet buffer to wake from
9890 			 * DMA coal
9891 			 */
9892 			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9893 			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9894 		}
9895 
9896 		if (hw->mac.type >= e1000_i210 ||
9897 		    (adapter->flags & IGB_FLAG_DMAC)) {
9898 			reg = rd32(E1000_PCIEMISC);
9899 			reg |= E1000_PCIEMISC_LX_DECISION;
9900 			wr32(E1000_PCIEMISC, reg);
9901 		} /* endif adapter->dmac is not disabled */
9902 	} else if (hw->mac.type == e1000_82580) {
9903 		u32 reg = rd32(E1000_PCIEMISC);
9904 
9905 		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9906 		wr32(E1000_DMACR, 0);
9907 	}
9908 }
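/* Illustrative watermark arithmetic: the thresholds above are written
 * in 16-byte units (1 KB = 64 * 16 B).  Assuming pba = 34 (KB):
 *
 *	hwm      = 64 * (34 - 6) = 1792	-> high water mark at 28 KB
 *	dmac_thr = 34 - 10       = 24	-> Rx threshold at 24 KB
 *
 * which keeps the high water mark above the Rx threshold as the
 * comments in igb_init_dmac() require.
 */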
9909 
9910 /**
9911  *  igb_read_i2c_byte - Reads an 8-bit byte over I2C
9912  *  @hw: pointer to hardware structure
9913  *  @byte_offset: byte offset to read
9914  *  @dev_addr: device address
9915  *  @data: value read
9916  *
9917  *  Performs byte read operation over I2C interface at
9918  *  a specified device address.
9919  **/
9920 s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9921 		      u8 dev_addr, u8 *data)
9922 {
9923 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9924 	struct i2c_client *this_client = adapter->i2c_client;
9925 	s32 status;
9926 	u16 swfw_mask = 0;
9927 
9928 	if (!this_client)
9929 		return E1000_ERR_I2C;
9930 
9931 	swfw_mask = E1000_SWFW_PHY0_SM;
9932 
9933 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9934 		return E1000_ERR_SWFW_SYNC;
9935 
9936 	status = i2c_smbus_read_byte_data(this_client, byte_offset);
9937 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9938 
9939 	if (status < 0)
9940 		return E1000_ERR_I2C;
9941 	else {
9942 		*data = status;
9943 		return 0;
9944 	}
9945 }
9946 
9947 /**
9948  *  igb_write_i2c_byte - Writes an 8-bit byte over I2C
9949  *  @hw: pointer to hardware structure
9950  *  @byte_offset: byte offset to write
9951  *  @dev_addr: device address
9952  *  @data: value to write
9953  *
9954  *  Performs byte write operation over I2C interface at
9955  *  a specified device address.
9956  **/
9957 s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9958 		       u8 dev_addr, u8 data)
9959 {
9960 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9961 	struct i2c_client *this_client = adapter->i2c_client;
9962 	s32 status;
9963 	u16 swfw_mask = E1000_SWFW_PHY0_SM;
9964 
9965 	if (!this_client)
9966 		return E1000_ERR_I2C;
9967 
9968 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9969 		return E1000_ERR_SWFW_SYNC;
9970 	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9971 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9972 
9973 	if (status)
9974 		return E1000_ERR_I2C;
9975 	else
9976 		return 0;
9977 
9978 }
9979 
9980 int igb_reinit_queues(struct igb_adapter *adapter)
9981 {
9982 	struct net_device *netdev = adapter->netdev;
9983 	struct pci_dev *pdev = adapter->pdev;
9984 	int err = 0;
9985 
9986 	if (netif_running(netdev))
9987 		igb_close(netdev);
9988 
9989 	igb_reset_interrupt_capability(adapter);
9990 
9991 	if (igb_init_interrupt_scheme(adapter, true)) {
9992 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9993 		return -ENOMEM;
9994 	}
9995 
9996 	if (netif_running(netdev))
9997 		err = igb_open(netdev);
9998 
9999 	return err;
10000 }
10001 
10002 static void igb_nfc_filter_exit(struct igb_adapter *adapter)
10003 {
10004 	struct igb_nfc_filter *rule;
10005 
10006 	spin_lock(&adapter->nfc_lock);
10007 
10008 	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
10009 		igb_erase_filter(adapter, rule);
10010 
10011 	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
10012 		igb_erase_filter(adapter, rule);
10013 
10014 	spin_unlock(&adapter->nfc_lock);
10015 }
10016 
10017 static void igb_nfc_filter_restore(struct igb_adapter *adapter)
10018 {
10019 	struct igb_nfc_filter *rule;
10020 
10021 	spin_lock(&adapter->nfc_lock);
10022 
10023 	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
10024 		igb_add_filter(adapter, rule);
10025 
10026 	spin_unlock(&adapter->nfc_lock);
10027 }
10028 /* igb_main.c */
10029