1*4882a593Smuzhiyun /* bnx2.c: QLogic bnx2 network driver.
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Copyright (c) 2004-2014 Broadcom Corporation
4*4882a593Smuzhiyun * Copyright (c) 2014-2015 QLogic Corporation
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify
7*4882a593Smuzhiyun * it under the terms of the GNU General Public License as published by
8*4882a593Smuzhiyun * the Free Software Foundation.
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Written by: Michael Chan (mchan@broadcom.com)
11*4882a593Smuzhiyun */
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/moduleparam.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include <linux/stringify.h>
19*4882a593Smuzhiyun #include <linux/kernel.h>
20*4882a593Smuzhiyun #include <linux/timer.h>
21*4882a593Smuzhiyun #include <linux/errno.h>
22*4882a593Smuzhiyun #include <linux/ioport.h>
23*4882a593Smuzhiyun #include <linux/slab.h>
24*4882a593Smuzhiyun #include <linux/vmalloc.h>
25*4882a593Smuzhiyun #include <linux/interrupt.h>
26*4882a593Smuzhiyun #include <linux/pci.h>
27*4882a593Smuzhiyun #include <linux/netdevice.h>
28*4882a593Smuzhiyun #include <linux/etherdevice.h>
29*4882a593Smuzhiyun #include <linux/skbuff.h>
30*4882a593Smuzhiyun #include <linux/dma-mapping.h>
31*4882a593Smuzhiyun #include <linux/bitops.h>
32*4882a593Smuzhiyun #include <asm/io.h>
33*4882a593Smuzhiyun #include <asm/irq.h>
34*4882a593Smuzhiyun #include <linux/delay.h>
35*4882a593Smuzhiyun #include <asm/byteorder.h>
36*4882a593Smuzhiyun #include <asm/page.h>
37*4882a593Smuzhiyun #include <linux/time.h>
38*4882a593Smuzhiyun #include <linux/ethtool.h>
39*4882a593Smuzhiyun #include <linux/mii.h>
40*4882a593Smuzhiyun #include <linux/if.h>
41*4882a593Smuzhiyun #include <linux/if_vlan.h>
42*4882a593Smuzhiyun #include <net/ip.h>
43*4882a593Smuzhiyun #include <net/tcp.h>
44*4882a593Smuzhiyun #include <net/checksum.h>
45*4882a593Smuzhiyun #include <linux/workqueue.h>
46*4882a593Smuzhiyun #include <linux/crc32.h>
47*4882a593Smuzhiyun #include <linux/prefetch.h>
48*4882a593Smuzhiyun #include <linux/cache.h>
49*4882a593Smuzhiyun #include <linux/firmware.h>
50*4882a593Smuzhiyun #include <linux/log2.h>
51*4882a593Smuzhiyun #include <linux/aer.h>
52*4882a593Smuzhiyun #include <linux/crash_dump.h>
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_CNIC)
55*4882a593Smuzhiyun #define BCM_CNIC 1
56*4882a593Smuzhiyun #include "cnic_if.h"
57*4882a593Smuzhiyun #endif
58*4882a593Smuzhiyun #include "bnx2.h"
59*4882a593Smuzhiyun #include "bnx2_fw.h"
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun #define DRV_MODULE_NAME "bnx2"
62*4882a593Smuzhiyun #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
63*4882a593Smuzhiyun #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
64*4882a593Smuzhiyun #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
65*4882a593Smuzhiyun #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
66*4882a593Smuzhiyun #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun #define RUN_AT(x) (jiffies + (x))
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* Time in jiffies before concluding the transmitter is hung. */
71*4882a593Smuzhiyun #define TX_TIMEOUT (5*HZ)
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
74*4882a593Smuzhiyun MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
75*4882a593Smuzhiyun MODULE_LICENSE("GPL");
76*4882a593Smuzhiyun MODULE_FIRMWARE(FW_MIPS_FILE_06);
77*4882a593Smuzhiyun MODULE_FIRMWARE(FW_RV2P_FILE_06);
78*4882a593Smuzhiyun MODULE_FIRMWARE(FW_MIPS_FILE_09);
79*4882a593Smuzhiyun MODULE_FIRMWARE(FW_RV2P_FILE_09);
80*4882a593Smuzhiyun MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun static int disable_msi = 0;
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun module_param(disable_msi, int, 0444);
85*4882a593Smuzhiyun MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
86*4882a593Smuzhiyun
/* Board identifiers.  NOTE: the order of these values must match the
 * order of the entries in board_info[] below — board_t is used as an
 * index into that table (see the driver_data field of bnx2_pci_tbl).
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun /* indexed by board_t, above */
/* Human-readable board names, indexed by board_t, above.  Keep the
 * entry order in sync with the board_t enum.
 */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};
117*4882a593Smuzhiyun
/* PCI device ID table.  The HP OEM entries (exact subvendor/subdevice)
 * must appear before the generic PCI_ANY_ID wildcard entries for the
 * same device ID, because PCI matching takes the first hit.  The last
 * field (driver_data) is a board_t index into board_info[].
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716/BCM5716S, no PCI_DEVICE_ID_* constant */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
143*4882a593Smuzhiyun
/* NVRAM device table for pre-5709 chips.  Each entry describes one
 * supported flash/EEPROM part; the first five hex words are raw NVRAM
 * controller register values (presumably strap/config/command setup —
 * TODO confirm field names against struct flash_spec in bnx2.h),
 * followed by the buffered/non-buffered flags, page geometry, byte
 * address mask, total size and a descriptive name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
232*4882a593Smuzhiyun
/* NVRAM description for the BCM5709/5716 family, which uses a single
 * buffered flash layout instead of the flash_table[] probe above.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun static void bnx2_init_napi(struct bnx2 *bp);
245*4882a593Smuzhiyun static void bnx2_del_napi(struct bnx2 *bp);
246*4882a593Smuzhiyun
bnx2_tx_avail(struct bnx2 * bp,struct bnx2_tx_ring_info * txr)247*4882a593Smuzhiyun static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun u32 diff;
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun /* The ring uses 256 indices for 255 entries, one of them
252*4882a593Smuzhiyun * needs to be skipped.
253*4882a593Smuzhiyun */
254*4882a593Smuzhiyun diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
255*4882a593Smuzhiyun if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
256*4882a593Smuzhiyun diff &= 0xffff;
257*4882a593Smuzhiyun if (diff == BNX2_TX_DESC_CNT)
258*4882a593Smuzhiyun diff = BNX2_MAX_TX_DESC_CNT;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun return bp->tx_ring_size - diff;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
/* Read a register indirectly through the PCI config register window.
 * The address write and data read form one transaction, so both are
 * done under indirect_lock to keep them atomic w.r.t. other users.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
	return val;
}
275*4882a593Smuzhiyun
/* Write a register indirectly through the PCI config register window.
 * Address and data writes must not be interleaved with another
 * indirect access, hence the indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
286*4882a593Smuzhiyun
/* Write a word into the firmware shared-memory region (offset is
 * relative to bp->shmem_base).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
292*4882a593Smuzhiyun
/* Read a word from the firmware shared-memory region (offset is
 * relative to bp->shmem_base).
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
298*4882a593Smuzhiyun
/* Write one word of a connection context.
 *
 * On the 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * pair and we poll (up to 5 * 5us) for the WRITE_REQ bit to clear;
 * a timeout is silently ignored here.  Older chips use the simpler
 * CTX_DATA_ADR/CTX_DATA interface.  indirect_lock serializes the
 * multi-register sequence.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun #ifdef BCM_CNIC
326*4882a593Smuzhiyun static int
bnx2_drv_ctl(struct net_device * dev,struct drv_ctl_info * info)327*4882a593Smuzhiyun bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328*4882a593Smuzhiyun {
329*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
330*4882a593Smuzhiyun struct drv_ctl_io *io = &info->data.io;
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun switch (info->cmd) {
333*4882a593Smuzhiyun case DRV_CTL_IO_WR_CMD:
334*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, io->offset, io->data);
335*4882a593Smuzhiyun break;
336*4882a593Smuzhiyun case DRV_CTL_IO_RD_CMD:
337*4882a593Smuzhiyun io->data = bnx2_reg_rd_ind(bp, io->offset);
338*4882a593Smuzhiyun break;
339*4882a593Smuzhiyun case DRV_CTL_CTX_WR_CMD:
340*4882a593Smuzhiyun bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341*4882a593Smuzhiyun break;
342*4882a593Smuzhiyun default:
343*4882a593Smuzhiyun return -EINVAL;
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun return 0;
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun
/* Fill in the cnic IRQ/status-block info based on the current
 * interrupt mode.  With MSI-X, cnic gets its own vector (the one past
 * the driver's irq_nvecs) and status block; otherwise it shares
 * status block 0 with the driver and cnic_present/cnic_tag are set so
 * the shared NAPI poll can service it.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
374*4882a593Smuzhiyun
/* Register the cnic driver's ops with this device.
 *
 * Fails with -EBUSY if already registered and -ENODEV if the firmware
 * reports no iSCSI connections.  cnic_data is assigned before the ops
 * pointer is published with rcu_assign_pointer() so RCU readers never
 * see ops without valid private data.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!ops)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
400*4882a593Smuzhiyun
/* Unregister the cnic driver.  The ops pointer is cleared under
 * cnic_lock and synchronize_rcu() then waits for any in-flight RCU
 * readers of bp->cnic_ops to finish before returning.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
415*4882a593Smuzhiyun
/* Probe entry point called by the cnic driver: export this device's
 * cnic_eth_dev descriptor, or NULL if the firmware supports no iSCSI
 * connections (max_iscsi_conn is set elsewhere from firmware data).
 */
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
434*4882a593Smuzhiyun
/* Tell a registered cnic driver to stop.  cnic_lock both protects the
 * ops pointer (see rcu_dereference_protected) and keeps the callback
 * from racing with register/unregister.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
450*4882a593Smuzhiyun
/* Tell a registered cnic driver to start.  In non-MSI-X mode the
 * shared status block index (cnic_tag) is refreshed first so cnic
 * resumes from the current hardware state.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun #else
473*4882a593Smuzhiyun
/* No-op stub used when CONFIG_CNIC is disabled. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
478*4882a593Smuzhiyun
/* No-op stub used when CONFIG_CNIC is disabled. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun #endif
485*4882a593Smuzhiyun
/* Read PHY register @reg via the EMAC MDIO interface.
 *
 * If hardware auto-polling is active it must be disabled for the
 * duration of the manual access and re-enabled afterwards (with a
 * 40us settle delay after each mode change).  The read command is
 * polled up to 50 * 10us for START_BUSY to clear; on success *val
 * holds the 16-bit data, on timeout *val is zeroed and -EBUSY is
 * returned.  Caller is expected to hold the PHY lock.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
542*4882a593Smuzhiyun
/* Write @val to PHY register @reg via the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is temporarily disabled if
 * active, the write command is polled up to 50 * 10us for START_BUSY
 * to clear, and -EBUSY is returned on timeout.  Caller is expected to
 * hold the PHY lock.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
591*4882a593Smuzhiyun
/* Mask interrupts on every vector.  The trailing read flushes the
 * posted writes so the masking has taken effect by the time we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
605*4882a593Smuzhiyun
/* Unmask interrupts on every vector.
 *
 * Each vector gets two INT_ACK_CMD writes: first with MASK_INT still
 * set (acknowledging up to last_status_idx while masked), then
 * without it to actually enable.  Finally COAL_NOW forces the host
 * coalescing block to generate an interrupt if events are pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
626*4882a593Smuzhiyun
/* Disable interrupts and wait for any in-flight handlers to finish.
 * intr_sem is bumped first so bnx2_netif_start() won't re-enable
 * until the matching decrement; if the device isn't running there is
 * nothing to mask or synchronize.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
640*4882a593Smuzhiyun
641*4882a593Smuzhiyun static void
bnx2_napi_disable(struct bnx2 * bp)642*4882a593Smuzhiyun bnx2_napi_disable(struct bnx2 *bp)
643*4882a593Smuzhiyun {
644*4882a593Smuzhiyun int i;
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun for (i = 0; i < bp->irq_nvecs; i++)
647*4882a593Smuzhiyun napi_disable(&bp->bnx2_napi[i].napi);
648*4882a593Smuzhiyun }
649*4882a593Smuzhiyun
650*4882a593Smuzhiyun static void
bnx2_napi_enable(struct bnx2 * bp)651*4882a593Smuzhiyun bnx2_napi_enable(struct bnx2 *bp)
652*4882a593Smuzhiyun {
653*4882a593Smuzhiyun int i;
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun for (i = 0; i < bp->irq_nvecs; i++)
656*4882a593Smuzhiyun napi_enable(&bp->bnx2_napi[i].napi);
657*4882a593Smuzhiyun }
658*4882a593Smuzhiyun
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	/* Quiesce the interface: optionally stop the CNIC offload client,
	 * silence NAPI and the TX queues, then mask and synchronize all
	 * interrupt vectors.  Paired with bnx2_netif_start().
	 */
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	/* Always bump intr_sem (even if the device is down) so that the
	 * matching bnx2_netif_start() balances the semaphore.
	 */
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
671*4882a593Smuzhiyun
672*4882a593Smuzhiyun static void
bnx2_netif_start(struct bnx2 * bp,bool start_cnic)673*4882a593Smuzhiyun bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
674*4882a593Smuzhiyun {
675*4882a593Smuzhiyun if (atomic_dec_and_test(&bp->intr_sem)) {
676*4882a593Smuzhiyun if (netif_running(bp->dev)) {
677*4882a593Smuzhiyun netif_tx_wake_all_queues(bp->dev);
678*4882a593Smuzhiyun spin_lock_bh(&bp->phy_lock);
679*4882a593Smuzhiyun if (bp->link_up)
680*4882a593Smuzhiyun netif_carrier_on(bp->dev);
681*4882a593Smuzhiyun spin_unlock_bh(&bp->phy_lock);
682*4882a593Smuzhiyun bnx2_napi_enable(bp);
683*4882a593Smuzhiyun bnx2_enable_int(bp);
684*4882a593Smuzhiyun if (start_cnic)
685*4882a593Smuzhiyun bnx2_cnic_start(bp);
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun }
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun
690*4882a593Smuzhiyun static void
bnx2_free_tx_mem(struct bnx2 * bp)691*4882a593Smuzhiyun bnx2_free_tx_mem(struct bnx2 *bp)
692*4882a593Smuzhiyun {
693*4882a593Smuzhiyun int i;
694*4882a593Smuzhiyun
695*4882a593Smuzhiyun for (i = 0; i < bp->num_tx_rings; i++) {
696*4882a593Smuzhiyun struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
697*4882a593Smuzhiyun struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
698*4882a593Smuzhiyun
699*4882a593Smuzhiyun if (txr->tx_desc_ring) {
700*4882a593Smuzhiyun dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
701*4882a593Smuzhiyun txr->tx_desc_ring,
702*4882a593Smuzhiyun txr->tx_desc_mapping);
703*4882a593Smuzhiyun txr->tx_desc_ring = NULL;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun kfree(txr->tx_buf_ring);
706*4882a593Smuzhiyun txr->tx_buf_ring = NULL;
707*4882a593Smuzhiyun }
708*4882a593Smuzhiyun }
709*4882a593Smuzhiyun
710*4882a593Smuzhiyun static void
bnx2_free_rx_mem(struct bnx2 * bp)711*4882a593Smuzhiyun bnx2_free_rx_mem(struct bnx2 *bp)
712*4882a593Smuzhiyun {
713*4882a593Smuzhiyun int i;
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun for (i = 0; i < bp->num_rx_rings; i++) {
716*4882a593Smuzhiyun struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
717*4882a593Smuzhiyun struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
718*4882a593Smuzhiyun int j;
719*4882a593Smuzhiyun
720*4882a593Smuzhiyun for (j = 0; j < bp->rx_max_ring; j++) {
721*4882a593Smuzhiyun if (rxr->rx_desc_ring[j])
722*4882a593Smuzhiyun dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
723*4882a593Smuzhiyun rxr->rx_desc_ring[j],
724*4882a593Smuzhiyun rxr->rx_desc_mapping[j]);
725*4882a593Smuzhiyun rxr->rx_desc_ring[j] = NULL;
726*4882a593Smuzhiyun }
727*4882a593Smuzhiyun vfree(rxr->rx_buf_ring);
728*4882a593Smuzhiyun rxr->rx_buf_ring = NULL;
729*4882a593Smuzhiyun
730*4882a593Smuzhiyun for (j = 0; j < bp->rx_max_pg_ring; j++) {
731*4882a593Smuzhiyun if (rxr->rx_pg_desc_ring[j])
732*4882a593Smuzhiyun dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
733*4882a593Smuzhiyun rxr->rx_pg_desc_ring[j],
734*4882a593Smuzhiyun rxr->rx_pg_desc_mapping[j]);
735*4882a593Smuzhiyun rxr->rx_pg_desc_ring[j] = NULL;
736*4882a593Smuzhiyun }
737*4882a593Smuzhiyun vfree(rxr->rx_pg_ring);
738*4882a593Smuzhiyun rxr->rx_pg_ring = NULL;
739*4882a593Smuzhiyun }
740*4882a593Smuzhiyun }
741*4882a593Smuzhiyun
742*4882a593Smuzhiyun static int
bnx2_alloc_tx_mem(struct bnx2 * bp)743*4882a593Smuzhiyun bnx2_alloc_tx_mem(struct bnx2 *bp)
744*4882a593Smuzhiyun {
745*4882a593Smuzhiyun int i;
746*4882a593Smuzhiyun
747*4882a593Smuzhiyun for (i = 0; i < bp->num_tx_rings; i++) {
748*4882a593Smuzhiyun struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
749*4882a593Smuzhiyun struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
750*4882a593Smuzhiyun
751*4882a593Smuzhiyun txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
752*4882a593Smuzhiyun if (!txr->tx_buf_ring)
753*4882a593Smuzhiyun return -ENOMEM;
754*4882a593Smuzhiyun
755*4882a593Smuzhiyun txr->tx_desc_ring =
756*4882a593Smuzhiyun dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
757*4882a593Smuzhiyun &txr->tx_desc_mapping, GFP_KERNEL);
758*4882a593Smuzhiyun if (!txr->tx_desc_ring)
759*4882a593Smuzhiyun return -ENOMEM;
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun return 0;
762*4882a593Smuzhiyun }
763*4882a593Smuzhiyun
764*4882a593Smuzhiyun static int
bnx2_alloc_rx_mem(struct bnx2 * bp)765*4882a593Smuzhiyun bnx2_alloc_rx_mem(struct bnx2 *bp)
766*4882a593Smuzhiyun {
767*4882a593Smuzhiyun int i;
768*4882a593Smuzhiyun
769*4882a593Smuzhiyun for (i = 0; i < bp->num_rx_rings; i++) {
770*4882a593Smuzhiyun struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
771*4882a593Smuzhiyun struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
772*4882a593Smuzhiyun int j;
773*4882a593Smuzhiyun
774*4882a593Smuzhiyun rxr->rx_buf_ring =
775*4882a593Smuzhiyun vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
776*4882a593Smuzhiyun if (!rxr->rx_buf_ring)
777*4882a593Smuzhiyun return -ENOMEM;
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun for (j = 0; j < bp->rx_max_ring; j++) {
780*4882a593Smuzhiyun rxr->rx_desc_ring[j] =
781*4882a593Smuzhiyun dma_alloc_coherent(&bp->pdev->dev,
782*4882a593Smuzhiyun RXBD_RING_SIZE,
783*4882a593Smuzhiyun &rxr->rx_desc_mapping[j],
784*4882a593Smuzhiyun GFP_KERNEL);
785*4882a593Smuzhiyun if (!rxr->rx_desc_ring[j])
786*4882a593Smuzhiyun return -ENOMEM;
787*4882a593Smuzhiyun
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun
790*4882a593Smuzhiyun if (bp->rx_pg_ring_size) {
791*4882a593Smuzhiyun rxr->rx_pg_ring =
792*4882a593Smuzhiyun vzalloc(array_size(SW_RXPG_RING_SIZE,
793*4882a593Smuzhiyun bp->rx_max_pg_ring));
794*4882a593Smuzhiyun if (!rxr->rx_pg_ring)
795*4882a593Smuzhiyun return -ENOMEM;
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun }
798*4882a593Smuzhiyun
799*4882a593Smuzhiyun for (j = 0; j < bp->rx_max_pg_ring; j++) {
800*4882a593Smuzhiyun rxr->rx_pg_desc_ring[j] =
801*4882a593Smuzhiyun dma_alloc_coherent(&bp->pdev->dev,
802*4882a593Smuzhiyun RXBD_RING_SIZE,
803*4882a593Smuzhiyun &rxr->rx_pg_desc_mapping[j],
804*4882a593Smuzhiyun GFP_KERNEL);
805*4882a593Smuzhiyun if (!rxr->rx_pg_desc_ring[j])
806*4882a593Smuzhiyun return -ENOMEM;
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun return 0;
811*4882a593Smuzhiyun }
812*4882a593Smuzhiyun
813*4882a593Smuzhiyun static void
bnx2_free_stats_blk(struct net_device * dev)814*4882a593Smuzhiyun bnx2_free_stats_blk(struct net_device *dev)
815*4882a593Smuzhiyun {
816*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun if (bp->status_blk) {
819*4882a593Smuzhiyun dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
820*4882a593Smuzhiyun bp->status_blk,
821*4882a593Smuzhiyun bp->status_blk_mapping);
822*4882a593Smuzhiyun bp->status_blk = NULL;
823*4882a593Smuzhiyun bp->stats_blk = NULL;
824*4882a593Smuzhiyun }
825*4882a593Smuzhiyun }
826*4882a593Smuzhiyun
827*4882a593Smuzhiyun static int
bnx2_alloc_stats_blk(struct net_device * dev)828*4882a593Smuzhiyun bnx2_alloc_stats_blk(struct net_device *dev)
829*4882a593Smuzhiyun {
830*4882a593Smuzhiyun int status_blk_size;
831*4882a593Smuzhiyun void *status_blk;
832*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
833*4882a593Smuzhiyun
834*4882a593Smuzhiyun /* Combine status and statistics blocks into one allocation. */
835*4882a593Smuzhiyun status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
836*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_MSIX_CAP)
837*4882a593Smuzhiyun status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
838*4882a593Smuzhiyun BNX2_SBLK_MSIX_ALIGN_SIZE);
839*4882a593Smuzhiyun bp->status_stats_size = status_blk_size +
840*4882a593Smuzhiyun sizeof(struct statistics_block);
841*4882a593Smuzhiyun status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
842*4882a593Smuzhiyun &bp->status_blk_mapping, GFP_KERNEL);
843*4882a593Smuzhiyun if (!status_blk)
844*4882a593Smuzhiyun return -ENOMEM;
845*4882a593Smuzhiyun
846*4882a593Smuzhiyun bp->status_blk = status_blk;
847*4882a593Smuzhiyun bp->stats_blk = status_blk + status_blk_size;
848*4882a593Smuzhiyun bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
849*4882a593Smuzhiyun
850*4882a593Smuzhiyun return 0;
851*4882a593Smuzhiyun }
852*4882a593Smuzhiyun
853*4882a593Smuzhiyun static void
bnx2_free_mem(struct bnx2 * bp)854*4882a593Smuzhiyun bnx2_free_mem(struct bnx2 *bp)
855*4882a593Smuzhiyun {
856*4882a593Smuzhiyun int i;
857*4882a593Smuzhiyun struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
858*4882a593Smuzhiyun
859*4882a593Smuzhiyun bnx2_free_tx_mem(bp);
860*4882a593Smuzhiyun bnx2_free_rx_mem(bp);
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun for (i = 0; i < bp->ctx_pages; i++) {
863*4882a593Smuzhiyun if (bp->ctx_blk[i]) {
864*4882a593Smuzhiyun dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
865*4882a593Smuzhiyun bp->ctx_blk[i],
866*4882a593Smuzhiyun bp->ctx_blk_mapping[i]);
867*4882a593Smuzhiyun bp->ctx_blk[i] = NULL;
868*4882a593Smuzhiyun }
869*4882a593Smuzhiyun }
870*4882a593Smuzhiyun
871*4882a593Smuzhiyun if (bnapi->status_blk.msi)
872*4882a593Smuzhiyun bnapi->status_blk.msi = NULL;
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	/* Wire up per-vector status block pointers, allocate the 5709
	 * context pages, and allocate all rx/tx ring memory.  Returns 0
	 * or -ENOMEM; any partial allocation is unwound here.
	 */
	int i, err;
	struct bnx2_napi *bnapi;

	/* Vector 0 uses the base (INTx/MSI) status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Each extra MSI-X vector gets its own status block carved
		 * out of the same allocation at BNX2_SBLK_MSIX_ALIGN_SIZE
		 * intervals (see bnx2_alloc_stats_blk() for the sizing).
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector id lives in the high byte of the
			 * INT_ACK_CMD value (or'ed in by bnx2_enable_int()).
			 */
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 keeps context memory in host pages.  0x2000 bytes
		 * total — presumably the chip's context block size; confirm
		 * against chip documentation.
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* bnx2_free_mem() tolerates partially completed allocations. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun static void
bnx2_report_fw_link(struct bnx2 * bp)933*4882a593Smuzhiyun bnx2_report_fw_link(struct bnx2 *bp)
934*4882a593Smuzhiyun {
935*4882a593Smuzhiyun u32 fw_link_status = 0;
936*4882a593Smuzhiyun
937*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
938*4882a593Smuzhiyun return;
939*4882a593Smuzhiyun
940*4882a593Smuzhiyun if (bp->link_up) {
941*4882a593Smuzhiyun u32 bmsr;
942*4882a593Smuzhiyun
943*4882a593Smuzhiyun switch (bp->line_speed) {
944*4882a593Smuzhiyun case SPEED_10:
945*4882a593Smuzhiyun if (bp->duplex == DUPLEX_HALF)
946*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_10HALF;
947*4882a593Smuzhiyun else
948*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_10FULL;
949*4882a593Smuzhiyun break;
950*4882a593Smuzhiyun case SPEED_100:
951*4882a593Smuzhiyun if (bp->duplex == DUPLEX_HALF)
952*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_100HALF;
953*4882a593Smuzhiyun else
954*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_100FULL;
955*4882a593Smuzhiyun break;
956*4882a593Smuzhiyun case SPEED_1000:
957*4882a593Smuzhiyun if (bp->duplex == DUPLEX_HALF)
958*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_1000HALF;
959*4882a593Smuzhiyun else
960*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_1000FULL;
961*4882a593Smuzhiyun break;
962*4882a593Smuzhiyun case SPEED_2500:
963*4882a593Smuzhiyun if (bp->duplex == DUPLEX_HALF)
964*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_2500HALF;
965*4882a593Smuzhiyun else
966*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_2500FULL;
967*4882a593Smuzhiyun break;
968*4882a593Smuzhiyun }
969*4882a593Smuzhiyun
970*4882a593Smuzhiyun fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
971*4882a593Smuzhiyun
972*4882a593Smuzhiyun if (bp->autoneg) {
973*4882a593Smuzhiyun fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
976*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
977*4882a593Smuzhiyun
978*4882a593Smuzhiyun if (!(bmsr & BMSR_ANEGCOMPLETE) ||
979*4882a593Smuzhiyun bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
980*4882a593Smuzhiyun fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
981*4882a593Smuzhiyun else
982*4882a593Smuzhiyun fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
983*4882a593Smuzhiyun }
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun else
986*4882a593Smuzhiyun fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
987*4882a593Smuzhiyun
988*4882a593Smuzhiyun bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun
991*4882a593Smuzhiyun static char *
bnx2_xceiver_str(struct bnx2 * bp)992*4882a593Smuzhiyun bnx2_xceiver_str(struct bnx2 *bp)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
995*4882a593Smuzhiyun ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
996*4882a593Smuzhiyun "Copper");
997*4882a593Smuzhiyun }
998*4882a593Smuzhiyun
999*4882a593Smuzhiyun static void
bnx2_report_link(struct bnx2 * bp)1000*4882a593Smuzhiyun bnx2_report_link(struct bnx2 *bp)
1001*4882a593Smuzhiyun {
1002*4882a593Smuzhiyun if (bp->link_up) {
1003*4882a593Smuzhiyun netif_carrier_on(bp->dev);
1004*4882a593Smuzhiyun netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1005*4882a593Smuzhiyun bnx2_xceiver_str(bp),
1006*4882a593Smuzhiyun bp->line_speed,
1007*4882a593Smuzhiyun bp->duplex == DUPLEX_FULL ? "full" : "half");
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun if (bp->flow_ctrl) {
1010*4882a593Smuzhiyun if (bp->flow_ctrl & FLOW_CTRL_RX) {
1011*4882a593Smuzhiyun pr_cont(", receive ");
1012*4882a593Smuzhiyun if (bp->flow_ctrl & FLOW_CTRL_TX)
1013*4882a593Smuzhiyun pr_cont("& transmit ");
1014*4882a593Smuzhiyun }
1015*4882a593Smuzhiyun else {
1016*4882a593Smuzhiyun pr_cont(", transmit ");
1017*4882a593Smuzhiyun }
1018*4882a593Smuzhiyun pr_cont("flow control ON");
1019*4882a593Smuzhiyun }
1020*4882a593Smuzhiyun pr_cont("\n");
1021*4882a593Smuzhiyun } else {
1022*4882a593Smuzhiyun netif_carrier_off(bp->dev);
1023*4882a593Smuzhiyun netdev_err(bp->dev, "NIC %s Link is Down\n",
1024*4882a593Smuzhiyun bnx2_xceiver_str(bp));
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun
1027*4882a593Smuzhiyun bnx2_report_fw_link(bp);
1028*4882a593Smuzhiyun }
1029*4882a593Smuzhiyun
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	/* Derive bp->flow_ctrl (FLOW_CTRL_TX/RX bits) from either the
	 * forced settings or the autonegotiated pause advertisements.
	 */
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If speed or flow control is forced (not both autonegotiated),
	 * take the requested settings directly — full duplex only.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only defined for full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: the PHY reports the already-resolved pause result
	 * in its 1000X status register, so use that directly.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes advertises pause with the 1000X bit layout; translate to
	 * the copper PAUSE_CAP/PAUSE_ASYM encoding so a single resolution
	 * path below handles both media.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* We can receive pause; partner only sends. */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			/* We only send pause; partner can receive it. */
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1105*4882a593Smuzhiyun
1106*4882a593Smuzhiyun static int
bnx2_5709s_linkup(struct bnx2 * bp)1107*4882a593Smuzhiyun bnx2_5709s_linkup(struct bnx2 *bp)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun u32 val, speed;
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun bp->link_up = 1;
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1114*4882a593Smuzhiyun bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1115*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1118*4882a593Smuzhiyun bp->line_speed = bp->req_line_speed;
1119*4882a593Smuzhiyun bp->duplex = bp->req_duplex;
1120*4882a593Smuzhiyun return 0;
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1123*4882a593Smuzhiyun switch (speed) {
1124*4882a593Smuzhiyun case MII_BNX2_GP_TOP_AN_SPEED_10:
1125*4882a593Smuzhiyun bp->line_speed = SPEED_10;
1126*4882a593Smuzhiyun break;
1127*4882a593Smuzhiyun case MII_BNX2_GP_TOP_AN_SPEED_100:
1128*4882a593Smuzhiyun bp->line_speed = SPEED_100;
1129*4882a593Smuzhiyun break;
1130*4882a593Smuzhiyun case MII_BNX2_GP_TOP_AN_SPEED_1G:
1131*4882a593Smuzhiyun case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1132*4882a593Smuzhiyun bp->line_speed = SPEED_1000;
1133*4882a593Smuzhiyun break;
1134*4882a593Smuzhiyun case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1135*4882a593Smuzhiyun bp->line_speed = SPEED_2500;
1136*4882a593Smuzhiyun break;
1137*4882a593Smuzhiyun }
1138*4882a593Smuzhiyun if (val & MII_BNX2_GP_TOP_AN_FD)
1139*4882a593Smuzhiyun bp->duplex = DUPLEX_FULL;
1140*4882a593Smuzhiyun else
1141*4882a593Smuzhiyun bp->duplex = DUPLEX_HALF;
1142*4882a593Smuzhiyun return 0;
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun
1145*4882a593Smuzhiyun static int
bnx2_5708s_linkup(struct bnx2 * bp)1146*4882a593Smuzhiyun bnx2_5708s_linkup(struct bnx2 *bp)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun u32 val;
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun bp->link_up = 1;
1151*4882a593Smuzhiyun bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1152*4882a593Smuzhiyun switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1153*4882a593Smuzhiyun case BCM5708S_1000X_STAT1_SPEED_10:
1154*4882a593Smuzhiyun bp->line_speed = SPEED_10;
1155*4882a593Smuzhiyun break;
1156*4882a593Smuzhiyun case BCM5708S_1000X_STAT1_SPEED_100:
1157*4882a593Smuzhiyun bp->line_speed = SPEED_100;
1158*4882a593Smuzhiyun break;
1159*4882a593Smuzhiyun case BCM5708S_1000X_STAT1_SPEED_1G:
1160*4882a593Smuzhiyun bp->line_speed = SPEED_1000;
1161*4882a593Smuzhiyun break;
1162*4882a593Smuzhiyun case BCM5708S_1000X_STAT1_SPEED_2G5:
1163*4882a593Smuzhiyun bp->line_speed = SPEED_2500;
1164*4882a593Smuzhiyun break;
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun if (val & BCM5708S_1000X_STAT1_FD)
1167*4882a593Smuzhiyun bp->duplex = DUPLEX_FULL;
1168*4882a593Smuzhiyun else
1169*4882a593Smuzhiyun bp->duplex = DUPLEX_HALF;
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun return 0;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun static int
bnx2_5706s_linkup(struct bnx2 * bp)1175*4882a593Smuzhiyun bnx2_5706s_linkup(struct bnx2 *bp)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun u32 bmcr, local_adv, remote_adv, common;
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun bp->link_up = 1;
1180*4882a593Smuzhiyun bp->line_speed = SPEED_1000;
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1183*4882a593Smuzhiyun if (bmcr & BMCR_FULLDPLX) {
1184*4882a593Smuzhiyun bp->duplex = DUPLEX_FULL;
1185*4882a593Smuzhiyun }
1186*4882a593Smuzhiyun else {
1187*4882a593Smuzhiyun bp->duplex = DUPLEX_HALF;
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun if (!(bmcr & BMCR_ANENABLE)) {
1191*4882a593Smuzhiyun return 0;
1192*4882a593Smuzhiyun }
1193*4882a593Smuzhiyun
1194*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1195*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun common = local_adv & remote_adv;
1198*4882a593Smuzhiyun if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun if (common & ADVERTISE_1000XFULL) {
1201*4882a593Smuzhiyun bp->duplex = DUPLEX_FULL;
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun else {
1204*4882a593Smuzhiyun bp->duplex = DUPLEX_HALF;
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun return 0;
1209*4882a593Smuzhiyun }
1210*4882a593Smuzhiyun
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	/* Determine line speed, duplex, and MDI-X state for a copper PHY
	 * after a link event.  Always returns 0; a failed autoneg
	 * resolution clears bp->link_up instead.
	 */
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Try 1000BASE-T first.  The partner's ability bits in
		 * MII_STAT1000 sit two positions above our MII_CTRL1000
		 * advertisement bits, hence the >> 2 before masking.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common 1G ability: fall back to 10/100
			 * resolution, best mode first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: take speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		/* Record whether the PHY resolved to crossed (MDI-X) pairs. */
		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1286*4882a593Smuzhiyun
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	/* Program the L2 context-type word for the rx ring identified by
	 * cid, including the current tx flow-control state.
	 */
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): magic field at bits 9:8 — meaning not visible
	 * here; confirm against chip documentation.
	 */
	val |= 0x02 << 8;

	/* Reflect negotiated/forced tx pause into the context so the
	 * chip's flow-control behavior matches bp->flow_ctrl.
	 */
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun static void
bnx2_init_all_rx_contexts(struct bnx2 * bp)1303*4882a593Smuzhiyun bnx2_init_all_rx_contexts(struct bnx2 *bp)
1304*4882a593Smuzhiyun {
1305*4882a593Smuzhiyun int i;
1306*4882a593Smuzhiyun u32 cid;
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1309*4882a593Smuzhiyun if (i == 1)
1310*4882a593Smuzhiyun cid = RX_RSS_CID;
1311*4882a593Smuzhiyun bnx2_init_rx_context(bp, cid);
1312*4882a593Smuzhiyun }
1313*4882a593Smuzhiyun }
1314*4882a593Smuzhiyun
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	/* Program the EMAC for the currently resolved link parameters:
	 * TX length/IPG tuning, port mode for the link speed, duplex,
	 * and rx/tx pause enables.
	 */
	u32 val;

	/* NOTE(review): 0x2620/0x26ff are EMAC_TX_LENGTHS values; the
	 * larger one is applied only for 1G half duplex.  Field meanings
	 * are not visible here — confirm against chip documentation.
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			/* Non-5706 chips have a dedicated 10M MII mode;
			 * the 5706 falls through to plain MII.
			 */
			if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			fallthrough;
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			/* 2.5G is GMII mode with the 25G_MODE bit added. */
			val |= BNX2_EMAC_MODE_25G_MODE;
			fallthrough;
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* Flow-control state is cached per rx context; refresh them all. */
	bnx2_init_all_rx_contexts(bp);
}
1381*4882a593Smuzhiyun
1382*4882a593Smuzhiyun static void
bnx2_enable_bmsr1(struct bnx2 * bp)1383*4882a593Smuzhiyun bnx2_enable_bmsr1(struct bnx2 *bp)
1384*4882a593Smuzhiyun {
1385*4882a593Smuzhiyun if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386*4882a593Smuzhiyun (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1387*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388*4882a593Smuzhiyun MII_BNX2_BLK_ADDR_GP_STATUS);
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun
1391*4882a593Smuzhiyun static void
bnx2_disable_bmsr1(struct bnx2 * bp)1392*4882a593Smuzhiyun bnx2_disable_bmsr1(struct bnx2 *bp)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1395*4882a593Smuzhiyun (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1396*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1397*4882a593Smuzhiyun MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1398*4882a593Smuzhiyun }
1399*4882a593Smuzhiyun
1400*4882a593Smuzhiyun static int
bnx2_test_and_enable_2g5(struct bnx2 * bp)1401*4882a593Smuzhiyun bnx2_test_and_enable_2g5(struct bnx2 *bp)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun u32 up1;
1404*4882a593Smuzhiyun int ret = 1;
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1407*4882a593Smuzhiyun return 0;
1408*4882a593Smuzhiyun
1409*4882a593Smuzhiyun if (bp->autoneg & AUTONEG_SPEED)
1410*4882a593Smuzhiyun bp->advertising |= ADVERTISED_2500baseX_Full;
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1413*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_up1, &up1);
1416*4882a593Smuzhiyun if (!(up1 & BCM5708S_UP1_2G5)) {
1417*4882a593Smuzhiyun up1 |= BCM5708S_UP1_2G5;
1418*4882a593Smuzhiyun bnx2_write_phy(bp, bp->mii_up1, up1);
1419*4882a593Smuzhiyun ret = 0;
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1423*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1424*4882a593Smuzhiyun MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun return ret;
1427*4882a593Smuzhiyun }
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun static int
bnx2_test_and_disable_2g5(struct bnx2 * bp)1430*4882a593Smuzhiyun bnx2_test_and_disable_2g5(struct bnx2 *bp)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun u32 up1;
1433*4882a593Smuzhiyun int ret = 0;
1434*4882a593Smuzhiyun
1435*4882a593Smuzhiyun if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1436*4882a593Smuzhiyun return 0;
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1439*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_up1, &up1);
1442*4882a593Smuzhiyun if (up1 & BCM5708S_UP1_2G5) {
1443*4882a593Smuzhiyun up1 &= ~BCM5708S_UP1_2G5;
1444*4882a593Smuzhiyun bnx2_write_phy(bp, bp->mii_up1, up1);
1445*4882a593Smuzhiyun ret = 1;
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1449*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1450*4882a593Smuzhiyun MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun return ret;
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun
/* Force the SerDes PHY to a fixed 2.5G speed (no autonegotiation).
 * Chip-specific: the 5709 programs the SERDES_DIG MISC1 force bits,
 * the 5708 sets a BMCR force bit; other chips are left untouched.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Select the SERDES_DIG block to reach MISC1. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			/* Replace any previously forced speed with 2.5G. */
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default block before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Bail out if the BMCR read failed; bmcr would be stale. */
	if (err)
		return;

	/* Forcing a speed implies autoneg off; honor requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1498*4882a593Smuzhiyun
/* Undo a forced 2.5G speed and return the SerDes PHY to normal operation.
 * Mirror of bnx2_enable_forced_2g5(): clears the 5709 MISC1 force bit or
 * the 5708 BMCR force bit, then restarts autoneg if it was requested.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Select the SERDES_DIG block to reach MISC1. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default block before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Bail out if the BMCR read failed; bmcr would be stale. */
	if (err)
		return;

	/* Re-enable and restart autonegotiation if it was configured. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun static void
bnx2_5706s_force_link_dn(struct bnx2 * bp,int start)1539*4882a593Smuzhiyun bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun u32 val;
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1544*4882a593Smuzhiyun bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1545*4882a593Smuzhiyun if (start)
1546*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1547*4882a593Smuzhiyun else
1548*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1549*4882a593Smuzhiyun }
1550*4882a593Smuzhiyun
/* Re-evaluate PHY link state and program the MAC accordingly.
 * Reads BMSR (twice, since link-status bits are latched), applies the
 * 5706 SerDes workaround, resolves flow control on link-up, and reports
 * any link-state transition.  Caller holds bp->phy_lock.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY firmware owns the link; nothing for the driver to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: link-status is latched-low, so the first read
	 * clears a stale latch and the second returns current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down link first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG shadow register is also latched; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Override BMSR: trust the EMAC link bit plus SerDes sync
		 * instead of the PHY's own status on this chip.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Pick up negotiated speed/duplex per PHY type. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G so autoneg can
		 * try all speeds again.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Parallel detect found a non-autoneg partner earlier;
		 * re-enable autoneg now that the link dropped.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/notify when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1634*4882a593Smuzhiyun
1635*4882a593Smuzhiyun static int
bnx2_reset_phy(struct bnx2 * bp)1636*4882a593Smuzhiyun bnx2_reset_phy(struct bnx2 *bp)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun int i;
1639*4882a593Smuzhiyun u32 reg;
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1642*4882a593Smuzhiyun
1643*4882a593Smuzhiyun #define PHY_RESET_MAX_WAIT 100
1644*4882a593Smuzhiyun for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1645*4882a593Smuzhiyun udelay(10);
1646*4882a593Smuzhiyun
1647*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_bmcr, ®);
1648*4882a593Smuzhiyun if (!(reg & BMCR_RESET)) {
1649*4882a593Smuzhiyun udelay(20);
1650*4882a593Smuzhiyun break;
1651*4882a593Smuzhiyun }
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun if (i == PHY_RESET_MAX_WAIT) {
1654*4882a593Smuzhiyun return -EBUSY;
1655*4882a593Smuzhiyun }
1656*4882a593Smuzhiyun return 0;
1657*4882a593Smuzhiyun }
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun static u32
bnx2_phy_get_pause_adv(struct bnx2 * bp)1660*4882a593Smuzhiyun bnx2_phy_get_pause_adv(struct bnx2 *bp)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun u32 adv = 0;
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1665*4882a593Smuzhiyun (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1666*4882a593Smuzhiyun
1667*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1668*4882a593Smuzhiyun adv = ADVERTISE_1000XPAUSE;
1669*4882a593Smuzhiyun }
1670*4882a593Smuzhiyun else {
1671*4882a593Smuzhiyun adv = ADVERTISE_PAUSE_CAP;
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1675*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1676*4882a593Smuzhiyun adv = ADVERTISE_1000XPSE_ASYM;
1677*4882a593Smuzhiyun }
1678*4882a593Smuzhiyun else {
1679*4882a593Smuzhiyun adv = ADVERTISE_PAUSE_ASYM;
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1683*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1684*4882a593Smuzhiyun adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1685*4882a593Smuzhiyun }
1686*4882a593Smuzhiyun else {
1687*4882a593Smuzhiyun adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun }
1690*4882a593Smuzhiyun return adv;
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1694*4882a593Smuzhiyun
/* Program link parameters on a firmware-managed (remote) PHY.
 * Encodes the requested autoneg/speed/duplex/pause settings into a
 * netlink-style bitmask, writes it to the shared-memory mailbox, and
 * asks firmware to apply it via bnx2_fw_sync().
 * Caller holds bp->phy_lock; the lock is dropped around the (sleeping)
 * firmware sync, hence the sparse annotations.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every mode enabled in bp->advertising. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: request exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map pause advertisement bits onto the firmware's flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep; drop the BH spinlock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1753*4882a593Smuzhiyun
/* Configure the SerDes PHY according to the requested link settings.
 * Delegates to the remote-PHY path when firmware manages the PHY.
 * Forced mode: programs BMCR/advertisement directly, bouncing the link
 * when settings changed so the partner notices.  Autoneg mode: updates
 * the advertisement and restarts autoneg, arming a timer to catch
 * non-autonegotiating partners via parallel detect.
 * Caller holds bp->phy_lock (dropped briefly for msleep).  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific force-2.5G programming. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve flow control. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; drop the BH spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Advertisement already correct and autoneg enabled. */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1870*4882a593Smuzhiyun
/* All fibre link modes to advertise; includes 2.5G only when the PHY is
 * 2.5G-capable.  NOTE: expands `bp` from the enclosing scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper link modes to advertise (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* All 10/100 MII advertisement bits (plus CSMA, which must stay set). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* All 1000BASE-T advertisement bits (MII_CTRL1000 register). */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1885*4882a593Smuzhiyun
1886*4882a593Smuzhiyun static void
bnx2_set_default_remote_link(struct bnx2 * bp)1887*4882a593Smuzhiyun bnx2_set_default_remote_link(struct bnx2 *bp)
1888*4882a593Smuzhiyun {
1889*4882a593Smuzhiyun u32 link;
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun if (bp->phy_port == PORT_TP)
1892*4882a593Smuzhiyun link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1893*4882a593Smuzhiyun else
1894*4882a593Smuzhiyun link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1895*4882a593Smuzhiyun
1896*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1897*4882a593Smuzhiyun bp->req_line_speed = 0;
1898*4882a593Smuzhiyun bp->autoneg |= AUTONEG_SPEED;
1899*4882a593Smuzhiyun bp->advertising = ADVERTISED_Autoneg;
1900*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1901*4882a593Smuzhiyun bp->advertising |= ADVERTISED_10baseT_Half;
1902*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1903*4882a593Smuzhiyun bp->advertising |= ADVERTISED_10baseT_Full;
1904*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905*4882a593Smuzhiyun bp->advertising |= ADVERTISED_100baseT_Half;
1906*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1907*4882a593Smuzhiyun bp->advertising |= ADVERTISED_100baseT_Full;
1908*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1909*4882a593Smuzhiyun bp->advertising |= ADVERTISED_1000baseT_Full;
1910*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1911*4882a593Smuzhiyun bp->advertising |= ADVERTISED_2500baseX_Full;
1912*4882a593Smuzhiyun } else {
1913*4882a593Smuzhiyun bp->autoneg = 0;
1914*4882a593Smuzhiyun bp->advertising = 0;
1915*4882a593Smuzhiyun bp->req_duplex = DUPLEX_FULL;
1916*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1917*4882a593Smuzhiyun bp->req_line_speed = SPEED_10;
1918*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1919*4882a593Smuzhiyun bp->req_duplex = DUPLEX_HALF;
1920*4882a593Smuzhiyun }
1921*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1922*4882a593Smuzhiyun bp->req_line_speed = SPEED_100;
1923*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1924*4882a593Smuzhiyun bp->req_duplex = DUPLEX_HALF;
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1927*4882a593Smuzhiyun bp->req_line_speed = SPEED_1000;
1928*4882a593Smuzhiyun if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1929*4882a593Smuzhiyun bp->req_line_speed = SPEED_2500;
1930*4882a593Smuzhiyun }
1931*4882a593Smuzhiyun }
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun static void
bnx2_set_default_link(struct bnx2 * bp)1934*4882a593Smuzhiyun bnx2_set_default_link(struct bnx2 *bp)
1935*4882a593Smuzhiyun {
1936*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1937*4882a593Smuzhiyun bnx2_set_default_remote_link(bp);
1938*4882a593Smuzhiyun return;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun
1941*4882a593Smuzhiyun bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1942*4882a593Smuzhiyun bp->req_line_speed = 0;
1943*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1944*4882a593Smuzhiyun u32 reg;
1945*4882a593Smuzhiyun
1946*4882a593Smuzhiyun bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1949*4882a593Smuzhiyun reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1950*4882a593Smuzhiyun if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1951*4882a593Smuzhiyun bp->autoneg = 0;
1952*4882a593Smuzhiyun bp->req_line_speed = bp->line_speed = SPEED_1000;
1953*4882a593Smuzhiyun bp->req_duplex = DUPLEX_FULL;
1954*4882a593Smuzhiyun }
1955*4882a593Smuzhiyun } else
1956*4882a593Smuzhiyun bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun static void
bnx2_send_heart_beat(struct bnx2 * bp)1960*4882a593Smuzhiyun bnx2_send_heart_beat(struct bnx2 *bp)
1961*4882a593Smuzhiyun {
1962*4882a593Smuzhiyun u32 msg;
1963*4882a593Smuzhiyun u32 addr;
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun spin_lock(&bp->indirect_lock);
1966*4882a593Smuzhiyun msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1967*4882a593Smuzhiyun addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1968*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1969*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1970*4882a593Smuzhiyun spin_unlock(&bp->indirect_lock);
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun
/* Handle a link-status event from the remote-PHY firmware.
 * Decodes BNX2_LINK_STATUS shared memory into link state, speed, duplex,
 * flow control and port type, then reprograms the MAC and reports any
 * link change.  Also answers a firmware heartbeat request if flagged.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xHALF case sets half duplex then falls through to
		 * the matching xFULL case to set the line speed.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		/* Resolve flow control: honor the local request when flow
		 * control is not fully autonegotiated, otherwise take the
		 * negotiated result reported by firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change (fibre <-> copper) requires reloading
		 * the default link configuration.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun static int
bnx2_set_remote_link(struct bnx2 * bp)2055*4882a593Smuzhiyun bnx2_set_remote_link(struct bnx2 *bp)
2056*4882a593Smuzhiyun {
2057*4882a593Smuzhiyun u32 evt_code;
2058*4882a593Smuzhiyun
2059*4882a593Smuzhiyun evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2060*4882a593Smuzhiyun switch (evt_code) {
2061*4882a593Smuzhiyun case BNX2_FW_EVT_CODE_LINK_EVENT:
2062*4882a593Smuzhiyun bnx2_remote_phy_event(bp);
2063*4882a593Smuzhiyun break;
2064*4882a593Smuzhiyun case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2065*4882a593Smuzhiyun default:
2066*4882a593Smuzhiyun bnx2_send_heart_beat(bp);
2067*4882a593Smuzhiyun break;
2068*4882a593Smuzhiyun }
2069*4882a593Smuzhiyun return 0;
2070*4882a593Smuzhiyun }
2071*4882a593Smuzhiyun
/* Program the copper PHY from the requested settings in *bp.
 *
 * In autoneg mode the advertisement registers are rewritten and
 * autoneg restarted only when something actually changed; in forced
 * mode the link may be bounced to apply the new speed/duplex.
 * Called with bp->phy_lock held; the lock is dropped briefly around
 * the forced link-down delay (see annotations).  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Keep only the 10/100 speed and pause bits of the current
	 * advertisement so it can be compared against the new value. */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Rewrite the advertisement and restart autoneg only if
		 * the advertised bits changed or autoneg is currently off. */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status latches low; read twice to get the
		 * current state rather than a stale latched value. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* phy_lock is released while waiting for the link
			 * to drop; callers must tolerate this window. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2163*4882a593Smuzhiyun
2164*4882a593Smuzhiyun static int
bnx2_setup_phy(struct bnx2 * bp,u8 port)2165*4882a593Smuzhiyun bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166*4882a593Smuzhiyun __releases(&bp->phy_lock)
2167*4882a593Smuzhiyun __acquires(&bp->phy_lock)
2168*4882a593Smuzhiyun {
2169*4882a593Smuzhiyun if (bp->loopback == MAC_LOOPBACK)
2170*4882a593Smuzhiyun return 0;
2171*4882a593Smuzhiyun
2172*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173*4882a593Smuzhiyun return bnx2_setup_serdes_phy(bp, port);
2174*4882a593Smuzhiyun }
2175*4882a593Smuzhiyun else {
2176*4882a593Smuzhiyun return bnx2_setup_copper_phy(bp);
2177*4882a593Smuzhiyun }
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S exposes the IEEE registers at an offset (+0x10) and uses
 * banked access via MII_BNX2_BLK_ADDR; this routine records the shifted
 * register numbers in *bp and walks the banks to set fiber mode, the
 * 2.5G capability bit, next-page/BAM and CL73 autoneg controls.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE-standard registers live at a +0x10 offset on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block before touching
	 * any other banks. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of media auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the PHY pointing at the combo IEEE bank. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2229*4882a593Smuzhiyun
/* Initialize the 5708 SerDes PHY: fiber mode with auto-detect, PLL
 * early-detect, optional 2.5G advertisement, a TX-amplitude tweak on
 * early chip revs, and a NVRAM-driven TX control value on backplane
 * boards.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board is capable. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TXCTL3 value from shared-memory config; applied
	 * only on backplane (blade) designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2287*4882a593Smuzhiyun
/* Initialize the 5706 SerDes PHY.  Programs undocumented shadow
 * registers (0x18/0x1c) differently for jumbo vs standard MTU;
 * the magic values are vendor-specified.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > ETH_DATA_LEN) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended-length configuration. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2325*4882a593Smuzhiyun
/* Initialize the copper PHY: apply the CRC and early-DAC workarounds
 * when flagged, configure extended packet length for jumbo MTU, and
 * enable ethernet@wirespeed (plus auto-MDIX on 5709).  The 0x15/0x17/
 * 0x18 writes are vendor-specified shadow-register sequences.
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor workaround sequence for boards flagged with the CRC
	 * erratum. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via DSP expansion register bit 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > ETH_DATA_LEN) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended-length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |= AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun
/* Top-level PHY initialization: set default MII register numbers,
 * enable link attention, read the PHY ID, run the chip-specific init
 * (skipped entirely when the PHY is managed remotely by firmware),
 * then apply the current link configuration via bnx2_setup_phy().
 * Called with bp->phy_lock held; callees may drop and re-acquire it.
 * Returns 0 or a negative errno from the init/setup path.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE register map; the 5709s init overrides these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Firmware owns the PHY on remote-capable devices; skip local
	 * probing/initialization. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2430*4882a593Smuzhiyun
2431*4882a593Smuzhiyun static int
bnx2_set_mac_loopback(struct bnx2 * bp)2432*4882a593Smuzhiyun bnx2_set_mac_loopback(struct bnx2 *bp)
2433*4882a593Smuzhiyun {
2434*4882a593Smuzhiyun u32 mac_mode;
2435*4882a593Smuzhiyun
2436*4882a593Smuzhiyun mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2437*4882a593Smuzhiyun mac_mode &= ~BNX2_EMAC_MODE_PORT;
2438*4882a593Smuzhiyun mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2439*4882a593Smuzhiyun BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440*4882a593Smuzhiyun bp->link_up = 1;
2441*4882a593Smuzhiyun return 0;
2442*4882a593Smuzhiyun }
2443*4882a593Smuzhiyun
2444*4882a593Smuzhiyun static int bnx2_test_link(struct bnx2 *);
2445*4882a593Smuzhiyun
/* Put the PHY into loopback at 1000/full for the self-test path.
 * Polls up to ~1s for the loopback link to come up, then programs the
 * EMAC port mode to GMII with loopback/force bits cleared.  Takes and
 * releases bp->phy_lock around the PHY write.  Returns 0, or the
 * error from the PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait (up to 10 x 100ms) for the loopback link; proceeding
	 * anyway on timeout — the subsequent test will catch failure. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2475*4882a593Smuzhiyun
/* Dump management-CPU (MCP) state and key shared-memory words to the
 * kernel log for firmware-hang diagnosis.  The pr_cont() calls continue
 * the immediately preceding netdev_err() line, so those pairs must stay
 * adjacent.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* MCP state registers moved on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is read twice on purpose — differing
	 * values suggest the MCP is still executing, identical values
	 * suggest it is hung. */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2518*4882a593Smuzhiyun
/* Post a message to the bootcode via the driver mailbox and optionally
 * wait for its acknowledgement.
 *
 * @msg_data: message code; the driver's incrementing sequence number is
 *            OR'ed in before posting.
 * @ack:      if zero, post and return immediately.
 * @silent:   suppress the error dump on timeout.
 *
 * Returns 0 on success (or for WAIT0 messages regardless of ack state),
 * -EBUSY on ack timeout, -EIO if the firmware reported a bad status.
 * Sleeps; must be called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware acks by echoing our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not required to be acked. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2565*4882a593Smuzhiyun
/* Initialize the 5709 context memory: trigger the hardware MEM_INIT,
 * then zero each host context page and publish its DMA address into
 * the chip's host page table, polling each write request to completion.
 * Returns 0, -EBUSY on a poll timeout, or -ENOMEM if a context page
 * was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size (log2 - 8) into bits 16+. */
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait (up to ~20us) for the self-clearing MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Publish the page's DMA address (lo with VALID bit, then
		 * hi) and request the page-table write. */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware clears the write request. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2613*4882a593Smuzhiyun
/* Zero-initialize the on-chip context memory for all 96 connection
 * IDs on pre-5709 chips.  The 5706 A0 has a CID-to-physical-CID
 * remapping quirk for CIDs with bit 3 set, handled below.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 erratum: CIDs with bit 3 set map to a
			 * different physical CID. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Map the virtual CID window onto the physical
			 * context page before clearing it. */
			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2656*4882a593Smuzhiyun
/* Work around bad RX buffer memory blocks: drain the chip's mbuf
 * allocator, remember the good buffers (bit 9 clear), and free only
 * those back — permanently retiring the bad blocks.  Returns 0 or
 * -ENOMEM if the temporary tracking array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Temporary array for the good buffer handles (max 512). */
	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
	if (!good_mbuf)
		return -ENOMEM;

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the handle in the format the FW_BUF_FREE
		 * register expects. */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2705*4882a593Smuzhiyun
2706*4882a593Smuzhiyun static void
bnx2_set_mac_addr(struct bnx2 * bp,u8 * mac_addr,u32 pos)2707*4882a593Smuzhiyun bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2708*4882a593Smuzhiyun {
2709*4882a593Smuzhiyun u32 val;
2710*4882a593Smuzhiyun
2711*4882a593Smuzhiyun val = (mac_addr[0] << 8) | mac_addr[1];
2712*4882a593Smuzhiyun
2713*4882a593Smuzhiyun BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2714*4882a593Smuzhiyun
2715*4882a593Smuzhiyun val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2716*4882a593Smuzhiyun (mac_addr[4] << 8) | mac_addr[5];
2717*4882a593Smuzhiyun
2718*4882a593Smuzhiyun BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2719*4882a593Smuzhiyun }
2720*4882a593Smuzhiyun
/* Allocate and DMA-map one page for slot @index of the RX page ring,
 * recording it in the software ring and writing its bus address into
 * the hardware descriptor.  Returns 0, -ENOMEM on allocation failure,
 * or -EIO on DMA-mapping failure (page freed before return).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Record page + mapping in the sw ring, then publish the 64-bit
	 * bus address to the hardware descriptor. */
	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2745*4882a593Smuzhiyun
2746*4882a593Smuzhiyun static void
bnx2_free_rx_page(struct bnx2 * bp,struct bnx2_rx_ring_info * rxr,u16 index)2747*4882a593Smuzhiyun bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2748*4882a593Smuzhiyun {
2749*4882a593Smuzhiyun struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2750*4882a593Smuzhiyun struct page *page = rx_pg->page;
2751*4882a593Smuzhiyun
2752*4882a593Smuzhiyun if (!page)
2753*4882a593Smuzhiyun return;
2754*4882a593Smuzhiyun
2755*4882a593Smuzhiyun dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2756*4882a593Smuzhiyun PAGE_SIZE, PCI_DMA_FROMDEVICE);
2757*4882a593Smuzhiyun
2758*4882a593Smuzhiyun __free_page(page);
2759*4882a593Smuzhiyun rx_pg->page = NULL;
2760*4882a593Smuzhiyun }
2761*4882a593Smuzhiyun
2762*4882a593Smuzhiyun static inline int
bnx2_alloc_rx_data(struct bnx2 * bp,struct bnx2_rx_ring_info * rxr,u16 index,gfp_t gfp)2763*4882a593Smuzhiyun bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2764*4882a593Smuzhiyun {
2765*4882a593Smuzhiyun u8 *data;
2766*4882a593Smuzhiyun struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2767*4882a593Smuzhiyun dma_addr_t mapping;
2768*4882a593Smuzhiyun struct bnx2_rx_bd *rxbd =
2769*4882a593Smuzhiyun &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2770*4882a593Smuzhiyun
2771*4882a593Smuzhiyun data = kmalloc(bp->rx_buf_size, gfp);
2772*4882a593Smuzhiyun if (!data)
2773*4882a593Smuzhiyun return -ENOMEM;
2774*4882a593Smuzhiyun
2775*4882a593Smuzhiyun mapping = dma_map_single(&bp->pdev->dev,
2776*4882a593Smuzhiyun get_l2_fhdr(data),
2777*4882a593Smuzhiyun bp->rx_buf_use_size,
2778*4882a593Smuzhiyun PCI_DMA_FROMDEVICE);
2779*4882a593Smuzhiyun if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2780*4882a593Smuzhiyun kfree(data);
2781*4882a593Smuzhiyun return -EIO;
2782*4882a593Smuzhiyun }
2783*4882a593Smuzhiyun
2784*4882a593Smuzhiyun rx_buf->data = data;
2785*4882a593Smuzhiyun dma_unmap_addr_set(rx_buf, mapping, mapping);
2786*4882a593Smuzhiyun
2787*4882a593Smuzhiyun rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2788*4882a593Smuzhiyun rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2789*4882a593Smuzhiyun
2790*4882a593Smuzhiyun rxr->rx_prod_bseq += bp->rx_buf_use_size;
2791*4882a593Smuzhiyun
2792*4882a593Smuzhiyun return 0;
2793*4882a593Smuzhiyun }
2794*4882a593Smuzhiyun
2795*4882a593Smuzhiyun static int
bnx2_phy_event_is_set(struct bnx2 * bp,struct bnx2_napi * bnapi,u32 event)2796*4882a593Smuzhiyun bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2797*4882a593Smuzhiyun {
2798*4882a593Smuzhiyun struct status_block *sblk = bnapi->status_blk.msi;
2799*4882a593Smuzhiyun u32 new_link_state, old_link_state;
2800*4882a593Smuzhiyun int is_set = 1;
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun new_link_state = sblk->status_attn_bits & event;
2803*4882a593Smuzhiyun old_link_state = sblk->status_attn_bits_ack & event;
2804*4882a593Smuzhiyun if (new_link_state != old_link_state) {
2805*4882a593Smuzhiyun if (new_link_state)
2806*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807*4882a593Smuzhiyun else
2808*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2809*4882a593Smuzhiyun } else
2810*4882a593Smuzhiyun is_set = 0;
2811*4882a593Smuzhiyun
2812*4882a593Smuzhiyun return is_set;
2813*4882a593Smuzhiyun }
2814*4882a593Smuzhiyun
/* Handle a PHY/link attention event under bp->phy_lock.  Both events
 * must be checked, in this order: bnx2_phy_event_is_set() acknowledges
 * the attention bit in hardware as a side effect, so neither call may
 * be skipped or reordered.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	/* Local link state change. */
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	/* Timer-abort attention; handled as a remote-link update. */
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi * bnapi)2830*4882a593Smuzhiyun bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2831*4882a593Smuzhiyun {
2832*4882a593Smuzhiyun u16 cons;
2833*4882a593Smuzhiyun
2834*4882a593Smuzhiyun cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2835*4882a593Smuzhiyun
2836*4882a593Smuzhiyun if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2837*4882a593Smuzhiyun cons++;
2838*4882a593Smuzhiyun return cons;
2839*4882a593Smuzhiyun }
2840*4882a593Smuzhiyun
/* Reclaim transmitted buffers for this NAPI instance's tx ring, up to
 * @budget packets.  Unmaps and frees each completed skb, updates the
 * consumer indices, and wakes the tx queue if it was stopped and enough
 * descriptors are now free.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance services the tx queue of the same index. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index one past the last BD of this packet. */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				/* Account for the ring-page boundary entry
				 * that the consumer index skips.
				 */
				last_idx++;
			}
			/* Not all BDs of this packet are completed yet;
			 * the signed 16-bit difference handles index wrap.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment BD belonging to this packet. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Caught up with hardware: re-read the consumer index in
		 * case more completions arrived meanwhile.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with the xmit path
	 * stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2935*4882a593Smuzhiyun
/* Recycle @count page-ring entries from the consumer side back to the
 * producer side so the hardware can reuse them.  If @skb is non-NULL,
 * the caller failed to replace the last page attached to the skb's
 * frags: that page is detached, returned to the current consumer slot,
 * and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		/* Give the page back to the current consumer slot. */
		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page, its DMA mapping, and the descriptor
		 * address from the consumer slot to the producer slot
		 * when they differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2993*4882a593Smuzhiyun
/* Recycle an rx data buffer from consumer slot @cons back onto the ring
 * at producer slot @prod without freeing it (used when the packet was
 * copied out or dropped).  The DMA mapping and the hardware descriptor
 * address travel with the buffer.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the CPU-synced header region back to the device. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3023*4882a593Smuzhiyun
/* Build an skb for a received packet larger than the copy threshold.
 * @data is the buffer just taken off the rx ring; @ring_idx packs the
 * consumer index in the high 16 bits and the producer index in the low
 * 16 bits.  A non-zero @hdr_len means the packet is split: the first
 * @hdr_len bytes are in @data and the remainder is in page-ring pages,
 * which get attached as skb frags.  @len excludes the 4-byte CRC that
 * the chip appends (the raw buffer still contains it).
 * Returns NULL on failure after recycling the buffer (and any pages).
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Refill the producer slot before consuming @data. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* Shared failure path (also entered via goto below):
		 * recycle this packet's page-ring entries if it was split.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Point skb->data at the packet payload past the l2_fhdr area. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		/* Unsplit packet: everything is linear. */
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size counts the trailing 4-byte CRC, stripped below. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The remaining bytes are (part of) the CRC
				 * only: recycle the leftover pages and trim
				 * the CRC bytes already counted in the skb.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* strip the CRC */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				/* Recycles remaining pages and frees the skb. */
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3128*4882a593Smuzhiyun
3129*4882a593Smuzhiyun static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi * bnapi)3130*4882a593Smuzhiyun bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3131*4882a593Smuzhiyun {
3132*4882a593Smuzhiyun u16 cons;
3133*4882a593Smuzhiyun
3134*4882a593Smuzhiyun cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3135*4882a593Smuzhiyun
3136*4882a593Smuzhiyun if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3137*4882a593Smuzhiyun cons++;
3138*4882a593Smuzhiyun return cons;
3139*4882a593Smuzhiyun }
3140*4882a593Smuzhiyun
3141*4882a593Smuzhiyun static int
bnx2_rx_int(struct bnx2 * bp,struct bnx2_napi * bnapi,int budget)3142*4882a593Smuzhiyun bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3143*4882a593Smuzhiyun {
3144*4882a593Smuzhiyun struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3145*4882a593Smuzhiyun u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3146*4882a593Smuzhiyun struct l2_fhdr *rx_hdr;
3147*4882a593Smuzhiyun int rx_pkt = 0, pg_ring_used = 0;
3148*4882a593Smuzhiyun
3149*4882a593Smuzhiyun if (budget <= 0)
3150*4882a593Smuzhiyun return rx_pkt;
3151*4882a593Smuzhiyun
3152*4882a593Smuzhiyun hw_cons = bnx2_get_hw_rx_cons(bnapi);
3153*4882a593Smuzhiyun sw_cons = rxr->rx_cons;
3154*4882a593Smuzhiyun sw_prod = rxr->rx_prod;
3155*4882a593Smuzhiyun
3156*4882a593Smuzhiyun /* Memory barrier necessary as speculative reads of the rx
3157*4882a593Smuzhiyun * buffer can be ahead of the index in the status block
3158*4882a593Smuzhiyun */
3159*4882a593Smuzhiyun rmb();
3160*4882a593Smuzhiyun while (sw_cons != hw_cons) {
3161*4882a593Smuzhiyun unsigned int len, hdr_len;
3162*4882a593Smuzhiyun u32 status;
3163*4882a593Smuzhiyun struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3164*4882a593Smuzhiyun struct sk_buff *skb;
3165*4882a593Smuzhiyun dma_addr_t dma_addr;
3166*4882a593Smuzhiyun u8 *data;
3167*4882a593Smuzhiyun u16 next_ring_idx;
3168*4882a593Smuzhiyun
3169*4882a593Smuzhiyun sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3170*4882a593Smuzhiyun sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3171*4882a593Smuzhiyun
3172*4882a593Smuzhiyun rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3173*4882a593Smuzhiyun data = rx_buf->data;
3174*4882a593Smuzhiyun rx_buf->data = NULL;
3175*4882a593Smuzhiyun
3176*4882a593Smuzhiyun rx_hdr = get_l2_fhdr(data);
3177*4882a593Smuzhiyun prefetch(rx_hdr);
3178*4882a593Smuzhiyun
3179*4882a593Smuzhiyun dma_addr = dma_unmap_addr(rx_buf, mapping);
3180*4882a593Smuzhiyun
3181*4882a593Smuzhiyun dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3182*4882a593Smuzhiyun BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3183*4882a593Smuzhiyun PCI_DMA_FROMDEVICE);
3184*4882a593Smuzhiyun
3185*4882a593Smuzhiyun next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3186*4882a593Smuzhiyun next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3187*4882a593Smuzhiyun prefetch(get_l2_fhdr(next_rx_buf->data));
3188*4882a593Smuzhiyun
3189*4882a593Smuzhiyun len = rx_hdr->l2_fhdr_pkt_len;
3190*4882a593Smuzhiyun status = rx_hdr->l2_fhdr_status;
3191*4882a593Smuzhiyun
3192*4882a593Smuzhiyun hdr_len = 0;
3193*4882a593Smuzhiyun if (status & L2_FHDR_STATUS_SPLIT) {
3194*4882a593Smuzhiyun hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3195*4882a593Smuzhiyun pg_ring_used = 1;
3196*4882a593Smuzhiyun } else if (len > bp->rx_jumbo_thresh) {
3197*4882a593Smuzhiyun hdr_len = bp->rx_jumbo_thresh;
3198*4882a593Smuzhiyun pg_ring_used = 1;
3199*4882a593Smuzhiyun }
3200*4882a593Smuzhiyun
3201*4882a593Smuzhiyun if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3202*4882a593Smuzhiyun L2_FHDR_ERRORS_PHY_DECODE |
3203*4882a593Smuzhiyun L2_FHDR_ERRORS_ALIGNMENT |
3204*4882a593Smuzhiyun L2_FHDR_ERRORS_TOO_SHORT |
3205*4882a593Smuzhiyun L2_FHDR_ERRORS_GIANT_FRAME))) {
3206*4882a593Smuzhiyun
3207*4882a593Smuzhiyun bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3208*4882a593Smuzhiyun sw_ring_prod);
3209*4882a593Smuzhiyun if (pg_ring_used) {
3210*4882a593Smuzhiyun int pages;
3211*4882a593Smuzhiyun
3212*4882a593Smuzhiyun pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3213*4882a593Smuzhiyun
3214*4882a593Smuzhiyun bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3215*4882a593Smuzhiyun }
3216*4882a593Smuzhiyun goto next_rx;
3217*4882a593Smuzhiyun }
3218*4882a593Smuzhiyun
3219*4882a593Smuzhiyun len -= 4;
3220*4882a593Smuzhiyun
3221*4882a593Smuzhiyun if (len <= bp->rx_copy_thresh) {
3222*4882a593Smuzhiyun skb = netdev_alloc_skb(bp->dev, len + 6);
3223*4882a593Smuzhiyun if (!skb) {
3224*4882a593Smuzhiyun bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3225*4882a593Smuzhiyun sw_ring_prod);
3226*4882a593Smuzhiyun goto next_rx;
3227*4882a593Smuzhiyun }
3228*4882a593Smuzhiyun
3229*4882a593Smuzhiyun /* aligned copy */
3230*4882a593Smuzhiyun memcpy(skb->data,
3231*4882a593Smuzhiyun (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3232*4882a593Smuzhiyun len + 6);
3233*4882a593Smuzhiyun skb_reserve(skb, 6);
3234*4882a593Smuzhiyun skb_put(skb, len);
3235*4882a593Smuzhiyun
3236*4882a593Smuzhiyun bnx2_reuse_rx_data(bp, rxr, data,
3237*4882a593Smuzhiyun sw_ring_cons, sw_ring_prod);
3238*4882a593Smuzhiyun
3239*4882a593Smuzhiyun } else {
3240*4882a593Smuzhiyun skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3241*4882a593Smuzhiyun (sw_ring_cons << 16) | sw_ring_prod);
3242*4882a593Smuzhiyun if (!skb)
3243*4882a593Smuzhiyun goto next_rx;
3244*4882a593Smuzhiyun }
3245*4882a593Smuzhiyun if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3246*4882a593Smuzhiyun !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3247*4882a593Smuzhiyun __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3248*4882a593Smuzhiyun
3249*4882a593Smuzhiyun skb->protocol = eth_type_trans(skb, bp->dev);
3250*4882a593Smuzhiyun
3251*4882a593Smuzhiyun if (len > (bp->dev->mtu + ETH_HLEN) &&
3252*4882a593Smuzhiyun skb->protocol != htons(0x8100) &&
3253*4882a593Smuzhiyun skb->protocol != htons(ETH_P_8021AD)) {
3254*4882a593Smuzhiyun
3255*4882a593Smuzhiyun dev_kfree_skb(skb);
3256*4882a593Smuzhiyun goto next_rx;
3257*4882a593Smuzhiyun
3258*4882a593Smuzhiyun }
3259*4882a593Smuzhiyun
3260*4882a593Smuzhiyun skb_checksum_none_assert(skb);
3261*4882a593Smuzhiyun if ((bp->dev->features & NETIF_F_RXCSUM) &&
3262*4882a593Smuzhiyun (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3263*4882a593Smuzhiyun L2_FHDR_STATUS_UDP_DATAGRAM))) {
3264*4882a593Smuzhiyun
3265*4882a593Smuzhiyun if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3266*4882a593Smuzhiyun L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3267*4882a593Smuzhiyun skb->ip_summed = CHECKSUM_UNNECESSARY;
3268*4882a593Smuzhiyun }
3269*4882a593Smuzhiyun if ((bp->dev->features & NETIF_F_RXHASH) &&
3270*4882a593Smuzhiyun ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3271*4882a593Smuzhiyun L2_FHDR_STATUS_USE_RXHASH))
3272*4882a593Smuzhiyun skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3273*4882a593Smuzhiyun PKT_HASH_TYPE_L3);
3274*4882a593Smuzhiyun
3275*4882a593Smuzhiyun skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3276*4882a593Smuzhiyun napi_gro_receive(&bnapi->napi, skb);
3277*4882a593Smuzhiyun rx_pkt++;
3278*4882a593Smuzhiyun
3279*4882a593Smuzhiyun next_rx:
3280*4882a593Smuzhiyun sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3281*4882a593Smuzhiyun sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3282*4882a593Smuzhiyun
3283*4882a593Smuzhiyun if (rx_pkt == budget)
3284*4882a593Smuzhiyun break;
3285*4882a593Smuzhiyun
3286*4882a593Smuzhiyun /* Refresh hw_cons to see if there is new work */
3287*4882a593Smuzhiyun if (sw_cons == hw_cons) {
3288*4882a593Smuzhiyun hw_cons = bnx2_get_hw_rx_cons(bnapi);
3289*4882a593Smuzhiyun rmb();
3290*4882a593Smuzhiyun }
3291*4882a593Smuzhiyun }
3292*4882a593Smuzhiyun rxr->rx_cons = sw_cons;
3293*4882a593Smuzhiyun rxr->rx_prod = sw_prod;
3294*4882a593Smuzhiyun
3295*4882a593Smuzhiyun if (pg_ring_used)
3296*4882a593Smuzhiyun BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3297*4882a593Smuzhiyun
3298*4882a593Smuzhiyun BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3299*4882a593Smuzhiyun
3300*4882a593Smuzhiyun BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3301*4882a593Smuzhiyun
3302*4882a593Smuzhiyun return rx_pkt;
3303*4882a593Smuzhiyun
3304*4882a593Smuzhiyun }
3305*4882a593Smuzhiyun
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until the NAPI poll re-enables them. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3328*4882a593Smuzhiyun
/* MSI ISR for one-shot mode.  Unlike bnx2_msi(), no ack/mask register
 * write is done here - presumably the one-shot MSI mechanism masks the
 * interrupt automatically (NOTE(review): confirm against chip docs).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3345*4882a593Smuzhiyun
/* INTx ISR (may be on a shared line).  Determines whether this device
 * actually asserted the interrupt before claiming it, then masks and
 * deasserts the line and hands the work to NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until the NAPI poll re-enables them. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we saw so the spurious-interrupt check
	 * above works on the next invocation.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3384*4882a593Smuzhiyun
3385*4882a593Smuzhiyun static inline int
bnx2_has_fast_work(struct bnx2_napi * bnapi)3386*4882a593Smuzhiyun bnx2_has_fast_work(struct bnx2_napi *bnapi)
3387*4882a593Smuzhiyun {
3388*4882a593Smuzhiyun struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3389*4882a593Smuzhiyun struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3390*4882a593Smuzhiyun
3391*4882a593Smuzhiyun if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3392*4882a593Smuzhiyun (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3393*4882a593Smuzhiyun return 1;
3394*4882a593Smuzhiyun return 0;
3395*4882a593Smuzhiyun }
3396*4882a593Smuzhiyun
3397*4882a593Smuzhiyun #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3398*4882a593Smuzhiyun STATUS_ATTN_BITS_TIMER_ABORT)
3399*4882a593Smuzhiyun
3400*4882a593Smuzhiyun static inline int
bnx2_has_work(struct bnx2_napi * bnapi)3401*4882a593Smuzhiyun bnx2_has_work(struct bnx2_napi *bnapi)
3402*4882a593Smuzhiyun {
3403*4882a593Smuzhiyun struct status_block *sblk = bnapi->status_blk.msi;
3404*4882a593Smuzhiyun
3405*4882a593Smuzhiyun if (bnx2_has_fast_work(bnapi))
3406*4882a593Smuzhiyun return 1;
3407*4882a593Smuzhiyun
3408*4882a593Smuzhiyun #ifdef BCM_CNIC
3409*4882a593Smuzhiyun if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3410*4882a593Smuzhiyun return 1;
3411*4882a593Smuzhiyun #endif
3412*4882a593Smuzhiyun
3413*4882a593Smuzhiyun if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3414*4882a593Smuzhiyun (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3415*4882a593Smuzhiyun return 1;
3416*4882a593Smuzhiyun
3417*4882a593Smuzhiyun return 0;
3418*4882a593Smuzhiyun }
3419*4882a593Smuzhiyun
/* Detect and recover from a missed MSI.  Called periodically: if work
 * has been pending with no status-block progress since the previous
 * check, assume the MSI was lost, pulse the MSI enable bit off/on to
 * re-arm it, and run the interrupt handler by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to recover if MSI is not currently enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* No progress since the last check: treat it as a missed
		 * MSI.  Toggle the enable bit and invoke the handler
		 * directly to drain the backlog.
		 */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3441*4882a593Smuzhiyun
#ifdef BCM_CNIC
/* Pass the current status block to the registered CNIC offload driver,
 * if any.  The handler's return value is recorded in cnic_tag, which
 * bnx2_has_work() later compares against the hardware status index to
 * detect new CNIC events.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops may be unregistered concurrently; the RCU read lock
	 * keeps the ops structure valid for the duration of the call.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3458*4882a593Smuzhiyun
/* Service a pending link/attention event.  An event is pending when a
 * bit in status_attn_bits differs from its acknowledge copy in
 * status_attn_bits_ack.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back — presumably to flush the posted write before
		 * returning; matches the readback idiom used elsewhere in
		 * this driver.
		 */
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}
3478*4882a593Smuzhiyun
bnx2_poll_work(struct bnx2 * bp,struct bnx2_napi * bnapi,int work_done,int budget)3479*4882a593Smuzhiyun static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3480*4882a593Smuzhiyun int work_done, int budget)
3481*4882a593Smuzhiyun {
3482*4882a593Smuzhiyun struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3483*4882a593Smuzhiyun struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3484*4882a593Smuzhiyun
3485*4882a593Smuzhiyun if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3486*4882a593Smuzhiyun bnx2_tx_int(bp, bnapi, 0);
3487*4882a593Smuzhiyun
3488*4882a593Smuzhiyun if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3489*4882a593Smuzhiyun work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3490*4882a593Smuzhiyun
3491*4882a593Smuzhiyun return work_done;
3492*4882a593Smuzhiyun }
3493*4882a593Smuzhiyun
/* NAPI poll handler for MSI-X vectors.  Loops until the budget is
 * exhausted or no further ring work is visible, then completes NAPI
 * and acks the interrupt with the last seen status index.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete_done(napi, work_done);
			/* Ack at the latest status index so the chip can
			 * raise a new interrupt for this vector.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3520*4882a593Smuzhiyun
/* NAPI poll handler for the single-vector (INTx or MSI) case.  Each
 * loop iteration services link events, ring completions, and CNIC,
 * then either exits on exhausted budget or, when idle, completes NAPI
 * and acks the interrupt.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI: a single ack with the status index
				 * is sufficient.
				 */
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: ack twice — first with MASK_INT set, then
			 * without — presumably to update the index while
			 * still masked before re-enabling the line; keep
			 * this exact two-write order.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3569*4882a593Smuzhiyun
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the receive filters (promiscuous / all-multicast / multicast
 * hash / unicast match entries) from the net_device flags and address
 * lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in the frame when CTAG RX offload is off and
	 * the chip supports leaving tags intact.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s).  Each address is
		 * CRC-hashed to one bit of the 256-bit filter spread
		 * across NUM_MC_HASH_REGISTERS 32-bit registers.
		 */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* More unicast addresses than hardware match slots: fall back
	 * to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the USER0 sort filter. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3658*4882a593Smuzhiyun
3659*4882a593Smuzhiyun static int
check_fw_section(const struct firmware * fw,const struct bnx2_fw_file_section * section,u32 alignment,bool non_empty)3660*4882a593Smuzhiyun check_fw_section(const struct firmware *fw,
3661*4882a593Smuzhiyun const struct bnx2_fw_file_section *section,
3662*4882a593Smuzhiyun u32 alignment, bool non_empty)
3663*4882a593Smuzhiyun {
3664*4882a593Smuzhiyun u32 offset = be32_to_cpu(section->offset);
3665*4882a593Smuzhiyun u32 len = be32_to_cpu(section->len);
3666*4882a593Smuzhiyun
3667*4882a593Smuzhiyun if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3668*4882a593Smuzhiyun return -EINVAL;
3669*4882a593Smuzhiyun if ((non_empty && len == 0) || len > fw->size - offset ||
3670*4882a593Smuzhiyun len & (alignment - 1))
3671*4882a593Smuzhiyun return -EINVAL;
3672*4882a593Smuzhiyun return 0;
3673*4882a593Smuzhiyun }
3674*4882a593Smuzhiyun
3675*4882a593Smuzhiyun static int
check_mips_fw_entry(const struct firmware * fw,const struct bnx2_mips_fw_file_entry * entry)3676*4882a593Smuzhiyun check_mips_fw_entry(const struct firmware *fw,
3677*4882a593Smuzhiyun const struct bnx2_mips_fw_file_entry *entry)
3678*4882a593Smuzhiyun {
3679*4882a593Smuzhiyun if (check_fw_section(fw, &entry->text, 4, true) ||
3680*4882a593Smuzhiyun check_fw_section(fw, &entry->data, 4, false) ||
3681*4882a593Smuzhiyun check_fw_section(fw, &entry->rodata, 4, false))
3682*4882a593Smuzhiyun return -EINVAL;
3683*4882a593Smuzhiyun return 0;
3684*4882a593Smuzhiyun }
3685*4882a593Smuzhiyun
bnx2_release_firmware(struct bnx2 * bp)3686*4882a593Smuzhiyun static void bnx2_release_firmware(struct bnx2 *bp)
3687*4882a593Smuzhiyun {
3688*4882a593Smuzhiyun if (bp->rv2p_firmware) {
3689*4882a593Smuzhiyun release_firmware(bp->mips_firmware);
3690*4882a593Smuzhiyun release_firmware(bp->rv2p_firmware);
3691*4882a593Smuzhiyun bp->rv2p_firmware = NULL;
3692*4882a593Smuzhiyun }
3693*4882a593Smuzhiyun }
3694*4882a593Smuzhiyun
/* Select and load the MIPS and RV2P firmware images for this chip,
 * then sanity-check every section descriptor before anything is ever
 * written to the hardware.  Returns 0 on success or a negative errno;
 * on failure neither image remains held.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* 5709 has its own images, with a separate RV2P build for the
	 * A0/A1 steppings; all other chips use the 5706 images.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Validate all sections of both images before use. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

	/* Error cleanup: release in reverse order of acquisition;
	 * rv2p_firmware is NULLed because it doubles as the cache flag
	 * checked by bnx2_request_firmware().
	 */
err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3754*4882a593Smuzhiyun
bnx2_request_firmware(struct bnx2 * bp)3755*4882a593Smuzhiyun static int bnx2_request_firmware(struct bnx2 *bp)
3756*4882a593Smuzhiyun {
3757*4882a593Smuzhiyun return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3758*4882a593Smuzhiyun }
3759*4882a593Smuzhiyun
3760*4882a593Smuzhiyun static u32
rv2p_fw_fixup(u32 rv2p_proc,int idx,u32 loc,u32 rv2p_code)3761*4882a593Smuzhiyun rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3762*4882a593Smuzhiyun {
3763*4882a593Smuzhiyun switch (idx) {
3764*4882a593Smuzhiyun case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3765*4882a593Smuzhiyun rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3766*4882a593Smuzhiyun rv2p_code |= RV2P_BD_PAGE_SIZE;
3767*4882a593Smuzhiyun break;
3768*4882a593Smuzhiyun }
3769*4882a593Smuzhiyun return rv2p_code;
3770*4882a593Smuzhiyun }
3771*4882a593Smuzhiyun
3772*4882a593Smuzhiyun static int
load_rv2p_fw(struct bnx2 * bp,u32 rv2p_proc,const struct bnx2_rv2p_fw_file_entry * fw_entry)3773*4882a593Smuzhiyun load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3774*4882a593Smuzhiyun const struct bnx2_rv2p_fw_file_entry *fw_entry)
3775*4882a593Smuzhiyun {
3776*4882a593Smuzhiyun u32 rv2p_code_len, file_offset;
3777*4882a593Smuzhiyun __be32 *rv2p_code;
3778*4882a593Smuzhiyun int i;
3779*4882a593Smuzhiyun u32 val, cmd, addr;
3780*4882a593Smuzhiyun
3781*4882a593Smuzhiyun rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3782*4882a593Smuzhiyun file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3783*4882a593Smuzhiyun
3784*4882a593Smuzhiyun rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3785*4882a593Smuzhiyun
3786*4882a593Smuzhiyun if (rv2p_proc == RV2P_PROC1) {
3787*4882a593Smuzhiyun cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3788*4882a593Smuzhiyun addr = BNX2_RV2P_PROC1_ADDR_CMD;
3789*4882a593Smuzhiyun } else {
3790*4882a593Smuzhiyun cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3791*4882a593Smuzhiyun addr = BNX2_RV2P_PROC2_ADDR_CMD;
3792*4882a593Smuzhiyun }
3793*4882a593Smuzhiyun
3794*4882a593Smuzhiyun for (i = 0; i < rv2p_code_len; i += 8) {
3795*4882a593Smuzhiyun BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3796*4882a593Smuzhiyun rv2p_code++;
3797*4882a593Smuzhiyun BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3798*4882a593Smuzhiyun rv2p_code++;
3799*4882a593Smuzhiyun
3800*4882a593Smuzhiyun val = (i / 8) | cmd;
3801*4882a593Smuzhiyun BNX2_WR(bp, addr, val);
3802*4882a593Smuzhiyun }
3803*4882a593Smuzhiyun
3804*4882a593Smuzhiyun rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3805*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
3806*4882a593Smuzhiyun u32 loc, code;
3807*4882a593Smuzhiyun
3808*4882a593Smuzhiyun loc = be32_to_cpu(fw_entry->fixup[i]);
3809*4882a593Smuzhiyun if (loc && ((loc * 4) < rv2p_code_len)) {
3810*4882a593Smuzhiyun code = be32_to_cpu(*(rv2p_code + loc - 1));
3811*4882a593Smuzhiyun BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3812*4882a593Smuzhiyun code = be32_to_cpu(*(rv2p_code + loc));
3813*4882a593Smuzhiyun code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3814*4882a593Smuzhiyun BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3815*4882a593Smuzhiyun
3816*4882a593Smuzhiyun val = (loc / 2) | cmd;
3817*4882a593Smuzhiyun BNX2_WR(bp, addr, val);
3818*4882a593Smuzhiyun }
3819*4882a593Smuzhiyun }
3820*4882a593Smuzhiyun
3821*4882a593Smuzhiyun /* Reset the processor, un-stall is done later. */
3822*4882a593Smuzhiyun if (rv2p_proc == RV2P_PROC1) {
3823*4882a593Smuzhiyun BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3824*4882a593Smuzhiyun }
3825*4882a593Smuzhiyun else {
3826*4882a593Smuzhiyun BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3827*4882a593Smuzhiyun }
3828*4882a593Smuzhiyun
3829*4882a593Smuzhiyun return 0;
3830*4882a593Smuzhiyun }
3831*4882a593Smuzhiyun
3832*4882a593Smuzhiyun static int
load_cpu_fw(struct bnx2 * bp,const struct cpu_reg * cpu_reg,const struct bnx2_mips_fw_file_entry * fw_entry)3833*4882a593Smuzhiyun load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3834*4882a593Smuzhiyun const struct bnx2_mips_fw_file_entry *fw_entry)
3835*4882a593Smuzhiyun {
3836*4882a593Smuzhiyun u32 addr, len, file_offset;
3837*4882a593Smuzhiyun __be32 *data;
3838*4882a593Smuzhiyun u32 offset;
3839*4882a593Smuzhiyun u32 val;
3840*4882a593Smuzhiyun
3841*4882a593Smuzhiyun /* Halt the CPU. */
3842*4882a593Smuzhiyun val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3843*4882a593Smuzhiyun val |= cpu_reg->mode_value_halt;
3844*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3845*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3846*4882a593Smuzhiyun
3847*4882a593Smuzhiyun /* Load the Text area. */
3848*4882a593Smuzhiyun addr = be32_to_cpu(fw_entry->text.addr);
3849*4882a593Smuzhiyun len = be32_to_cpu(fw_entry->text.len);
3850*4882a593Smuzhiyun file_offset = be32_to_cpu(fw_entry->text.offset);
3851*4882a593Smuzhiyun data = (__be32 *)(bp->mips_firmware->data + file_offset);
3852*4882a593Smuzhiyun
3853*4882a593Smuzhiyun offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3854*4882a593Smuzhiyun if (len) {
3855*4882a593Smuzhiyun int j;
3856*4882a593Smuzhiyun
3857*4882a593Smuzhiyun for (j = 0; j < (len / 4); j++, offset += 4)
3858*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3859*4882a593Smuzhiyun }
3860*4882a593Smuzhiyun
3861*4882a593Smuzhiyun /* Load the Data area. */
3862*4882a593Smuzhiyun addr = be32_to_cpu(fw_entry->data.addr);
3863*4882a593Smuzhiyun len = be32_to_cpu(fw_entry->data.len);
3864*4882a593Smuzhiyun file_offset = be32_to_cpu(fw_entry->data.offset);
3865*4882a593Smuzhiyun data = (__be32 *)(bp->mips_firmware->data + file_offset);
3866*4882a593Smuzhiyun
3867*4882a593Smuzhiyun offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3868*4882a593Smuzhiyun if (len) {
3869*4882a593Smuzhiyun int j;
3870*4882a593Smuzhiyun
3871*4882a593Smuzhiyun for (j = 0; j < (len / 4); j++, offset += 4)
3872*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3873*4882a593Smuzhiyun }
3874*4882a593Smuzhiyun
3875*4882a593Smuzhiyun /* Load the Read-Only area. */
3876*4882a593Smuzhiyun addr = be32_to_cpu(fw_entry->rodata.addr);
3877*4882a593Smuzhiyun len = be32_to_cpu(fw_entry->rodata.len);
3878*4882a593Smuzhiyun file_offset = be32_to_cpu(fw_entry->rodata.offset);
3879*4882a593Smuzhiyun data = (__be32 *)(bp->mips_firmware->data + file_offset);
3880*4882a593Smuzhiyun
3881*4882a593Smuzhiyun offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3882*4882a593Smuzhiyun if (len) {
3883*4882a593Smuzhiyun int j;
3884*4882a593Smuzhiyun
3885*4882a593Smuzhiyun for (j = 0; j < (len / 4); j++, offset += 4)
3886*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3887*4882a593Smuzhiyun }
3888*4882a593Smuzhiyun
3889*4882a593Smuzhiyun /* Clear the pre-fetch instruction. */
3890*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3891*4882a593Smuzhiyun
3892*4882a593Smuzhiyun val = be32_to_cpu(fw_entry->start_addr);
3893*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3894*4882a593Smuzhiyun
3895*4882a593Smuzhiyun /* Start the CPU. */
3896*4882a593Smuzhiyun val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3897*4882a593Smuzhiyun val &= ~cpu_reg->mode_value_halt;
3898*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3899*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3900*4882a593Smuzhiyun
3901*4882a593Smuzhiyun return 0;
3902*4882a593Smuzhiyun }
3903*4882a593Smuzhiyun
3904*4882a593Smuzhiyun static int
bnx2_init_cpus(struct bnx2 * bp)3905*4882a593Smuzhiyun bnx2_init_cpus(struct bnx2 *bp)
3906*4882a593Smuzhiyun {
3907*4882a593Smuzhiyun const struct bnx2_mips_fw_file *mips_fw =
3908*4882a593Smuzhiyun (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3909*4882a593Smuzhiyun const struct bnx2_rv2p_fw_file *rv2p_fw =
3910*4882a593Smuzhiyun (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3911*4882a593Smuzhiyun int rc;
3912*4882a593Smuzhiyun
3913*4882a593Smuzhiyun /* Initialize the RV2P processor. */
3914*4882a593Smuzhiyun load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3915*4882a593Smuzhiyun load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3916*4882a593Smuzhiyun
3917*4882a593Smuzhiyun /* Initialize the RX Processor. */
3918*4882a593Smuzhiyun rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3919*4882a593Smuzhiyun if (rc)
3920*4882a593Smuzhiyun goto init_cpu_err;
3921*4882a593Smuzhiyun
3922*4882a593Smuzhiyun /* Initialize the TX Processor. */
3923*4882a593Smuzhiyun rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3924*4882a593Smuzhiyun if (rc)
3925*4882a593Smuzhiyun goto init_cpu_err;
3926*4882a593Smuzhiyun
3927*4882a593Smuzhiyun /* Initialize the TX Patch-up Processor. */
3928*4882a593Smuzhiyun rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3929*4882a593Smuzhiyun if (rc)
3930*4882a593Smuzhiyun goto init_cpu_err;
3931*4882a593Smuzhiyun
3932*4882a593Smuzhiyun /* Initialize the Completion Processor. */
3933*4882a593Smuzhiyun rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3934*4882a593Smuzhiyun if (rc)
3935*4882a593Smuzhiyun goto init_cpu_err;
3936*4882a593Smuzhiyun
3937*4882a593Smuzhiyun /* Initialize the Command Processor. */
3938*4882a593Smuzhiyun rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3939*4882a593Smuzhiyun
3940*4882a593Smuzhiyun init_cpu_err:
3941*4882a593Smuzhiyun return rc;
3942*4882a593Smuzhiyun }
3943*4882a593Smuzhiyun
/* Prepare the chip for suspend with or without Wake-on-LAN, and tell
 * the firmware about it.  With WOL enabled: renegotiate a 10/100 link
 * on copper ports, enable magic/ACPI packet reception, accept all
 * multicast, and keep the EMAC and RPM blocks powered so the wakeup
 * frame can be received.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Temporarily advertise only 10/100 autoneg on copper
		 * for the low-power link, then restore the user's
		 * settings afterwards.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		/* Enable magic-packet and ACPI-pattern wakeup receive. */
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Sort filter: broadcast + multicast only; disable,
		 * program, then re-enable.
		 */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		/* Non-5709 chips (or when a firmware message is already
		 * outstanding) just get the plain suspend message.
		 */
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4038*4882a593Smuzhiyun
/* Transition the device between PCI power states.
 *
 * @bp:    driver context
 * @state: target state; only PCI_D0 and PCI_D3hot are supported,
 *         anything else returns -EINVAL.
 *
 * Entering D3hot sets up wake-on-LAN first; after the final
 * pci_set_power_state() call no further MMIO access is allowed until
 * the device is brought back to D0.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Clear latched magic/ACPI-packet status and leave
		 * magic-packet (WOL) receive mode.
		 */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive MAC. */
		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only drop to D3hot when WOL is
			 * actually requested; otherwise stay put.
			 */
			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4094*4882a593Smuzhiyun
4095*4882a593Smuzhiyun static int
bnx2_acquire_nvram_lock(struct bnx2 * bp)4096*4882a593Smuzhiyun bnx2_acquire_nvram_lock(struct bnx2 *bp)
4097*4882a593Smuzhiyun {
4098*4882a593Smuzhiyun u32 val;
4099*4882a593Smuzhiyun int j;
4100*4882a593Smuzhiyun
4101*4882a593Smuzhiyun /* Request access to the flash interface. */
4102*4882a593Smuzhiyun BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4103*4882a593Smuzhiyun for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4104*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4105*4882a593Smuzhiyun if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4106*4882a593Smuzhiyun break;
4107*4882a593Smuzhiyun
4108*4882a593Smuzhiyun udelay(5);
4109*4882a593Smuzhiyun }
4110*4882a593Smuzhiyun
4111*4882a593Smuzhiyun if (j >= NVRAM_TIMEOUT_COUNT)
4112*4882a593Smuzhiyun return -EBUSY;
4113*4882a593Smuzhiyun
4114*4882a593Smuzhiyun return 0;
4115*4882a593Smuzhiyun }
4116*4882a593Smuzhiyun
4117*4882a593Smuzhiyun static int
bnx2_release_nvram_lock(struct bnx2 * bp)4118*4882a593Smuzhiyun bnx2_release_nvram_lock(struct bnx2 *bp)
4119*4882a593Smuzhiyun {
4120*4882a593Smuzhiyun int j;
4121*4882a593Smuzhiyun u32 val;
4122*4882a593Smuzhiyun
4123*4882a593Smuzhiyun /* Relinquish nvram interface. */
4124*4882a593Smuzhiyun BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4125*4882a593Smuzhiyun
4126*4882a593Smuzhiyun for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4127*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4128*4882a593Smuzhiyun if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4129*4882a593Smuzhiyun break;
4130*4882a593Smuzhiyun
4131*4882a593Smuzhiyun udelay(5);
4132*4882a593Smuzhiyun }
4133*4882a593Smuzhiyun
4134*4882a593Smuzhiyun if (j >= NVRAM_TIMEOUT_COUNT)
4135*4882a593Smuzhiyun return -EBUSY;
4136*4882a593Smuzhiyun
4137*4882a593Smuzhiyun return 0;
4138*4882a593Smuzhiyun }
4139*4882a593Smuzhiyun
4140*4882a593Smuzhiyun
4141*4882a593Smuzhiyun static int
bnx2_enable_nvram_write(struct bnx2 * bp)4142*4882a593Smuzhiyun bnx2_enable_nvram_write(struct bnx2 *bp)
4143*4882a593Smuzhiyun {
4144*4882a593Smuzhiyun u32 val;
4145*4882a593Smuzhiyun
4146*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_MISC_CFG);
4147*4882a593Smuzhiyun BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4148*4882a593Smuzhiyun
4149*4882a593Smuzhiyun if (bp->flash_info->flags & BNX2_NV_WREN) {
4150*4882a593Smuzhiyun int j;
4151*4882a593Smuzhiyun
4152*4882a593Smuzhiyun BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4153*4882a593Smuzhiyun BNX2_WR(bp, BNX2_NVM_COMMAND,
4154*4882a593Smuzhiyun BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4155*4882a593Smuzhiyun
4156*4882a593Smuzhiyun for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4157*4882a593Smuzhiyun udelay(5);
4158*4882a593Smuzhiyun
4159*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4160*4882a593Smuzhiyun if (val & BNX2_NVM_COMMAND_DONE)
4161*4882a593Smuzhiyun break;
4162*4882a593Smuzhiyun }
4163*4882a593Smuzhiyun
4164*4882a593Smuzhiyun if (j >= NVRAM_TIMEOUT_COUNT)
4165*4882a593Smuzhiyun return -EBUSY;
4166*4882a593Smuzhiyun }
4167*4882a593Smuzhiyun return 0;
4168*4882a593Smuzhiyun }
4169*4882a593Smuzhiyun
4170*4882a593Smuzhiyun static void
bnx2_disable_nvram_write(struct bnx2 * bp)4171*4882a593Smuzhiyun bnx2_disable_nvram_write(struct bnx2 *bp)
4172*4882a593Smuzhiyun {
4173*4882a593Smuzhiyun u32 val;
4174*4882a593Smuzhiyun
4175*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_MISC_CFG);
4176*4882a593Smuzhiyun BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4177*4882a593Smuzhiyun }
4178*4882a593Smuzhiyun
4179*4882a593Smuzhiyun
4180*4882a593Smuzhiyun static void
bnx2_enable_nvram_access(struct bnx2 * bp)4181*4882a593Smuzhiyun bnx2_enable_nvram_access(struct bnx2 *bp)
4182*4882a593Smuzhiyun {
4183*4882a593Smuzhiyun u32 val;
4184*4882a593Smuzhiyun
4185*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4186*4882a593Smuzhiyun /* Enable both bits, even on read. */
4187*4882a593Smuzhiyun BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4188*4882a593Smuzhiyun val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4189*4882a593Smuzhiyun }
4190*4882a593Smuzhiyun
4191*4882a593Smuzhiyun static void
bnx2_disable_nvram_access(struct bnx2 * bp)4192*4882a593Smuzhiyun bnx2_disable_nvram_access(struct bnx2 *bp)
4193*4882a593Smuzhiyun {
4194*4882a593Smuzhiyun u32 val;
4195*4882a593Smuzhiyun
4196*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4197*4882a593Smuzhiyun /* Disable both bits, even after read. */
4198*4882a593Smuzhiyun BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4199*4882a593Smuzhiyun val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4200*4882a593Smuzhiyun BNX2_NVM_ACCESS_ENABLE_WR_EN));
4201*4882a593Smuzhiyun }
4202*4882a593Smuzhiyun
/* Erase the flash page containing @offset.
 *
 * Buffered (EEPROM-style) parts need no explicit erase and return 0
 * immediately.  Returns 0 on success or -EBUSY if the command never
 * completes within the timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4242*4882a593Smuzhiyun
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes,
 * stored in big-endian flash order).
 *
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transfers.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4286*4882a593Smuzhiyun
4287*4882a593Smuzhiyun
/* Write one 32-bit word (@val, 4 bytes in big-endian flash order) to
 * NVRAM at @offset.
 *
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transfers.  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4331*4882a593Smuzhiyun
/* Identify the attached flash/EEPROM part and record its parameters in
 * bp->flash_info, then determine the usable flash size.
 *
 * On 5709 the flash type is fixed.  On older chips it is derived from
 * the NVM_CFG1 strapping; if the interface has not been reconfigured
 * yet, the matching table entry's configuration is programmed into the
 * hardware here.  Returns 0 on success, -ENODEV if no table entry
 * matches, or an error from the NVRAM lock helpers.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size advertised in shared hardware config;
	 * fall back to the table's total size when it reads zero.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4414*4882a593Smuzhiyun
/* Read @buf_size bytes of NVRAM starting at byte @offset into @ret_buf.
 *
 * Handles arbitrary (unaligned) offsets and lengths by reading whole
 * dwords through bounce buffers and copying out only the requested
 * bytes.  Takes the NVRAM lock and enables flash access for the
 * duration of the transfer.  Returns 0 on success or a negative errno
 * from the lock/read helpers.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		/* Unaligned start: read the containing dword whole and
		 * copy out only the bytes the caller asked for.
		 */
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		/* Round the remaining length up to a whole dword and
		 * note how many padding bytes to drop at the end.
		 */
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* cmd_flags non-zero means FIRST was already issued
		 * for the unaligned head above.
		 */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final dword goes through a bounce buffer so padding
		 * bytes beyond the requested length are discarded.
		 */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
4524*4882a593Smuzhiyun
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned head/tail bytes are merged with the existing flash
 * contents (read back into @start/@end and combined in @align_buf).
 * The write then proceeds page by page: for non-buffered flash each
 * page is read into @flash_buffer, erased, and rewritten with the
 * untouched leading/trailing portions restored around the new data.
 * The NVRAM lock is acquired and released per page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, or an
 * error from the lock/read/write helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: widen the range and capture the
		 * dword we will partially overwrite.
		 */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: widen the range and capture the final
		 * dword we will partially overwrite.
		 */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge caller data with the preserved head/tail bytes
		 * into one aligned staging buffer.
		 */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (!align_buf)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer for one full flash page (max 264 B). */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (!flash_buffer) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4704*4882a593Smuzhiyun
/* Read the firmware capability mailbox and record what the firmware
 * supports: keeping VLAN tagging enabled across resets, and remote PHY
 * control for SerDes ports.  When the interface is running, the
 * consumed capabilities are acknowledged back to the firmware.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLAN tagging can always be
	 * kept by the driver.
	 */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	/* Bail out if the mailbox does not carry a valid signature. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Current media type is reported in the link status word. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4744*4882a593Smuzhiyun
/* Map the MSI-X vector table and pending-bit array through the GRC
 * windows so the host can reach them via the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch the GRC window mapping to separate-window mode. */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	/* Point window 2 at the MSI-X table and window 3 at the PBA. */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4753*4882a593Smuzhiyun
4754*4882a593Smuzhiyun static void
bnx2_wait_dma_complete(struct bnx2 * bp)4755*4882a593Smuzhiyun bnx2_wait_dma_complete(struct bnx2 *bp)
4756*4882a593Smuzhiyun {
4757*4882a593Smuzhiyun u32 val;
4758*4882a593Smuzhiyun int i;
4759*4882a593Smuzhiyun
4760*4882a593Smuzhiyun /*
4761*4882a593Smuzhiyun * Wait for the current PCI transaction to complete before
4762*4882a593Smuzhiyun * issuing a reset.
4763*4882a593Smuzhiyun */
4764*4882a593Smuzhiyun if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4765*4882a593Smuzhiyun (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4766*4882a593Smuzhiyun BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4767*4882a593Smuzhiyun BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4768*4882a593Smuzhiyun BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4769*4882a593Smuzhiyun BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4770*4882a593Smuzhiyun BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4771*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4772*4882a593Smuzhiyun udelay(5);
4773*4882a593Smuzhiyun } else { /* 5709 */
4774*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4775*4882a593Smuzhiyun val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4776*4882a593Smuzhiyun BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4777*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4778*4882a593Smuzhiyun
4779*4882a593Smuzhiyun for (i = 0; i < 100; i++) {
4780*4882a593Smuzhiyun msleep(1);
4781*4882a593Smuzhiyun val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4782*4882a593Smuzhiyun if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4783*4882a593Smuzhiyun break;
4784*4882a593Smuzhiyun }
4785*4882a593Smuzhiyun }
4786*4882a593Smuzhiyun
4787*4882a593Smuzhiyun return;
4788*4882a593Smuzhiyun }
4789*4882a593Smuzhiyun
4790*4882a593Smuzhiyun
/* Soft-reset the chip and return it to a known state.
 *
 * Handshakes with the bootcode firmware before and after the reset,
 * issues the chip-specific reset (MISC_COMMAND on 5709, PCICFG core
 * reset on earlier chips), verifies endian configuration, refreshes
 * the firmware capability flags, and applies chip-specific
 * workarounds.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 has a dedicated software reset command; read back
		 * to flush the posted write before the settle delay. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset the core through PCICFG_MISC_CONFIG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reapply the default remote link
	 * settings.  phy_lock protects the phy state. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4894*4882a593Smuzhiyun
/* Program the chip after a reset: DMA configuration, context memory,
 * on-chip CPUs, MAC address, MTU, status/statistics block addresses,
 * host-coalescing parameters and the rx filter; finally signal the
 * firmware that initialization is done and enable the remaining
 * blocks.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA config bits carried over from
	 * the original driver - meaning not derivable from this file. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering (ERO). */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BNX2_PAGE_BITS - 8) << 24;
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff timer from the MAC address bytes. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* The rbuf registers below are sized for at least standard MTU. */
	if (mtu < ETH_DATA_LEN)
		mtu = ETH_DATA_LEN;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the shared status block and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	/* Set up how to generate a link change interrupt. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Tell the host coalescing block where the status and
	 * statistics blocks live (64-bit DMA addresses). */
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds: high 16 bits are the
	 * during-interrupt values, low 16 bits the normal values. */
	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);

	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	BNX2_WR(bp, BNX2_HC_CONFIG, val);

	/* Hint the firmware whether low-latency rx handling is wanted. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Per-vector status block configuration for vectors 1..n-1. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		BNX2_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			bp->tx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* Re-enable DMA (cleared in bnx2_wait_dma_complete()). */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5123*4882a593Smuzhiyun
5124*4882a593Smuzhiyun static void
bnx2_clear_ring_states(struct bnx2 * bp)5125*4882a593Smuzhiyun bnx2_clear_ring_states(struct bnx2 *bp)
5126*4882a593Smuzhiyun {
5127*4882a593Smuzhiyun struct bnx2_napi *bnapi;
5128*4882a593Smuzhiyun struct bnx2_tx_ring_info *txr;
5129*4882a593Smuzhiyun struct bnx2_rx_ring_info *rxr;
5130*4882a593Smuzhiyun int i;
5131*4882a593Smuzhiyun
5132*4882a593Smuzhiyun for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5133*4882a593Smuzhiyun bnapi = &bp->bnx2_napi[i];
5134*4882a593Smuzhiyun txr = &bnapi->tx_ring;
5135*4882a593Smuzhiyun rxr = &bnapi->rx_ring;
5136*4882a593Smuzhiyun
5137*4882a593Smuzhiyun txr->tx_cons = 0;
5138*4882a593Smuzhiyun txr->hw_tx_cons = 0;
5139*4882a593Smuzhiyun rxr->rx_prod_bseq = 0;
5140*4882a593Smuzhiyun rxr->rx_prod = 0;
5141*4882a593Smuzhiyun rxr->rx_cons = 0;
5142*4882a593Smuzhiyun rxr->rx_pg_prod = 0;
5143*4882a593Smuzhiyun rxr->rx_pg_cons = 0;
5144*4882a593Smuzhiyun }
5145*4882a593Smuzhiyun }
5146*4882a593Smuzhiyun
5147*4882a593Smuzhiyun static void
bnx2_init_tx_context(struct bnx2 * bp,u32 cid,struct bnx2_tx_ring_info * txr)5148*4882a593Smuzhiyun bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5149*4882a593Smuzhiyun {
5150*4882a593Smuzhiyun u32 val, offset0, offset1, offset2, offset3;
5151*4882a593Smuzhiyun u32 cid_addr = GET_CID_ADDR(cid);
5152*4882a593Smuzhiyun
5153*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5154*4882a593Smuzhiyun offset0 = BNX2_L2CTX_TYPE_XI;
5155*4882a593Smuzhiyun offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5156*4882a593Smuzhiyun offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5157*4882a593Smuzhiyun offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5158*4882a593Smuzhiyun } else {
5159*4882a593Smuzhiyun offset0 = BNX2_L2CTX_TYPE;
5160*4882a593Smuzhiyun offset1 = BNX2_L2CTX_CMD_TYPE;
5161*4882a593Smuzhiyun offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5162*4882a593Smuzhiyun offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5163*4882a593Smuzhiyun }
5164*4882a593Smuzhiyun val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5165*4882a593Smuzhiyun bnx2_ctx_wr(bp, cid_addr, offset0, val);
5166*4882a593Smuzhiyun
5167*4882a593Smuzhiyun val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5168*4882a593Smuzhiyun bnx2_ctx_wr(bp, cid_addr, offset1, val);
5169*4882a593Smuzhiyun
5170*4882a593Smuzhiyun val = (u64) txr->tx_desc_mapping >> 32;
5171*4882a593Smuzhiyun bnx2_ctx_wr(bp, cid_addr, offset2, val);
5172*4882a593Smuzhiyun
5173*4882a593Smuzhiyun val = (u64) txr->tx_desc_mapping & 0xffffffff;
5174*4882a593Smuzhiyun bnx2_ctx_wr(bp, cid_addr, offset3, val);
5175*4882a593Smuzhiyun }
5176*4882a593Smuzhiyun
5177*4882a593Smuzhiyun static void
bnx2_init_tx_ring(struct bnx2 * bp,int ring_num)5178*4882a593Smuzhiyun bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5179*4882a593Smuzhiyun {
5180*4882a593Smuzhiyun struct bnx2_tx_bd *txbd;
5181*4882a593Smuzhiyun u32 cid = TX_CID;
5182*4882a593Smuzhiyun struct bnx2_napi *bnapi;
5183*4882a593Smuzhiyun struct bnx2_tx_ring_info *txr;
5184*4882a593Smuzhiyun
5185*4882a593Smuzhiyun bnapi = &bp->bnx2_napi[ring_num];
5186*4882a593Smuzhiyun txr = &bnapi->tx_ring;
5187*4882a593Smuzhiyun
5188*4882a593Smuzhiyun if (ring_num == 0)
5189*4882a593Smuzhiyun cid = TX_CID;
5190*4882a593Smuzhiyun else
5191*4882a593Smuzhiyun cid = TX_TSS_CID + ring_num - 1;
5192*4882a593Smuzhiyun
5193*4882a593Smuzhiyun bp->tx_wake_thresh = bp->tx_ring_size / 2;
5194*4882a593Smuzhiyun
5195*4882a593Smuzhiyun txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5196*4882a593Smuzhiyun
5197*4882a593Smuzhiyun txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5198*4882a593Smuzhiyun txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5199*4882a593Smuzhiyun
5200*4882a593Smuzhiyun txr->tx_prod = 0;
5201*4882a593Smuzhiyun txr->tx_prod_bseq = 0;
5202*4882a593Smuzhiyun
5203*4882a593Smuzhiyun txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5204*4882a593Smuzhiyun txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5205*4882a593Smuzhiyun
5206*4882a593Smuzhiyun bnx2_init_tx_context(bp, cid, txr);
5207*4882a593Smuzhiyun }
5208*4882a593Smuzhiyun
5209*4882a593Smuzhiyun static void
bnx2_init_rxbd_rings(struct bnx2_rx_bd * rx_ring[],dma_addr_t dma[],u32 buf_size,int num_rings)5210*4882a593Smuzhiyun bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5211*4882a593Smuzhiyun u32 buf_size, int num_rings)
5212*4882a593Smuzhiyun {
5213*4882a593Smuzhiyun int i;
5214*4882a593Smuzhiyun struct bnx2_rx_bd *rxbd;
5215*4882a593Smuzhiyun
5216*4882a593Smuzhiyun for (i = 0; i < num_rings; i++) {
5217*4882a593Smuzhiyun int j;
5218*4882a593Smuzhiyun
5219*4882a593Smuzhiyun rxbd = &rx_ring[i][0];
5220*4882a593Smuzhiyun for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5221*4882a593Smuzhiyun rxbd->rx_bd_len = buf_size;
5222*4882a593Smuzhiyun rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5223*4882a593Smuzhiyun }
5224*4882a593Smuzhiyun if (i == (num_rings - 1))
5225*4882a593Smuzhiyun j = 0;
5226*4882a593Smuzhiyun else
5227*4882a593Smuzhiyun j = i + 1;
5228*4882a593Smuzhiyun rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5229*4882a593Smuzhiyun rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5230*4882a593Smuzhiyun }
5231*4882a593Smuzhiyun }
5232*4882a593Smuzhiyun
/* Initialize rx ring @ring_num: build the bd pages, program the rx
 * context, optionally set up the jumbo page ring, pre-fill both rings
 * with buffers, and publish the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx CID; additional RSS rings follow. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Set up the page ring used for jumbo frames. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Program the first bd page's DMA address into the context. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated (warn). */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal rx ring with data buffers. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for publishing producer indices / byte seq. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip about the buffers just posted. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5318*4882a593Smuzhiyun
/* (Re)initialize every tx and rx ring and, when more than one rx ring
 * is in use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS when multiple tx rings are configured. */
	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table 4 bits per entry,
		 * distributing entries round-robin over the non-default
		 * rx rings, and flush a 32-bit word to the chip after
		 * every 8 entries. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Hash on both IPv4 and IPv6 flow types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5365*4882a593Smuzhiyun
/* Return the number of bd pages needed for @ring_size descriptors,
 * rounded up to a power of two, found by scanning down from
 * @max_size.  Callers pass a power-of-two @max_size that bounds the
 * result (BNX2_MAX_RX_RINGS / BNX2_MAX_RX_PG_RINGS).
 */
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 pages = 1;
	u32 pow;

	/* Count how many descriptor pages the request spans. */
	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
		ring_size -= BNX2_MAX_RX_DESC_CNT;
		pages++;
	}

	/* Scan down from max_size to the highest bit set in pages... */
	for (pow = max_size; (pow & pages) == 0; )
		pow >>= 1;

	/* ...then round up unless pages was already that power of two. */
	if (pages != pow)
		pow <<= 1;

	return pow;
}
5384*4882a593Smuzhiyun
/* Compute rx buffer sizes and ring geometry for the current MTU and
 * a requested ring size of @size descriptors.
 *
 * When a full frame plus skb overhead does not fit in one page (and
 * the chip's jumbo paging is not broken), the overflow is carried in
 * a separate rx page ring and the first buffer holds only the leading
 * portion of the frame.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total per-buffer footprint including alignment and the
	 * build_skb() shared-info overhead. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame beyond the first buffer.
		 * NOTE(review): the 40 presumably accounts for header
		 * bytes kept in the first buffer - confirm. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* First buffer then carries only the frame head. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5425*4882a593Smuzhiyun
/* Unmap and free every in-flight TX skb on all TX rings.  The BD
 * rings themselves stay allocated; only the skbs and their DMA
 * mappings are released.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring not allocated (yet) — nothing to free here. */
		if (!txr->tx_buf_ring)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			/* Empty slot: just advance to the next BD. */
			if (!skb) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* First BD of a packet maps the linear head. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Each page fragment occupies one subsequent BD;
			 * unmap all of them before freeing the skb.
			 */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		/* Resynchronize byte-queue-limit accounting with the
		 * now-empty ring.
		 */
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5470*4882a593Smuzhiyun
/* Unmap and free every posted RX buffer (and RX page, when the jumbo
 * page ring is in use) on all RX rings.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): bails out of the whole function rather than
		 * continuing — presumably RX rings are allocated in order,
		 * so the first missing ring implies none follow; confirm
		 * against the ring allocation path.
		 */
		if (!rxr->rx_buf_ring)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			/* Slot has no buffer posted. */
			if (!data)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			/* Buffers are raw kmalloc'd data (build_skb style),
			 * so kfree rather than dev_kfree_skb.
			 */
			kfree(data);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5504*4882a593Smuzhiyun
/* Release all driver-owned TX and RX buffers.  Callers reset the chip
 * first (see bnx2_reset_nic) so no DMA is in flight.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5511*4882a593Smuzhiyun
5512*4882a593Smuzhiyun static int
bnx2_reset_nic(struct bnx2 * bp,u32 reset_code)5513*4882a593Smuzhiyun bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5514*4882a593Smuzhiyun {
5515*4882a593Smuzhiyun int rc;
5516*4882a593Smuzhiyun
5517*4882a593Smuzhiyun rc = bnx2_reset_chip(bp, reset_code);
5518*4882a593Smuzhiyun bnx2_free_skbs(bp);
5519*4882a593Smuzhiyun if (rc)
5520*4882a593Smuzhiyun return rc;
5521*4882a593Smuzhiyun
5522*4882a593Smuzhiyun if ((rc = bnx2_init_chip(bp)) != 0)
5523*4882a593Smuzhiyun return rc;
5524*4882a593Smuzhiyun
5525*4882a593Smuzhiyun bnx2_init_all_rings(bp);
5526*4882a593Smuzhiyun return 0;
5527*4882a593Smuzhiyun }
5528*4882a593Smuzhiyun
5529*4882a593Smuzhiyun static int
bnx2_init_nic(struct bnx2 * bp,int reset_phy)5530*4882a593Smuzhiyun bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5531*4882a593Smuzhiyun {
5532*4882a593Smuzhiyun int rc;
5533*4882a593Smuzhiyun
5534*4882a593Smuzhiyun if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5535*4882a593Smuzhiyun return rc;
5536*4882a593Smuzhiyun
5537*4882a593Smuzhiyun spin_lock_bh(&bp->phy_lock);
5538*4882a593Smuzhiyun bnx2_init_phy(bp, reset_phy);
5539*4882a593Smuzhiyun bnx2_set_link(bp);
5540*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5541*4882a593Smuzhiyun bnx2_remote_phy_event(bp);
5542*4882a593Smuzhiyun spin_unlock_bh(&bp->phy_lock);
5543*4882a593Smuzhiyun return 0;
5544*4882a593Smuzhiyun }
5545*4882a593Smuzhiyun
5546*4882a593Smuzhiyun static int
bnx2_shutdown_chip(struct bnx2 * bp)5547*4882a593Smuzhiyun bnx2_shutdown_chip(struct bnx2 *bp)
5548*4882a593Smuzhiyun {
5549*4882a593Smuzhiyun u32 reset_code;
5550*4882a593Smuzhiyun
5551*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_NO_WOL)
5552*4882a593Smuzhiyun reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5553*4882a593Smuzhiyun else if (bp->wol)
5554*4882a593Smuzhiyun reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5555*4882a593Smuzhiyun else
5556*4882a593Smuzhiyun reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5557*4882a593Smuzhiyun
5558*4882a593Smuzhiyun return bnx2_reset_chip(bp, reset_code);
5559*4882a593Smuzhiyun }
5560*4882a593Smuzhiyun
/* Ethtool register self-test.  For each entry in reg_tbl, write 0 and
 * then all-ones to the register, verifying that read/write bits take
 * the written value and read-only bits keep their original value.
 * The original register value is restored in every case.
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Register test table: offset, chip-applicability flag, mask of
	 * writable bits (rw_mask) and mask of read-only bits (ro_mask).
	 * Terminated by offset 0xffff.
	 */
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip entries that do not apply to the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: writable bits must read back as 0 ... */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* ... and read-only bits must be unchanged. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: writable bits must read back as 1 ... */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		/* ... and read-only bits must still be unchanged. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value before moving on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore even on failure, then abort the test. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5731*4882a593Smuzhiyun
5732*4882a593Smuzhiyun static int
bnx2_do_mem_test(struct bnx2 * bp,u32 start,u32 size)5733*4882a593Smuzhiyun bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5734*4882a593Smuzhiyun {
5735*4882a593Smuzhiyun static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5736*4882a593Smuzhiyun 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5737*4882a593Smuzhiyun int i;
5738*4882a593Smuzhiyun
5739*4882a593Smuzhiyun for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5740*4882a593Smuzhiyun u32 offset;
5741*4882a593Smuzhiyun
5742*4882a593Smuzhiyun for (offset = 0; offset < size; offset += 4) {
5743*4882a593Smuzhiyun
5744*4882a593Smuzhiyun bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5745*4882a593Smuzhiyun
5746*4882a593Smuzhiyun if (bnx2_reg_rd_ind(bp, start + offset) !=
5747*4882a593Smuzhiyun test_pattern[i]) {
5748*4882a593Smuzhiyun return -ENODEV;
5749*4882a593Smuzhiyun }
5750*4882a593Smuzhiyun }
5751*4882a593Smuzhiyun }
5752*4882a593Smuzhiyun return 0;
5753*4882a593Smuzhiyun }
5754*4882a593Smuzhiyun
5755*4882a593Smuzhiyun static int
bnx2_test_memory(struct bnx2 * bp)5756*4882a593Smuzhiyun bnx2_test_memory(struct bnx2 *bp)
5757*4882a593Smuzhiyun {
5758*4882a593Smuzhiyun int ret = 0;
5759*4882a593Smuzhiyun int i;
5760*4882a593Smuzhiyun static struct mem_entry {
5761*4882a593Smuzhiyun u32 offset;
5762*4882a593Smuzhiyun u32 len;
5763*4882a593Smuzhiyun } mem_tbl_5706[] = {
5764*4882a593Smuzhiyun { 0x60000, 0x4000 },
5765*4882a593Smuzhiyun { 0xa0000, 0x3000 },
5766*4882a593Smuzhiyun { 0xe0000, 0x4000 },
5767*4882a593Smuzhiyun { 0x120000, 0x4000 },
5768*4882a593Smuzhiyun { 0x1a0000, 0x4000 },
5769*4882a593Smuzhiyun { 0x160000, 0x4000 },
5770*4882a593Smuzhiyun { 0xffffffff, 0 },
5771*4882a593Smuzhiyun },
5772*4882a593Smuzhiyun mem_tbl_5709[] = {
5773*4882a593Smuzhiyun { 0x60000, 0x4000 },
5774*4882a593Smuzhiyun { 0xa0000, 0x3000 },
5775*4882a593Smuzhiyun { 0xe0000, 0x4000 },
5776*4882a593Smuzhiyun { 0x120000, 0x4000 },
5777*4882a593Smuzhiyun { 0x1a0000, 0x4000 },
5778*4882a593Smuzhiyun { 0xffffffff, 0 },
5779*4882a593Smuzhiyun };
5780*4882a593Smuzhiyun struct mem_entry *mem_tbl;
5781*4882a593Smuzhiyun
5782*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5783*4882a593Smuzhiyun mem_tbl = mem_tbl_5709;
5784*4882a593Smuzhiyun else
5785*4882a593Smuzhiyun mem_tbl = mem_tbl_5706;
5786*4882a593Smuzhiyun
5787*4882a593Smuzhiyun for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5788*4882a593Smuzhiyun if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5789*4882a593Smuzhiyun mem_tbl[i].len)) != 0) {
5790*4882a593Smuzhiyun return ret;
5791*4882a593Smuzhiyun }
5792*4882a593Smuzhiyun }
5793*4882a593Smuzhiyun
5794*4882a593Smuzhiyun return ret;
5795*4882a593Smuzhiyun }
5796*4882a593Smuzhiyun
5797*4882a593Smuzhiyun #define BNX2_MAC_LOOPBACK 0
5798*4882a593Smuzhiyun #define BNX2_PHY_LOOPBACK 1
5799*4882a593Smuzhiyun
/* Run one loopback self-test pass in MAC or PHY loopback mode:
 * build a test frame, transmit it on ring 0, force coalescing so the
 * completion is posted, and verify the frame came back intact on the
 * RX ring.  Returns 0 on success, -EINVAL for an unknown mode,
 * -ENOMEM/-EIO on allocation/mapping failure, -ENODEV when the frame
 * is not received correctly.  Also returns 0 (skip) for PHY loopback
 * on remote-PHY setups.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;

	/* The test always uses the first TX/RX ring pair. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Remote-PHY configurations cannot do PHY loopback. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Largest frame that still fits in a single (non-jumbo) buffer. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = own address, then a recognizable byte ramp
	 * as payload so corruption can be detected on receive.
	 */
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status-block update so rx_start_idx is current. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand the frame to the hardware as a single-BD packet. */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the frame time to loop back. */
	udelay(100);

	/* Force another status-block update to pick up the completions. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have completed exactly up to our producer index. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly num_pkts frames must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Any receive error in the L2 frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length minus 4-byte CRC must match. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte ramp survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5932*4882a593Smuzhiyun
5933*4882a593Smuzhiyun #define BNX2_MAC_LOOPBACK_FAILED 1
5934*4882a593Smuzhiyun #define BNX2_PHY_LOOPBACK_FAILED 2
5935*4882a593Smuzhiyun #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5936*4882a593Smuzhiyun BNX2_PHY_LOOPBACK_FAILED)
5937*4882a593Smuzhiyun
5938*4882a593Smuzhiyun static int
bnx2_test_loopback(struct bnx2 * bp)5939*4882a593Smuzhiyun bnx2_test_loopback(struct bnx2 *bp)
5940*4882a593Smuzhiyun {
5941*4882a593Smuzhiyun int rc = 0;
5942*4882a593Smuzhiyun
5943*4882a593Smuzhiyun if (!netif_running(bp->dev))
5944*4882a593Smuzhiyun return BNX2_LOOPBACK_FAILED;
5945*4882a593Smuzhiyun
5946*4882a593Smuzhiyun bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5947*4882a593Smuzhiyun spin_lock_bh(&bp->phy_lock);
5948*4882a593Smuzhiyun bnx2_init_phy(bp, 1);
5949*4882a593Smuzhiyun spin_unlock_bh(&bp->phy_lock);
5950*4882a593Smuzhiyun if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5951*4882a593Smuzhiyun rc |= BNX2_MAC_LOOPBACK_FAILED;
5952*4882a593Smuzhiyun if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5953*4882a593Smuzhiyun rc |= BNX2_PHY_LOOPBACK_FAILED;
5954*4882a593Smuzhiyun return rc;
5955*4882a593Smuzhiyun }
5956*4882a593Smuzhiyun
5957*4882a593Smuzhiyun #define NVRAM_SIZE 0x200
5958*4882a593Smuzhiyun #define CRC32_RESIDUAL 0xdebb20e3
5959*4882a593Smuzhiyun
5960*4882a593Smuzhiyun static int
bnx2_test_nvram(struct bnx2 * bp)5961*4882a593Smuzhiyun bnx2_test_nvram(struct bnx2 *bp)
5962*4882a593Smuzhiyun {
5963*4882a593Smuzhiyun __be32 buf[NVRAM_SIZE / 4];
5964*4882a593Smuzhiyun u8 *data = (u8 *) buf;
5965*4882a593Smuzhiyun int rc = 0;
5966*4882a593Smuzhiyun u32 magic, csum;
5967*4882a593Smuzhiyun
5968*4882a593Smuzhiyun if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5969*4882a593Smuzhiyun goto test_nvram_done;
5970*4882a593Smuzhiyun
5971*4882a593Smuzhiyun magic = be32_to_cpu(buf[0]);
5972*4882a593Smuzhiyun if (magic != 0x669955aa) {
5973*4882a593Smuzhiyun rc = -ENODEV;
5974*4882a593Smuzhiyun goto test_nvram_done;
5975*4882a593Smuzhiyun }
5976*4882a593Smuzhiyun
5977*4882a593Smuzhiyun if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5978*4882a593Smuzhiyun goto test_nvram_done;
5979*4882a593Smuzhiyun
5980*4882a593Smuzhiyun csum = ether_crc_le(0x100, data);
5981*4882a593Smuzhiyun if (csum != CRC32_RESIDUAL) {
5982*4882a593Smuzhiyun rc = -ENODEV;
5983*4882a593Smuzhiyun goto test_nvram_done;
5984*4882a593Smuzhiyun }
5985*4882a593Smuzhiyun
5986*4882a593Smuzhiyun csum = ether_crc_le(0x100, data + 0x100);
5987*4882a593Smuzhiyun if (csum != CRC32_RESIDUAL) {
5988*4882a593Smuzhiyun rc = -ENODEV;
5989*4882a593Smuzhiyun }
5990*4882a593Smuzhiyun
5991*4882a593Smuzhiyun test_nvram_done:
5992*4882a593Smuzhiyun return rc;
5993*4882a593Smuzhiyun }
5994*4882a593Smuzhiyun
/* Ethtool link self-test: report 0 when link is up, -ENODEV otherwise.
 * On remote-PHY configurations the cached link state is used; on local
 * PHYs the BMSR is read directly.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read twice on purpose: the MII link-status bit is latched-low,
	 * so the first read clears a stale latch and the second returns
	 * the current link state.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
6020*4882a593Smuzhiyun
6021*4882a593Smuzhiyun static int
bnx2_test_intr(struct bnx2 * bp)6022*4882a593Smuzhiyun bnx2_test_intr(struct bnx2 *bp)
6023*4882a593Smuzhiyun {
6024*4882a593Smuzhiyun int i;
6025*4882a593Smuzhiyun u16 status_idx;
6026*4882a593Smuzhiyun
6027*4882a593Smuzhiyun if (!netif_running(bp->dev))
6028*4882a593Smuzhiyun return -ENODEV;
6029*4882a593Smuzhiyun
6030*4882a593Smuzhiyun status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6031*4882a593Smuzhiyun
6032*4882a593Smuzhiyun /* This register is not touched during run-time. */
6033*4882a593Smuzhiyun BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6034*4882a593Smuzhiyun BNX2_RD(bp, BNX2_HC_COMMAND);
6035*4882a593Smuzhiyun
6036*4882a593Smuzhiyun for (i = 0; i < 10; i++) {
6037*4882a593Smuzhiyun if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6038*4882a593Smuzhiyun status_idx) {
6039*4882a593Smuzhiyun
6040*4882a593Smuzhiyun break;
6041*4882a593Smuzhiyun }
6042*4882a593Smuzhiyun
6043*4882a593Smuzhiyun msleep_interruptible(10);
6044*4882a593Smuzhiyun }
6045*4882a593Smuzhiyun if (i < 10)
6046*4882a593Smuzhiyun return 0;
6047*4882a593Smuzhiyun
6048*4882a593Smuzhiyun return -ENODEV;
6049*4882a593Smuzhiyun }
6050*4882a593Smuzhiyun
6051*4882a593Smuzhiyun /* Determining link for parallel detection. */
/* Check whether the 5706 SerDes has a parallel-detected link partner:
 * signal detect present, sync acquired (no RUDI invalid), and not
 * receiving CONFIG codewords.  Returns 1 if link is present, else 0.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled for this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select and read the MODE_CTL shadow register. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice: the first read clears latched status bits so the
	 * second reflects the current state.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the DSP expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6082*4882a593Smuzhiyun
6083*4882a593Smuzhiyun static void
bnx2_5706_serdes_timer(struct bnx2 * bp)6084*4882a593Smuzhiyun bnx2_5706_serdes_timer(struct bnx2 *bp)
6085*4882a593Smuzhiyun {
6086*4882a593Smuzhiyun int check_link = 1;
6087*4882a593Smuzhiyun
6088*4882a593Smuzhiyun spin_lock(&bp->phy_lock);
6089*4882a593Smuzhiyun if (bp->serdes_an_pending) {
6090*4882a593Smuzhiyun bp->serdes_an_pending--;
6091*4882a593Smuzhiyun check_link = 0;
6092*4882a593Smuzhiyun } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6093*4882a593Smuzhiyun u32 bmcr;
6094*4882a593Smuzhiyun
6095*4882a593Smuzhiyun bp->current_interval = BNX2_TIMER_INTERVAL;
6096*4882a593Smuzhiyun
6097*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6098*4882a593Smuzhiyun
6099*4882a593Smuzhiyun if (bmcr & BMCR_ANENABLE) {
6100*4882a593Smuzhiyun if (bnx2_5706_serdes_has_link(bp)) {
6101*4882a593Smuzhiyun bmcr &= ~BMCR_ANENABLE;
6102*4882a593Smuzhiyun bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6103*4882a593Smuzhiyun bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6104*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6105*4882a593Smuzhiyun }
6106*4882a593Smuzhiyun }
6107*4882a593Smuzhiyun }
6108*4882a593Smuzhiyun else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6109*4882a593Smuzhiyun (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6110*4882a593Smuzhiyun u32 phy2;
6111*4882a593Smuzhiyun
6112*4882a593Smuzhiyun bnx2_write_phy(bp, 0x17, 0x0f01);
6113*4882a593Smuzhiyun bnx2_read_phy(bp, 0x15, &phy2);
6114*4882a593Smuzhiyun if (phy2 & 0x20) {
6115*4882a593Smuzhiyun u32 bmcr;
6116*4882a593Smuzhiyun
6117*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6118*4882a593Smuzhiyun bmcr |= BMCR_ANENABLE;
6119*4882a593Smuzhiyun bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6120*4882a593Smuzhiyun
6121*4882a593Smuzhiyun bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6122*4882a593Smuzhiyun }
6123*4882a593Smuzhiyun } else
6124*4882a593Smuzhiyun bp->current_interval = BNX2_TIMER_INTERVAL;
6125*4882a593Smuzhiyun
6126*4882a593Smuzhiyun if (check_link) {
6127*4882a593Smuzhiyun u32 val;
6128*4882a593Smuzhiyun
6129*4882a593Smuzhiyun bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6130*4882a593Smuzhiyun bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6131*4882a593Smuzhiyun bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6132*4882a593Smuzhiyun
6133*4882a593Smuzhiyun if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6134*4882a593Smuzhiyun if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6135*4882a593Smuzhiyun bnx2_5706s_force_link_dn(bp, 1);
6136*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6137*4882a593Smuzhiyun } else
6138*4882a593Smuzhiyun bnx2_set_link(bp);
6139*4882a593Smuzhiyun } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6140*4882a593Smuzhiyun bnx2_set_link(bp);
6141*4882a593Smuzhiyun }
6142*4882a593Smuzhiyun spin_unlock(&bp->phy_lock);
6143*4882a593Smuzhiyun }
6144*4882a593Smuzhiyun
6145*4882a593Smuzhiyun static void
bnx2_5708_serdes_timer(struct bnx2 * bp)6146*4882a593Smuzhiyun bnx2_5708_serdes_timer(struct bnx2 *bp)
6147*4882a593Smuzhiyun {
6148*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6149*4882a593Smuzhiyun return;
6150*4882a593Smuzhiyun
6151*4882a593Smuzhiyun if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6152*4882a593Smuzhiyun bp->serdes_an_pending = 0;
6153*4882a593Smuzhiyun return;
6154*4882a593Smuzhiyun }
6155*4882a593Smuzhiyun
6156*4882a593Smuzhiyun spin_lock(&bp->phy_lock);
6157*4882a593Smuzhiyun if (bp->serdes_an_pending)
6158*4882a593Smuzhiyun bp->serdes_an_pending--;
6159*4882a593Smuzhiyun else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6160*4882a593Smuzhiyun u32 bmcr;
6161*4882a593Smuzhiyun
6162*4882a593Smuzhiyun bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6163*4882a593Smuzhiyun if (bmcr & BMCR_ANENABLE) {
6164*4882a593Smuzhiyun bnx2_enable_forced_2g5(bp);
6165*4882a593Smuzhiyun bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6166*4882a593Smuzhiyun } else {
6167*4882a593Smuzhiyun bnx2_disable_forced_2g5(bp);
6168*4882a593Smuzhiyun bp->serdes_an_pending = 2;
6169*4882a593Smuzhiyun bp->current_interval = BNX2_TIMER_INTERVAL;
6170*4882a593Smuzhiyun }
6171*4882a593Smuzhiyun
6172*4882a593Smuzhiyun } else
6173*4882a593Smuzhiyun bp->current_interval = BNX2_TIMER_INTERVAL;
6174*4882a593Smuzhiyun
6175*4882a593Smuzhiyun spin_unlock(&bp->phy_lock);
6176*4882a593Smuzhiyun }
6177*4882a593Smuzhiyun
6178*4882a593Smuzhiyun static void
bnx2_timer(struct timer_list * t)6179*4882a593Smuzhiyun bnx2_timer(struct timer_list *t)
6180*4882a593Smuzhiyun {
6181*4882a593Smuzhiyun struct bnx2 *bp = from_timer(bp, t, timer);
6182*4882a593Smuzhiyun
6183*4882a593Smuzhiyun if (!netif_running(bp->dev))
6184*4882a593Smuzhiyun return;
6185*4882a593Smuzhiyun
6186*4882a593Smuzhiyun if (atomic_read(&bp->intr_sem) != 0)
6187*4882a593Smuzhiyun goto bnx2_restart_timer;
6188*4882a593Smuzhiyun
6189*4882a593Smuzhiyun if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6190*4882a593Smuzhiyun BNX2_FLAG_USING_MSI)
6191*4882a593Smuzhiyun bnx2_chk_missed_msi(bp);
6192*4882a593Smuzhiyun
6193*4882a593Smuzhiyun bnx2_send_heart_beat(bp);
6194*4882a593Smuzhiyun
6195*4882a593Smuzhiyun bp->stats_blk->stat_FwRxDrop =
6196*4882a593Smuzhiyun bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6197*4882a593Smuzhiyun
6198*4882a593Smuzhiyun /* workaround occasional corrupted counters */
6199*4882a593Smuzhiyun if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6200*4882a593Smuzhiyun BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6201*4882a593Smuzhiyun BNX2_HC_COMMAND_STATS_NOW);
6202*4882a593Smuzhiyun
6203*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6204*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6205*4882a593Smuzhiyun bnx2_5706_serdes_timer(bp);
6206*4882a593Smuzhiyun else
6207*4882a593Smuzhiyun bnx2_5708_serdes_timer(bp);
6208*4882a593Smuzhiyun }
6209*4882a593Smuzhiyun
6210*4882a593Smuzhiyun bnx2_restart_timer:
6211*4882a593Smuzhiyun mod_timer(&bp->timer, jiffies + bp->current_interval);
6212*4882a593Smuzhiyun }
6213*4882a593Smuzhiyun
6214*4882a593Smuzhiyun static int
bnx2_request_irq(struct bnx2 * bp)6215*4882a593Smuzhiyun bnx2_request_irq(struct bnx2 *bp)
6216*4882a593Smuzhiyun {
6217*4882a593Smuzhiyun unsigned long flags;
6218*4882a593Smuzhiyun struct bnx2_irq *irq;
6219*4882a593Smuzhiyun int rc = 0, i;
6220*4882a593Smuzhiyun
6221*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6222*4882a593Smuzhiyun flags = 0;
6223*4882a593Smuzhiyun else
6224*4882a593Smuzhiyun flags = IRQF_SHARED;
6225*4882a593Smuzhiyun
6226*4882a593Smuzhiyun for (i = 0; i < bp->irq_nvecs; i++) {
6227*4882a593Smuzhiyun irq = &bp->irq_tbl[i];
6228*4882a593Smuzhiyun rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6229*4882a593Smuzhiyun &bp->bnx2_napi[i]);
6230*4882a593Smuzhiyun if (rc)
6231*4882a593Smuzhiyun break;
6232*4882a593Smuzhiyun irq->requested = 1;
6233*4882a593Smuzhiyun }
6234*4882a593Smuzhiyun return rc;
6235*4882a593Smuzhiyun }
6236*4882a593Smuzhiyun
6237*4882a593Smuzhiyun static void
__bnx2_free_irq(struct bnx2 * bp)6238*4882a593Smuzhiyun __bnx2_free_irq(struct bnx2 *bp)
6239*4882a593Smuzhiyun {
6240*4882a593Smuzhiyun struct bnx2_irq *irq;
6241*4882a593Smuzhiyun int i;
6242*4882a593Smuzhiyun
6243*4882a593Smuzhiyun for (i = 0; i < bp->irq_nvecs; i++) {
6244*4882a593Smuzhiyun irq = &bp->irq_tbl[i];
6245*4882a593Smuzhiyun if (irq->requested)
6246*4882a593Smuzhiyun free_irq(irq->vector, &bp->bnx2_napi[i]);
6247*4882a593Smuzhiyun irq->requested = 0;
6248*4882a593Smuzhiyun }
6249*4882a593Smuzhiyun }
6250*4882a593Smuzhiyun
6251*4882a593Smuzhiyun static void
bnx2_free_irq(struct bnx2 * bp)6252*4882a593Smuzhiyun bnx2_free_irq(struct bnx2 *bp)
6253*4882a593Smuzhiyun {
6254*4882a593Smuzhiyun
6255*4882a593Smuzhiyun __bnx2_free_irq(bp);
6256*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_USING_MSI)
6257*4882a593Smuzhiyun pci_disable_msi(bp->pdev);
6258*4882a593Smuzhiyun else if (bp->flags & BNX2_FLAG_USING_MSIX)
6259*4882a593Smuzhiyun pci_disable_msix(bp->pdev);
6260*4882a593Smuzhiyun
6261*4882a593Smuzhiyun bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6262*4882a593Smuzhiyun }
6263*4882a593Smuzhiyun
6264*4882a593Smuzhiyun static void
bnx2_enable_msix(struct bnx2 * bp,int msix_vecs)6265*4882a593Smuzhiyun bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6266*4882a593Smuzhiyun {
6267*4882a593Smuzhiyun int i, total_vecs;
6268*4882a593Smuzhiyun struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6269*4882a593Smuzhiyun struct net_device *dev = bp->dev;
6270*4882a593Smuzhiyun const int len = sizeof(bp->irq_tbl[0].name);
6271*4882a593Smuzhiyun
6272*4882a593Smuzhiyun bnx2_setup_msix_tbl(bp);
6273*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6274*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6275*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6276*4882a593Smuzhiyun
6277*4882a593Smuzhiyun /* Need to flush the previous three writes to ensure MSI-X
6278*4882a593Smuzhiyun * is setup properly */
6279*4882a593Smuzhiyun BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6280*4882a593Smuzhiyun
6281*4882a593Smuzhiyun for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6282*4882a593Smuzhiyun msix_ent[i].entry = i;
6283*4882a593Smuzhiyun msix_ent[i].vector = 0;
6284*4882a593Smuzhiyun }
6285*4882a593Smuzhiyun
6286*4882a593Smuzhiyun total_vecs = msix_vecs;
6287*4882a593Smuzhiyun #ifdef BCM_CNIC
6288*4882a593Smuzhiyun total_vecs++;
6289*4882a593Smuzhiyun #endif
6290*4882a593Smuzhiyun total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6291*4882a593Smuzhiyun BNX2_MIN_MSIX_VEC, total_vecs);
6292*4882a593Smuzhiyun if (total_vecs < 0)
6293*4882a593Smuzhiyun return;
6294*4882a593Smuzhiyun
6295*4882a593Smuzhiyun msix_vecs = total_vecs;
6296*4882a593Smuzhiyun #ifdef BCM_CNIC
6297*4882a593Smuzhiyun msix_vecs--;
6298*4882a593Smuzhiyun #endif
6299*4882a593Smuzhiyun bp->irq_nvecs = msix_vecs;
6300*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6301*4882a593Smuzhiyun for (i = 0; i < total_vecs; i++) {
6302*4882a593Smuzhiyun bp->irq_tbl[i].vector = msix_ent[i].vector;
6303*4882a593Smuzhiyun snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6304*4882a593Smuzhiyun bp->irq_tbl[i].handler = bnx2_msi_1shot;
6305*4882a593Smuzhiyun }
6306*4882a593Smuzhiyun }
6307*4882a593Smuzhiyun
6308*4882a593Smuzhiyun static int
bnx2_setup_int_mode(struct bnx2 * bp,int dis_msi)6309*4882a593Smuzhiyun bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6310*4882a593Smuzhiyun {
6311*4882a593Smuzhiyun int cpus = netif_get_num_default_rss_queues();
6312*4882a593Smuzhiyun int msix_vecs;
6313*4882a593Smuzhiyun
6314*4882a593Smuzhiyun if (!bp->num_req_rx_rings)
6315*4882a593Smuzhiyun msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6316*4882a593Smuzhiyun else if (!bp->num_req_tx_rings)
6317*4882a593Smuzhiyun msix_vecs = max(cpus, bp->num_req_rx_rings);
6318*4882a593Smuzhiyun else
6319*4882a593Smuzhiyun msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6320*4882a593Smuzhiyun
6321*4882a593Smuzhiyun msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6322*4882a593Smuzhiyun
6323*4882a593Smuzhiyun bp->irq_tbl[0].handler = bnx2_interrupt;
6324*4882a593Smuzhiyun strcpy(bp->irq_tbl[0].name, bp->dev->name);
6325*4882a593Smuzhiyun bp->irq_nvecs = 1;
6326*4882a593Smuzhiyun bp->irq_tbl[0].vector = bp->pdev->irq;
6327*4882a593Smuzhiyun
6328*4882a593Smuzhiyun if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6329*4882a593Smuzhiyun bnx2_enable_msix(bp, msix_vecs);
6330*4882a593Smuzhiyun
6331*4882a593Smuzhiyun if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6332*4882a593Smuzhiyun !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6333*4882a593Smuzhiyun if (pci_enable_msi(bp->pdev) == 0) {
6334*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_USING_MSI;
6335*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6336*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6337*4882a593Smuzhiyun bp->irq_tbl[0].handler = bnx2_msi_1shot;
6338*4882a593Smuzhiyun } else
6339*4882a593Smuzhiyun bp->irq_tbl[0].handler = bnx2_msi;
6340*4882a593Smuzhiyun
6341*4882a593Smuzhiyun bp->irq_tbl[0].vector = bp->pdev->irq;
6342*4882a593Smuzhiyun }
6343*4882a593Smuzhiyun }
6344*4882a593Smuzhiyun
6345*4882a593Smuzhiyun if (!bp->num_req_tx_rings)
6346*4882a593Smuzhiyun bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6347*4882a593Smuzhiyun else
6348*4882a593Smuzhiyun bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6349*4882a593Smuzhiyun
6350*4882a593Smuzhiyun if (!bp->num_req_rx_rings)
6351*4882a593Smuzhiyun bp->num_rx_rings = bp->irq_nvecs;
6352*4882a593Smuzhiyun else
6353*4882a593Smuzhiyun bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6354*4882a593Smuzhiyun
6355*4882a593Smuzhiyun netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6356*4882a593Smuzhiyun
6357*4882a593Smuzhiyun return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6358*4882a593Smuzhiyun }
6359*4882a593Smuzhiyun
6360*4882a593Smuzhiyun /* Called with rtnl_lock */
6361*4882a593Smuzhiyun static int
bnx2_open(struct net_device * dev)6362*4882a593Smuzhiyun bnx2_open(struct net_device *dev)
6363*4882a593Smuzhiyun {
6364*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
6365*4882a593Smuzhiyun int rc;
6366*4882a593Smuzhiyun
6367*4882a593Smuzhiyun rc = bnx2_request_firmware(bp);
6368*4882a593Smuzhiyun if (rc < 0)
6369*4882a593Smuzhiyun goto out;
6370*4882a593Smuzhiyun
6371*4882a593Smuzhiyun netif_carrier_off(dev);
6372*4882a593Smuzhiyun
6373*4882a593Smuzhiyun bnx2_disable_int(bp);
6374*4882a593Smuzhiyun
6375*4882a593Smuzhiyun rc = bnx2_setup_int_mode(bp, disable_msi);
6376*4882a593Smuzhiyun if (rc)
6377*4882a593Smuzhiyun goto open_err;
6378*4882a593Smuzhiyun bnx2_init_napi(bp);
6379*4882a593Smuzhiyun bnx2_napi_enable(bp);
6380*4882a593Smuzhiyun rc = bnx2_alloc_mem(bp);
6381*4882a593Smuzhiyun if (rc)
6382*4882a593Smuzhiyun goto open_err;
6383*4882a593Smuzhiyun
6384*4882a593Smuzhiyun rc = bnx2_request_irq(bp);
6385*4882a593Smuzhiyun if (rc)
6386*4882a593Smuzhiyun goto open_err;
6387*4882a593Smuzhiyun
6388*4882a593Smuzhiyun rc = bnx2_init_nic(bp, 1);
6389*4882a593Smuzhiyun if (rc)
6390*4882a593Smuzhiyun goto open_err;
6391*4882a593Smuzhiyun
6392*4882a593Smuzhiyun mod_timer(&bp->timer, jiffies + bp->current_interval);
6393*4882a593Smuzhiyun
6394*4882a593Smuzhiyun atomic_set(&bp->intr_sem, 0);
6395*4882a593Smuzhiyun
6396*4882a593Smuzhiyun memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6397*4882a593Smuzhiyun
6398*4882a593Smuzhiyun bnx2_enable_int(bp);
6399*4882a593Smuzhiyun
6400*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_USING_MSI) {
6401*4882a593Smuzhiyun /* Test MSI to make sure it is working
6402*4882a593Smuzhiyun * If MSI test fails, go back to INTx mode
6403*4882a593Smuzhiyun */
6404*4882a593Smuzhiyun if (bnx2_test_intr(bp) != 0) {
6405*4882a593Smuzhiyun netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6406*4882a593Smuzhiyun
6407*4882a593Smuzhiyun bnx2_disable_int(bp);
6408*4882a593Smuzhiyun bnx2_free_irq(bp);
6409*4882a593Smuzhiyun
6410*4882a593Smuzhiyun bnx2_setup_int_mode(bp, 1);
6411*4882a593Smuzhiyun
6412*4882a593Smuzhiyun rc = bnx2_init_nic(bp, 0);
6413*4882a593Smuzhiyun
6414*4882a593Smuzhiyun if (!rc)
6415*4882a593Smuzhiyun rc = bnx2_request_irq(bp);
6416*4882a593Smuzhiyun
6417*4882a593Smuzhiyun if (rc) {
6418*4882a593Smuzhiyun del_timer_sync(&bp->timer);
6419*4882a593Smuzhiyun goto open_err;
6420*4882a593Smuzhiyun }
6421*4882a593Smuzhiyun bnx2_enable_int(bp);
6422*4882a593Smuzhiyun }
6423*4882a593Smuzhiyun }
6424*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_USING_MSI)
6425*4882a593Smuzhiyun netdev_info(dev, "using MSI\n");
6426*4882a593Smuzhiyun else if (bp->flags & BNX2_FLAG_USING_MSIX)
6427*4882a593Smuzhiyun netdev_info(dev, "using MSIX\n");
6428*4882a593Smuzhiyun
6429*4882a593Smuzhiyun netif_tx_start_all_queues(dev);
6430*4882a593Smuzhiyun out:
6431*4882a593Smuzhiyun return rc;
6432*4882a593Smuzhiyun
6433*4882a593Smuzhiyun open_err:
6434*4882a593Smuzhiyun bnx2_napi_disable(bp);
6435*4882a593Smuzhiyun bnx2_free_skbs(bp);
6436*4882a593Smuzhiyun bnx2_free_irq(bp);
6437*4882a593Smuzhiyun bnx2_free_mem(bp);
6438*4882a593Smuzhiyun bnx2_del_napi(bp);
6439*4882a593Smuzhiyun bnx2_release_firmware(bp);
6440*4882a593Smuzhiyun goto out;
6441*4882a593Smuzhiyun }
6442*4882a593Smuzhiyun
6443*4882a593Smuzhiyun static void
bnx2_reset_task(struct work_struct * work)6444*4882a593Smuzhiyun bnx2_reset_task(struct work_struct *work)
6445*4882a593Smuzhiyun {
6446*4882a593Smuzhiyun struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6447*4882a593Smuzhiyun int rc;
6448*4882a593Smuzhiyun u16 pcicmd;
6449*4882a593Smuzhiyun
6450*4882a593Smuzhiyun rtnl_lock();
6451*4882a593Smuzhiyun if (!netif_running(bp->dev)) {
6452*4882a593Smuzhiyun rtnl_unlock();
6453*4882a593Smuzhiyun return;
6454*4882a593Smuzhiyun }
6455*4882a593Smuzhiyun
6456*4882a593Smuzhiyun bnx2_netif_stop(bp, true);
6457*4882a593Smuzhiyun
6458*4882a593Smuzhiyun pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6459*4882a593Smuzhiyun if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6460*4882a593Smuzhiyun /* in case PCI block has reset */
6461*4882a593Smuzhiyun pci_restore_state(bp->pdev);
6462*4882a593Smuzhiyun pci_save_state(bp->pdev);
6463*4882a593Smuzhiyun }
6464*4882a593Smuzhiyun rc = bnx2_init_nic(bp, 1);
6465*4882a593Smuzhiyun if (rc) {
6466*4882a593Smuzhiyun netdev_err(bp->dev, "failed to reset NIC, closing\n");
6467*4882a593Smuzhiyun bnx2_napi_enable(bp);
6468*4882a593Smuzhiyun dev_close(bp->dev);
6469*4882a593Smuzhiyun rtnl_unlock();
6470*4882a593Smuzhiyun return;
6471*4882a593Smuzhiyun }
6472*4882a593Smuzhiyun
6473*4882a593Smuzhiyun atomic_set(&bp->intr_sem, 1);
6474*4882a593Smuzhiyun bnx2_netif_start(bp, true);
6475*4882a593Smuzhiyun rtnl_unlock();
6476*4882a593Smuzhiyun }
6477*4882a593Smuzhiyun
6478*4882a593Smuzhiyun #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6479*4882a593Smuzhiyun
6480*4882a593Smuzhiyun static void
bnx2_dump_ftq(struct bnx2 * bp)6481*4882a593Smuzhiyun bnx2_dump_ftq(struct bnx2 *bp)
6482*4882a593Smuzhiyun {
6483*4882a593Smuzhiyun int i;
6484*4882a593Smuzhiyun u32 reg, bdidx, cid, valid;
6485*4882a593Smuzhiyun struct net_device *dev = bp->dev;
6486*4882a593Smuzhiyun static const struct ftq_reg {
6487*4882a593Smuzhiyun char *name;
6488*4882a593Smuzhiyun u32 off;
6489*4882a593Smuzhiyun } ftq_arr[] = {
6490*4882a593Smuzhiyun BNX2_FTQ_ENTRY(RV2P_P),
6491*4882a593Smuzhiyun BNX2_FTQ_ENTRY(RV2P_T),
6492*4882a593Smuzhiyun BNX2_FTQ_ENTRY(RV2P_M),
6493*4882a593Smuzhiyun BNX2_FTQ_ENTRY(TBDR_),
6494*4882a593Smuzhiyun BNX2_FTQ_ENTRY(TDMA_),
6495*4882a593Smuzhiyun BNX2_FTQ_ENTRY(TXP_),
6496*4882a593Smuzhiyun BNX2_FTQ_ENTRY(TXP_),
6497*4882a593Smuzhiyun BNX2_FTQ_ENTRY(TPAT_),
6498*4882a593Smuzhiyun BNX2_FTQ_ENTRY(RXP_C),
6499*4882a593Smuzhiyun BNX2_FTQ_ENTRY(RXP_),
6500*4882a593Smuzhiyun BNX2_FTQ_ENTRY(COM_COMXQ_),
6501*4882a593Smuzhiyun BNX2_FTQ_ENTRY(COM_COMTQ_),
6502*4882a593Smuzhiyun BNX2_FTQ_ENTRY(COM_COMQ_),
6503*4882a593Smuzhiyun BNX2_FTQ_ENTRY(CP_CPQ_),
6504*4882a593Smuzhiyun };
6505*4882a593Smuzhiyun
6506*4882a593Smuzhiyun netdev_err(dev, "<--- start FTQ dump --->\n");
6507*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6508*4882a593Smuzhiyun netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6509*4882a593Smuzhiyun bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6510*4882a593Smuzhiyun
6511*4882a593Smuzhiyun netdev_err(dev, "CPU states:\n");
6512*4882a593Smuzhiyun for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6513*4882a593Smuzhiyun netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6514*4882a593Smuzhiyun reg, bnx2_reg_rd_ind(bp, reg),
6515*4882a593Smuzhiyun bnx2_reg_rd_ind(bp, reg + 4),
6516*4882a593Smuzhiyun bnx2_reg_rd_ind(bp, reg + 8),
6517*4882a593Smuzhiyun bnx2_reg_rd_ind(bp, reg + 0x1c),
6518*4882a593Smuzhiyun bnx2_reg_rd_ind(bp, reg + 0x1c),
6519*4882a593Smuzhiyun bnx2_reg_rd_ind(bp, reg + 0x20));
6520*4882a593Smuzhiyun
6521*4882a593Smuzhiyun netdev_err(dev, "<--- end FTQ dump --->\n");
6522*4882a593Smuzhiyun netdev_err(dev, "<--- start TBDC dump --->\n");
6523*4882a593Smuzhiyun netdev_err(dev, "TBDC free cnt: %ld\n",
6524*4882a593Smuzhiyun BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6525*4882a593Smuzhiyun netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6526*4882a593Smuzhiyun for (i = 0; i < 0x20; i++) {
6527*4882a593Smuzhiyun int j = 0;
6528*4882a593Smuzhiyun
6529*4882a593Smuzhiyun BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6530*4882a593Smuzhiyun BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6531*4882a593Smuzhiyun BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6532*4882a593Smuzhiyun BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6533*4882a593Smuzhiyun while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6534*4882a593Smuzhiyun BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6535*4882a593Smuzhiyun j++;
6536*4882a593Smuzhiyun
6537*4882a593Smuzhiyun cid = BNX2_RD(bp, BNX2_TBDC_CID);
6538*4882a593Smuzhiyun bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6539*4882a593Smuzhiyun valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6540*4882a593Smuzhiyun netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6541*4882a593Smuzhiyun i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6542*4882a593Smuzhiyun bdidx >> 24, (valid >> 8) & 0x0ff);
6543*4882a593Smuzhiyun }
6544*4882a593Smuzhiyun netdev_err(dev, "<--- end TBDC dump --->\n");
6545*4882a593Smuzhiyun }
6546*4882a593Smuzhiyun
6547*4882a593Smuzhiyun static void
bnx2_dump_state(struct bnx2 * bp)6548*4882a593Smuzhiyun bnx2_dump_state(struct bnx2 *bp)
6549*4882a593Smuzhiyun {
6550*4882a593Smuzhiyun struct net_device *dev = bp->dev;
6551*4882a593Smuzhiyun u32 val1, val2;
6552*4882a593Smuzhiyun
6553*4882a593Smuzhiyun pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6554*4882a593Smuzhiyun netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6555*4882a593Smuzhiyun atomic_read(&bp->intr_sem), val1);
6556*4882a593Smuzhiyun pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6557*4882a593Smuzhiyun pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6558*4882a593Smuzhiyun netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6559*4882a593Smuzhiyun netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6560*4882a593Smuzhiyun BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6561*4882a593Smuzhiyun BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6562*4882a593Smuzhiyun netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6563*4882a593Smuzhiyun BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6564*4882a593Smuzhiyun netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6565*4882a593Smuzhiyun BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6566*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_USING_MSIX)
6567*4882a593Smuzhiyun netdev_err(dev, "DEBUG: PBA[%08x]\n",
6568*4882a593Smuzhiyun BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6569*4882a593Smuzhiyun }
6570*4882a593Smuzhiyun
6571*4882a593Smuzhiyun static void
bnx2_tx_timeout(struct net_device * dev,unsigned int txqueue)6572*4882a593Smuzhiyun bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6573*4882a593Smuzhiyun {
6574*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
6575*4882a593Smuzhiyun
6576*4882a593Smuzhiyun bnx2_dump_ftq(bp);
6577*4882a593Smuzhiyun bnx2_dump_state(bp);
6578*4882a593Smuzhiyun bnx2_dump_mcp_state(bp);
6579*4882a593Smuzhiyun
6580*4882a593Smuzhiyun /* This allows the netif to be shutdown gracefully before resetting */
6581*4882a593Smuzhiyun schedule_work(&bp->reset_task);
6582*4882a593Smuzhiyun }
6583*4882a593Smuzhiyun
6584*4882a593Smuzhiyun /* Called with netif_tx_lock.
6585*4882a593Smuzhiyun * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6586*4882a593Smuzhiyun * netif_wake_queue().
6587*4882a593Smuzhiyun */
6588*4882a593Smuzhiyun static netdev_tx_t
bnx2_start_xmit(struct sk_buff * skb,struct net_device * dev)6589*4882a593Smuzhiyun bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6590*4882a593Smuzhiyun {
6591*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
6592*4882a593Smuzhiyun dma_addr_t mapping;
6593*4882a593Smuzhiyun struct bnx2_tx_bd *txbd;
6594*4882a593Smuzhiyun struct bnx2_sw_tx_bd *tx_buf;
6595*4882a593Smuzhiyun u32 len, vlan_tag_flags, last_frag, mss;
6596*4882a593Smuzhiyun u16 prod, ring_prod;
6597*4882a593Smuzhiyun int i;
6598*4882a593Smuzhiyun struct bnx2_napi *bnapi;
6599*4882a593Smuzhiyun struct bnx2_tx_ring_info *txr;
6600*4882a593Smuzhiyun struct netdev_queue *txq;
6601*4882a593Smuzhiyun
6602*4882a593Smuzhiyun /* Determine which tx ring we will be placed on */
6603*4882a593Smuzhiyun i = skb_get_queue_mapping(skb);
6604*4882a593Smuzhiyun bnapi = &bp->bnx2_napi[i];
6605*4882a593Smuzhiyun txr = &bnapi->tx_ring;
6606*4882a593Smuzhiyun txq = netdev_get_tx_queue(dev, i);
6607*4882a593Smuzhiyun
6608*4882a593Smuzhiyun if (unlikely(bnx2_tx_avail(bp, txr) <
6609*4882a593Smuzhiyun (skb_shinfo(skb)->nr_frags + 1))) {
6610*4882a593Smuzhiyun netif_tx_stop_queue(txq);
6611*4882a593Smuzhiyun netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6612*4882a593Smuzhiyun
6613*4882a593Smuzhiyun return NETDEV_TX_BUSY;
6614*4882a593Smuzhiyun }
6615*4882a593Smuzhiyun len = skb_headlen(skb);
6616*4882a593Smuzhiyun prod = txr->tx_prod;
6617*4882a593Smuzhiyun ring_prod = BNX2_TX_RING_IDX(prod);
6618*4882a593Smuzhiyun
6619*4882a593Smuzhiyun vlan_tag_flags = 0;
6620*4882a593Smuzhiyun if (skb->ip_summed == CHECKSUM_PARTIAL) {
6621*4882a593Smuzhiyun vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6622*4882a593Smuzhiyun }
6623*4882a593Smuzhiyun
6624*4882a593Smuzhiyun if (skb_vlan_tag_present(skb)) {
6625*4882a593Smuzhiyun vlan_tag_flags |=
6626*4882a593Smuzhiyun (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6627*4882a593Smuzhiyun }
6628*4882a593Smuzhiyun
6629*4882a593Smuzhiyun if ((mss = skb_shinfo(skb)->gso_size)) {
6630*4882a593Smuzhiyun u32 tcp_opt_len;
6631*4882a593Smuzhiyun struct iphdr *iph;
6632*4882a593Smuzhiyun
6633*4882a593Smuzhiyun vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6634*4882a593Smuzhiyun
6635*4882a593Smuzhiyun tcp_opt_len = tcp_optlen(skb);
6636*4882a593Smuzhiyun
6637*4882a593Smuzhiyun if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6638*4882a593Smuzhiyun u32 tcp_off = skb_transport_offset(skb) -
6639*4882a593Smuzhiyun sizeof(struct ipv6hdr) - ETH_HLEN;
6640*4882a593Smuzhiyun
6641*4882a593Smuzhiyun vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6642*4882a593Smuzhiyun TX_BD_FLAGS_SW_FLAGS;
6643*4882a593Smuzhiyun if (likely(tcp_off == 0))
6644*4882a593Smuzhiyun vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6645*4882a593Smuzhiyun else {
6646*4882a593Smuzhiyun tcp_off >>= 3;
6647*4882a593Smuzhiyun vlan_tag_flags |= ((tcp_off & 0x3) <<
6648*4882a593Smuzhiyun TX_BD_FLAGS_TCP6_OFF0_SHL) |
6649*4882a593Smuzhiyun ((tcp_off & 0x10) <<
6650*4882a593Smuzhiyun TX_BD_FLAGS_TCP6_OFF4_SHL);
6651*4882a593Smuzhiyun mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6652*4882a593Smuzhiyun }
6653*4882a593Smuzhiyun } else {
6654*4882a593Smuzhiyun iph = ip_hdr(skb);
6655*4882a593Smuzhiyun if (tcp_opt_len || (iph->ihl > 5)) {
6656*4882a593Smuzhiyun vlan_tag_flags |= ((iph->ihl - 5) +
6657*4882a593Smuzhiyun (tcp_opt_len >> 2)) << 8;
6658*4882a593Smuzhiyun }
6659*4882a593Smuzhiyun }
6660*4882a593Smuzhiyun } else
6661*4882a593Smuzhiyun mss = 0;
6662*4882a593Smuzhiyun
6663*4882a593Smuzhiyun mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6664*4882a593Smuzhiyun if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6665*4882a593Smuzhiyun dev_kfree_skb_any(skb);
6666*4882a593Smuzhiyun return NETDEV_TX_OK;
6667*4882a593Smuzhiyun }
6668*4882a593Smuzhiyun
6669*4882a593Smuzhiyun tx_buf = &txr->tx_buf_ring[ring_prod];
6670*4882a593Smuzhiyun tx_buf->skb = skb;
6671*4882a593Smuzhiyun dma_unmap_addr_set(tx_buf, mapping, mapping);
6672*4882a593Smuzhiyun
6673*4882a593Smuzhiyun txbd = &txr->tx_desc_ring[ring_prod];
6674*4882a593Smuzhiyun
6675*4882a593Smuzhiyun txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6676*4882a593Smuzhiyun txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6677*4882a593Smuzhiyun txbd->tx_bd_mss_nbytes = len | (mss << 16);
6678*4882a593Smuzhiyun txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6679*4882a593Smuzhiyun
6680*4882a593Smuzhiyun last_frag = skb_shinfo(skb)->nr_frags;
6681*4882a593Smuzhiyun tx_buf->nr_frags = last_frag;
6682*4882a593Smuzhiyun tx_buf->is_gso = skb_is_gso(skb);
6683*4882a593Smuzhiyun
6684*4882a593Smuzhiyun for (i = 0; i < last_frag; i++) {
6685*4882a593Smuzhiyun const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6686*4882a593Smuzhiyun
6687*4882a593Smuzhiyun prod = BNX2_NEXT_TX_BD(prod);
6688*4882a593Smuzhiyun ring_prod = BNX2_TX_RING_IDX(prod);
6689*4882a593Smuzhiyun txbd = &txr->tx_desc_ring[ring_prod];
6690*4882a593Smuzhiyun
6691*4882a593Smuzhiyun len = skb_frag_size(frag);
6692*4882a593Smuzhiyun mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6693*4882a593Smuzhiyun DMA_TO_DEVICE);
6694*4882a593Smuzhiyun if (dma_mapping_error(&bp->pdev->dev, mapping))
6695*4882a593Smuzhiyun goto dma_error;
6696*4882a593Smuzhiyun dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6697*4882a593Smuzhiyun mapping);
6698*4882a593Smuzhiyun
6699*4882a593Smuzhiyun txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6700*4882a593Smuzhiyun txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6701*4882a593Smuzhiyun txbd->tx_bd_mss_nbytes = len | (mss << 16);
6702*4882a593Smuzhiyun txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6703*4882a593Smuzhiyun
6704*4882a593Smuzhiyun }
6705*4882a593Smuzhiyun txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6706*4882a593Smuzhiyun
6707*4882a593Smuzhiyun /* Sync BD data before updating TX mailbox */
6708*4882a593Smuzhiyun wmb();
6709*4882a593Smuzhiyun
6710*4882a593Smuzhiyun netdev_tx_sent_queue(txq, skb->len);
6711*4882a593Smuzhiyun
6712*4882a593Smuzhiyun prod = BNX2_NEXT_TX_BD(prod);
6713*4882a593Smuzhiyun txr->tx_prod_bseq += skb->len;
6714*4882a593Smuzhiyun
6715*4882a593Smuzhiyun BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6716*4882a593Smuzhiyun BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6717*4882a593Smuzhiyun
6718*4882a593Smuzhiyun txr->tx_prod = prod;
6719*4882a593Smuzhiyun
6720*4882a593Smuzhiyun if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6721*4882a593Smuzhiyun netif_tx_stop_queue(txq);
6722*4882a593Smuzhiyun
6723*4882a593Smuzhiyun /* netif_tx_stop_queue() must be done before checking
6724*4882a593Smuzhiyun * tx index in bnx2_tx_avail() below, because in
6725*4882a593Smuzhiyun * bnx2_tx_int(), we update tx index before checking for
6726*4882a593Smuzhiyun * netif_tx_queue_stopped().
6727*4882a593Smuzhiyun */
6728*4882a593Smuzhiyun smp_mb();
6729*4882a593Smuzhiyun if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6730*4882a593Smuzhiyun netif_tx_wake_queue(txq);
6731*4882a593Smuzhiyun }
6732*4882a593Smuzhiyun
6733*4882a593Smuzhiyun return NETDEV_TX_OK;
6734*4882a593Smuzhiyun dma_error:
6735*4882a593Smuzhiyun /* save value of frag that failed */
6736*4882a593Smuzhiyun last_frag = i;
6737*4882a593Smuzhiyun
6738*4882a593Smuzhiyun /* start back at beginning and unmap skb */
6739*4882a593Smuzhiyun prod = txr->tx_prod;
6740*4882a593Smuzhiyun ring_prod = BNX2_TX_RING_IDX(prod);
6741*4882a593Smuzhiyun tx_buf = &txr->tx_buf_ring[ring_prod];
6742*4882a593Smuzhiyun tx_buf->skb = NULL;
6743*4882a593Smuzhiyun dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6744*4882a593Smuzhiyun skb_headlen(skb), PCI_DMA_TODEVICE);
6745*4882a593Smuzhiyun
6746*4882a593Smuzhiyun /* unmap remaining mapped pages */
6747*4882a593Smuzhiyun for (i = 0; i < last_frag; i++) {
6748*4882a593Smuzhiyun prod = BNX2_NEXT_TX_BD(prod);
6749*4882a593Smuzhiyun ring_prod = BNX2_TX_RING_IDX(prod);
6750*4882a593Smuzhiyun tx_buf = &txr->tx_buf_ring[ring_prod];
6751*4882a593Smuzhiyun dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6752*4882a593Smuzhiyun skb_frag_size(&skb_shinfo(skb)->frags[i]),
6753*4882a593Smuzhiyun PCI_DMA_TODEVICE);
6754*4882a593Smuzhiyun }
6755*4882a593Smuzhiyun
6756*4882a593Smuzhiyun dev_kfree_skb_any(skb);
6757*4882a593Smuzhiyun return NETDEV_TX_OK;
6758*4882a593Smuzhiyun }
6759*4882a593Smuzhiyun
6760*4882a593Smuzhiyun /* Called with rtnl_lock */
6761*4882a593Smuzhiyun static int
bnx2_close(struct net_device * dev)6762*4882a593Smuzhiyun bnx2_close(struct net_device *dev)
6763*4882a593Smuzhiyun {
6764*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
6765*4882a593Smuzhiyun
6766*4882a593Smuzhiyun bnx2_disable_int_sync(bp);
6767*4882a593Smuzhiyun bnx2_napi_disable(bp);
6768*4882a593Smuzhiyun netif_tx_disable(dev);
6769*4882a593Smuzhiyun del_timer_sync(&bp->timer);
6770*4882a593Smuzhiyun bnx2_shutdown_chip(bp);
6771*4882a593Smuzhiyun bnx2_free_irq(bp);
6772*4882a593Smuzhiyun bnx2_free_skbs(bp);
6773*4882a593Smuzhiyun bnx2_free_mem(bp);
6774*4882a593Smuzhiyun bnx2_del_napi(bp);
6775*4882a593Smuzhiyun bp->link_up = 0;
6776*4882a593Smuzhiyun netif_carrier_off(bp->dev);
6777*4882a593Smuzhiyun return 0;
6778*4882a593Smuzhiyun }
6779*4882a593Smuzhiyun
6780*4882a593Smuzhiyun static void
bnx2_save_stats(struct bnx2 * bp)6781*4882a593Smuzhiyun bnx2_save_stats(struct bnx2 *bp)
6782*4882a593Smuzhiyun {
6783*4882a593Smuzhiyun u32 *hw_stats = (u32 *) bp->stats_blk;
6784*4882a593Smuzhiyun u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6785*4882a593Smuzhiyun int i;
6786*4882a593Smuzhiyun
6787*4882a593Smuzhiyun /* The 1st 10 counters are 64-bit counters */
6788*4882a593Smuzhiyun for (i = 0; i < 20; i += 2) {
6789*4882a593Smuzhiyun u32 hi;
6790*4882a593Smuzhiyun u64 lo;
6791*4882a593Smuzhiyun
6792*4882a593Smuzhiyun hi = temp_stats[i] + hw_stats[i];
6793*4882a593Smuzhiyun lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6794*4882a593Smuzhiyun if (lo > 0xffffffff)
6795*4882a593Smuzhiyun hi++;
6796*4882a593Smuzhiyun temp_stats[i] = hi;
6797*4882a593Smuzhiyun temp_stats[i + 1] = lo & 0xffffffff;
6798*4882a593Smuzhiyun }
6799*4882a593Smuzhiyun
6800*4882a593Smuzhiyun for ( ; i < sizeof(struct statistics_block) / 4; i++)
6801*4882a593Smuzhiyun temp_stats[i] += hw_stats[i];
6802*4882a593Smuzhiyun }
6803*4882a593Smuzhiyun
/* Combine a 64-bit hi/lo u32 counter pair into one u64 value. */
#define GET_64BIT_NET_STATS64(ctr)				\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Hardware stats are erased on chip reset; add the copy saved in
 * temp_stats_blk (see bnx2_save_stats) to the live block so counters
 * remain cumulative.  Expansions are fully parenthesized so they
 * group correctly inside any surrounding expression.
 */
#define GET_64BIT_NET_STATS(ctr)				\
	(GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr))

#define GET_32BIT_NET_STATS(ctr)				\
	((unsigned long) (bp->stats_blk->ctr +			\
			  bp->temp_stats_blk->ctr))
6814*4882a593Smuzhiyun
/* ndo_get_stats64: translate the chip's statistics block into the
 * standard rtnl_link_stats64 fields.  Each GET_*_NET_STATS macro adds
 * the live hardware counter to the copy preserved across chip resets
 * in temp_stats_blk (see bnx2_save_stats).
 */
static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* No stats block yet (chip never brought up) - report nothing. */
	if (!bp->stats_blk)
		return;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	/* NOTE: "Overrsize" is the counter's actual (misspelled) field
	 * name in the statistics block definition - do not "fix" it here.
	 */
	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is the aggregate of the categories filled in above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as 0 on 5706 and 5708 A0
	 * chips - presumably a hardware erratum on those steppings;
	 * TODO(review): confirm against chip errata documentation.
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

}
6886*4882a593Smuzhiyun
6887*4882a593Smuzhiyun /* All ethtool functions called with rtnl_lock */
6888*4882a593Smuzhiyun
static int
bnx2_get_link_ksettings(struct net_device *dev,
			struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;
	u32 supported, advertising;

	supported = SUPPORTED_Autoneg;
	/* A remote-PHY-capable device may use either medium; otherwise
	 * the configured phy_port decides copper vs. serdes/fibre.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		/* 2.5G is only advertised on chips flagged as capable. */
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	/* phy_lock guards the link-state fields sampled below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->base.port = bp->phy_port;
	advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		cmd->base.autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = bp->line_speed;
		cmd->base.duplex = bp->duplex;
		/* MDI/MDI-X state is only meaningful on copper PHYs. */
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		/* No carrier: speed and duplex are indeterminate. */
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->base.phy_address = bp->phy_addr;

	/* Convert the legacy u32 bitmasks into link-mode bitmaps. */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
6956*4882a593Smuzhiyun
static int
bnx2_set_link_ksettings(struct net_device *dev,
			const struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; commit only after all validation passes. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	/* Only copper (TP) and fibre ports exist on this hardware. */
	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the port type requires a remote (firmware) PHY. */
	if (cmd->base.port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);

		/* Restrict advertised modes to the selected medium; if
		 * nothing valid remains, fall back to advertising all
		 * speeds for that medium.
		 */
		if (cmd->base.port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: validate the speed/duplex combination. */
		u32 speed = cmd->base.speed;

		if (cmd->base.port == PORT_FIBRE) {
			/* Fibre only supports 1G/2.5G full duplex. */
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->base.duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			/* 1G/2.5G cannot be forced on copper here. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->base.duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->base.port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
7038*4882a593Smuzhiyun
7039*4882a593Smuzhiyun static void
bnx2_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)7040*4882a593Smuzhiyun bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7041*4882a593Smuzhiyun {
7042*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7043*4882a593Smuzhiyun
7044*4882a593Smuzhiyun strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7045*4882a593Smuzhiyun strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7046*4882a593Smuzhiyun strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7047*4882a593Smuzhiyun }
7048*4882a593Smuzhiyun
/* Size of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7056*4882a593Smuzhiyun
/* ethtool register dump.  Reads only the offset ranges known to be
 * safe and leaves the gaps between them zero-filled.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;	/* byte-granular base for repositioning p */
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of [start, end) register offsets that are readable;
	 * anything between consecutive pairs is skipped.
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* Harmless no-op: the first boundary is 0.  (Note p is a u32
	 * pointer, so this would not be a byte offset otherwise.)
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* End of a readable range: jump to the next range and
		 * reposition the output pointer by BYTE offset so dump
		 * position always mirrors the register offset.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7108*4882a593Smuzhiyun
7109*4882a593Smuzhiyun static void
bnx2_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)7110*4882a593Smuzhiyun bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7111*4882a593Smuzhiyun {
7112*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7113*4882a593Smuzhiyun
7114*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_NO_WOL) {
7115*4882a593Smuzhiyun wol->supported = 0;
7116*4882a593Smuzhiyun wol->wolopts = 0;
7117*4882a593Smuzhiyun }
7118*4882a593Smuzhiyun else {
7119*4882a593Smuzhiyun wol->supported = WAKE_MAGIC;
7120*4882a593Smuzhiyun if (bp->wol)
7121*4882a593Smuzhiyun wol->wolopts = WAKE_MAGIC;
7122*4882a593Smuzhiyun else
7123*4882a593Smuzhiyun wol->wolopts = 0;
7124*4882a593Smuzhiyun }
7125*4882a593Smuzhiyun memset(&wol->sopass, 0, sizeof(wol->sopass));
7126*4882a593Smuzhiyun }
7127*4882a593Smuzhiyun
7128*4882a593Smuzhiyun static int
bnx2_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)7129*4882a593Smuzhiyun bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7130*4882a593Smuzhiyun {
7131*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7132*4882a593Smuzhiyun
7133*4882a593Smuzhiyun if (wol->wolopts & ~WAKE_MAGIC)
7134*4882a593Smuzhiyun return -EINVAL;
7135*4882a593Smuzhiyun
7136*4882a593Smuzhiyun if (wol->wolopts & WAKE_MAGIC) {
7137*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_NO_WOL)
7138*4882a593Smuzhiyun return -EINVAL;
7139*4882a593Smuzhiyun
7140*4882a593Smuzhiyun bp->wol = 1;
7141*4882a593Smuzhiyun }
7142*4882a593Smuzhiyun else {
7143*4882a593Smuzhiyun bp->wol = 0;
7144*4882a593Smuzhiyun }
7145*4882a593Smuzhiyun
7146*4882a593Smuzhiyun device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7147*4882a593Smuzhiyun
7148*4882a593Smuzhiyun return 0;
7149*4882a593Smuzhiyun }
7150*4882a593Smuzhiyun
/* ethtool -r: restart autonegotiation. */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting negotiation is meaningless with autoneg disabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the renegotiation to firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around msleep(): sleeping is not
		 * allowed while holding a BH spinlock.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handler in the timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7196*4882a593Smuzhiyun
7197*4882a593Smuzhiyun static u32
bnx2_get_link(struct net_device * dev)7198*4882a593Smuzhiyun bnx2_get_link(struct net_device *dev)
7199*4882a593Smuzhiyun {
7200*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7201*4882a593Smuzhiyun
7202*4882a593Smuzhiyun return bp->link_up;
7203*4882a593Smuzhiyun }
7204*4882a593Smuzhiyun
7205*4882a593Smuzhiyun static int
bnx2_get_eeprom_len(struct net_device * dev)7206*4882a593Smuzhiyun bnx2_get_eeprom_len(struct net_device *dev)
7207*4882a593Smuzhiyun {
7208*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7209*4882a593Smuzhiyun
7210*4882a593Smuzhiyun if (!bp->flash_info)
7211*4882a593Smuzhiyun return 0;
7212*4882a593Smuzhiyun
7213*4882a593Smuzhiyun return (int) bp->flash_size;
7214*4882a593Smuzhiyun }
7215*4882a593Smuzhiyun
7216*4882a593Smuzhiyun static int
bnx2_get_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * eebuf)7217*4882a593Smuzhiyun bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7218*4882a593Smuzhiyun u8 *eebuf)
7219*4882a593Smuzhiyun {
7220*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7221*4882a593Smuzhiyun int rc;
7222*4882a593Smuzhiyun
7223*4882a593Smuzhiyun /* parameters already validated in ethtool_get_eeprom */
7224*4882a593Smuzhiyun
7225*4882a593Smuzhiyun rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7226*4882a593Smuzhiyun
7227*4882a593Smuzhiyun return rc;
7228*4882a593Smuzhiyun }
7229*4882a593Smuzhiyun
7230*4882a593Smuzhiyun static int
bnx2_set_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * eebuf)7231*4882a593Smuzhiyun bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7232*4882a593Smuzhiyun u8 *eebuf)
7233*4882a593Smuzhiyun {
7234*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7235*4882a593Smuzhiyun int rc;
7236*4882a593Smuzhiyun
7237*4882a593Smuzhiyun /* parameters already validated in ethtool_set_eeprom */
7238*4882a593Smuzhiyun
7239*4882a593Smuzhiyun rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7240*4882a593Smuzhiyun
7241*4882a593Smuzhiyun return rc;
7242*4882a593Smuzhiyun }
7243*4882a593Smuzhiyun
7244*4882a593Smuzhiyun static int
bnx2_get_coalesce(struct net_device * dev,struct ethtool_coalesce * coal)7245*4882a593Smuzhiyun bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7246*4882a593Smuzhiyun {
7247*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7248*4882a593Smuzhiyun
7249*4882a593Smuzhiyun memset(coal, 0, sizeof(struct ethtool_coalesce));
7250*4882a593Smuzhiyun
7251*4882a593Smuzhiyun coal->rx_coalesce_usecs = bp->rx_ticks;
7252*4882a593Smuzhiyun coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7253*4882a593Smuzhiyun coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7254*4882a593Smuzhiyun coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7255*4882a593Smuzhiyun
7256*4882a593Smuzhiyun coal->tx_coalesce_usecs = bp->tx_ticks;
7257*4882a593Smuzhiyun coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7258*4882a593Smuzhiyun coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7259*4882a593Smuzhiyun coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7260*4882a593Smuzhiyun
7261*4882a593Smuzhiyun coal->stats_block_coalesce_usecs = bp->stats_ticks;
7262*4882a593Smuzhiyun
7263*4882a593Smuzhiyun return 0;
7264*4882a593Smuzhiyun }
7265*4882a593Smuzhiyun
7266*4882a593Smuzhiyun static int
bnx2_set_coalesce(struct net_device * dev,struct ethtool_coalesce * coal)7267*4882a593Smuzhiyun bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7268*4882a593Smuzhiyun {
7269*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7270*4882a593Smuzhiyun
7271*4882a593Smuzhiyun bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7272*4882a593Smuzhiyun if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7273*4882a593Smuzhiyun
7274*4882a593Smuzhiyun bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7275*4882a593Smuzhiyun if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7276*4882a593Smuzhiyun
7277*4882a593Smuzhiyun bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7278*4882a593Smuzhiyun if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7279*4882a593Smuzhiyun
7280*4882a593Smuzhiyun bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7281*4882a593Smuzhiyun if (bp->rx_quick_cons_trip_int > 0xff)
7282*4882a593Smuzhiyun bp->rx_quick_cons_trip_int = 0xff;
7283*4882a593Smuzhiyun
7284*4882a593Smuzhiyun bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7285*4882a593Smuzhiyun if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7286*4882a593Smuzhiyun
7287*4882a593Smuzhiyun bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7288*4882a593Smuzhiyun if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7289*4882a593Smuzhiyun
7290*4882a593Smuzhiyun bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7291*4882a593Smuzhiyun if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7292*4882a593Smuzhiyun
7293*4882a593Smuzhiyun bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7294*4882a593Smuzhiyun if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7295*4882a593Smuzhiyun 0xff;
7296*4882a593Smuzhiyun
7297*4882a593Smuzhiyun bp->stats_ticks = coal->stats_block_coalesce_usecs;
7298*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7299*4882a593Smuzhiyun if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7300*4882a593Smuzhiyun bp->stats_ticks = USEC_PER_SEC;
7301*4882a593Smuzhiyun }
7302*4882a593Smuzhiyun if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7303*4882a593Smuzhiyun bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7304*4882a593Smuzhiyun bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7305*4882a593Smuzhiyun
7306*4882a593Smuzhiyun if (netif_running(bp->dev)) {
7307*4882a593Smuzhiyun bnx2_netif_stop(bp, true);
7308*4882a593Smuzhiyun bnx2_init_nic(bp, 0);
7309*4882a593Smuzhiyun bnx2_netif_start(bp, true);
7310*4882a593Smuzhiyun }
7311*4882a593Smuzhiyun
7312*4882a593Smuzhiyun return 0;
7313*4882a593Smuzhiyun }
7314*4882a593Smuzhiyun
7315*4882a593Smuzhiyun static void
bnx2_get_ringparam(struct net_device * dev,struct ethtool_ringparam * ering)7316*4882a593Smuzhiyun bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7317*4882a593Smuzhiyun {
7318*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7319*4882a593Smuzhiyun
7320*4882a593Smuzhiyun ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7321*4882a593Smuzhiyun ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7322*4882a593Smuzhiyun
7323*4882a593Smuzhiyun ering->rx_pending = bp->rx_ring_size;
7324*4882a593Smuzhiyun ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7325*4882a593Smuzhiyun
7326*4882a593Smuzhiyun ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7327*4882a593Smuzhiyun ering->tx_pending = bp->tx_ring_size;
7328*4882a593Smuzhiyun }
7329*4882a593Smuzhiyun
/* Resize the rx/tx rings.  If the interface is up this performs a
 * full stop / reallocate / restart cycle; @reset_irq additionally
 * tears down and re-establishes the IRQ and NAPI setup.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new sizes; used by the reallocation below (or by
	 * the next open if the interface is down).
	 */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		/* Each step only runs if the previous one succeeded. */
		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		/* On failure, re-enable NAPI before taking the device
		 * down via dev_close().
		 */
		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7385*4882a593Smuzhiyun
7386*4882a593Smuzhiyun static int
bnx2_set_ringparam(struct net_device * dev,struct ethtool_ringparam * ering)7387*4882a593Smuzhiyun bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7388*4882a593Smuzhiyun {
7389*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7390*4882a593Smuzhiyun int rc;
7391*4882a593Smuzhiyun
7392*4882a593Smuzhiyun if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7393*4882a593Smuzhiyun (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7394*4882a593Smuzhiyun (ering->tx_pending <= MAX_SKB_FRAGS)) {
7395*4882a593Smuzhiyun
7396*4882a593Smuzhiyun return -EINVAL;
7397*4882a593Smuzhiyun }
7398*4882a593Smuzhiyun rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7399*4882a593Smuzhiyun false);
7400*4882a593Smuzhiyun return rc;
7401*4882a593Smuzhiyun }
7402*4882a593Smuzhiyun
7403*4882a593Smuzhiyun static void
bnx2_get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)7404*4882a593Smuzhiyun bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7405*4882a593Smuzhiyun {
7406*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7407*4882a593Smuzhiyun
7408*4882a593Smuzhiyun epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7409*4882a593Smuzhiyun epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7410*4882a593Smuzhiyun epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7411*4882a593Smuzhiyun }
7412*4882a593Smuzhiyun
7413*4882a593Smuzhiyun static int
bnx2_set_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)7414*4882a593Smuzhiyun bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7415*4882a593Smuzhiyun {
7416*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7417*4882a593Smuzhiyun
7418*4882a593Smuzhiyun bp->req_flow_ctrl = 0;
7419*4882a593Smuzhiyun if (epause->rx_pause)
7420*4882a593Smuzhiyun bp->req_flow_ctrl |= FLOW_CTRL_RX;
7421*4882a593Smuzhiyun if (epause->tx_pause)
7422*4882a593Smuzhiyun bp->req_flow_ctrl |= FLOW_CTRL_TX;
7423*4882a593Smuzhiyun
7424*4882a593Smuzhiyun if (epause->autoneg) {
7425*4882a593Smuzhiyun bp->autoneg |= AUTONEG_FLOW_CTRL;
7426*4882a593Smuzhiyun }
7427*4882a593Smuzhiyun else {
7428*4882a593Smuzhiyun bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7429*4882a593Smuzhiyun }
7430*4882a593Smuzhiyun
7431*4882a593Smuzhiyun if (netif_running(dev)) {
7432*4882a593Smuzhiyun spin_lock_bh(&bp->phy_lock);
7433*4882a593Smuzhiyun bnx2_setup_phy(bp, bp->phy_port);
7434*4882a593Smuzhiyun spin_unlock_bh(&bp->phy_lock);
7435*4882a593Smuzhiyun }
7436*4882a593Smuzhiyun
7437*4882a593Smuzhiyun return 0;
7438*4882a593Smuzhiyun }
7439*4882a593Smuzhiyun
/* Names of the ethtool statistics exported via ETH_SS_STATS.  The entry
 * order must stay in lockstep with bnx2_stats_offset_arr and the
 * per-chip counter width arrays (bnx2_5706/5708_stats_len_arr) below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7491*4882a593Smuzhiyun
/* Number of exported ethtool statistics. */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a statistics_block field, expressed in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7495*4882a593Smuzhiyun
/* 32-bit word offset into the hardware statistics block for each ethtool
 * counter; same order as bnx2_stats_str_arr.  64-bit counters reference
 * their _hi word, with the _lo word immediately following.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7545*4882a593Smuzhiyun
/* Per-counter widths in bytes (8 = 64-bit hi/lo pair, 4 = 32-bit,
 * 0 = not available) for 5706 A0-A2 and 5708 A0 chips.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7556*4882a593Smuzhiyun
/* Per-counter widths for all later chip revisions; only
 * stat_IfHCInBadOctets remains skipped (width 0).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7564*4882a593Smuzhiyun
/* Number of ethtool self-tests. */
#define BNX2_NUM_TESTS 6

/* Names for the ETH_SS_TEST string set; the order matches the buf[]
 * result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7577*4882a593Smuzhiyun
7578*4882a593Smuzhiyun static int
bnx2_get_sset_count(struct net_device * dev,int sset)7579*4882a593Smuzhiyun bnx2_get_sset_count(struct net_device *dev, int sset)
7580*4882a593Smuzhiyun {
7581*4882a593Smuzhiyun switch (sset) {
7582*4882a593Smuzhiyun case ETH_SS_TEST:
7583*4882a593Smuzhiyun return BNX2_NUM_TESTS;
7584*4882a593Smuzhiyun case ETH_SS_STATS:
7585*4882a593Smuzhiyun return BNX2_NUM_STATS;
7586*4882a593Smuzhiyun default:
7587*4882a593Smuzhiyun return -EOPNOTSUPP;
7588*4882a593Smuzhiyun }
7589*4882a593Smuzhiyun }
7590*4882a593Smuzhiyun
/* ethtool self-test handler.  Offline tests (register, memory, loopback)
 * take the NIC out of service and reset the chip into diagnostic mode;
 * online tests (nvram, interrupt, link) run without disturbing traffic.
 * buf[] receives one result per test (0 = pass).
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diag mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or shut the chip down if the
		 * interface was closed while we were testing.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up (up to ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7645*4882a593Smuzhiyun
7646*4882a593Smuzhiyun static void
bnx2_get_strings(struct net_device * dev,u32 stringset,u8 * buf)7647*4882a593Smuzhiyun bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7648*4882a593Smuzhiyun {
7649*4882a593Smuzhiyun switch (stringset) {
7650*4882a593Smuzhiyun case ETH_SS_STATS:
7651*4882a593Smuzhiyun memcpy(buf, bnx2_stats_str_arr,
7652*4882a593Smuzhiyun sizeof(bnx2_stats_str_arr));
7653*4882a593Smuzhiyun break;
7654*4882a593Smuzhiyun case ETH_SS_TEST:
7655*4882a593Smuzhiyun memcpy(buf, bnx2_tests_str_arr,
7656*4882a593Smuzhiyun sizeof(bnx2_tests_str_arr));
7657*4882a593Smuzhiyun break;
7658*4882a593Smuzhiyun }
7659*4882a593Smuzhiyun }
7660*4882a593Smuzhiyun
7661*4882a593Smuzhiyun static void
bnx2_get_ethtool_stats(struct net_device * dev,struct ethtool_stats * stats,u64 * buf)7662*4882a593Smuzhiyun bnx2_get_ethtool_stats(struct net_device *dev,
7663*4882a593Smuzhiyun struct ethtool_stats *stats, u64 *buf)
7664*4882a593Smuzhiyun {
7665*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7666*4882a593Smuzhiyun int i;
7667*4882a593Smuzhiyun u32 *hw_stats = (u32 *) bp->stats_blk;
7668*4882a593Smuzhiyun u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7669*4882a593Smuzhiyun u8 *stats_len_arr = NULL;
7670*4882a593Smuzhiyun
7671*4882a593Smuzhiyun if (!hw_stats) {
7672*4882a593Smuzhiyun memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7673*4882a593Smuzhiyun return;
7674*4882a593Smuzhiyun }
7675*4882a593Smuzhiyun
7676*4882a593Smuzhiyun if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7677*4882a593Smuzhiyun (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7678*4882a593Smuzhiyun (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7679*4882a593Smuzhiyun (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7680*4882a593Smuzhiyun stats_len_arr = bnx2_5706_stats_len_arr;
7681*4882a593Smuzhiyun else
7682*4882a593Smuzhiyun stats_len_arr = bnx2_5708_stats_len_arr;
7683*4882a593Smuzhiyun
7684*4882a593Smuzhiyun for (i = 0; i < BNX2_NUM_STATS; i++) {
7685*4882a593Smuzhiyun unsigned long offset;
7686*4882a593Smuzhiyun
7687*4882a593Smuzhiyun if (stats_len_arr[i] == 0) {
7688*4882a593Smuzhiyun /* skip this counter */
7689*4882a593Smuzhiyun buf[i] = 0;
7690*4882a593Smuzhiyun continue;
7691*4882a593Smuzhiyun }
7692*4882a593Smuzhiyun
7693*4882a593Smuzhiyun offset = bnx2_stats_offset_arr[i];
7694*4882a593Smuzhiyun if (stats_len_arr[i] == 4) {
7695*4882a593Smuzhiyun /* 4-byte counter */
7696*4882a593Smuzhiyun buf[i] = (u64) *(hw_stats + offset) +
7697*4882a593Smuzhiyun *(temp_stats + offset);
7698*4882a593Smuzhiyun continue;
7699*4882a593Smuzhiyun }
7700*4882a593Smuzhiyun /* 8-byte counter */
7701*4882a593Smuzhiyun buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7702*4882a593Smuzhiyun *(hw_stats + offset + 1) +
7703*4882a593Smuzhiyun (((u64) *(temp_stats + offset)) << 32) +
7704*4882a593Smuzhiyun *(temp_stats + offset + 1);
7705*4882a593Smuzhiyun }
7706*4882a593Smuzhiyun }
7707*4882a593Smuzhiyun
/* ethtool LED-identify (port blink) handler.
 * ACTIVE saves the current LED config and hands the LEDs to MAC control;
 * ON/OFF drive the LEDs via the EMAC override bits; INACTIVE restores
 * the saved configuration.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save LED mode so ETHTOOL_ID_INACTIVE can restore it. */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force all speed/traffic LEDs on via override bits. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		/* Override with no LED bits set = all off. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7740*4882a593Smuzhiyun
/* ndo_set_features handler: keeps vlan_features and the chip's VLAN-tag
 * stripping mode in sync with the requested netdev feature flags.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* If the requested VLAN RX stripping state differs from what the
	 * chip is doing, reprogram the RX mode with traffic stopped and
	 * notify the firmware.  Returning 1 tells the core the driver
	 * already applied the features itself.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7765*4882a593Smuzhiyun
bnx2_get_channels(struct net_device * dev,struct ethtool_channels * channels)7766*4882a593Smuzhiyun static void bnx2_get_channels(struct net_device *dev,
7767*4882a593Smuzhiyun struct ethtool_channels *channels)
7768*4882a593Smuzhiyun {
7769*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7770*4882a593Smuzhiyun u32 max_rx_rings = 1;
7771*4882a593Smuzhiyun u32 max_tx_rings = 1;
7772*4882a593Smuzhiyun
7773*4882a593Smuzhiyun if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7774*4882a593Smuzhiyun max_rx_rings = RX_MAX_RINGS;
7775*4882a593Smuzhiyun max_tx_rings = TX_MAX_RINGS;
7776*4882a593Smuzhiyun }
7777*4882a593Smuzhiyun
7778*4882a593Smuzhiyun channels->max_rx = max_rx_rings;
7779*4882a593Smuzhiyun channels->max_tx = max_tx_rings;
7780*4882a593Smuzhiyun channels->max_other = 0;
7781*4882a593Smuzhiyun channels->max_combined = 0;
7782*4882a593Smuzhiyun channels->rx_count = bp->num_rx_rings;
7783*4882a593Smuzhiyun channels->tx_count = bp->num_tx_rings;
7784*4882a593Smuzhiyun channels->other_count = 0;
7785*4882a593Smuzhiyun channels->combined_count = 0;
7786*4882a593Smuzhiyun }
7787*4882a593Smuzhiyun
bnx2_set_channels(struct net_device * dev,struct ethtool_channels * channels)7788*4882a593Smuzhiyun static int bnx2_set_channels(struct net_device *dev,
7789*4882a593Smuzhiyun struct ethtool_channels *channels)
7790*4882a593Smuzhiyun {
7791*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7792*4882a593Smuzhiyun u32 max_rx_rings = 1;
7793*4882a593Smuzhiyun u32 max_tx_rings = 1;
7794*4882a593Smuzhiyun int rc = 0;
7795*4882a593Smuzhiyun
7796*4882a593Smuzhiyun if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7797*4882a593Smuzhiyun max_rx_rings = RX_MAX_RINGS;
7798*4882a593Smuzhiyun max_tx_rings = TX_MAX_RINGS;
7799*4882a593Smuzhiyun }
7800*4882a593Smuzhiyun if (channels->rx_count > max_rx_rings ||
7801*4882a593Smuzhiyun channels->tx_count > max_tx_rings)
7802*4882a593Smuzhiyun return -EINVAL;
7803*4882a593Smuzhiyun
7804*4882a593Smuzhiyun bp->num_req_rx_rings = channels->rx_count;
7805*4882a593Smuzhiyun bp->num_req_tx_rings = channels->tx_count;
7806*4882a593Smuzhiyun
7807*4882a593Smuzhiyun if (netif_running(dev))
7808*4882a593Smuzhiyun rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7809*4882a593Smuzhiyun bp->tx_ring_size, true);
7810*4882a593Smuzhiyun
7811*4882a593Smuzhiyun return rc;
7812*4882a593Smuzhiyun }
7813*4882a593Smuzhiyun
/* ethtool operations table for the bnx2 netdev. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};
7846*4882a593Smuzhiyun
/* Called with rtnl_lock */
/* ndo_do_ioctl handler: implements the MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) through the driver's PHY accessors.
 * Register access is refused when the PHY is managed remotely
 * (BNX2_PHY_FLAG_REMOTE_PHY_CAP) or when the interface is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* deliberate: GMIIPHY also returns the register value */
		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes all MDIO accesses. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7897*4882a593Smuzhiyun
7898*4882a593Smuzhiyun /* Called with rtnl_lock */
7899*4882a593Smuzhiyun static int
bnx2_change_mac_addr(struct net_device * dev,void * p)7900*4882a593Smuzhiyun bnx2_change_mac_addr(struct net_device *dev, void *p)
7901*4882a593Smuzhiyun {
7902*4882a593Smuzhiyun struct sockaddr *addr = p;
7903*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7904*4882a593Smuzhiyun
7905*4882a593Smuzhiyun if (!is_valid_ether_addr(addr->sa_data))
7906*4882a593Smuzhiyun return -EADDRNOTAVAIL;
7907*4882a593Smuzhiyun
7908*4882a593Smuzhiyun memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7909*4882a593Smuzhiyun if (netif_running(dev))
7910*4882a593Smuzhiyun bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7911*4882a593Smuzhiyun
7912*4882a593Smuzhiyun return 0;
7913*4882a593Smuzhiyun }
7914*4882a593Smuzhiyun
7915*4882a593Smuzhiyun /* Called with rtnl_lock */
7916*4882a593Smuzhiyun static int
bnx2_change_mtu(struct net_device * dev,int new_mtu)7917*4882a593Smuzhiyun bnx2_change_mtu(struct net_device *dev, int new_mtu)
7918*4882a593Smuzhiyun {
7919*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
7920*4882a593Smuzhiyun
7921*4882a593Smuzhiyun dev->mtu = new_mtu;
7922*4882a593Smuzhiyun return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7923*4882a593Smuzhiyun false);
7924*4882a593Smuzhiyun }
7925*4882a593Smuzhiyun
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every interrupt vector with its IRQ disabled so
 * facilities like netconsole can make progress when normal interrupt
 * delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7942*4882a593Smuzhiyun
7943*4882a593Smuzhiyun static void
bnx2_get_5709_media(struct bnx2 * bp)7944*4882a593Smuzhiyun bnx2_get_5709_media(struct bnx2 *bp)
7945*4882a593Smuzhiyun {
7946*4882a593Smuzhiyun u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7947*4882a593Smuzhiyun u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7948*4882a593Smuzhiyun u32 strap;
7949*4882a593Smuzhiyun
7950*4882a593Smuzhiyun if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7951*4882a593Smuzhiyun return;
7952*4882a593Smuzhiyun else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7953*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7954*4882a593Smuzhiyun return;
7955*4882a593Smuzhiyun }
7956*4882a593Smuzhiyun
7957*4882a593Smuzhiyun if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7958*4882a593Smuzhiyun strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7959*4882a593Smuzhiyun else
7960*4882a593Smuzhiyun strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7961*4882a593Smuzhiyun
7962*4882a593Smuzhiyun if (bp->func == 0) {
7963*4882a593Smuzhiyun switch (strap) {
7964*4882a593Smuzhiyun case 0x4:
7965*4882a593Smuzhiyun case 0x5:
7966*4882a593Smuzhiyun case 0x6:
7967*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7968*4882a593Smuzhiyun return;
7969*4882a593Smuzhiyun }
7970*4882a593Smuzhiyun } else {
7971*4882a593Smuzhiyun switch (strap) {
7972*4882a593Smuzhiyun case 0x1:
7973*4882a593Smuzhiyun case 0x2:
7974*4882a593Smuzhiyun case 0x4:
7975*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7976*4882a593Smuzhiyun return;
7977*4882a593Smuzhiyun }
7978*4882a593Smuzhiyun }
7979*4882a593Smuzhiyun }
7980*4882a593Smuzhiyun
7981*4882a593Smuzhiyun static void
bnx2_get_pci_speed(struct bnx2 * bp)7982*4882a593Smuzhiyun bnx2_get_pci_speed(struct bnx2 *bp)
7983*4882a593Smuzhiyun {
7984*4882a593Smuzhiyun u32 reg;
7985*4882a593Smuzhiyun
7986*4882a593Smuzhiyun reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7987*4882a593Smuzhiyun if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7988*4882a593Smuzhiyun u32 clkreg;
7989*4882a593Smuzhiyun
7990*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_PCIX;
7991*4882a593Smuzhiyun
7992*4882a593Smuzhiyun clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7993*4882a593Smuzhiyun
7994*4882a593Smuzhiyun clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7995*4882a593Smuzhiyun switch (clkreg) {
7996*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7997*4882a593Smuzhiyun bp->bus_speed_mhz = 133;
7998*4882a593Smuzhiyun break;
7999*4882a593Smuzhiyun
8000*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
8001*4882a593Smuzhiyun bp->bus_speed_mhz = 100;
8002*4882a593Smuzhiyun break;
8003*4882a593Smuzhiyun
8004*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
8005*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8006*4882a593Smuzhiyun bp->bus_speed_mhz = 66;
8007*4882a593Smuzhiyun break;
8008*4882a593Smuzhiyun
8009*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8010*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8011*4882a593Smuzhiyun bp->bus_speed_mhz = 50;
8012*4882a593Smuzhiyun break;
8013*4882a593Smuzhiyun
8014*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8015*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8016*4882a593Smuzhiyun case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8017*4882a593Smuzhiyun bp->bus_speed_mhz = 33;
8018*4882a593Smuzhiyun break;
8019*4882a593Smuzhiyun }
8020*4882a593Smuzhiyun }
8021*4882a593Smuzhiyun else {
8022*4882a593Smuzhiyun if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8023*4882a593Smuzhiyun bp->bus_speed_mhz = 66;
8024*4882a593Smuzhiyun else
8025*4882a593Smuzhiyun bp->bus_speed_mhz = 33;
8026*4882a593Smuzhiyun }
8027*4882a593Smuzhiyun
8028*4882a593Smuzhiyun if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8029*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_PCI_32BIT;
8030*4882a593Smuzhiyun
8031*4882a593Smuzhiyun }
8032*4882a593Smuzhiyun
/* Read the firmware version string out of the VPD area in NVRAM and
 * append it to bp->fw_version.  Best effort: any parse failure simply
 * leaves fw_version untouched.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* First half of the buffer receives the byte-fixed copy, the
	 * second half holds the raw NVRAM read.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD with bytes reversed within each 32-bit
	 * word; undo that swap into the first half of the buffer.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the VPD read-only section. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only proceed for the expected manufacturer ID ("1028"). */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* The vendor-specific V0 keyword carries the version string. */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Append the version followed by a space separator; later code
	 * presumably appends more after it (TODO confirm against the
	 * fw_version users elsewhere in this file).
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8100*4882a593Smuzhiyun
8101*4882a593Smuzhiyun static int
bnx2_init_board(struct pci_dev * pdev,struct net_device * dev)8102*4882a593Smuzhiyun bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8103*4882a593Smuzhiyun {
8104*4882a593Smuzhiyun struct bnx2 *bp;
8105*4882a593Smuzhiyun int rc, i, j;
8106*4882a593Smuzhiyun u32 reg;
8107*4882a593Smuzhiyun u64 dma_mask, persist_dma_mask;
8108*4882a593Smuzhiyun int err;
8109*4882a593Smuzhiyun
8110*4882a593Smuzhiyun SET_NETDEV_DEV(dev, &pdev->dev);
8111*4882a593Smuzhiyun bp = netdev_priv(dev);
8112*4882a593Smuzhiyun
8113*4882a593Smuzhiyun bp->flags = 0;
8114*4882a593Smuzhiyun bp->phy_flags = 0;
8115*4882a593Smuzhiyun
8116*4882a593Smuzhiyun bp->temp_stats_blk =
8117*4882a593Smuzhiyun kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8118*4882a593Smuzhiyun
8119*4882a593Smuzhiyun if (!bp->temp_stats_blk) {
8120*4882a593Smuzhiyun rc = -ENOMEM;
8121*4882a593Smuzhiyun goto err_out;
8122*4882a593Smuzhiyun }
8123*4882a593Smuzhiyun
8124*4882a593Smuzhiyun /* enable device (incl. PCI PM wakeup), and bus-mastering */
8125*4882a593Smuzhiyun rc = pci_enable_device(pdev);
8126*4882a593Smuzhiyun if (rc) {
8127*4882a593Smuzhiyun dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8128*4882a593Smuzhiyun goto err_out;
8129*4882a593Smuzhiyun }
8130*4882a593Smuzhiyun
8131*4882a593Smuzhiyun if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8132*4882a593Smuzhiyun dev_err(&pdev->dev,
8133*4882a593Smuzhiyun "Cannot find PCI device base address, aborting\n");
8134*4882a593Smuzhiyun rc = -ENODEV;
8135*4882a593Smuzhiyun goto err_out_disable;
8136*4882a593Smuzhiyun }
8137*4882a593Smuzhiyun
8138*4882a593Smuzhiyun rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8139*4882a593Smuzhiyun if (rc) {
8140*4882a593Smuzhiyun dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8141*4882a593Smuzhiyun goto err_out_disable;
8142*4882a593Smuzhiyun }
8143*4882a593Smuzhiyun
8144*4882a593Smuzhiyun pci_set_master(pdev);
8145*4882a593Smuzhiyun
8146*4882a593Smuzhiyun bp->pm_cap = pdev->pm_cap;
8147*4882a593Smuzhiyun if (bp->pm_cap == 0) {
8148*4882a593Smuzhiyun dev_err(&pdev->dev,
8149*4882a593Smuzhiyun "Cannot find power management capability, aborting\n");
8150*4882a593Smuzhiyun rc = -EIO;
8151*4882a593Smuzhiyun goto err_out_release;
8152*4882a593Smuzhiyun }
8153*4882a593Smuzhiyun
8154*4882a593Smuzhiyun bp->dev = dev;
8155*4882a593Smuzhiyun bp->pdev = pdev;
8156*4882a593Smuzhiyun
8157*4882a593Smuzhiyun spin_lock_init(&bp->phy_lock);
8158*4882a593Smuzhiyun spin_lock_init(&bp->indirect_lock);
8159*4882a593Smuzhiyun #ifdef BCM_CNIC
8160*4882a593Smuzhiyun mutex_init(&bp->cnic_lock);
8161*4882a593Smuzhiyun #endif
8162*4882a593Smuzhiyun INIT_WORK(&bp->reset_task, bnx2_reset_task);
8163*4882a593Smuzhiyun
8164*4882a593Smuzhiyun bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8165*4882a593Smuzhiyun TX_MAX_TSS_RINGS + 1));
8166*4882a593Smuzhiyun if (!bp->regview) {
8167*4882a593Smuzhiyun dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8168*4882a593Smuzhiyun rc = -ENOMEM;
8169*4882a593Smuzhiyun goto err_out_release;
8170*4882a593Smuzhiyun }
8171*4882a593Smuzhiyun
8172*4882a593Smuzhiyun /* Configure byte swap and enable write to the reg_window registers.
8173*4882a593Smuzhiyun * Rely on CPU to do target byte swapping on big endian systems
8174*4882a593Smuzhiyun * The chip's target access swapping will not swap all accesses
8175*4882a593Smuzhiyun */
8176*4882a593Smuzhiyun BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8177*4882a593Smuzhiyun BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8178*4882a593Smuzhiyun BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8179*4882a593Smuzhiyun
8180*4882a593Smuzhiyun bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8181*4882a593Smuzhiyun
8182*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8183*4882a593Smuzhiyun if (!pci_is_pcie(pdev)) {
8184*4882a593Smuzhiyun dev_err(&pdev->dev, "Not PCIE, aborting\n");
8185*4882a593Smuzhiyun rc = -EIO;
8186*4882a593Smuzhiyun goto err_out_unmap;
8187*4882a593Smuzhiyun }
8188*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_PCIE;
8189*4882a593Smuzhiyun if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8190*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8191*4882a593Smuzhiyun
8192*4882a593Smuzhiyun /* AER (Advanced Error Reporting) hooks */
8193*4882a593Smuzhiyun err = pci_enable_pcie_error_reporting(pdev);
8194*4882a593Smuzhiyun if (!err)
8195*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_AER_ENABLED;
8196*4882a593Smuzhiyun
8197*4882a593Smuzhiyun } else {
8198*4882a593Smuzhiyun bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8199*4882a593Smuzhiyun if (bp->pcix_cap == 0) {
8200*4882a593Smuzhiyun dev_err(&pdev->dev,
8201*4882a593Smuzhiyun "Cannot find PCIX capability, aborting\n");
8202*4882a593Smuzhiyun rc = -EIO;
8203*4882a593Smuzhiyun goto err_out_unmap;
8204*4882a593Smuzhiyun }
8205*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_BROKEN_STATS;
8206*4882a593Smuzhiyun }
8207*4882a593Smuzhiyun
8208*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8209*4882a593Smuzhiyun BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8210*4882a593Smuzhiyun if (pdev->msix_cap)
8211*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_MSIX_CAP;
8212*4882a593Smuzhiyun }
8213*4882a593Smuzhiyun
8214*4882a593Smuzhiyun if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8215*4882a593Smuzhiyun BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8216*4882a593Smuzhiyun if (pdev->msi_cap)
8217*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_MSI_CAP;
8218*4882a593Smuzhiyun }
8219*4882a593Smuzhiyun
8220*4882a593Smuzhiyun /* 5708 cannot support DMA addresses > 40-bit. */
8221*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8222*4882a593Smuzhiyun persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8223*4882a593Smuzhiyun else
8224*4882a593Smuzhiyun persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8225*4882a593Smuzhiyun
8226*4882a593Smuzhiyun /* Configure DMA attributes. */
8227*4882a593Smuzhiyun if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8228*4882a593Smuzhiyun dev->features |= NETIF_F_HIGHDMA;
8229*4882a593Smuzhiyun rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8230*4882a593Smuzhiyun if (rc) {
8231*4882a593Smuzhiyun dev_err(&pdev->dev,
8232*4882a593Smuzhiyun "pci_set_consistent_dma_mask failed, aborting\n");
8233*4882a593Smuzhiyun goto err_out_unmap;
8234*4882a593Smuzhiyun }
8235*4882a593Smuzhiyun } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8236*4882a593Smuzhiyun dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8237*4882a593Smuzhiyun goto err_out_unmap;
8238*4882a593Smuzhiyun }
8239*4882a593Smuzhiyun
8240*4882a593Smuzhiyun if (!(bp->flags & BNX2_FLAG_PCIE))
8241*4882a593Smuzhiyun bnx2_get_pci_speed(bp);
8242*4882a593Smuzhiyun
8243*4882a593Smuzhiyun /* 5706A0 may falsely detect SERR and PERR. */
8244*4882a593Smuzhiyun if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8245*4882a593Smuzhiyun reg = BNX2_RD(bp, PCI_COMMAND);
8246*4882a593Smuzhiyun reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8247*4882a593Smuzhiyun BNX2_WR(bp, PCI_COMMAND, reg);
8248*4882a593Smuzhiyun } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8249*4882a593Smuzhiyun !(bp->flags & BNX2_FLAG_PCIX)) {
8250*4882a593Smuzhiyun dev_err(&pdev->dev,
8251*4882a593Smuzhiyun "5706 A1 can only be used in a PCIX bus, aborting\n");
8252*4882a593Smuzhiyun rc = -EPERM;
8253*4882a593Smuzhiyun goto err_out_unmap;
8254*4882a593Smuzhiyun }
8255*4882a593Smuzhiyun
8256*4882a593Smuzhiyun bnx2_init_nvram(bp);
8257*4882a593Smuzhiyun
8258*4882a593Smuzhiyun reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8259*4882a593Smuzhiyun
8260*4882a593Smuzhiyun if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8261*4882a593Smuzhiyun bp->func = 1;
8262*4882a593Smuzhiyun
8263*4882a593Smuzhiyun if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8264*4882a593Smuzhiyun BNX2_SHM_HDR_SIGNATURE_SIG) {
8265*4882a593Smuzhiyun u32 off = bp->func << 2;
8266*4882a593Smuzhiyun
8267*4882a593Smuzhiyun bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8268*4882a593Smuzhiyun } else
8269*4882a593Smuzhiyun bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8270*4882a593Smuzhiyun
8271*4882a593Smuzhiyun /* Get the permanent MAC address. First we need to make sure the
8272*4882a593Smuzhiyun * firmware is actually running.
8273*4882a593Smuzhiyun */
8274*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8275*4882a593Smuzhiyun
8276*4882a593Smuzhiyun if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8277*4882a593Smuzhiyun BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8278*4882a593Smuzhiyun dev_err(&pdev->dev, "Firmware not running, aborting\n");
8279*4882a593Smuzhiyun rc = -ENODEV;
8280*4882a593Smuzhiyun goto err_out_unmap;
8281*4882a593Smuzhiyun }
8282*4882a593Smuzhiyun
8283*4882a593Smuzhiyun bnx2_read_vpd_fw_ver(bp);
8284*4882a593Smuzhiyun
8285*4882a593Smuzhiyun j = strlen(bp->fw_version);
8286*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8287*4882a593Smuzhiyun for (i = 0; i < 3 && j < 24; i++) {
8288*4882a593Smuzhiyun u8 num, k, skip0;
8289*4882a593Smuzhiyun
8290*4882a593Smuzhiyun if (i == 0) {
8291*4882a593Smuzhiyun bp->fw_version[j++] = 'b';
8292*4882a593Smuzhiyun bp->fw_version[j++] = 'c';
8293*4882a593Smuzhiyun bp->fw_version[j++] = ' ';
8294*4882a593Smuzhiyun }
8295*4882a593Smuzhiyun num = (u8) (reg >> (24 - (i * 8)));
8296*4882a593Smuzhiyun for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8297*4882a593Smuzhiyun if (num >= k || !skip0 || k == 1) {
8298*4882a593Smuzhiyun bp->fw_version[j++] = (num / k) + '0';
8299*4882a593Smuzhiyun skip0 = 0;
8300*4882a593Smuzhiyun }
8301*4882a593Smuzhiyun }
8302*4882a593Smuzhiyun if (i != 2)
8303*4882a593Smuzhiyun bp->fw_version[j++] = '.';
8304*4882a593Smuzhiyun }
8305*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8306*4882a593Smuzhiyun if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8307*4882a593Smuzhiyun bp->wol = 1;
8308*4882a593Smuzhiyun
8309*4882a593Smuzhiyun if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8310*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_ASF_ENABLE;
8311*4882a593Smuzhiyun
8312*4882a593Smuzhiyun for (i = 0; i < 30; i++) {
8313*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8314*4882a593Smuzhiyun if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8315*4882a593Smuzhiyun break;
8316*4882a593Smuzhiyun msleep(10);
8317*4882a593Smuzhiyun }
8318*4882a593Smuzhiyun }
8319*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8320*4882a593Smuzhiyun reg &= BNX2_CONDITION_MFW_RUN_MASK;
8321*4882a593Smuzhiyun if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8322*4882a593Smuzhiyun reg != BNX2_CONDITION_MFW_RUN_NONE) {
8323*4882a593Smuzhiyun u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8324*4882a593Smuzhiyun
8325*4882a593Smuzhiyun if (j < 32)
8326*4882a593Smuzhiyun bp->fw_version[j++] = ' ';
8327*4882a593Smuzhiyun for (i = 0; i < 3 && j < 28; i++) {
8328*4882a593Smuzhiyun reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8329*4882a593Smuzhiyun reg = be32_to_cpu(reg);
8330*4882a593Smuzhiyun memcpy(&bp->fw_version[j], ®, 4);
8331*4882a593Smuzhiyun j += 4;
8332*4882a593Smuzhiyun }
8333*4882a593Smuzhiyun }
8334*4882a593Smuzhiyun
8335*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8336*4882a593Smuzhiyun bp->mac_addr[0] = (u8) (reg >> 8);
8337*4882a593Smuzhiyun bp->mac_addr[1] = (u8) reg;
8338*4882a593Smuzhiyun
8339*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8340*4882a593Smuzhiyun bp->mac_addr[2] = (u8) (reg >> 24);
8341*4882a593Smuzhiyun bp->mac_addr[3] = (u8) (reg >> 16);
8342*4882a593Smuzhiyun bp->mac_addr[4] = (u8) (reg >> 8);
8343*4882a593Smuzhiyun bp->mac_addr[5] = (u8) reg;
8344*4882a593Smuzhiyun
8345*4882a593Smuzhiyun bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8346*4882a593Smuzhiyun bnx2_set_rx_ring_size(bp, 255);
8347*4882a593Smuzhiyun
8348*4882a593Smuzhiyun bp->tx_quick_cons_trip_int = 2;
8349*4882a593Smuzhiyun bp->tx_quick_cons_trip = 20;
8350*4882a593Smuzhiyun bp->tx_ticks_int = 18;
8351*4882a593Smuzhiyun bp->tx_ticks = 80;
8352*4882a593Smuzhiyun
8353*4882a593Smuzhiyun bp->rx_quick_cons_trip_int = 2;
8354*4882a593Smuzhiyun bp->rx_quick_cons_trip = 12;
8355*4882a593Smuzhiyun bp->rx_ticks_int = 18;
8356*4882a593Smuzhiyun bp->rx_ticks = 18;
8357*4882a593Smuzhiyun
8358*4882a593Smuzhiyun bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8359*4882a593Smuzhiyun
8360*4882a593Smuzhiyun bp->current_interval = BNX2_TIMER_INTERVAL;
8361*4882a593Smuzhiyun
8362*4882a593Smuzhiyun bp->phy_addr = 1;
8363*4882a593Smuzhiyun
8364*4882a593Smuzhiyun /* allocate stats_blk */
8365*4882a593Smuzhiyun rc = bnx2_alloc_stats_blk(dev);
8366*4882a593Smuzhiyun if (rc)
8367*4882a593Smuzhiyun goto err_out_unmap;
8368*4882a593Smuzhiyun
8369*4882a593Smuzhiyun /* Disable WOL support if we are running on a SERDES chip. */
8370*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8371*4882a593Smuzhiyun bnx2_get_5709_media(bp);
8372*4882a593Smuzhiyun else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8373*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8374*4882a593Smuzhiyun
8375*4882a593Smuzhiyun bp->phy_port = PORT_TP;
8376*4882a593Smuzhiyun if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8377*4882a593Smuzhiyun bp->phy_port = PORT_FIBRE;
8378*4882a593Smuzhiyun reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8379*4882a593Smuzhiyun if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8380*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_NO_WOL;
8381*4882a593Smuzhiyun bp->wol = 0;
8382*4882a593Smuzhiyun }
8383*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8384*4882a593Smuzhiyun /* Don't do parallel detect on this board because of
8385*4882a593Smuzhiyun * some board problems. The link will not go down
8386*4882a593Smuzhiyun * if we do parallel detect.
8387*4882a593Smuzhiyun */
8388*4882a593Smuzhiyun if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8389*4882a593Smuzhiyun pdev->subsystem_device == 0x310c)
8390*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8391*4882a593Smuzhiyun } else {
8392*4882a593Smuzhiyun bp->phy_addr = 2;
8393*4882a593Smuzhiyun if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8394*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8395*4882a593Smuzhiyun }
8396*4882a593Smuzhiyun } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8397*4882a593Smuzhiyun BNX2_CHIP(bp) == BNX2_CHIP_5708)
8398*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8399*4882a593Smuzhiyun else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8400*4882a593Smuzhiyun (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8401*4882a593Smuzhiyun BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8402*4882a593Smuzhiyun bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8403*4882a593Smuzhiyun
8404*4882a593Smuzhiyun bnx2_init_fw_cap(bp);
8405*4882a593Smuzhiyun
8406*4882a593Smuzhiyun if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8407*4882a593Smuzhiyun (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8408*4882a593Smuzhiyun (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8409*4882a593Smuzhiyun !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8410*4882a593Smuzhiyun bp->flags |= BNX2_FLAG_NO_WOL;
8411*4882a593Smuzhiyun bp->wol = 0;
8412*4882a593Smuzhiyun }
8413*4882a593Smuzhiyun
8414*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_NO_WOL)
8415*4882a593Smuzhiyun device_set_wakeup_capable(&bp->pdev->dev, false);
8416*4882a593Smuzhiyun else
8417*4882a593Smuzhiyun device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8418*4882a593Smuzhiyun
8419*4882a593Smuzhiyun if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8420*4882a593Smuzhiyun bp->tx_quick_cons_trip_int =
8421*4882a593Smuzhiyun bp->tx_quick_cons_trip;
8422*4882a593Smuzhiyun bp->tx_ticks_int = bp->tx_ticks;
8423*4882a593Smuzhiyun bp->rx_quick_cons_trip_int =
8424*4882a593Smuzhiyun bp->rx_quick_cons_trip;
8425*4882a593Smuzhiyun bp->rx_ticks_int = bp->rx_ticks;
8426*4882a593Smuzhiyun bp->comp_prod_trip_int = bp->comp_prod_trip;
8427*4882a593Smuzhiyun bp->com_ticks_int = bp->com_ticks;
8428*4882a593Smuzhiyun bp->cmd_ticks_int = bp->cmd_ticks;
8429*4882a593Smuzhiyun }
8430*4882a593Smuzhiyun
8431*4882a593Smuzhiyun /* Disable MSI on 5706 if AMD 8132 bridge is found.
8432*4882a593Smuzhiyun *
8433*4882a593Smuzhiyun * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8434*4882a593Smuzhiyun * with byte enables disabled on the unused 32-bit word. This is legal
8435*4882a593Smuzhiyun * but causes problems on the AMD 8132 which will eventually stop
8436*4882a593Smuzhiyun * responding after a while.
8437*4882a593Smuzhiyun *
8438*4882a593Smuzhiyun * AMD believes this incompatibility is unique to the 5706, and
8439*4882a593Smuzhiyun * prefers to locally disable MSI rather than globally disabling it.
8440*4882a593Smuzhiyun */
8441*4882a593Smuzhiyun if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8442*4882a593Smuzhiyun struct pci_dev *amd_8132 = NULL;
8443*4882a593Smuzhiyun
8444*4882a593Smuzhiyun while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8445*4882a593Smuzhiyun PCI_DEVICE_ID_AMD_8132_BRIDGE,
8446*4882a593Smuzhiyun amd_8132))) {
8447*4882a593Smuzhiyun
8448*4882a593Smuzhiyun if (amd_8132->revision >= 0x10 &&
8449*4882a593Smuzhiyun amd_8132->revision <= 0x13) {
8450*4882a593Smuzhiyun disable_msi = 1;
8451*4882a593Smuzhiyun pci_dev_put(amd_8132);
8452*4882a593Smuzhiyun break;
8453*4882a593Smuzhiyun }
8454*4882a593Smuzhiyun }
8455*4882a593Smuzhiyun }
8456*4882a593Smuzhiyun
8457*4882a593Smuzhiyun bnx2_set_default_link(bp);
8458*4882a593Smuzhiyun bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8459*4882a593Smuzhiyun
8460*4882a593Smuzhiyun timer_setup(&bp->timer, bnx2_timer, 0);
8461*4882a593Smuzhiyun bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8462*4882a593Smuzhiyun
8463*4882a593Smuzhiyun #ifdef BCM_CNIC
8464*4882a593Smuzhiyun if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8465*4882a593Smuzhiyun bp->cnic_eth_dev.max_iscsi_conn =
8466*4882a593Smuzhiyun (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8467*4882a593Smuzhiyun BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8468*4882a593Smuzhiyun bp->cnic_probe = bnx2_cnic_probe;
8469*4882a593Smuzhiyun #endif
8470*4882a593Smuzhiyun pci_save_state(pdev);
8471*4882a593Smuzhiyun
8472*4882a593Smuzhiyun return 0;
8473*4882a593Smuzhiyun
8474*4882a593Smuzhiyun err_out_unmap:
8475*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8476*4882a593Smuzhiyun pci_disable_pcie_error_reporting(pdev);
8477*4882a593Smuzhiyun bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8478*4882a593Smuzhiyun }
8479*4882a593Smuzhiyun
8480*4882a593Smuzhiyun pci_iounmap(pdev, bp->regview);
8481*4882a593Smuzhiyun bp->regview = NULL;
8482*4882a593Smuzhiyun
8483*4882a593Smuzhiyun err_out_release:
8484*4882a593Smuzhiyun pci_release_regions(pdev);
8485*4882a593Smuzhiyun
8486*4882a593Smuzhiyun err_out_disable:
8487*4882a593Smuzhiyun pci_disable_device(pdev);
8488*4882a593Smuzhiyun
8489*4882a593Smuzhiyun err_out:
8490*4882a593Smuzhiyun kfree(bp->temp_stats_blk);
8491*4882a593Smuzhiyun
8492*4882a593Smuzhiyun return rc;
8493*4882a593Smuzhiyun }
8494*4882a593Smuzhiyun
8495*4882a593Smuzhiyun static char *
bnx2_bus_string(struct bnx2 * bp,char * str)8496*4882a593Smuzhiyun bnx2_bus_string(struct bnx2 *bp, char *str)
8497*4882a593Smuzhiyun {
8498*4882a593Smuzhiyun char *s = str;
8499*4882a593Smuzhiyun
8500*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_PCIE) {
8501*4882a593Smuzhiyun s += sprintf(s, "PCI Express");
8502*4882a593Smuzhiyun } else {
8503*4882a593Smuzhiyun s += sprintf(s, "PCI");
8504*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_PCIX)
8505*4882a593Smuzhiyun s += sprintf(s, "-X");
8506*4882a593Smuzhiyun if (bp->flags & BNX2_FLAG_PCI_32BIT)
8507*4882a593Smuzhiyun s += sprintf(s, " 32-bit");
8508*4882a593Smuzhiyun else
8509*4882a593Smuzhiyun s += sprintf(s, " 64-bit");
8510*4882a593Smuzhiyun s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8511*4882a593Smuzhiyun }
8512*4882a593Smuzhiyun return str;
8513*4882a593Smuzhiyun }
8514*4882a593Smuzhiyun
8515*4882a593Smuzhiyun static void
bnx2_del_napi(struct bnx2 * bp)8516*4882a593Smuzhiyun bnx2_del_napi(struct bnx2 *bp)
8517*4882a593Smuzhiyun {
8518*4882a593Smuzhiyun int i;
8519*4882a593Smuzhiyun
8520*4882a593Smuzhiyun for (i = 0; i < bp->irq_nvecs; i++)
8521*4882a593Smuzhiyun netif_napi_del(&bp->bnx2_napi[i].napi);
8522*4882a593Smuzhiyun }
8523*4882a593Smuzhiyun
8524*4882a593Smuzhiyun static void
bnx2_init_napi(struct bnx2 * bp)8525*4882a593Smuzhiyun bnx2_init_napi(struct bnx2 *bp)
8526*4882a593Smuzhiyun {
8527*4882a593Smuzhiyun int i;
8528*4882a593Smuzhiyun
8529*4882a593Smuzhiyun for (i = 0; i < bp->irq_nvecs; i++) {
8530*4882a593Smuzhiyun struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8531*4882a593Smuzhiyun int (*poll)(struct napi_struct *, int);
8532*4882a593Smuzhiyun
8533*4882a593Smuzhiyun if (i == 0)
8534*4882a593Smuzhiyun poll = bnx2_poll;
8535*4882a593Smuzhiyun else
8536*4882a593Smuzhiyun poll = bnx2_poll_msix;
8537*4882a593Smuzhiyun
8538*4882a593Smuzhiyun netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8539*4882a593Smuzhiyun bnapi->bp = bp;
8540*4882a593Smuzhiyun }
8541*4882a593Smuzhiyun }
8542*4882a593Smuzhiyun
/* net_device callback table wired into every bnx2 netdev at probe time. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8559*4882a593Smuzhiyun
/* bnx2_init_one - PCI probe entry point.
 * @pdev: PCI device being probed
 * @ent: matching entry from the device ID table (driver_data indexes
 *       board_info[] for the printable board name)
 *
 * Allocates the multiqueue net_device, runs the board bring-up
 * (bnx2_init_board), advertises offload features and registers the
 * netdev.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];	/* scratch for bnx2_bus_string() */

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
	 * New io-page table has been created before bnx2 does reset at open stage.
	 * We have to wait for the in-flight DMA to complete to avoid it look up
	 * into the newly created io-page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	/* Permanent MAC address was read from shmem by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 can checksum/segment IPv6. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	/* VLAN RX stripping cannot be toggled when ASF needs the tags. */
	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}
8636*4882a593Smuzhiyun
/* bnx2_remove_one - PCI remove entry point.
 * @pdev: PCI device being removed
 *
 * Tears down in reverse order of probe: unregister the netdev first so
 * no new traffic or ioctls arrive, then quiesce deferred work (timer,
 * reset task) before releasing memory and PCI resources.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Stop async activity that might touch the (soon-unmapped) regs. */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8665*4882a593Smuzhiyun
8666*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/* bnx2_suspend - PM sleep callback.
 * @device: generic device embedded in the PCI device
 *
 * If the interface is up, quiesce it completely (stop NAPI/queues,
 * detach, kill the timer, reset the chip, free IRQs and SKBs) before
 * arming wake-on-LAN.  Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		/* Reset task must not run while we tear the NIC down. */
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	/* Configure WOL even when the interface is down. */
	bnx2_setup_wol(bp);
	return 0;
}
8685*4882a593Smuzhiyun
/* bnx2_resume - PM resume callback.
 * @device: generic device embedded in the PCI device
 *
 * Mirrors bnx2_suspend(): if the interface was running, restore D0
 * power state, re-request IRQs, re-init the NIC and restart the data
 * path.  Always returns 0.
 *
 * NOTE(review): the return values of bnx2_request_irq() and
 * bnx2_init_nic() are ignored here — presumably intentional best-effort
 * resume; confirm against upstream history before changing.
 */
static int
bnx2_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8702*4882a593Smuzhiyun
8703*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8704*4882a593Smuzhiyun #define BNX2_PM_OPS (&bnx2_pm_ops)
8705*4882a593Smuzhiyun
8706*4882a593Smuzhiyun #else
8707*4882a593Smuzhiyun
8708*4882a593Smuzhiyun #define BNX2_PM_OPS NULL
8709*4882a593Smuzhiyun
8710*4882a593Smuzhiyun #endif /* CONFIG_PM_SLEEP */
8711*4882a593Smuzhiyun /**
8712*4882a593Smuzhiyun * bnx2_io_error_detected - called when PCI error is detected
8713*4882a593Smuzhiyun * @pdev: Pointer to PCI device
8714*4882a593Smuzhiyun * @state: The current pci connection state
8715*4882a593Smuzhiyun *
8716*4882a593Smuzhiyun * This function is called after a PCI bus error affecting
8717*4882a593Smuzhiyun * this device has been detected.
8718*4882a593Smuzhiyun */
bnx2_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)8719*4882a593Smuzhiyun static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8720*4882a593Smuzhiyun pci_channel_state_t state)
8721*4882a593Smuzhiyun {
8722*4882a593Smuzhiyun struct net_device *dev = pci_get_drvdata(pdev);
8723*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
8724*4882a593Smuzhiyun
8725*4882a593Smuzhiyun rtnl_lock();
8726*4882a593Smuzhiyun netif_device_detach(dev);
8727*4882a593Smuzhiyun
8728*4882a593Smuzhiyun if (state == pci_channel_io_perm_failure) {
8729*4882a593Smuzhiyun rtnl_unlock();
8730*4882a593Smuzhiyun return PCI_ERS_RESULT_DISCONNECT;
8731*4882a593Smuzhiyun }
8732*4882a593Smuzhiyun
8733*4882a593Smuzhiyun if (netif_running(dev)) {
8734*4882a593Smuzhiyun bnx2_netif_stop(bp, true);
8735*4882a593Smuzhiyun del_timer_sync(&bp->timer);
8736*4882a593Smuzhiyun bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8737*4882a593Smuzhiyun }
8738*4882a593Smuzhiyun
8739*4882a593Smuzhiyun pci_disable_device(pdev);
8740*4882a593Smuzhiyun rtnl_unlock();
8741*4882a593Smuzhiyun
8742*4882a593Smuzhiyun /* Request a slot slot reset. */
8743*4882a593Smuzhiyun return PCI_ERS_RESULT_NEED_RESET;
8744*4882a593Smuzhiyun }
8745*4882a593Smuzhiyun
8746*4882a593Smuzhiyun /**
8747*4882a593Smuzhiyun * bnx2_io_slot_reset - called after the pci bus has been reset.
8748*4882a593Smuzhiyun * @pdev: Pointer to PCI device
8749*4882a593Smuzhiyun *
8750*4882a593Smuzhiyun * Restart the card from scratch, as if from a cold-boot.
8751*4882a593Smuzhiyun */
bnx2_io_slot_reset(struct pci_dev * pdev)8752*4882a593Smuzhiyun static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8753*4882a593Smuzhiyun {
8754*4882a593Smuzhiyun struct net_device *dev = pci_get_drvdata(pdev);
8755*4882a593Smuzhiyun struct bnx2 *bp = netdev_priv(dev);
8756*4882a593Smuzhiyun pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8757*4882a593Smuzhiyun int err = 0;
8758*4882a593Smuzhiyun
8759*4882a593Smuzhiyun rtnl_lock();
8760*4882a593Smuzhiyun if (pci_enable_device(pdev)) {
8761*4882a593Smuzhiyun dev_err(&pdev->dev,
8762*4882a593Smuzhiyun "Cannot re-enable PCI device after reset\n");
8763*4882a593Smuzhiyun } else {
8764*4882a593Smuzhiyun pci_set_master(pdev);
8765*4882a593Smuzhiyun pci_restore_state(pdev);
8766*4882a593Smuzhiyun pci_save_state(pdev);
8767*4882a593Smuzhiyun
8768*4882a593Smuzhiyun if (netif_running(dev))
8769*4882a593Smuzhiyun err = bnx2_init_nic(bp, 1);
8770*4882a593Smuzhiyun
8771*4882a593Smuzhiyun if (!err)
8772*4882a593Smuzhiyun result = PCI_ERS_RESULT_RECOVERED;
8773*4882a593Smuzhiyun }
8774*4882a593Smuzhiyun
8775*4882a593Smuzhiyun if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8776*4882a593Smuzhiyun bnx2_napi_enable(bp);
8777*4882a593Smuzhiyun dev_close(dev);
8778*4882a593Smuzhiyun }
8779*4882a593Smuzhiyun rtnl_unlock();
8780*4882a593Smuzhiyun
8781*4882a593Smuzhiyun if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8782*4882a593Smuzhiyun return result;
8783*4882a593Smuzhiyun
8784*4882a593Smuzhiyun return result;
8785*4882a593Smuzhiyun }
8786*4882a593Smuzhiyun
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart the datapath only if the interface was up when the
	 * error was detected; the device is marked present again
	 * unconditionally, all under the RTNL lock.
	 */
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8806*4882a593Smuzhiyun
bnx2_shutdown(struct pci_dev * pdev)8807*4882a593Smuzhiyun static void bnx2_shutdown(struct pci_dev *pdev)
8808*4882a593Smuzhiyun {
8809*4882a593Smuzhiyun struct net_device *dev = pci_get_drvdata(pdev);
8810*4882a593Smuzhiyun struct bnx2 *bp;
8811*4882a593Smuzhiyun
8812*4882a593Smuzhiyun if (!dev)
8813*4882a593Smuzhiyun return;
8814*4882a593Smuzhiyun
8815*4882a593Smuzhiyun bp = netdev_priv(dev);
8816*4882a593Smuzhiyun if (!bp)
8817*4882a593Smuzhiyun return;
8818*4882a593Smuzhiyun
8819*4882a593Smuzhiyun rtnl_lock();
8820*4882a593Smuzhiyun if (netif_running(dev))
8821*4882a593Smuzhiyun dev_close(bp->dev);
8822*4882a593Smuzhiyun
8823*4882a593Smuzhiyun if (system_state == SYSTEM_POWER_OFF)
8824*4882a593Smuzhiyun bnx2_set_power_state(bp, PCI_D3hot);
8825*4882a593Smuzhiyun
8826*4882a593Smuzhiyun rtnl_unlock();
8827*4882a593Smuzhiyun }
8828*4882a593Smuzhiyun
/* PCI error-recovery (AER) callbacks: the core drives the sequence
 * error_detected -> slot_reset -> resume, implemented above.
 */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8834*4882a593Smuzhiyun
/* PCI driver descriptor: binds probe/remove, power management, error
 * recovery and shutdown entry points to the devices in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};
8844*4882a593Smuzhiyun
/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(bnx2_pci_driver);
8846