/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
	Written 1998-2000 by Donald Becker.

	Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
	send all bug reports to me, and not to Donald Becker, as this code
	has been heavily modified from Donald's original version.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The information below comes from Donald Becker's original driver:

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/starfire.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME "starfire"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/mm.h>
#include <linux/firmware.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>

/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE

/*
 * If using the broken firmware, data must be padded to the next 32-bit boundary.
 */
#ifdef HAS_BROKEN_FIRMWARE
#define PADDING_MASK 3
#endif
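/*
 * For reference, a minimal and purely illustrative way to honour this
 * requirement (not the driver's actual transmit path) is to round the frame
 * length up to the next 32-bit boundary before handing it to the firmware,
 * where "skb" stands for the outgoing socket buffer in the xmit handler;
 * skb_padto() frees the skb itself on failure:
 *
 *	if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
 *		return NETDEV_TX_OK;
 */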

/*
 * Define this if using the driver with the zero-copy patch
 */
#define ZEROCOPY

#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
static int enable_hw_cksum = 1;

#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer. */
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow unaligned loads, even of integers that are
 * merely misaligned on a 2-byte boundary. Thus we always force copying
 * of packets, as the starfire doesn't allow misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif
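/*
 * Note: DMA_BURST_SIZE is given in bytes; netdev_open() below converts it to
 * the 32-byte units the chip's Rx/Tx DMA control registers expect by writing
 * (DMA_BURST_SIZE / 32) into the burst-size fields.
 */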

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE	256
#define TX_RING_SIZE	32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE	1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN	256
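/*
 * The queue allocations in netdev_open() round each queue's byte size up to
 * this alignment with the usual idiom, i.e. for a size x:
 *
 *	((x + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN
 *
 * which is equivalent to the kernel's ALIGN(x, QUEUE_ALIGN) helper.
 */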

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2 * HZ)

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
/* 64-bit dma_addr_t */
#define ADDR_64BITS	/* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define netdrv_addr_t __le32
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif

#define skb_first_frag_len(skb) skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)

/* Firmware names */
#define FIRMWARE_RX	"adaptec/starfire_rx.bin"
#define FIRMWARE_TX	"adaptec/starfire_tx.bin"

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
			Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings.  The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue.  When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure.  There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding.  It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 2/3 receive descriptors.  The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor.  The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff.  When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware.  Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines, e.g.
Alphas and IA64.  For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally.  Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.
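
As an illustrative sketch only (pkt_len, skb and dev stand for the values
available inside the real receive routine), the copy-only-tiny-frames logic
described above has roughly this shape:

	if (pkt_len < rx_copybreak) {
		skb = netdev_alloc_skb(dev, pkt_len + 2);
		skb_reserve(skb, 2);		// 16-byte align the IP header
		... copy pkt_len bytes out of the ring buffer and recycle
		    the original Rx buffer ...
	} else {
		... unmap the original buffer and pass its skbuff up the
		    stack; it is replaced later by refill_rx_ring() ...
	}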

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
	CH_6915 = 0,
};

static const struct pci_device_id starfire_pci_tbl[] = {
	{ PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static const struct chip_info {
	const char *name;
	int drv_flags;
} netdrv_tbl[] = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
	TxRingHiAddr=0x5009C, /* 64 bit address extension. */
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,
	/* not quite bits */
	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
	WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,
	RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
	RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
	netdrv_addr_t rxaddr;
};
enum rx_desc_bits {
	RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
	__le32 status;		/* Low 16 bits is length. */
};
struct basic_rx_done_desc {
	__le32 status;		/* Low 16 bits is length. */
	__le16 vlanid;
	__le16 status2;
};
struct csum_rx_done_desc {
	__le32 status;		/* Low 16 bits is length. */
	__le16 csum;		/* Partial checksum */
	__le16 status2;
};
struct full_rx_done_desc {
	__le32 status;		/* Low 16 bits is length. */
	__le16 status3;
	__le16 status2;
	__le16 vlanid;
	__le16 csum;		/* partial checksum */
	__le32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */

enum rx_done_bits {
	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
	__le32 status;		/* Upper bits are status, lower 16 length. */
	__le32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
	__le32 status;		/* Upper bits are status, lower 16 length. */
	__le32 reserved;
	__le64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING TxDescSpaceUnlim

enum tx_desc_bits {
	TxDescID=0xB0000000,
	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
	__le32 status;		/* timestamp, index. */
#if 0
	__le32 intrstatus;	/* interrupt status */
#endif
};

struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;
};

#define PHY_CNT		2
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of rx/tx-in-place skbuffs. */
	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];
	/* Pointers to completion queues (full pages). */
	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;
	struct napi_struct napi;
	struct net_device *dev;
	struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
	void *queue_mem;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;

	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	u32 tx_mode;
	u32 intr_timer_ctrl;
	u8 tx_threshold;
	/* MII transceiver section. */
	struct mii_if_info mii_if;	/* MII lib hooks/info */
	int phy_cnt;			/* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
	void __iomem *base;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int __netdev_rx(struct net_device *dev, int *quota);
static int netdev_poll(struct napi_struct *napi, int budget);
static void refill_rx_ring(struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static void netdev_media_change(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static int netdev_vlan_rx_add_vid(struct net_device *dev,
				  __be16 proto, u16 vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_bit(vid, np->active_vlans);
	set_rx_mode(dev);
	spin_unlock(&np->lock);

	return 0;
}

static int netdev_vlan_rx_kill_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	clear_bit(vid, np->active_vlans);
	set_rx_mode(dev);
	spin_unlock(&np->lock);

	return 0;
}
#endif /* VLAN_SUPPORT */


static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef VLAN_SUPPORT
	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
#endif
};

static int starfire_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	struct device *d = &pdev->dev;
	struct netdev_private *np;
	int i, irq, chip_idx = ent->driver_data;
	struct net_device *dev;
	long ioaddr;
	void __iomem *base;
	int drv_flags, io_size;
	int boguscnt;

	if (pci_enable_device (pdev))
		return -EIO;

	ioaddr = pci_resource_start(pdev, 0);
	io_size = pci_resource_len(pdev, 0);
	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
		dev_err(d, "no PCI MEM resources, aborting\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	if (pci_request_regions (pdev, DRV_NAME)) {
		dev_err(d, "cannot reserve PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	base = ioremap(ioaddr, io_size);
	if (!base) {
		dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
			io_size, ioaddr);
		goto err_out_free_res;
	}

	pci_set_master(pdev);

	/* enable MWI -- it vastly improves Rx performance on sparc64 */
	pci_try_set_mwi(pdev);

#ifdef ZEROCOPY
	/* Starfire can do TCP/UDP checksumming */
	if (enable_hw_cksum)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#endif /* ZEROCOPY */

#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

	/* Serial EEPROM reads are hidden by the hardware. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(base + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif

	/* Issue soft reset */
	writel(MiiSoftReset, base + TxMode);
	udelay(1000);
	writel(0, base + TxMode);

	/* Reset the chip to erase previous misconfiguration. */
	writel(1, base + PCIDeviceConfig);
	boguscnt = 1000;
	while (--boguscnt > 0) {
		udelay(10);
		if ((readl(base + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);
	/* wait a little longer */
	udelay(1000);

	np = netdev_priv(dev);
	np->dev = dev;
	np->base = base;
	spin_lock_init(&np->lock);
	pci_set_drvdata(pdev, dev);

	np->pci_dev = pdev;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	drv_flags = netdrv_tbl[chip_idx].drv_flags;

	np->speed100 = 1;

	/* timer resolution is 128 * 0.8us */
	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		Timer10X | EnableIntrMasking;
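	/*
	 * Worked example (from the 128 * 0.8us = 102.4us tick noted above):
	 * intr_latency = 1000us gives (1000 * 10) / 1024 = 9 ticks, i.e.
	 * roughly 9 * 102.4us ~= 922us of actual latency; IntrLatencyMask
	 * caps the field at 31 ticks (~3.2ms).
	 */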

	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 1 ... 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 65 ... 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 129 ... 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}

	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;

	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);

	if (mtu)
		dev->mtu = mtu;

	if (register_netdev(dev))
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, netdrv_tbl[chip_idx].name, base,
	       dev->dev_addr, irq);

	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		int mii_status;
		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			msleep(100);
			boguscnt = 1000;
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0) {
				printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
				continue;
			}
			mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
				       "%#4.4x advertising %#4.4x.\n",
				       dev->name, phy, mii_status, np->mii_if.advertising);
				/* there can be only one PHY on-board */
				break;
			}
		}
		np->phy_cnt = phy_idx;
		if (np->phy_cnt > 0)
			np->mii_if.phy_id = np->phys[0];
		else
			memset(&np->mii_if, 0, sizeof(np->mii_if));
	}

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
	return 0;

err_out_cleardev:
	iounmap(base);
err_out_free_res:
	pci_release_regions (pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
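	/*
	 * Register layout implied by the arithmetic above: each PHY gets a
	 * 0x80-byte window off MIICtrl (phy_id << 7), and each MII register
	 * occupies 4 bytes within that window (location << 2).
	 */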
	int result, boguscnt=1000;
	/* ??? Should we add a busy-wait here? */
	do {
		result = readl(mdio_addr);
	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if (boguscnt == 0)
		return 0;
	if ((result & 0xffff) == 0xffff)
		return 0;
	return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	writel(value, mdio_addr);
	/* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
	const struct firmware *fw_rx, *fw_tx;
	const __be32 *fw_rx_data, *fw_tx_data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;
	int i, retval;
	size_t tx_size, rx_size;
	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

	/* Do we ever need to reset the chip??? */

	retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (retval)
		return retval;

	/* Disable the Rx and Tx, and reset the chip. */
	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
		       dev->name, irq);

	/* Allocate the various queues. */
	if (!np->queue_mem) {
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
		np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
						   np->queue_mem_size,
						   &np->queue_mem_dma, GFP_ATOMIC);
		if (np->queue_mem == NULL) {
			free_irq(irq, dev);
			return -ENOMEM;
		}

		np->tx_done_q = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
	}

	/* Start with no carrier, it gets adjusted later */
	netif_carrier_off(dev);
	init_ring(dev);
	/* Set the size of the Rx buffers. */
	writel((np->rx_buf_sz << RxBufferLenShift) |
	       (0 << RxMinDescrThreshShift) |
	       RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
	       ioaddr + RxDescQCtrl);

	/* Set up the Rx DMA controller. */
	writel(RxChecksumIgnore |
	       (0 << RxEarlyIntThreshShift) |
	       (6 << RxHighPrioThreshShift) |
	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
	       ioaddr + RxDMACtrl);

	/* Set Tx descriptor */
	writel((2 << TxHiPriFIFOThreshShift) |
	       (0 << TxPadLenShift) |
	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
	       TX_DESC_Q_ADDR_SIZE |
	       TX_DESC_SPACING | TX_DESC_TYPE,
	       ioaddr + TxDescCtrl);

	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |
	       RxComplType |
	       (0 << RxComplThreshShift),
	       ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

	/* Fill both the Tx SA register and the Rx perfect filter. */
	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
	/* The first entry is special because it bypasses the VLAN filter.
	   Don't use it. */
	writew(0, ioaddr + PerfFilterTable);
	writew(0, ioaddr + PerfFilterTable + 4);
	writew(0, ioaddr + PerfFilterTable + 8);
	for (i = 1; i < 16; i++) {
		__be16 *eaddrs = (__be16 *)dev->dev_addr;
		void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
		writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
	}
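	/*
	 * As the loop above shows, each 16-byte perfect-filter slot holds the
	 * station address as three 16-bit words at offsets 0/4/8, written
	 * least-significant word of the MAC first; entries 1-15 all receive
	 * the same unicast address here.
	 */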

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	udelay(1000);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	napi_enable(&np->napi);

	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
	check_duplex(dev);

	/* Enable GPIO interrupts on link change */
	writel(0x0f00ff00, ioaddr + GPIOCtrl);

	/* Set the interrupt mask */
	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
	       ioaddr + IntrEnable);
	/* Enable PCI interrupts. */
	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
	       ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
	/* Set VLAN type to 802.1q */
	writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

	retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_RX);
		goto out_init;
	}
	if (fw_rx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_rx->size, FIRMWARE_RX);
		retval = -EINVAL;
		goto out_rx;
	}
	retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
	if (retval) {
		printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
		       FIRMWARE_TX);
		goto out_rx;
	}
	if (fw_tx->size % 4) {
		printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
		       fw_tx->size, FIRMWARE_TX);
		retval = -EINVAL;
		goto out_tx;
	}
	fw_rx_data = (const __be32 *)&fw_rx->data[0];
	fw_tx_data = (const __be32 *)&fw_tx->data[0];
	rx_size = fw_rx->size / 4;
	tx_size = fw_tx->size / 4;

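	/*
	 * Both firmware images are treated as plain streams of big-endian
	 * 32-bit words (hence the size % 4 checks above); they are copied
	 * verbatim into the Rx/Tx frame-processor instruction memories below.
	 */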
1033*4882a593Smuzhiyun /* Load Rx/Tx firmware into the frame processors */
1034*4882a593Smuzhiyun for (i = 0; i < rx_size; i++)
1035*4882a593Smuzhiyun writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1036*4882a593Smuzhiyun for (i = 0; i < tx_size; i++)
1037*4882a593Smuzhiyun writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1038*4882a593Smuzhiyun if (enable_hw_cksum)
1039*4882a593Smuzhiyun /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1040*4882a593Smuzhiyun writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1041*4882a593Smuzhiyun else
1042*4882a593Smuzhiyun /* Enable the Rx and Tx units only. */
1043*4882a593Smuzhiyun writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1044*4882a593Smuzhiyun
1045*4882a593Smuzhiyun if (debug > 1)
1046*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Done netdev_open().\n",
1047*4882a593Smuzhiyun dev->name);
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun out_tx:
1050*4882a593Smuzhiyun release_firmware(fw_tx);
1051*4882a593Smuzhiyun out_rx:
1052*4882a593Smuzhiyun release_firmware(fw_rx);
1053*4882a593Smuzhiyun out_init:
1054*4882a593Smuzhiyun if (retval)
1055*4882a593Smuzhiyun netdev_close(dev);
1056*4882a593Smuzhiyun return retval;
1057*4882a593Smuzhiyun }
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun
check_duplex(struct net_device * dev)1060*4882a593Smuzhiyun static void check_duplex(struct net_device *dev)
1061*4882a593Smuzhiyun {
1062*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1063*4882a593Smuzhiyun u16 reg0;
1064*4882a593Smuzhiyun int silly_count = 1000;
1065*4882a593Smuzhiyun
1066*4882a593Smuzhiyun mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1067*4882a593Smuzhiyun mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1068*4882a593Smuzhiyun udelay(500);
1069*4882a593Smuzhiyun while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1070*4882a593Smuzhiyun /* do nothing */;
1071*4882a593Smuzhiyun if (!silly_count) {
1072*4882a593Smuzhiyun printk("%s: MII reset failed!\n", dev->name);
1073*4882a593Smuzhiyun return;
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun if (!np->mii_if.force_media) {
1079*4882a593Smuzhiyun reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1080*4882a593Smuzhiyun } else {
1081*4882a593Smuzhiyun reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1082*4882a593Smuzhiyun if (np->speed100)
1083*4882a593Smuzhiyun reg0 |= BMCR_SPEED100;
1084*4882a593Smuzhiyun if (np->mii_if.full_duplex)
1085*4882a593Smuzhiyun reg0 |= BMCR_FULLDPLX;
1086*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1087*4882a593Smuzhiyun dev->name,
1088*4882a593Smuzhiyun np->speed100 ? "100" : "10",
1089*4882a593Smuzhiyun np->mii_if.full_duplex ? "full" : "half");
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun static void tx_timeout(struct net_device *dev, unsigned int txqueue)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1098*4882a593Smuzhiyun void __iomem *ioaddr = np->base;
1099*4882a593Smuzhiyun int old_debug;
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1102*4882a593Smuzhiyun "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun /* Perhaps we should reinitialize the hardware here. */
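/*
 * In practice the close/open cycle below already performs a full
 * reinitialization, including reloading the Rx/Tx firmware in
 * netdev_open().
 */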
1105*4882a593Smuzhiyun
1106*4882a593Smuzhiyun /*
1107*4882a593Smuzhiyun * Stop and restart the interface.
1108*4882a593Smuzhiyun * Cheat and increase the debug level temporarily.
1109*4882a593Smuzhiyun */
1110*4882a593Smuzhiyun old_debug = debug;
1111*4882a593Smuzhiyun debug = 2;
1112*4882a593Smuzhiyun netdev_close(dev);
1113*4882a593Smuzhiyun netdev_open(dev);
1114*4882a593Smuzhiyun debug = old_debug;
1115*4882a593Smuzhiyun
1116*4882a593Smuzhiyun /* Trigger an immediate transmit demand. */
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun netif_trans_update(dev); /* prevent tx timeout */
1119*4882a593Smuzhiyun dev->stats.tx_errors++;
1120*4882a593Smuzhiyun netif_wake_queue(dev);
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1125*4882a593Smuzhiyun static void init_ring(struct net_device *dev)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1128*4882a593Smuzhiyun int i;
1129*4882a593Smuzhiyun
1130*4882a593Smuzhiyun np->cur_rx = np->cur_tx = np->reap_tx = 0;
1131*4882a593Smuzhiyun np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1136*4882a593Smuzhiyun for (i = 0; i < RX_RING_SIZE; i++) {
1137*4882a593Smuzhiyun struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1138*4882a593Smuzhiyun np->rx_info[i].skb = skb;
1139*4882a593Smuzhiyun if (skb == NULL)
1140*4882a593Smuzhiyun break;
1141*4882a593Smuzhiyun np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
1142*4882a593Smuzhiyun skb->data,
1143*4882a593Smuzhiyun np->rx_buf_sz,
1144*4882a593Smuzhiyun DMA_FROM_DEVICE);
1145*4882a593Smuzhiyun if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
1146*4882a593Smuzhiyun dev_kfree_skb(skb);
1147*4882a593Smuzhiyun np->rx_info[i].skb = NULL;
1148*4882a593Smuzhiyun break;
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun /* Grrr, we cannot offset to correctly align the IP header. */
1151*4882a593Smuzhiyun np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun writew(i - 1, np->base + RxDescQIdx);
1154*4882a593Smuzhiyun np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun /* Clear the remainder of the Rx buffer ring. */
1157*4882a593Smuzhiyun for ( ; i < RX_RING_SIZE; i++) {
1158*4882a593Smuzhiyun np->rx_ring[i].rxaddr = 0;
1159*4882a593Smuzhiyun np->rx_info[i].skb = NULL;
1160*4882a593Smuzhiyun np->rx_info[i].mapping = 0;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun /* Mark the last entry as wrapping the ring. */
1163*4882a593Smuzhiyun np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun /* Clear the completion rings. */
1166*4882a593Smuzhiyun for (i = 0; i < DONE_Q_SIZE; i++) {
1167*4882a593Smuzhiyun np->rx_done_q[i].status = 0;
1168*4882a593Smuzhiyun np->tx_done_q[i].status = 0;
1169*4882a593Smuzhiyun }
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun for (i = 0; i < TX_RING_SIZE; i++)
1172*4882a593Smuzhiyun memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun
1176*4882a593Smuzhiyun static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1179*4882a593Smuzhiyun unsigned int entry;
1180*4882a593Smuzhiyun unsigned int prev_tx;
1181*4882a593Smuzhiyun u32 status;
1182*4882a593Smuzhiyun int i, j;
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun /*
1185*4882a593Smuzhiyun * be cautious here, wrapping the queue has weird semantics
1186*4882a593Smuzhiyun * and we may not have enough slots even when it seems we do.
1187*4882a593Smuzhiyun */
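/*
 * The skb_num_frags() * 2 slack below looks like a conservative bound for
 * the wrap case handled further down, where the first descriptor of a
 * packet is charged for every remaining slot up to the end of the ring
 * (used_slots = TX_RING_SIZE - entry).
 */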
1188*4882a593Smuzhiyun if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1189*4882a593Smuzhiyun netif_stop_queue(dev);
1190*4882a593Smuzhiyun return NETDEV_TX_BUSY;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1194*4882a593Smuzhiyun if (skb->ip_summed == CHECKSUM_PARTIAL) {
1195*4882a593Smuzhiyun if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1196*4882a593Smuzhiyun return NETDEV_TX_OK;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun prev_tx = np->cur_tx;
1201*4882a593Smuzhiyun entry = np->cur_tx % TX_RING_SIZE;
1202*4882a593Smuzhiyun for (i = 0; i < skb_num_frags(skb); i++) {
1203*4882a593Smuzhiyun int wrap_ring = 0;
1204*4882a593Smuzhiyun status = TxDescID;
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun if (i == 0) {
1207*4882a593Smuzhiyun np->tx_info[entry].skb = skb;
1208*4882a593Smuzhiyun status |= TxCRCEn;
1209*4882a593Smuzhiyun if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1210*4882a593Smuzhiyun status |= TxRingWrap;
1211*4882a593Smuzhiyun wrap_ring = 1;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun if (np->reap_tx) {
1214*4882a593Smuzhiyun status |= TxDescIntr;
1215*4882a593Smuzhiyun np->reap_tx = 0;
1216*4882a593Smuzhiyun }
1217*4882a593Smuzhiyun if (skb->ip_summed == CHECKSUM_PARTIAL) {
1218*4882a593Smuzhiyun status |= TxCalTCP;
1219*4882a593Smuzhiyun dev->stats.tx_compressed++;
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun np->tx_info[entry].mapping =
1224*4882a593Smuzhiyun dma_map_single(&np->pci_dev->dev, skb->data,
1225*4882a593Smuzhiyun skb_first_frag_len(skb),
1226*4882a593Smuzhiyun DMA_TO_DEVICE);
1227*4882a593Smuzhiyun } else {
1228*4882a593Smuzhiyun const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1229*4882a593Smuzhiyun status |= skb_frag_size(this_frag);
1230*4882a593Smuzhiyun np->tx_info[entry].mapping =
1231*4882a593Smuzhiyun dma_map_single(&np->pci_dev->dev,
1232*4882a593Smuzhiyun skb_frag_address(this_frag),
1233*4882a593Smuzhiyun skb_frag_size(this_frag),
1234*4882a593Smuzhiyun DMA_TO_DEVICE);
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
1237*4882a593Smuzhiyun dev->stats.tx_dropped++;
1238*4882a593Smuzhiyun goto err_out;
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun
1241*4882a593Smuzhiyun np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1242*4882a593Smuzhiyun np->tx_ring[entry].status = cpu_to_le32(status);
1243*4882a593Smuzhiyun if (debug > 3)
1244*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1245*4882a593Smuzhiyun dev->name, np->cur_tx, np->dirty_tx,
1246*4882a593Smuzhiyun entry, status);
1247*4882a593Smuzhiyun if (wrap_ring) {
1248*4882a593Smuzhiyun np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1249*4882a593Smuzhiyun np->cur_tx += np->tx_info[entry].used_slots;
1250*4882a593Smuzhiyun entry = 0;
1251*4882a593Smuzhiyun } else {
1252*4882a593Smuzhiyun np->tx_info[entry].used_slots = 1;
1253*4882a593Smuzhiyun np->cur_tx += np->tx_info[entry].used_slots;
1254*4882a593Smuzhiyun entry++;
1255*4882a593Smuzhiyun }
1256*4882a593Smuzhiyun /* scavenge the tx descriptors twice per TX_RING_SIZE */
1257*4882a593Smuzhiyun if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1258*4882a593Smuzhiyun np->reap_tx = 1;
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun /* Non-x86: explicitly flush descriptor cache lines here. */
1262*4882a593Smuzhiyun /* Ensure all descriptors are written back before the transmit is
1263*4882a593Smuzhiyun initiated. - Jes */
1264*4882a593Smuzhiyun wmb();
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun /* Update the producer index. */
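/*
 * The producer index register appears to be expressed in 8-byte units,
 * which is why the entry number is scaled by sizeof(starfire_tx_desc) / 8
 * here (inferred from this write, not checked against the databook).
 */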
1267*4882a593Smuzhiyun writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun /* 4 is arbitrary, but should be ok */
1270*4882a593Smuzhiyun if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1271*4882a593Smuzhiyun netif_stop_queue(dev);
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun return NETDEV_TX_OK;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun err_out:
1276*4882a593Smuzhiyun entry = prev_tx % TX_RING_SIZE;
1277*4882a593Smuzhiyun np->tx_info[entry].skb = NULL;
1278*4882a593Smuzhiyun if (i > 0) {
1279*4882a593Smuzhiyun dma_unmap_single(&np->pci_dev->dev,
1280*4882a593Smuzhiyun np->tx_info[entry].mapping,
1281*4882a593Smuzhiyun skb_first_frag_len(skb), DMA_TO_DEVICE);
1282*4882a593Smuzhiyun np->tx_info[entry].mapping = 0;
1283*4882a593Smuzhiyun entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1284*4882a593Smuzhiyun for (j = 1; j < i; j++) {
1285*4882a593Smuzhiyun dma_unmap_single(&np->pci_dev->dev,
1286*4882a593Smuzhiyun np->tx_info[entry].mapping,
1287*4882a593Smuzhiyun skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
1288*4882a593Smuzhiyun DMA_TO_DEVICE);
1289*4882a593Smuzhiyun entry++;
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun dev_kfree_skb_any(skb);
1293*4882a593Smuzhiyun np->cur_tx = prev_tx;
1294*4882a593Smuzhiyun return NETDEV_TX_OK;
1295*4882a593Smuzhiyun }
1296*4882a593Smuzhiyun
1297*4882a593Smuzhiyun /* The interrupt handler does all of the Rx thread work and cleans up
1298*4882a593Smuzhiyun after the Tx thread. */
1299*4882a593Smuzhiyun static irqreturn_t intr_handler(int irq, void *dev_instance)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun struct net_device *dev = dev_instance;
1302*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1303*4882a593Smuzhiyun void __iomem *ioaddr = np->base;
1304*4882a593Smuzhiyun int boguscnt = max_interrupt_work;
1305*4882a593Smuzhiyun int consumer;
1306*4882a593Smuzhiyun int tx_status;
1307*4882a593Smuzhiyun int handled = 0;
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun do {
1310*4882a593Smuzhiyun u32 intr_status = readl(ioaddr + IntrClear);
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun if (debug > 4)
1313*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1314*4882a593Smuzhiyun dev->name, intr_status);
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun if (intr_status == 0 || intr_status == (u32) -1)
1317*4882a593Smuzhiyun break;
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun handled = 1;
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1322*4882a593Smuzhiyun u32 enable;
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun if (likely(napi_schedule_prep(&np->napi))) {
1325*4882a593Smuzhiyun __napi_schedule(&np->napi);
1326*4882a593Smuzhiyun enable = readl(ioaddr + IntrEnable);
1327*4882a593Smuzhiyun enable &= ~(IntrRxDone | IntrRxEmpty);
1328*4882a593Smuzhiyun writel(enable, ioaddr + IntrEnable);
1329*4882a593Smuzhiyun /* flush PCI posting buffers */
1330*4882a593Smuzhiyun readl(ioaddr + IntrEnable);
1331*4882a593Smuzhiyun } else {
1332*4882a593Smuzhiyun /* Paranoia check */
1333*4882a593Smuzhiyun enable = readl(ioaddr + IntrEnable);
1334*4882a593Smuzhiyun if (enable & (IntrRxDone | IntrRxEmpty)) {
1335*4882a593Smuzhiyun printk(KERN_INFO
1336*4882a593Smuzhiyun "%s: interrupt while in poll!\n",
1337*4882a593Smuzhiyun dev->name);
1338*4882a593Smuzhiyun enable &= ~(IntrRxDone | IntrRxEmpty);
1339*4882a593Smuzhiyun writel(enable, ioaddr + IntrEnable);
1340*4882a593Smuzhiyun }
1341*4882a593Smuzhiyun }
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun /* Scavenge the skbuff list based on the Tx-done queue.
1345*4882a593Smuzhiyun There are redundant checks here that may be cleaned up
1346*4882a593Smuzhiyun after the driver has proven to be reliable. */
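/*
 * Judging by the checks below, bits 31..29 of a Tx completion entry give
 * its type: 0xa0000000 is a bare "packet sent" record, while 0x80000000
 * additionally carries, in bits 14..0, the byte offset of the packet's
 * first descriptor, which is what gets unmapped and reclaimed.
 */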
1347*4882a593Smuzhiyun consumer = readl(ioaddr + TxConsumerIdx);
1348*4882a593Smuzhiyun if (debug > 3)
1349*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1350*4882a593Smuzhiyun dev->name, consumer);
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1353*4882a593Smuzhiyun if (debug > 3)
1354*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1355*4882a593Smuzhiyun dev->name, np->dirty_tx, np->tx_done, tx_status);
1356*4882a593Smuzhiyun if ((tx_status & 0xe0000000) == 0xa0000000) {
1357*4882a593Smuzhiyun dev->stats.tx_packets++;
1358*4882a593Smuzhiyun } else if ((tx_status & 0xe0000000) == 0x80000000) {
1359*4882a593Smuzhiyun u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1360*4882a593Smuzhiyun struct sk_buff *skb = np->tx_info[entry].skb;
1361*4882a593Smuzhiyun np->tx_info[entry].skb = NULL;
1362*4882a593Smuzhiyun dma_unmap_single(&np->pci_dev->dev,
1363*4882a593Smuzhiyun np->tx_info[entry].mapping,
1364*4882a593Smuzhiyun skb_first_frag_len(skb),
1365*4882a593Smuzhiyun DMA_TO_DEVICE);
1366*4882a593Smuzhiyun np->tx_info[entry].mapping = 0;
1367*4882a593Smuzhiyun np->dirty_tx += np->tx_info[entry].used_slots;
1368*4882a593Smuzhiyun entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun int i;
1371*4882a593Smuzhiyun for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1372*4882a593Smuzhiyun dma_unmap_single(&np->pci_dev->dev,
1373*4882a593Smuzhiyun np->tx_info[entry].mapping,
1374*4882a593Smuzhiyun skb_frag_size(&skb_shinfo(skb)->frags[i]),
1375*4882a593Smuzhiyun DMA_TO_DEVICE);
1376*4882a593Smuzhiyun np->dirty_tx++;
1377*4882a593Smuzhiyun entry++;
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun dev_consume_skb_irq(skb);
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun np->tx_done_q[np->tx_done].status = 0;
1384*4882a593Smuzhiyun np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun if (netif_queue_stopped(dev) &&
1389*4882a593Smuzhiyun (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1390*4882a593Smuzhiyun /* The ring is no longer full, wake the queue. */
1391*4882a593Smuzhiyun netif_wake_queue(dev);
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun /* Stats overflow */
1395*4882a593Smuzhiyun if (intr_status & IntrStatsMax)
1396*4882a593Smuzhiyun get_stats(dev);
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun /* Media change interrupt. */
1399*4882a593Smuzhiyun if (intr_status & IntrLinkChange)
1400*4882a593Smuzhiyun netdev_media_change(dev);
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun /* Abnormal error summary/uncommon events handlers. */
1403*4882a593Smuzhiyun if (intr_status & IntrAbnormalSummary)
1404*4882a593Smuzhiyun netdev_error(dev, intr_status);
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun if (--boguscnt < 0) {
1407*4882a593Smuzhiyun if (debug > 1)
1408*4882a593Smuzhiyun printk(KERN_WARNING "%s: Too much work at interrupt, "
1409*4882a593Smuzhiyun "status=%#8.8x.\n",
1410*4882a593Smuzhiyun dev->name, intr_status);
1411*4882a593Smuzhiyun break;
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun } while (1);
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun if (debug > 4)
1416*4882a593Smuzhiyun printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1417*4882a593Smuzhiyun dev->name, (int) readl(ioaddr + IntrStatus));
1418*4882a593Smuzhiyun return IRQ_RETVAL(handled);
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun /*
1423*4882a593Smuzhiyun * This routine is logically part of the interrupt/poll handler, but separated
1424*4882a593Smuzhiyun * for clarity and better register allocation.
1425*4882a593Smuzhiyun */
1426*4882a593Smuzhiyun static int __netdev_rx(struct net_device *dev, int *quota)
1427*4882a593Smuzhiyun {
1428*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1429*4882a593Smuzhiyun u32 desc_status;
1430*4882a593Smuzhiyun int retcode = 0;
1431*4882a593Smuzhiyun
1432*4882a593Smuzhiyun /* If EOP is set on the next entry, it's a new packet. Send it up. */
1433*4882a593Smuzhiyun while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1434*4882a593Smuzhiyun struct sk_buff *skb;
1435*4882a593Smuzhiyun u16 pkt_len;
1436*4882a593Smuzhiyun int entry;
1437*4882a593Smuzhiyun rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun if (debug > 4)
1440*4882a593Smuzhiyun printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1441*4882a593Smuzhiyun if (!(desc_status & RxOK)) {
1442*4882a593Smuzhiyun /* There was an error. */
1443*4882a593Smuzhiyun if (debug > 2)
1444*4882a593Smuzhiyun printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1445*4882a593Smuzhiyun dev->stats.rx_errors++;
1446*4882a593Smuzhiyun if (desc_status & RxFIFOErr)
1447*4882a593Smuzhiyun dev->stats.rx_fifo_errors++;
1448*4882a593Smuzhiyun goto next_rx;
1449*4882a593Smuzhiyun }
1450*4882a593Smuzhiyun
1451*4882a593Smuzhiyun if (*quota <= 0) { /* out of rx quota */
1452*4882a593Smuzhiyun retcode = 1;
1453*4882a593Smuzhiyun goto out;
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun (*quota)--;
1456*4882a593Smuzhiyun
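/*
 * The Rx completion status packs the packet length into its low 16 bits
 * and the Rx ring index into bits 26..16; both are decoded just below.
 */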
1457*4882a593Smuzhiyun pkt_len = desc_status; /* Implicitly Truncate */
1458*4882a593Smuzhiyun entry = (desc_status >> 16) & 0x7ff;
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun if (debug > 4)
1461*4882a593Smuzhiyun printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1462*4882a593Smuzhiyun /* Check if the packet is long enough to accept without copying
1463*4882a593Smuzhiyun to a minimally-sized skbuff. */
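/*
 * Packets below rx_copybreak are copied into a freshly allocated skb so
 * the large Rx buffer can stay mapped and be reused; larger packets hand
 * the original buffer up the stack and the slot is replenished later by
 * refill_rx_ring().
 */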
1464*4882a593Smuzhiyun if (pkt_len < rx_copybreak &&
1465*4882a593Smuzhiyun (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1466*4882a593Smuzhiyun skb_reserve(skb, 2); /* 16 byte align the IP header */
1467*4882a593Smuzhiyun dma_sync_single_for_cpu(&np->pci_dev->dev,
1468*4882a593Smuzhiyun np->rx_info[entry].mapping,
1469*4882a593Smuzhiyun pkt_len, DMA_FROM_DEVICE);
1470*4882a593Smuzhiyun skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1471*4882a593Smuzhiyun dma_sync_single_for_device(&np->pci_dev->dev,
1472*4882a593Smuzhiyun np->rx_info[entry].mapping,
1473*4882a593Smuzhiyun pkt_len, DMA_FROM_DEVICE);
1474*4882a593Smuzhiyun skb_put(skb, pkt_len);
1475*4882a593Smuzhiyun } else {
1476*4882a593Smuzhiyun dma_unmap_single(&np->pci_dev->dev,
1477*4882a593Smuzhiyun np->rx_info[entry].mapping,
1478*4882a593Smuzhiyun np->rx_buf_sz, DMA_FROM_DEVICE);
1479*4882a593Smuzhiyun skb = np->rx_info[entry].skb;
1480*4882a593Smuzhiyun skb_put(skb, pkt_len);
1481*4882a593Smuzhiyun np->rx_info[entry].skb = NULL;
1482*4882a593Smuzhiyun np->rx_info[entry].mapping = 0;
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun #ifndef final_version /* Remove after testing. */
1485*4882a593Smuzhiyun /* You will want this info for the initial debug. */
1486*4882a593Smuzhiyun if (debug > 5) {
1487*4882a593Smuzhiyun printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
1488*4882a593Smuzhiyun skb->data, skb->data + 6,
1489*4882a593Smuzhiyun skb->data[12], skb->data[13]);
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun #endif
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun skb->protocol = eth_type_trans(skb, dev);
1494*4882a593Smuzhiyun #ifdef VLAN_SUPPORT
1495*4882a593Smuzhiyun if (debug > 4)
1496*4882a593Smuzhiyun printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1497*4882a593Smuzhiyun #endif
1498*4882a593Smuzhiyun if (le16_to_cpu(desc->status2) & 0x0100) {
1499*4882a593Smuzhiyun skb->ip_summed = CHECKSUM_UNNECESSARY;
1500*4882a593Smuzhiyun dev->stats.rx_compressed++;
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun /*
1503*4882a593Smuzhiyun * This feature doesn't seem to be working, at least
1504*4882a593Smuzhiyun * with the two firmware versions I have. If the GFP sees
1505*4882a593Smuzhiyun * an IP fragment, it either ignores it completely, or reports
1506*4882a593Smuzhiyun * "bad checksum" on it.
1507*4882a593Smuzhiyun *
1508*4882a593Smuzhiyun * Maybe I missed something -- corrections are welcome.
1509*4882a593Smuzhiyun * Until then, the printk stays. :-) -Ion
1510*4882a593Smuzhiyun */
1511*4882a593Smuzhiyun else if (le16_to_cpu(desc->status2) & 0x0040) {
1512*4882a593Smuzhiyun skb->ip_summed = CHECKSUM_COMPLETE;
1513*4882a593Smuzhiyun skb->csum = le16_to_cpu(desc->csum);
1514*4882a593Smuzhiyun printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun #ifdef VLAN_SUPPORT
1517*4882a593Smuzhiyun if (le16_to_cpu(desc->status2) & 0x0200) {
1518*4882a593Smuzhiyun u16 vlid = le16_to_cpu(desc->vlanid);
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun if (debug > 4) {
1521*4882a593Smuzhiyun printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
1522*4882a593Smuzhiyun vlid);
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
1525*4882a593Smuzhiyun }
1526*4882a593Smuzhiyun #endif /* VLAN_SUPPORT */
1527*4882a593Smuzhiyun netif_receive_skb(skb);
1528*4882a593Smuzhiyun dev->stats.rx_packets++;
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun next_rx:
1531*4882a593Smuzhiyun np->cur_rx++;
1532*4882a593Smuzhiyun desc->status = 0;
1533*4882a593Smuzhiyun np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1534*4882a593Smuzhiyun }
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun if (*quota == 0) { /* out of rx quota */
1537*4882a593Smuzhiyun retcode = 1;
1538*4882a593Smuzhiyun goto out;
1539*4882a593Smuzhiyun }
1540*4882a593Smuzhiyun writew(np->rx_done, np->base + CompletionQConsumerIdx);
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun out:
1543*4882a593Smuzhiyun refill_rx_ring(dev);
1544*4882a593Smuzhiyun if (debug > 5)
1545*4882a593Smuzhiyun printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1546*4882a593Smuzhiyun retcode, np->rx_done, desc_status);
1547*4882a593Smuzhiyun return retcode;
1548*4882a593Smuzhiyun }
1549*4882a593Smuzhiyun
1550*4882a593Smuzhiyun static int netdev_poll(struct napi_struct *napi, int budget)
1551*4882a593Smuzhiyun {
1552*4882a593Smuzhiyun struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1553*4882a593Smuzhiyun struct net_device *dev = np->dev;
1554*4882a593Smuzhiyun u32 intr_status;
1555*4882a593Smuzhiyun void __iomem *ioaddr = np->base;
1556*4882a593Smuzhiyun int quota = budget;
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun do {
1559*4882a593Smuzhiyun writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun if (__netdev_rx(dev, "a))
1562*4882a593Smuzhiyun goto out;
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun intr_status = readl(ioaddr + IntrStatus);
1565*4882a593Smuzhiyun } while (intr_status & (IntrRxDone | IntrRxEmpty));
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun napi_complete(napi);
1568*4882a593Smuzhiyun intr_status = readl(ioaddr + IntrEnable);
1569*4882a593Smuzhiyun intr_status |= IntrRxDone | IntrRxEmpty;
1570*4882a593Smuzhiyun writel(intr_status, ioaddr + IntrEnable);
1571*4882a593Smuzhiyun
1572*4882a593Smuzhiyun out:
1573*4882a593Smuzhiyun if (debug > 5)
1574*4882a593Smuzhiyun printk(KERN_DEBUG " exiting netdev_poll(): %d.\n",
1575*4882a593Smuzhiyun budget - quota);
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun /* Restart Rx engine if stopped. */
1578*4882a593Smuzhiyun return budget - quota;
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun static void refill_rx_ring(struct net_device *dev)
1582*4882a593Smuzhiyun {
1583*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1584*4882a593Smuzhiyun struct sk_buff *skb;
1585*4882a593Smuzhiyun int entry = -1;
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun /* Refill the Rx ring buffers. */
1588*4882a593Smuzhiyun for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1589*4882a593Smuzhiyun entry = np->dirty_rx % RX_RING_SIZE;
1590*4882a593Smuzhiyun if (np->rx_info[entry].skb == NULL) {
1591*4882a593Smuzhiyun skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1592*4882a593Smuzhiyun np->rx_info[entry].skb = skb;
1593*4882a593Smuzhiyun if (skb == NULL)
1594*4882a593Smuzhiyun break; /* Better luck next round. */
1595*4882a593Smuzhiyun np->rx_info[entry].mapping =
1596*4882a593Smuzhiyun dma_map_single(&np->pci_dev->dev, skb->data,
1597*4882a593Smuzhiyun np->rx_buf_sz, DMA_FROM_DEVICE);
1598*4882a593Smuzhiyun if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
1599*4882a593Smuzhiyun dev_kfree_skb(skb);
1600*4882a593Smuzhiyun np->rx_info[entry].skb = NULL;
1601*4882a593Smuzhiyun break;
1602*4882a593Smuzhiyun }
1603*4882a593Smuzhiyun np->rx_ring[entry].rxaddr =
1604*4882a593Smuzhiyun cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1605*4882a593Smuzhiyun }
1606*4882a593Smuzhiyun if (entry == RX_RING_SIZE - 1)
1607*4882a593Smuzhiyun np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun if (entry >= 0)
1610*4882a593Smuzhiyun writew(entry, np->base + RxDescQIdx);
1611*4882a593Smuzhiyun }
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun static void netdev_media_change(struct net_device *dev)
1615*4882a593Smuzhiyun {
1616*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1617*4882a593Smuzhiyun void __iomem *ioaddr = np->base;
1618*4882a593Smuzhiyun u16 reg0, reg1, reg4, reg5;
1619*4882a593Smuzhiyun u32 new_tx_mode;
1620*4882a593Smuzhiyun u32 new_intr_timer_ctrl;
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun /* reset status first */
1623*4882a593Smuzhiyun mdio_read(dev, np->phys[0], MII_BMCR);
1624*4882a593Smuzhiyun mdio_read(dev, np->phys[0], MII_BMSR);
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1627*4882a593Smuzhiyun reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1628*4882a593Smuzhiyun
1629*4882a593Smuzhiyun if (reg1 & BMSR_LSTATUS) {
1630*4882a593Smuzhiyun /* link is up */
1631*4882a593Smuzhiyun if (reg0 & BMCR_ANENABLE) {
1632*4882a593Smuzhiyun /* autonegotiation is enabled */
1633*4882a593Smuzhiyun reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1634*4882a593Smuzhiyun reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1635*4882a593Smuzhiyun if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1636*4882a593Smuzhiyun np->speed100 = 1;
1637*4882a593Smuzhiyun np->mii_if.full_duplex = 1;
1638*4882a593Smuzhiyun } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1639*4882a593Smuzhiyun np->speed100 = 1;
1640*4882a593Smuzhiyun np->mii_if.full_duplex = 0;
1641*4882a593Smuzhiyun } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1642*4882a593Smuzhiyun np->speed100 = 0;
1643*4882a593Smuzhiyun np->mii_if.full_duplex = 1;
1644*4882a593Smuzhiyun } else {
1645*4882a593Smuzhiyun np->speed100 = 0;
1646*4882a593Smuzhiyun np->mii_if.full_duplex = 0;
1647*4882a593Smuzhiyun }
1648*4882a593Smuzhiyun } else {
1649*4882a593Smuzhiyun /* autonegotiation is disabled */
1650*4882a593Smuzhiyun if (reg0 & BMCR_SPEED100)
1651*4882a593Smuzhiyun np->speed100 = 1;
1652*4882a593Smuzhiyun else
1653*4882a593Smuzhiyun np->speed100 = 0;
1654*4882a593Smuzhiyun if (reg0 & BMCR_FULLDPLX)
1655*4882a593Smuzhiyun np->mii_if.full_duplex = 1;
1656*4882a593Smuzhiyun else
1657*4882a593Smuzhiyun np->mii_if.full_duplex = 0;
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun netif_carrier_on(dev);
1660*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1661*4882a593Smuzhiyun dev->name,
1662*4882a593Smuzhiyun np->speed100 ? "100" : "10",
1663*4882a593Smuzhiyun np->mii_if.full_duplex ? "full" : "half");
1664*4882a593Smuzhiyun
1665*4882a593Smuzhiyun new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1666*4882a593Smuzhiyun if (np->mii_if.full_duplex)
1667*4882a593Smuzhiyun new_tx_mode |= FullDuplex;
1668*4882a593Smuzhiyun if (np->tx_mode != new_tx_mode) {
1669*4882a593Smuzhiyun np->tx_mode = new_tx_mode;
1670*4882a593Smuzhiyun writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1671*4882a593Smuzhiyun udelay(1000);
1672*4882a593Smuzhiyun writel(np->tx_mode, ioaddr + TxMode);
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1676*4882a593Smuzhiyun if (np->speed100)
1677*4882a593Smuzhiyun new_intr_timer_ctrl |= Timer10X;
1678*4882a593Smuzhiyun if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1679*4882a593Smuzhiyun np->intr_timer_ctrl = new_intr_timer_ctrl;
1680*4882a593Smuzhiyun writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun } else {
1683*4882a593Smuzhiyun netif_carrier_off(dev);
1684*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1685*4882a593Smuzhiyun }
1686*4882a593Smuzhiyun }
1687*4882a593Smuzhiyun
1688*4882a593Smuzhiyun
1689*4882a593Smuzhiyun static void netdev_error(struct net_device *dev, int intr_status)
1690*4882a593Smuzhiyun {
1691*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun /* Came close to underrunning the Tx FIFO, increase threshold. */
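/* np->tx_threshold is kept in units of 16 bytes, as the message below implies. */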
1694*4882a593Smuzhiyun if (intr_status & IntrTxDataLow) {
1695*4882a593Smuzhiyun if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1696*4882a593Smuzhiyun writel(++np->tx_threshold, np->base + TxThreshold);
1697*4882a593Smuzhiyun printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1698*4882a593Smuzhiyun dev->name, np->tx_threshold * 16);
1699*4882a593Smuzhiyun } else
1700*4882a593Smuzhiyun printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun if (intr_status & IntrRxGFPDead) {
1703*4882a593Smuzhiyun dev->stats.rx_fifo_errors++;
1704*4882a593Smuzhiyun dev->stats.rx_errors++;
1705*4882a593Smuzhiyun }
1706*4882a593Smuzhiyun if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1707*4882a593Smuzhiyun dev->stats.tx_fifo_errors++;
1708*4882a593Smuzhiyun dev->stats.tx_errors++;
1709*4882a593Smuzhiyun }
1710*4882a593Smuzhiyun if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1711*4882a593Smuzhiyun printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1712*4882a593Smuzhiyun dev->name, intr_status);
1713*4882a593Smuzhiyun }
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun
1716*4882a593Smuzhiyun static struct net_device_stats *get_stats(struct net_device *dev)
1717*4882a593Smuzhiyun {
1718*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1719*4882a593Smuzhiyun void __iomem *ioaddr = np->base;
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun /* This adapter architecture needs no SMP locks. */
1722*4882a593Smuzhiyun dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1723*4882a593Smuzhiyun dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1724*4882a593Smuzhiyun dev->stats.tx_packets = readl(ioaddr + 0x57000);
1725*4882a593Smuzhiyun dev->stats.tx_aborted_errors =
1726*4882a593Smuzhiyun readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1727*4882a593Smuzhiyun dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1728*4882a593Smuzhiyun dev->stats.collisions =
1729*4882a593Smuzhiyun readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun /* The chip only needs to report frames it silently dropped. */
1732*4882a593Smuzhiyun dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1733*4882a593Smuzhiyun writew(0, ioaddr + RxDMAStatus);
1734*4882a593Smuzhiyun dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1735*4882a593Smuzhiyun dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1736*4882a593Smuzhiyun dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1737*4882a593Smuzhiyun dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1738*4882a593Smuzhiyun
1739*4882a593Smuzhiyun return &dev->stats;
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun #ifdef VLAN_SUPPORT
1743*4882a593Smuzhiyun static u32 set_vlan_mode(struct netdev_private *np)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun u32 ret = VlanMode;
1746*4882a593Smuzhiyun u16 vid;
1747*4882a593Smuzhiyun void __iomem *filter_addr = np->base + HashTable + 8;
1748*4882a593Smuzhiyun int vlan_count = 0;
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1751*4882a593Smuzhiyun if (vlan_count == 32)
1752*4882a593Smuzhiyun break;
1753*4882a593Smuzhiyun writew(vid, filter_addr);
1754*4882a593Smuzhiyun filter_addr += 16;
1755*4882a593Smuzhiyun vlan_count++;
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun if (vlan_count == 32) {
1758*4882a593Smuzhiyun ret |= PerfectFilterVlan;
1759*4882a593Smuzhiyun while (vlan_count < 32) {
1760*4882a593Smuzhiyun writew(0, filter_addr);
1761*4882a593Smuzhiyun filter_addr += 16;
1762*4882a593Smuzhiyun vlan_count++;
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun }
1765*4882a593Smuzhiyun return ret;
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun #endif /* VLAN_SUPPORT */
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun static void set_rx_mode(struct net_device *dev)
1770*4882a593Smuzhiyun {
1771*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1772*4882a593Smuzhiyun void __iomem *ioaddr = np->base;
1773*4882a593Smuzhiyun u32 rx_mode = MinVLANPrio;
1774*4882a593Smuzhiyun struct netdev_hw_addr *ha;
1775*4882a593Smuzhiyun int i;
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun #ifdef VLAN_SUPPORT
1778*4882a593Smuzhiyun rx_mode |= set_vlan_mode(np);
1779*4882a593Smuzhiyun #endif /* VLAN_SUPPORT */
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1782*4882a593Smuzhiyun rx_mode |= AcceptAll;
1783*4882a593Smuzhiyun } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1784*4882a593Smuzhiyun (dev->flags & IFF_ALLMULTI)) {
1785*4882a593Smuzhiyun /* Too many to match, or accept all multicasts. */
1786*4882a593Smuzhiyun rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1787*4882a593Smuzhiyun } else if (netdev_mc_count(dev) <= 14) {
1788*4882a593Smuzhiyun /* Use the 16 element perfect filter, skip first two entries. */
1789*4882a593Smuzhiyun void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1790*4882a593Smuzhiyun __be16 *eaddrs;
1791*4882a593Smuzhiyun netdev_for_each_mc_addr(ha, dev) {
1792*4882a593Smuzhiyun eaddrs = (__be16 *) ha->addr;
1793*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1794*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1795*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1796*4882a593Smuzhiyun }
1797*4882a593Smuzhiyun eaddrs = (__be16 *)dev->dev_addr;
1798*4882a593Smuzhiyun i = netdev_mc_count(dev) + 2;
1799*4882a593Smuzhiyun while (i++ < 16) {
1800*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1801*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1802*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1803*4882a593Smuzhiyun }
1804*4882a593Smuzhiyun rx_mode |= AcceptBroadcast|PerfectFilter;
1805*4882a593Smuzhiyun } else {
1806*4882a593Smuzhiyun /* Must use a multicast hash table. */
1807*4882a593Smuzhiyun void __iomem *filter_addr;
1808*4882a593Smuzhiyun __be16 *eaddrs;
1809*4882a593Smuzhiyun __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1810*4882a593Smuzhiyun
1811*4882a593Smuzhiyun memset(mc_filter, 0, sizeof(mc_filter));
1812*4882a593Smuzhiyun netdev_for_each_mc_addr(ha, dev) {
1813*4882a593Smuzhiyun /* The chip uses the upper 9 CRC bits
1814*4882a593Smuzhiyun as index into the hash table */
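/*
 * bit_nr holds those 9 bits (0..511): bit_nr >> 5 selects one of the
 * sixteen 32-bit filter words and bit_nr & 31 the bit within it; the
 * (bit_nr >> 4) & ~1 expression below is the same word index applied to
 * the __le16 mc_filter[] array.  For example, a CRC of 0x8E000000 gives
 * bit_nr = 284, i.e. word 8, bit 28.
 */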
1815*4882a593Smuzhiyun int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1816*4882a593Smuzhiyun __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1817*4882a593Smuzhiyun
1818*4882a593Smuzhiyun *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1819*4882a593Smuzhiyun }
1820*4882a593Smuzhiyun /* Clear the perfect filter list, skip first two entries. */
1821*4882a593Smuzhiyun filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1822*4882a593Smuzhiyun eaddrs = (__be16 *)dev->dev_addr;
1823*4882a593Smuzhiyun for (i = 2; i < 16; i++) {
1824*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1825*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1826*4882a593Smuzhiyun writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1827*4882a593Smuzhiyun }
1828*4882a593Smuzhiyun for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
1829*4882a593Smuzhiyun writew(mc_filter[i], filter_addr);
1830*4882a593Smuzhiyun rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun writel(rx_mode, ioaddr + RxFilterMode);
1833*4882a593Smuzhiyun }
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun static int check_if_running(struct net_device *dev)
1836*4882a593Smuzhiyun {
1837*4882a593Smuzhiyun if (!netif_running(dev))
1838*4882a593Smuzhiyun return -EINVAL;
1839*4882a593Smuzhiyun return 0;
1840*4882a593Smuzhiyun }
1841*4882a593Smuzhiyun
1842*4882a593Smuzhiyun static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1843*4882a593Smuzhiyun {
1844*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1845*4882a593Smuzhiyun strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1846*4882a593Smuzhiyun strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1847*4882a593Smuzhiyun }
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun static int get_link_ksettings(struct net_device *dev,
1850*4882a593Smuzhiyun struct ethtool_link_ksettings *cmd)
1851*4882a593Smuzhiyun {
1852*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1853*4882a593Smuzhiyun spin_lock_irq(&np->lock);
1854*4882a593Smuzhiyun mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1855*4882a593Smuzhiyun spin_unlock_irq(&np->lock);
1856*4882a593Smuzhiyun return 0;
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun
1859*4882a593Smuzhiyun static int set_link_ksettings(struct net_device *dev,
1860*4882a593Smuzhiyun const struct ethtool_link_ksettings *cmd)
1861*4882a593Smuzhiyun {
1862*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1863*4882a593Smuzhiyun int res;
1864*4882a593Smuzhiyun spin_lock_irq(&np->lock);
1865*4882a593Smuzhiyun res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1866*4882a593Smuzhiyun spin_unlock_irq(&np->lock);
1867*4882a593Smuzhiyun check_duplex(dev);
1868*4882a593Smuzhiyun return res;
1869*4882a593Smuzhiyun }
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun static int nway_reset(struct net_device *dev)
1872*4882a593Smuzhiyun {
1873*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1874*4882a593Smuzhiyun return mii_nway_restart(&np->mii_if);
1875*4882a593Smuzhiyun }
1876*4882a593Smuzhiyun
1877*4882a593Smuzhiyun static u32 get_link(struct net_device *dev)
1878*4882a593Smuzhiyun {
1879*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1880*4882a593Smuzhiyun return mii_link_ok(&np->mii_if);
1881*4882a593Smuzhiyun }
1882*4882a593Smuzhiyun
1883*4882a593Smuzhiyun static u32 get_msglevel(struct net_device *dev)
1884*4882a593Smuzhiyun {
1885*4882a593Smuzhiyun return debug;
1886*4882a593Smuzhiyun }
1887*4882a593Smuzhiyun
1888*4882a593Smuzhiyun static void set_msglevel(struct net_device *dev, u32 val)
1889*4882a593Smuzhiyun {
1890*4882a593Smuzhiyun debug = val;
1891*4882a593Smuzhiyun }
1892*4882a593Smuzhiyun
1893*4882a593Smuzhiyun static const struct ethtool_ops ethtool_ops = {
1894*4882a593Smuzhiyun .begin = check_if_running,
1895*4882a593Smuzhiyun .get_drvinfo = get_drvinfo,
1896*4882a593Smuzhiyun .nway_reset = nway_reset,
1897*4882a593Smuzhiyun .get_link = get_link,
1898*4882a593Smuzhiyun .get_msglevel = get_msglevel,
1899*4882a593Smuzhiyun .set_msglevel = set_msglevel,
1900*4882a593Smuzhiyun .get_link_ksettings = get_link_ksettings,
1901*4882a593Smuzhiyun .set_link_ksettings = set_link_ksettings,
1902*4882a593Smuzhiyun };
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1905*4882a593Smuzhiyun {
1906*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1907*4882a593Smuzhiyun struct mii_ioctl_data *data = if_mii(rq);
1908*4882a593Smuzhiyun int rc;
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun if (!netif_running(dev))
1911*4882a593Smuzhiyun return -EINVAL;
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun spin_lock_irq(&np->lock);
1914*4882a593Smuzhiyun rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1915*4882a593Smuzhiyun spin_unlock_irq(&np->lock);
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1918*4882a593Smuzhiyun check_duplex(dev);
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun return rc;
1921*4882a593Smuzhiyun }
1922*4882a593Smuzhiyun
1923*4882a593Smuzhiyun static int netdev_close(struct net_device *dev)
1924*4882a593Smuzhiyun {
1925*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
1926*4882a593Smuzhiyun void __iomem *ioaddr = np->base;
1927*4882a593Smuzhiyun int i;
1928*4882a593Smuzhiyun
1929*4882a593Smuzhiyun netif_stop_queue(dev);
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun napi_disable(&np->napi);
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun if (debug > 1) {
1934*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1935*4882a593Smuzhiyun dev->name, (int) readl(ioaddr + IntrStatus));
1936*4882a593Smuzhiyun printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1937*4882a593Smuzhiyun dev->name, np->cur_tx, np->dirty_tx,
1938*4882a593Smuzhiyun np->cur_rx, np->dirty_rx);
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun
1941*4882a593Smuzhiyun /* Disable interrupts by clearing the interrupt mask. */
1942*4882a593Smuzhiyun writel(0, ioaddr + IntrEnable);
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun /* Stop the chip's Tx and Rx processes. */
1945*4882a593Smuzhiyun writel(0, ioaddr + GenCtrl);
1946*4882a593Smuzhiyun readl(ioaddr + GenCtrl);
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun if (debug > 5) {
1949*4882a593Smuzhiyun printk(KERN_DEBUG " Tx ring at %#llx:\n",
1950*4882a593Smuzhiyun (long long) np->tx_ring_dma);
1951*4882a593Smuzhiyun for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1952*4882a593Smuzhiyun printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1953*4882a593Smuzhiyun i, le32_to_cpu(np->tx_ring[i].status),
1954*4882a593Smuzhiyun (long long) dma_to_cpu(np->tx_ring[i].addr),
1955*4882a593Smuzhiyun le32_to_cpu(np->tx_done_q[i].status));
1956*4882a593Smuzhiyun printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
1957*4882a593Smuzhiyun (long long) np->rx_ring_dma, np->rx_done_q);
1958*4882a593Smuzhiyun if (np->rx_done_q)
1959*4882a593Smuzhiyun for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1960*4882a593Smuzhiyun printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1961*4882a593Smuzhiyun i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1962*4882a593Smuzhiyun }
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun free_irq(np->pci_dev->irq, dev);
1966*4882a593Smuzhiyun
1967*4882a593Smuzhiyun /* Free all the skbuffs in the Rx queue. */
1968*4882a593Smuzhiyun for (i = 0; i < RX_RING_SIZE; i++) {
1969*4882a593Smuzhiyun np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1970*4882a593Smuzhiyun if (np->rx_info[i].skb != NULL) {
1971*4882a593Smuzhiyun dma_unmap_single(&np->pci_dev->dev,
1972*4882a593Smuzhiyun np->rx_info[i].mapping,
1973*4882a593Smuzhiyun np->rx_buf_sz, DMA_FROM_DEVICE);
1974*4882a593Smuzhiyun dev_kfree_skb(np->rx_info[i].skb);
1975*4882a593Smuzhiyun }
1976*4882a593Smuzhiyun np->rx_info[i].skb = NULL;
1977*4882a593Smuzhiyun np->rx_info[i].mapping = 0;
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun for (i = 0; i < TX_RING_SIZE; i++) {
1980*4882a593Smuzhiyun struct sk_buff *skb = np->tx_info[i].skb;
1981*4882a593Smuzhiyun if (skb == NULL)
1982*4882a593Smuzhiyun continue;
1983*4882a593Smuzhiyun dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
1984*4882a593Smuzhiyun skb_first_frag_len(skb), DMA_TO_DEVICE);
1985*4882a593Smuzhiyun np->tx_info[i].mapping = 0;
1986*4882a593Smuzhiyun dev_kfree_skb(skb);
1987*4882a593Smuzhiyun np->tx_info[i].skb = NULL;
1988*4882a593Smuzhiyun }
1989*4882a593Smuzhiyun
1990*4882a593Smuzhiyun return 0;
1991*4882a593Smuzhiyun }
1992*4882a593Smuzhiyun
1993*4882a593Smuzhiyun static int __maybe_unused starfire_suspend(struct device *dev_d)
1994*4882a593Smuzhiyun {
1995*4882a593Smuzhiyun struct net_device *dev = dev_get_drvdata(dev_d);
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyun if (netif_running(dev)) {
1998*4882a593Smuzhiyun netif_device_detach(dev);
1999*4882a593Smuzhiyun netdev_close(dev);
2000*4882a593Smuzhiyun }
2001*4882a593Smuzhiyun
2002*4882a593Smuzhiyun return 0;
2003*4882a593Smuzhiyun }
2004*4882a593Smuzhiyun
2005*4882a593Smuzhiyun static int __maybe_unused starfire_resume(struct device *dev_d)
2006*4882a593Smuzhiyun {
2007*4882a593Smuzhiyun struct net_device *dev = dev_get_drvdata(dev_d);
2008*4882a593Smuzhiyun
2009*4882a593Smuzhiyun if (netif_running(dev)) {
2010*4882a593Smuzhiyun netdev_open(dev);
2011*4882a593Smuzhiyun netif_device_attach(dev);
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun return 0;
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun static void starfire_remove_one(struct pci_dev *pdev)
2018*4882a593Smuzhiyun {
2019*4882a593Smuzhiyun struct net_device *dev = pci_get_drvdata(pdev);
2020*4882a593Smuzhiyun struct netdev_private *np = netdev_priv(dev);
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun BUG_ON(!dev);
2023*4882a593Smuzhiyun
2024*4882a593Smuzhiyun unregister_netdev(dev);
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun if (np->queue_mem)
2027*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, np->queue_mem_size,
2028*4882a593Smuzhiyun np->queue_mem, np->queue_mem_dma);
2029*4882a593Smuzhiyun
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun /* XXX: add wakeup code -- requires firmware for MagicPacket */
2032*4882a593Smuzhiyun pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
2033*4882a593Smuzhiyun pci_disable_device(pdev);
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun iounmap(np->base);
2036*4882a593Smuzhiyun pci_release_regions(pdev);
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun free_netdev(dev); /* Will also free np!! */
2039*4882a593Smuzhiyun }
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
2042*4882a593Smuzhiyun
2043*4882a593Smuzhiyun static struct pci_driver starfire_driver = {
2044*4882a593Smuzhiyun .name = DRV_NAME,
2045*4882a593Smuzhiyun .probe = starfire_init_one,
2046*4882a593Smuzhiyun .remove = starfire_remove_one,
2047*4882a593Smuzhiyun .driver.pm = &starfire_pm_ops,
2048*4882a593Smuzhiyun .id_table = starfire_pci_tbl,
2049*4882a593Smuzhiyun };
2050*4882a593Smuzhiyun
2051*4882a593Smuzhiyun
2052*4882a593Smuzhiyun static int __init starfire_init (void)
2053*4882a593Smuzhiyun {
2054*4882a593Smuzhiyun /* When built as a module, this is printed whether or not devices are found in probe. */
2055*4882a593Smuzhiyun #ifdef MODULE
2056*4882a593Smuzhiyun printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2057*4882a593Smuzhiyun #endif
2058*4882a593Smuzhiyun
2059*4882a593Smuzhiyun BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2060*4882a593Smuzhiyun
2061*4882a593Smuzhiyun return pci_register_driver(&starfire_driver);
2062*4882a593Smuzhiyun }
2063*4882a593Smuzhiyun
2064*4882a593Smuzhiyun
2065*4882a593Smuzhiyun static void __exit starfire_cleanup (void)
2066*4882a593Smuzhiyun {
2067*4882a593Smuzhiyun pci_unregister_driver (&starfire_driver);
2068*4882a593Smuzhiyun }
2069*4882a593Smuzhiyun
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun module_init(starfire_init);
2072*4882a593Smuzhiyun module_exit(starfire_cleanup);
2073*4882a593Smuzhiyun
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun /*
2076*4882a593Smuzhiyun * Local variables:
2077*4882a593Smuzhiyun * c-basic-offset: 8
2078*4882a593Smuzhiyun * tab-width: 8
2079*4882a593Smuzhiyun * End:
2080*4882a593Smuzhiyun */