xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/alteon/acenic.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ACENIC_H_
3*4882a593Smuzhiyun #define _ACENIC_H_
4*4882a593Smuzhiyun #include <linux/interrupt.h>
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun 
/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, because it results in more DMA (and
 * IRQs without TX_COAL_INTS_ONLY).
 */
12*4882a593Smuzhiyun #define USE_TX_COAL_NOW	 0
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun /*
15*4882a593Smuzhiyun  * Addressing:
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  * The Tigon uses 64-bit host addresses, regardless of their actual
18*4882a593Smuzhiyun  * length, and it expects a big-endian format. For 32 bit systems the
19*4882a593Smuzhiyun  * upper 32 bits of the address are simply ignored (zero), however for
20*4882a593Smuzhiyun  * little endian 64 bit systems (Alpha) this looks strange with the
21*4882a593Smuzhiyun  * two parts of the address word being swapped.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  * The addresses are split in two 32 bit words for all architectures
24*4882a593Smuzhiyun  * as some of them are in PCI shared memory and it is necessary to use
25*4882a593Smuzhiyun  * readl/writel to access them.
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  * The addressing code is derived from Pete Wyckoff's work, but
28*4882a593Smuzhiyun  * modified to deal properly with readl/writel usage.
29*4882a593Smuzhiyun  */
30*4882a593Smuzhiyun 
/*
 * Memory-mapped register layout of the Tigon NIC.
 *
 * The offsets of every field are fixed by the hardware: the padN[]
 * arrays exist only to keep the following registers at the offsets
 * noted in the comments.  Do not reorder or resize any member.
 * All fields live in PCI space and must be accessed with
 * readl()/writel() (see the "Addressing" note above).
 */
struct ace_regs {
	u32	pad0[16];	/* PCI control registers */

	u32	HostCtrl;	/* 0x40 - reset/interrupt/byte-swap control (HW_RESET etc.) */
	u32	LocalCtrl;	/* 0x44 - EEPROM bit-bang lines live here (EEPROM_*) */

	u32	pad1[2];

	u32	MiscCfg;	/* 0x50 */

	u32	pad2[2];

	u32	PciState;

	u32	pad3[2];	/* 0x60 */

	u32	WinBase;	/* base of the shared-memory Window[] below */
	u32	WinData;

	u32	pad4[12];	/* 0x70 */

	u32	DmaWriteState;	/* 0xa0 */
	u32	pad5[3];
	u32	DmaReadState;	/* 0xb0 */

	u32	pad6[26];

	u32	AssistState;

	u32	pad7[8];	/* 0x120 */

	u32	CpuCtrl;	/* 0x140 - CPU_RESET/CPU_HALT etc. for CPU A */
	u32	Pc;

	u32	pad8[3];

	u32	SramAddr;	/* 0x154 */
	u32	SramData;

	u32	pad9[49];

	u32	MacRxState;	/* 0x220 */

	u32	pad10[7];

	u32	CpuBCtrl;	/* 0x240 - second CPU (Tigon has two) */
	u32	PcB;

	u32	pad11[3];

	u32	SramBAddr;	/* 0x254 */
	u32	SramBData;

	u32	pad12[105];

	u32	pad13[32];	/* 0x400 */
	u32	Stats[32];

	/*
	 * Mailbox block at 0x500.  The "Lo" halves of mailboxes 1-5 are
	 * the producer indices the host writes (CmdPrd, TxPrd, RxStdPrd,
	 * RxJumboPrd, RxMiniPrd).
	 */
	u32	Mb0Hi;		/* 0x500 */
	u32	Mb0Lo;
	u32	Mb1Hi;
	u32	CmdPrd;
	u32	Mb2Hi;
	u32	TxPrd;
	u32	Mb3Hi;
	u32	RxStdPrd;
	u32	Mb4Hi;
	u32	RxJumboPrd;
	u32	Mb5Hi;
	u32	RxMiniPrd;
	u32	Mb6Hi;
	u32	Mb6Lo;
	u32	Mb7Hi;
	u32	Mb7Lo;
	u32	Mb8Hi;
	u32	Mb8Lo;
	u32	Mb9Hi;
	u32	Mb9Lo;
	u32	MbAHi;
	u32	MbALo;
	u32	MbBHi;
	u32	MbBLo;
	u32	MbCHi;
	u32	MbCLo;
	u32	MbDHi;
	u32	MbDLo;
	u32	MbEHi;
	u32	MbELo;
	u32	MbFHi;
	u32	MbFLo;

	u32	pad14[32];

	u32	MacAddrHi;	/* 0x600 */
	u32	MacAddrLo;
	u32	InfoPtrHi;	/* DMA address of struct ace_info (hi/lo halves) */
	u32	InfoPtrLo;
	u32	MultiCastHi;	/* 0x610 */
	u32	MultiCastLo;
	u32	ModeStat;	/* ACE_BYTE_SWAP_BD etc. */
	u32	DmaReadCfg;
	u32	DmaWriteCfg;	/* 0x620 */
	u32	TxBufRat;
	u32	EvtCsm;
	u32	CmdCsm;
	u32	TuneRxCoalTicks;/* 0x630 - coalescing knobs, in TICKS_PER_SEC units */
	u32	TuneTxCoalTicks;
	u32	TuneStatTicks;
	u32	TuneMaxTxDesc;
	u32	TuneMaxRxDesc;	/* 0x640 */
	u32	TuneTrace;
	u32	TuneLink;
	u32	TuneFastLink;
	u32	TracePtr;	/* 0x650 */
	u32	TraceStrt;
	u32	TraceLen;
	u32	IfIdx;
	u32	IfMtu;		/* 0x660 */
	u32	MaskInt;
	u32	GigLnkState;
	u32	FastLnkState;
	u32	pad16[4];	/* 0x670 */
	u32	RxRetCsm;	/* 0x680 */

	u32	pad17[31];

	u32	CmdRng[64];	/* 0x700 - command ring, CMD_RING_ENTRIES entries */
	u32	Window[0x200];	/* 0x800 - shared-memory window (ACE_WINDOW_SIZE bytes) */
};
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 
/*
 * 64-bit DMA address in the NIC's format: split into separate hi/lo
 * 32-bit words so each half can be written independently with
 * writel() when the structure sits in PCI shared memory
 * (see set_aceaddr()).
 */
typedef struct {
	u32 addrhi;
	u32 addrlo;
} aceaddr;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 
/* Size in bytes of the shared-memory window (matches Window[0x200] above) */
#define ACE_WINDOW_SIZE	0x800

#define ACE_JUMBO_MTU 9000
#define ACE_STD_MTU 1500

#define ACE_TRACE_SIZE 0x8000

/*
 * Host control register bits.
 */

#define IN_INT		0x01
#define CLR_INT		0x02
#define HW_RESET	0x08
#define BYTE_SWAP	0x10
#define WORD_SWAP	0x20
#define MASK_INTS	0x40

/*
 * Local control register bits.
 */

/* Serial EEPROM is bit-banged through these LocalCtrl bits */
#define EEPROM_DATA_IN		0x800000
#define EEPROM_DATA_OUT		0x400000
#define EEPROM_WRITE_ENABLE	0x200000
#define EEPROM_CLK_OUT		0x100000

#define EEPROM_BASE		0xa0000000

/* Device-select bytes (0xa0/0xa1 -- presumably I2C-style write/read
 * addresses of a 24Cxx-type part; confirm against read_eeprom_byte()) */
#define EEPROM_WRITE_SELECT	0xa0
#define EEPROM_READ_SELECT	0xa1

#define SRAM_BANK_512K		0x200


/*
 * udelay() values for when clocking the eeprom
 */
#define ACE_SHORT_DELAY		2
#define ACE_LONG_DELAY		4


/*
 * Misc Config bits
 */

#define SYNC_SRAM_TIMING	0x100000


/*
 * CPU state bits (CpuCtrl / CpuBCtrl).
 */

#define CPU_RESET		0x01
#define CPU_TRACE		0x02
#define CPU_PROM_FAILED		0x10
#define CPU_HALT		0x00010000
#define CPU_HALTED		0xffff0000


/*
 * PCI State bits.  The DMA_{READ,WRITE}_MAX_* values are mutually
 * exclusive burst-size encodings, masked by DMA_READ_WRITE_MASK.
 */

#define DMA_READ_MAX_4		0x04
#define DMA_READ_MAX_16		0x08
#define DMA_READ_MAX_32		0x0c
#define DMA_READ_MAX_64		0x10
#define DMA_READ_MAX_128	0x14
#define DMA_READ_MAX_256	0x18
#define DMA_READ_MAX_1K		0x1c
#define DMA_WRITE_MAX_4		0x20
#define DMA_WRITE_MAX_16	0x40
#define DMA_WRITE_MAX_32	0x60
#define DMA_WRITE_MAX_64	0x80
#define DMA_WRITE_MAX_128	0xa0
#define DMA_WRITE_MAX_256	0xc0
#define DMA_WRITE_MAX_1K	0xe0
#define DMA_READ_WRITE_MASK	0xfc
#define MEM_READ_MULTIPLE	0x00020000
#define PCI_66MHZ		0x00080000
#define PCI_32BIT		0x00100000
#define DMA_WRITE_ALL_ALIGN	0x00800000
#define READ_CMD_MEM		0x06000000
#define WRITE_CMD_MEM		0x70000000
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 
/*
 * Mode status (ModeStat register)
 */

#define ACE_BYTE_SWAP_BD	0x02
#define ACE_WORD_SWAP_BD	0x04		/* not actually used */
#define ACE_WARN		0x08
#define ACE_BYTE_SWAP_DMA	0x10
#define ACE_NO_JUMBO_FRAG	0x200
#define ACE_FATAL		0x40000000


/*
 * DMA config (DmaReadCfg / DmaWriteCfg thresholds, in words)
 */

#define DMA_THRESH_1W		0x10
#define DMA_THRESH_2W		0x20
#define DMA_THRESH_4W		0x40
#define DMA_THRESH_8W		0x80
#define DMA_THRESH_16W		0x100
#define DMA_THRESH_32W		0x0	/* not described in doc, but exists. */


/*
 * Tuning parameters
 */

/* 1000000 ticks/s => the Tune*Ticks coalescing values are microseconds */
#define TICKS_PER_SEC		1000000


/*
 * Link bits
 */

#define LNK_PREF		0x00008000
#define LNK_10MB		0x00010000
#define LNK_100MB		0x00020000
#define LNK_1000MB		0x00040000
#define LNK_FULL_DUPLEX		0x00080000
#define LNK_HALF_DUPLEX		0x00100000
#define LNK_TX_FLOW_CTL_Y	0x00200000
#define LNK_NEG_ADVANCED	0x00400000
#define LNK_RX_FLOW_CTL_Y	0x00800000
#define LNK_NIC			0x01000000
#define LNK_JAM			0x02000000
#define LNK_JUMBO		0x04000000
#define LNK_ALTEON		0x08000000
#define LNK_NEG_FCTL		0x10000000
#define LNK_NEGOTIATE		0x20000000
#define LNK_ENABLE		0x40000000
#define LNK_UP			0x80000000


/*
 * Event definitions
 */

#define EVT_RING_ENTRIES	256
#define EVT_RING_SIZE	(EVT_RING_ENTRIES * sizeof(struct event))
315*4882a593Smuzhiyun 
/*
 * One entry of the event ring filled in by the NIC firmware.
 * The bitfield order flips with host bitfield endianness so the
 * packed 32-bit word matches the layout the firmware produces.
 */
struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;		/* ring index the event refers to */
	u32	code:12;	/* event sub-code (E_C_*) */
	u32	evt:8;		/* event type (E_*) */
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
	u32     pad;
};
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 
/*
 * Events (values of the 'evt' field; E_C_* values go in 'code')
 */

#define E_FW_RUNNING		0x01
#define E_STATS_UPDATED		0x04

/* duplicate name for the same event code (0x04) as E_STATS_UPDATED */
#define E_STATS_UPDATE		0x04

#define E_LNK_STATE		0x06
/* sub-codes for E_LNK_STATE */
#define E_C_LINK_UP		0x01
#define E_C_LINK_DOWN		0x02
#define E_C_LINK_10_100		0x03

#define E_ERROR			0x07
/* sub-codes for E_ERROR */
#define E_C_ERR_INVAL_CMD	0x01
#define E_C_ERR_UNIMP_CMD	0x02
#define E_C_ERR_BAD_CFG		0x03

#define E_MCAST_LIST		0x08
/* sub-codes for E_MCAST_LIST */
#define E_C_MCAST_ADDR_ADD	0x01
#define E_C_MCAST_ADDR_DEL	0x02

#define E_RESET_JUMBO_RNG	0x09


/*
 * Commands
 */

/* Number of entries in the command ring (matches CmdRng[64] above) */
#define CMD_RING_ENTRIES	64
361*4882a593Smuzhiyun 
/*
 * One entry of the host->NIC command ring.  Same packed 32-bit
 * layout as struct event, minus the pad word: bitfield order flips
 * with host bitfield endianness.
 */
struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;		/* ring index the command refers to */
	u32	code:12;	/* command sub-code (C_C_*) */
	u32	evt:8;		/* command type (C_*) */
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
};
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 
/* C_* are command identifiers with C_C_* sub-codes -- mirrors the
 * E_*/E_C_* scheme used for events above. */
#define C_HOST_STATE		0x01
#define C_C_STACK_UP		0x01
#define C_C_STACK_DOWN		0x02

#define C_FDR_FILTERING		0x02
#define C_C_FDR_FILT_ENABLE	0x01
#define C_C_FDR_FILT_DISABLE	0x02

#define C_SET_RX_PRD_IDX	0x03
#define C_UPDATE_STATS		0x04
#define C_RESET_JUMBO_RNG	0x05
#define C_ADD_MULTICAST_ADDR	0x08
#define C_DEL_MULTICAST_ADDR	0x09

#define C_SET_PROMISC_MODE	0x0a
#define C_C_PROMISC_ENABLE	0x01
#define C_C_PROMISC_DISABLE	0x02

#define C_LNK_NEGOTIATION	0x0b
#define C_C_NEGOTIATE_BOTH	0x00
#define C_C_NEGOTIATE_GIG	0x01
#define C_C_NEGOTIATE_10_100	0x02

#define C_SET_MAC_ADDR		0x0c
#define C_CLEAR_PROFILE		0x0d

#define C_SET_MULTICAST_MODE	0x0e
#define C_C_MCAST_ENABLE	0x01
#define C_C_MCAST_DISABLE	0x02

#define C_CLEAR_STATS		0x0f
#define C_SET_RX_JUMBO_PRD_IDX	0x10
#define C_REFRESH_STATS		0x11


/*
 * Descriptor flags (the flags half of tx_desc/rx_desc)
 */
#define BD_FLG_TCP_UDP_SUM	0x01
#define BD_FLG_IP_SUM		0x02
#define BD_FLG_END		0x04
#define BD_FLG_MORE		0x08
#define BD_FLG_JUMBO		0x10
#define BD_FLG_UCAST		0x20
#define BD_FLG_MCAST		0x40
/* broadcast == both bits of the 0x60 address-type field set */
#define BD_FLG_BCAST		0x60
#define BD_FLG_TYP_MASK		0x60
#define BD_FLG_IP_FRAG		0x80
#define BD_FLG_IP_FRAG_END	0x100
#define BD_FLG_VLAN_TAG		0x200
#define BD_FLG_FRAME_ERROR	0x400
#define BD_FLG_COAL_NOW		0x800
#define BD_FLG_MINI		0x1000


/*
 * Ring Control block flags (struct ring_ctrl .flags)
 */
#define RCB_FLG_TCP_UDP_SUM	0x01
#define RCB_FLG_IP_SUM		0x02
#define RCB_FLG_NO_PSEUDO_HDR	0x08
#define RCB_FLG_VLAN_ASSIST	0x10
#define RCB_FLG_COAL_INT_ONLY	0x20
#define RCB_FLG_TX_HOST_RING	0x40
#define RCB_FLG_IEEE_SNAP_SUM	0x80
#define RCB_FLG_EXT_RX_BD	0x100
#define RCB_FLG_RNG_DISABLE	0x200


/*
 * TX ring - maximum TX ring entries for Tigon I's is 128
 */
#define MAX_TX_RING_ENTRIES	256
#define TIGON_I_TX_RING_ENTRIES	128
#define TX_RING_SIZE		(MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
/* NOTE(review): presumably the NIC-local address of the TX ring on
 * Tigon I -- confirm against the .c file before relying on it */
#define TX_RING_BASE		0x3800
451*4882a593Smuzhiyun 
/*
 * TX descriptor.  May live in PCI shared memory, so the 16-bit halves
 * are accessed as packed 32-bit words (flagsize, vlanres) via
 * readl/writel instead of the per-field layout shown in the
 * disabled block below.
 */
struct tx_desc{
        aceaddr	addr;		/* DMA address of the TX buffer */
	u32	flagsize;	/* packed flags (BD_FLG_*) + buffer size */
#if 0
/*
 * This is in PCI shared mem and must be accessed with readl/writel
 * real layout is:
 */
#if __LITTLE_ENDIAN
	u16	flags;
	u16	size;
	u16	vlan;
	u16	reserved;
#else
	u16	size;
	u16	flags;
	u16	reserved;
	u16	vlan;
#endif
#endif
	u32	vlanres;	/* packed vlan tag + reserved word */
};
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 
#define RX_STD_RING_ENTRIES	512
#define RX_STD_RING_SIZE	(RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES	256
#define RX_JUMBO_RING_SIZE	(RX_JUMBO_RING_ENTRIES *sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES	1024
#define RX_MINI_RING_SIZE	(RX_MINI_RING_ENTRIES *sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES	2048
/* NOTE(review): RX_MAX_RETURN_RING_ENTRIES is not defined in this
 * header -- it must come from the including .c file; verify. */
#define RX_RETURN_RING_SIZE	(RX_MAX_RETURN_RING_ENTRIES * \
				 sizeof(struct rx_desc))
488*4882a593Smuzhiyun 
/*
 * RX descriptor.  Each 32-bit word is split into endian-dependent
 * 16-bit halves so the in-memory layout matches what the NIC
 * reads/writes regardless of host endianness.
 */
struct rx_desc{
	aceaddr	addr;		/* DMA address of the receive buffer */
#ifdef __LITTLE_ENDIAN
	u16	size;
	u16	idx;
#else
	u16	idx;
	u16	size;
#endif
#ifdef __LITTLE_ENDIAN
	u16	flags;		/* BD_FLG_* */
	u16	type;
#else
	u16	type;
	u16	flags;
#endif
#ifdef __LITTLE_ENDIAN
	u16	tcp_udp_csum;
	u16	ip_csum;
#else
	u16	ip_csum;
	u16	tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
	u16	vlan;
	u16	err_flags;
#else
	u16	err_flags;
	u16	vlan;
#endif
	u32	reserved;
	u32	opague;		/* sic -- presumably "opaque"; name kept as-is
				 * since renaming would break existing users */
};
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun 
/*
 * This struct is shared with the NIC firmware.  The two 16-bit halves
 * are swapped by host endianness so the packed 32-bit word matches
 * the firmware's view; do not change the layout.
 */
struct ring_ctrl {
	aceaddr	rngptr;		/* DMA address of the ring this block describes */
#ifdef __LITTLE_ENDIAN
	u16	flags;		/* RCB_FLG_* */
	u16	max_len;
#else
	u16	max_len;
	u16	flags;
#endif
	u32	pad;
};
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 
/*
 * MAC statistics block as laid out by the NIC.  Field order and the
 * pad[3] hole are part of the hardware layout -- do not reorder.
 * coll_1..coll_15 appear to be a per-collision-count histogram;
 * confirm exact semantics against the Tigon documentation.
 */
struct ace_mac_stats {
	u32 excess_colls;
	u32 coll_1;
	u32 coll_2;
	u32 coll_3;
	u32 coll_4;
	u32 coll_5;
	u32 coll_6;
	u32 coll_7;
	u32 coll_8;
	u32 coll_9;
	u32 coll_10;
	u32 coll_11;
	u32 coll_12;
	u32 coll_13;
	u32 coll_14;
	u32 coll_15;
	u32 late_coll;
	u32 defers;
	u32 crc_err;
	u32 underrun;
	u32 crs_err;
	u32 pad[3];
	u32 drop_ula;
	u32 drop_mc;
	u32 drop_fc;
	u32 drop_space;
	u32 coll;
	u32 kept_bc;
	u32 kept_mc;
	u32 kept_uc;
};
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 
/*
 * General information block shared with the NIC firmware -- presumably
 * installed via the InfoPtrHi/InfoPtrLo registers (verify in the .c
 * file).  Layout is part of the host/firmware ABI; do not reorder.
 */
struct ace_info {
	union {
		u32 stats[256];
	} s;
	struct ring_ctrl	evt_ctrl;	/* one ring_ctrl per ring */
	struct ring_ctrl	cmd_ctrl;
	struct ring_ctrl	tx_ctrl;
	struct ring_ctrl	rx_std_ctrl;
	struct ring_ctrl	rx_jumbo_ctrl;
	struct ring_ctrl	rx_mini_ctrl;
	struct ring_ctrl	rx_return_ctrl;
	aceaddr	evt_prd_ptr;	/* host addresses the NIC writes indices to */
	aceaddr	rx_ret_prd_ptr;
	aceaddr	tx_csm_ptr;
	aceaddr	stats2_ptr;
};
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 
/*
 * Per-buffer bookkeeping for the RX rings: the skb and its DMA
 * mapping, so the buffer can be unmapped on completion.
 */
struct ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun 
/*
 * Funny... As soon as we add maplen on alpha, it starts to work
 * much slower. Hmm... is it because struct does not fit to one cacheline?
 * So, split tx_ring_info.
 */
/* Like ring_info, but also records the mapped length needed to unmap
 * TX buffers.  Kept as a separate struct for the reason above. */
struct tx_ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun 
/*
 * struct ace_skb holding the rings of skb's. This is an awful lot of
 * pointers, but I don't see any other smart mode to do this in an
 * efficient manner ;-(
 */
struct ace_skb
{
	/* one bookkeeping entry per descriptor in the matching ring */
	struct tx_ring_info	tx_skbuff[MAX_TX_RING_ENTRIES];
	struct ring_info	rx_std_skbuff[RX_STD_RING_ENTRIES];
	struct ring_info	rx_mini_skbuff[RX_MINI_RING_ENTRIES];
	struct ring_info	rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 
/*
 * Struct private for the AceNIC.
 *
 * Elements are grouped so variables used by the tx handling goes
 * together, and will go into the same cache lines etc. in order to
 * avoid cache line contention between the rx and tx handling on SMP.
 *
 * Frequently accessed variables are put at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
	struct net_device	*ndev;		/* backpointer */
	struct ace_info		*info;		/* firmware-shared info block */
	struct ace_regs	__iomem	*regs;		/* register base */
	struct ace_skb		*skb;		/* per-ring skb bookkeeping */
	dma_addr_t		info_dma;	/* 32/64 bit */

	int			version, link;
	int			promisc, mcast_all;

	/*
	 * TX elements
	 */
	struct tx_desc		*tx_ring;
	u32			tx_prd;		/* TX producer index (host side) */
	volatile u32		tx_ret_csm;	/* TX consumer index (csm) */
	int			tx_ring_entries;

	/*
	 * RX elements
	 */
	unsigned long		std_refill_busy
				__attribute__ ((aligned (SMP_CACHE_BYTES)));
	unsigned long		mini_refill_busy, jumbo_refill_busy;
	atomic_t		cur_rx_bufs;
	atomic_t		cur_mini_bufs;
	atomic_t		cur_jumbo_bufs;
	u32			rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
	u32			cur_rx;

	struct rx_desc		*rx_std_ring;
	struct rx_desc		*rx_jumbo_ring;
	struct rx_desc		*rx_mini_ring;
	struct rx_desc		*rx_return_ring;

	int			tasklet_pending, jumbo;
	struct tasklet_struct	ace_tasklet;

	struct event		*evt_ring;

	/* indices the NIC DMAs into host memory (see ace_info *_ptr fields) */
	volatile u32		*evt_prd, *rx_ret_prd, *tx_csm;

	dma_addr_t		tx_ring_dma;	/* 32/64 bit */
	dma_addr_t		rx_ring_base_dma;
	dma_addr_t		evt_ring_dma;
	dma_addr_t		evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;

	unsigned char		*trace_buf;
	struct pci_dev		*pdev;
	struct net_device	*next;
	volatile int		fw_running;
	int			board_idx;
	u16			pci_command;
	u8			pci_latency;
	const char		*name;
#ifdef INDEX_DEBUG
	spinlock_t		debug_lock
				__attribute__ ((aligned (SMP_CACHE_BYTES)));
	u32			last_tx, last_std_rx, last_mini_rx;
#endif
	int			pci_using_dac;
	u8			firmware_major;
	u8			firmware_minor;
	u8			firmware_fix;
	u32			firmware_start;
};
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun #define TX_RESERVED	MAX_SKB_FRAGS
704*4882a593Smuzhiyun 
tx_space(struct ace_private * ap,u32 csm,u32 prd)705*4882a593Smuzhiyun static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd)
706*4882a593Smuzhiyun {
707*4882a593Smuzhiyun 	return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
708*4882a593Smuzhiyun }
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun #define tx_free(ap) 		tx_space((ap)->tx_ret_csm, (ap)->tx_prd, ap)
711*4882a593Smuzhiyun #define tx_ring_full(ap, csm, prd)	(tx_space(ap, csm, prd) <= TX_RESERVED)
712*4882a593Smuzhiyun 
/*
 * Store a host DMA address into a NIC-format aceaddr, splitting it
 * into the lo/hi 32-bit halves the hardware expects.  The wmb()
 * orders these stores before any subsequent writes (e.g. handing the
 * descriptor to the NIC).
 */
static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
	u64 dma = (u64) addr;

	aa->addrlo = (u32) (dma & 0xffffffff);
	aa->addrhi = (u32) (dma >> 32);
	wmb();
}
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 
/*
 * Publish a new TX producer index to the NIC through the TxPrd
 * mailbox register.
 *
 * With INDEX_DEBUG the write is serialized by debug_lock and compared
 * against the last value written, so a race where two paths publish
 * the same producer index gets logged.  The trailing wmb() orders the
 * mailbox write before any subsequent stores.
 */
static inline void ace_set_txprd(struct ace_regs __iomem *regs,
				 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&ap->debug_lock, flags);
	writel(value, &regs->TxPrd);
	if (value == ap->last_tx)
		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
		       "to tx producer (%i)\n", value);
	ap->last_tx = value;
	spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
	writel(value, &regs->TxPrd);
#endif
	wmb();
}
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 
/*
 * Mask the NIC's interrupt, then call ace_sync_irq() (defined
 * elsewhere -- presumably waits for an in-flight handler; verify).
 *
 * Tigon I uses the dedicated MaskInt register (1 = masked, see
 * ace_unmask_irq writing 0); Tigon II masks via the MASK_INTS bit
 * in HostCtrl.
 */
static inline void ace_mask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(1, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

	ace_sync_irq(dev->irq);
}
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 
ace_unmask_irq(struct net_device * dev)755*4882a593Smuzhiyun static inline void ace_unmask_irq(struct net_device *dev)
756*4882a593Smuzhiyun {
757*4882a593Smuzhiyun 	struct ace_private *ap = netdev_priv(dev);
758*4882a593Smuzhiyun 	struct ace_regs __iomem *regs = ap->regs;
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	if (ACE_IS_TIGON_I(ap))
761*4882a593Smuzhiyun 		writel(0, &regs->MaskInt);
762*4882a593Smuzhiyun 	else
763*4882a593Smuzhiyun 		writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
764*4882a593Smuzhiyun }
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 
/*
 * Prototypes -- implemented in the .c file that includes this header.
 */
static int ace_init(struct net_device *dev);
/* RX ring refill, one helper per ring flavour */
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
/* net_device operations */
static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(struct tasklet_struct *t);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
/* descriptor/ring setup and teardown */
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
791*4882a593Smuzhiyun 
792*4882a593Smuzhiyun #endif /* _ACENIC_H_ */
793