/* SPDX-License-Identifier: GPL-2.0-or-later */
/*------------------------------------------------------------------------
 . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device.
 .
 . Copyright (C) 1996 by Erik Stahlman
 . Copyright (C) 2001 Standard Microsystems Corporation
 .	Developed by Simple Network Magic Corporation
 . Copyright (C) 2003 Monta Vista Software, Inc.
 .	Unified SMC91x driver by Nicolas Pitre
 .
 .
 . Information contained in this file was obtained from the LAN91C111
 . manual from SMC. To get a copy, if you really want one, you can find
 . information under www.smsc.com.
 .
 . Authors
 .	Erik Stahlman		<erik@vt.edu>
 .	Daris A Nevil		<dnevil@snmc.com>
 .	Nicolas Pitre		<nico@fluxnic.net>
 .
 ---------------------------------------------------------------------------*/
#ifndef _SMC91X_H_
#define _SMC91X_H_

#include <linux/dmaengine.h>
#include <linux/smc91x.h>

/*
 * Any 16-bit access is performed with two 8-bit accesses if the hardware
 * can't do it directly. Most registers are 16-bit so those are mandatory.
 */
#define SMC_outw_b(x, a, r) \
	do { \
		unsigned int __val16 = (x); \
		unsigned int __reg = (r); \
		SMC_outb(__val16, a, __reg); \
		SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
	} while (0)

#define SMC_inw_b(a, r) \
	({ \
		unsigned int __val16; \
		unsigned int __reg = r; \
		__val16 = SMC_inb(a, __reg); \
		__val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
		__val16; \
	})
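
/*
 * Illustrative example (not used by the driver itself), assuming an
 * 8-bit-only bus with SMC_IO_SHIFT == 0: SMC_outw_b(0x1234, a, 0x6)
 * expands to two byte writes -- 0x34 to offset 0x6 and 0x12 to offset
 * 0x7 -- i.e. the low byte goes to the lower I/O location and the high
 * byte to the next one (offset by 1 << SMC_IO_SHIFT).  SMC_inw_b()
 * reassembles a 16-bit value from the same two locations.
 */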

/*
 * Define your architecture specific bus configuration parameters here.
 */

#if defined(CONFIG_ARM)

#include <asm/mach-types.h>

/* The bus width is now specified in the platform data;
 * pretend here to support all I/O access types.
 */
#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1

#define SMC_IO_SHIFT (lp->io_shift)

#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) \
	({ \
		unsigned int __smc_r = r; \
		SMC_16BIT(lp) ? readw((a) + __smc_r) : \
		SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
		({ BUG(); 0; }); \
	})

#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r) \
	do { \
		unsigned int __v = v, __smc_r = r; \
		if (SMC_16BIT(lp)) \
			__SMC_outw(lp, __v, a, __smc_r); \
		else if (SMC_8BIT(lp)) \
			SMC_outw_b(__v, a, __smc_r); \
		else \
			BUG(); \
	} while (0)

#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
#define SMC_IRQ_FLAGS (-1) /* from resource */

/* We actually can't write halfwords properly if not word aligned */
static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
				    bool use_align4_workaround)
{
	if (use_align4_workaround) {
		unsigned int v = val << 16;
		v |= readl(ioaddr + (reg & ~2)) & 0xffff;
		writel(v, ioaddr + (reg & ~2));
	} else {
		writew(val, ioaddr + reg);
	}
}

#define __SMC_outw(lp, v, a, r) \
	_SMC_outw_align4((v), (a), (r), \
			 IS_BUILTIN(CONFIG_ARCH_PXA) && ((r) & 2) && \
			 (lp)->cfg.pxa_u16_align4)


#elif defined(CONFIG_SH_SH4202_MICRODEV)

#define SMC_CAN_USE_8BIT 0
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 0

#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000)
#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000)
#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000)
#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000)
#define SMC_outw(lp, v, a, r) outw(v, (a) + (r) - 0xa0000000)
#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000)
#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l)
#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l)
#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l)
#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l)

#define SMC_IRQ_FLAGS (0)

#elif defined(CONFIG_ATARI)

#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1

#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r) writew(v, (a) + (r))
#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)

#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX

#elif defined(CONFIG_COLDFIRE)

#define SMC_CAN_USE_8BIT 0
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 0
#define SMC_NOWAIT 1

static inline void mcf_insw(void *a, unsigned char *p, int l)
{
	u16 *wp = (u16 *) p;
	while (l-- > 0)
		*wp++ = readw(a);
}

static inline void mcf_outsw(void *a, unsigned char *p, int l)
{
	u16 *wp = (u16 *) p;
	while (l-- > 0)
		writew(*wp++, a);
}

#define SMC_inw(a, r) _swapw(readw((a) + (r)))
#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)

#define SMC_IRQ_FLAGS 0

#elif defined(CONFIG_H8300)
#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 0
#define SMC_CAN_USE_32BIT 0
#define SMC_NOWAIT 0

#define SMC_inb(a, r) ioread8((a) + (r))
#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
#define SMC_insb(a, r, p, l) ioread8_rep((a) + (r), p, l)
#define SMC_outsb(a, r, p, l) iowrite8_rep((a) + (r), p, l)

#else

/*
 * Default configuration
 */

#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1

#define SMC_IO_SHIFT (lp->io_shift)

#define SMC_inb(a, r) ioread8((a) + (r))
#define SMC_inw(a, r) ioread16((a) + (r))
#define SMC_inl(a, r) ioread32((a) + (r))
#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
#define SMC_outw(lp, v, a, r) iowrite16(v, (a) + (r))
#define SMC_outl(v, a, r) iowrite32(v, (a) + (r))
#define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l)
#define SMC_insl(a, r, p, l) ioread32_rep((a) + (r), p, l)
#define SMC_outsl(a, r, p, l) iowrite32_rep((a) + (r), p, l)

#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX

#endif


/* store this information for the driver.. */
struct smc_local {
	/*
	 * If I have to wait until memory is available to send a
	 * packet, I will store the skbuff here, until I get the
	 * desired memory. Then, I'll send it out and free it.
	 */
	struct sk_buff *pending_tx_skb;
	struct tasklet_struct tx_task;

	struct gpio_desc *power_gpio;
	struct gpio_desc *reset_gpio;

	/* version/revision of the SMC91x chip */
	int version;

	/* Contains the current active transmission mode */
	int tcr_cur_mode;

	/* Contains the current active receive mode */
	int rcr_cur_mode;

	/* Contains the current active receive/phy mode */
	int rpc_cur_mode;
	int ctl_rfduplx;
	int ctl_rspeed;

	u32 msg_enable;
	u32 phy_type;
	struct mii_if_info mii;

	/* work queue */
	struct work_struct phy_configure;
	struct net_device *dev;
	int work_pending;

	spinlock_t lock;

#ifdef CONFIG_ARCH_PXA
	/* DMA needs the physical address of the chip */
	u_long physaddr;
	struct device *device;
#endif
	struct dma_chan *dma_chan;
	void __iomem *base;
	void __iomem *datacs;

	/* the low address lines on some platforms aren't connected... */
	int io_shift;
	/* on some platforms a u16 write must be 4-bytes aligned */
	bool half_word_align4;

	struct smc91x_platdata cfg;
};

#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)

#ifdef CONFIG_ARCH_PXA
/*
 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
 * always happening in irq context so no need to worry about races. TX is
 * different and probably not worth it for that reason, and not as critical
 * as RX which can overrun memory and lose packets.
 */
#include <linux/dma-mapping.h>

#ifdef SMC_insl
#undef SMC_insl
#define SMC_insl(a, r, p, l) \
	smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
{
	dma_addr_t dmabuf;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	enum dma_status status;
	struct dma_tx_state state;

	dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
	tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
					 DMA_DEV_TO_MEM, 0);
	if (tx) {
		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(lp->dma_chan);
		do {
			status = dmaengine_tx_status(lp->dma_chan, cookie,
						     &state);
			cpu_relax();
		} while (status != DMA_COMPLETE && status != DMA_ERROR &&
			 state.residue);
		dmaengine_terminate_all(lp->dma_chan);
	}
	dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
}

static inline void
smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
		 u_char *buf, int len)
{
	struct dma_slave_config config;
	int ret;

	/* fallback if no DMA available */
	if (!lp->dma_chan) {
		readsl(ioaddr + reg, buf, len);
		return;
	}

	/* 64 bit alignment is required for memory to memory DMA */
	if ((long)buf & 4) {
		*((u32 *)buf) = SMC_inl(ioaddr, reg);
		buf += 4;
		len--;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = lp->physaddr + reg;
	config.dst_addr = lp->physaddr + reg;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(lp->dma_chan, &config);
	if (ret) {
		dev_err(lp->device, "dma channel configuration failed: %d\n",
			ret);
		return;
	}

	len *= 4;
	smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#ifdef SMC_insw
#undef SMC_insw
#define SMC_insw(a, r, p, l) \
	smc_pxa_dma_insw(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
		 u_char *buf, int len)
{
	struct dma_slave_config config;
	int ret;

	/* fallback if no DMA available */
	if (!lp->dma_chan) {
		readsw(ioaddr + reg, buf, len);
		return;
	}

	/* 64 bit alignment is required for memory to memory DMA */
	while ((long)buf & 6) {
		*((u16 *)buf) = SMC_inw(ioaddr, reg);
		buf += 2;
		len--;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = lp->physaddr + reg;
	config.dst_addr = lp->physaddr + reg;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(lp->dma_chan, &config);
	if (ret) {
		dev_err(lp->device, "dma channel configuration failed: %d\n",
			ret);
		return;
	}

	len *= 2;
	smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#endif /* CONFIG_ARCH_PXA */


/*
 * Everything a particular hardware setup needs should have been defined
 * at this point. Add stubs for the undefined cases, mainly to avoid
 * compilation warnings since they'll be optimized away, or to prevent buggy
 * use of them.
 */

#if ! SMC_CAN_USE_32BIT
#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
#define SMC_outl(x, ioaddr, reg) BUG()
#define SMC_insl(a, r, p, l) BUG()
#define SMC_outsl(a, r, p, l) BUG()
#endif

#if !defined(SMC_insl) || !defined(SMC_outsl)
#define SMC_insl(a, r, p, l) BUG()
#define SMC_outsl(a, r, p, l) BUG()
#endif

#if ! SMC_CAN_USE_16BIT

#define SMC_outw(lp, x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()

#endif

#if !defined(SMC_insw) || !defined(SMC_outsw)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()
#endif

#if ! SMC_CAN_USE_8BIT
#undef SMC_inb
#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
#undef SMC_outb
#define SMC_outb(x, ioaddr, reg) BUG()
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
#endif

#if !defined(SMC_insb) || !defined(SMC_outsb)
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
#endif

#ifndef SMC_CAN_USE_DATACS
#define SMC_CAN_USE_DATACS 0
#endif

#ifndef SMC_IO_SHIFT
#define SMC_IO_SHIFT 0
#endif

#ifndef SMC_IRQ_FLAGS
#define SMC_IRQ_FLAGS IRQF_TRIGGER_RISING
#endif

#ifndef SMC_INTERRUPT_PREAMBLE
#define SMC_INTERRUPT_PREAMBLE
#endif


/* Because of bank switching, the LAN91x uses only 16 I/O ports */
#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
#define SMC_DATA_EXTENT (4)

/*
 . Bank Select Register:
 .
 .	yyyy yyyy 0000 00xx
 .	xx = bank number
 .	yyyy yyyy = 0x33, for identification purposes.
*/
#define BANK_SELECT (14 << SMC_IO_SHIFT)
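
/*
 * Example (illustrative only): with SMC_IO_SHIFT == 0 the bank select
 * register sits at offset 14 and reads back as e.g. 0x3302 when bank 2
 * is selected -- the 0x33 high byte is the identification pattern that
 * SMC_REG() below verifies when SMC_DEBUG is enabled.
 */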


// Transmit Control Register
/* BANK 0 */
#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0)
#define TCR_ENABLE 0x0001 // When 1 we can transmit
#define TCR_LOOP 0x0002 // Controls output pin LBK
#define TCR_FORCOL 0x0004 // When 1 will force a collision
#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0
#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames
#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier
#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation
#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error
#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback
#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode

#define TCR_CLEAR 0 /* do NOTHING */
/* the default settings for the TCR register : */
#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN)


// EPH Status Register
/* BANK 0 */
#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0)
#define ES_TX_SUC 0x0001 // Last TX was successful
#define ES_SNGL_COL 0x0002 // Single collision detected for last tx
#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx
#define ES_LTX_MULT 0x0008 // Last tx was a multicast
#define ES_16COL 0x0010 // 16 Collisions Reached
#define ES_SQET 0x0020 // Signal Quality Error Test
#define ES_LTXBRD 0x0040 // Last tx was a broadcast
#define ES_TXDEFR 0x0080 // Transmit Deferred
#define ES_LATCOL 0x0200 // Late collision detected on last tx
#define ES_LOSTCARR 0x0400 // Lost Carrier Sense
#define ES_EXC_DEF 0x0800 // Excessive Deferral
#define ES_CTR_ROL 0x1000 // Counter Roll Over indication
#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin
#define ES_TXUNRN 0x8000 // Tx Underrun


// Receive Control Register
/* BANK 0 */
#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0)
#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted
#define RCR_PRMS 0x0002 // Enable promiscuous mode
#define RCR_ALMUL 0x0004 // When set accepts all multicast frames
#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets
#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets
#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision
#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bits of carrier
#define RCR_SOFTRST 0x8000 // resets the chip

/* the normal settings for the RCR register : */
#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN)
#define RCR_CLEAR 0x0 // set it to a base state


// Counter Register
/* BANK 0 */
#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0)


// Memory Information Register
/* BANK 0 */
#define MIR_REG(lp) SMC_REG(lp, 0x0008, 0)


// Receive/Phy Control Register
/* BANK 0 */
#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0)
#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode.
#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode
#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode
#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb
#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb

#ifndef RPC_LSA_DEFAULT
#define RPC_LSA_DEFAULT RPC_LED_100
#endif
#ifndef RPC_LSB_DEFAULT
#define RPC_LSB_DEFAULT RPC_LED_FD
#endif

#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX)
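
/*
 * Illustrative sketch only (the actual composition is done in smc91x.c):
 * the LED select fields are OR'd into the RPC value shifted by
 * RPC_LSXA_SHFT / RPC_LSXB_SHFT, e.g.
 *
 *	RPC_DEFAULT | (RPC_LSA_DEFAULT << RPC_LSXA_SHFT)
 *		    | (RPC_LSB_DEFAULT << RPC_LSXB_SHFT)
 *
 * yields a complete Receive/Phy Control value.
 */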


/* Bank 0 0x0C is reserved */

// Bank Select Register
/* All Banks */
#define BSR_REG 0x000E


// Configuration Reg
/* BANK 1 */
#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1)
#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy
#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL
#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus
#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.

// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN)


// Base Address Register
/* BANK 1 */
#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1)


// Individual Address Registers
/* BANK 1 */
#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1)
#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1)
#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1)


// General Purpose Register
/* BANK 1 */
#define GP_REG(lp) SMC_REG(lp, 0x000A, 1)


// Control Register
/* BANK 1 */
#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1)
#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received
#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt
#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt
#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt
#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers
#define CTL_STORE 0x0001 // When set stores registers into EEPROM


// MMU Command Register
/* BANK 2 */
#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2)
#define MC_BUSY 1 // When 1 the last release has not completed
#define MC_NOP (0<<5) // No Op
#define MC_ALLOC (1<<5) // OR with number of 256 byte packets
#define MC_RESET (2<<5) // Reset MMU to initial state
#define MC_REMOVE (3<<5) // Remove the current rx packet
#define MC_RELEASE (4<<5) // Remove and release the current rx packet
#define MC_FREEPKT (5<<5) // Release packet in PNR register
#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit
#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs
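
/*
 * Illustrative example only (the real call sites are in smc91x.c):
 * requesting TX memory for a packet needing numPages 256-byte pages
 * (numPages is a hypothetical local) would look like
 *
 *	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
 *
 * with completion signalled by the IM_ALLOC_INT interrupt defined below.
 */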


// Packet Number Register
/* BANK 2 */
#define PN_REG(lp) SMC_REG(lp, 0x0002, 2)


// Allocation Result Register
/* BANK 2 */
#define AR_REG(lp) SMC_REG(lp, 0x0003, 2)
#define AR_FAILED 0x80 // Allocation Failed


// TX FIFO Ports Register
/* BANK 2 */
#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty

// RX FIFO Ports Register
/* BANK 2 */
#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2)
#define RXFIFO_REMPTY 0x80 // RX FIFO Empty

#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2)

// Pointer Register
/* BANK 2 */
#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2)
#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area
#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access
#define PTR_READ 0x2000 // When 1 the operation is a read


// Data Register
/* BANK 2 */
#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2)


// Interrupt Status/Acknowledge Register
/* BANK 2 */
#define INT_REG(lp) SMC_REG(lp, 0x000C, 2)


// Interrupt Mask Register
/* BANK 2 */
#define IM_REG(lp) SMC_REG(lp, 0x000D, 2)
#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt
#define IM_ERCV_INT 0x40 // Early Receive Interrupt
#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section
#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns
#define IM_ALLOC_INT 0x08 // Set when allocation request is completed
#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty
#define IM_TX_INT 0x02 // Transmit Interrupt
#define IM_RCV_INT 0x01 // Receive Interrupt


// Multicast Table Registers
/* BANK 3 */
#define MCAST_REG1(lp) SMC_REG(lp, 0x0000, 3)
#define MCAST_REG2(lp) SMC_REG(lp, 0x0002, 3)
#define MCAST_REG3(lp) SMC_REG(lp, 0x0004, 3)
#define MCAST_REG4(lp) SMC_REG(lp, 0x0006, 3)


// Management Interface Register (MII)
/* BANK 3 */
#define MII_REG(lp) SMC_REG(lp, 0x0008, 3)
#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup
#define MII_MDOE 0x0008 // MII Output Enable
#define MII_MCLK 0x0004 // MII Clock, pin MDCLK
#define MII_MDI 0x0002 // MII Input, pin MDI
#define MII_MDO 0x0001 // MII Output, pin MDO


// Revision Register
/* BANK 3 */
/* ( hi: chip id low: rev # ) */
#define REV_REG(lp) SMC_REG(lp, 0x000A, 3)


// Early RCV Register
/* BANK 3 */
/* this is NOT on SMC9192 */
#define ERCV_REG(lp) SMC_REG(lp, 0x000C, 3)
#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received
#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask


// External Register
/* BANK 7 */
#define EXT_REG(lp) SMC_REG(lp, 0x0000, 7)


#define CHIP_9192 3
#define CHIP_9194 4
#define CHIP_9195 5
#define CHIP_9196 6
#define CHIP_91100 7
#define CHIP_91100FD 8
#define CHIP_91111FD 9

static const char *chip_ids[16] = {
	NULL, NULL, NULL,
	/* 3 */ "SMC91C90/91C92",
	/* 4 */ "SMC91C94",
	/* 5 */ "SMC91C95",
	/* 6 */ "SMC91C96",
	/* 7 */ "SMC91C100",
	/* 8 */ "SMC91C100FD",
	/* 9 */ "SMC91C11xFD",
	NULL, NULL, NULL,
	NULL, NULL, NULL};


/*
 . Receive status bits
 */
#define RS_ALGNERR 0x8000
#define RS_BRODCAST 0x4000
#define RS_BADCRC 0x2000
#define RS_ODDFRAME 0x1000
#define RS_TOOLONG 0x0800
#define RS_TOOSHORT 0x0400
#define RS_MULTICAST 0x0001
#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)


/*
 * PHY IDs
 *  LAN83C183 == LAN91C111 Internal PHY
 */
#define PHY_LAN83C183 0x0016f840
#define PHY_LAN83C180 0x02821c50

/*
 * PHY Register Addresses (LAN91C111 Internal PHY)
 *
 * Generic PHY registers can be found in <linux/mii.h>
 *
 * These phy registers are specific to our on-board phy.
 */

// PHY Configuration Register 1
#define PHY_CFG1_REG 0x10
#define PHY_CFG1_LNKDIS 0x8000 // 1=Rx Link Detect Function disabled
#define PHY_CFG1_XMTDIS 0x4000 // 1=TP Transmitter Disabled
#define PHY_CFG1_XMTPDN 0x2000 // 1=TP Transmitter Powered Down
#define PHY_CFG1_BYPSCR 0x0400 // 1=Bypass scrambler/descrambler
#define PHY_CFG1_UNSCDS 0x0200 // 1=Unscramble Idle Reception Disable
#define PHY_CFG1_EQLZR 0x0100 // 1=Rx Equalizer Disabled
#define PHY_CFG1_CABLE 0x0080 // 1=STP(150ohm), 0=UTP(100ohm)
#define PHY_CFG1_RLVL0 0x0040 // 1=Rx Squelch level reduced by 4.5db
#define PHY_CFG1_TLVL_SHIFT 2 // Transmit Output Level Adjust
#define PHY_CFG1_TLVL_MASK 0x003C
#define PHY_CFG1_TRF_MASK 0x0003 // Transmitter Rise/Fall time


// PHY Configuration Register 2
#define PHY_CFG2_REG 0x11
#define PHY_CFG2_APOLDIS 0x0020 // 1=Auto Polarity Correction disabled
#define PHY_CFG2_JABDIS 0x0010 // 1=Jabber disabled
#define PHY_CFG2_MREG 0x0008 // 1=Multiple register access (MII mgt)
#define PHY_CFG2_INTMDIO 0x0004 // 1=Interrupt signaled with MDIO pulse

// PHY Status Output (and Interrupt status) Register
#define PHY_INT_REG 0x12 // Status Output (Interrupt Status)
#define PHY_INT_INT 0x8000 // 1=bits have changed since last read
#define PHY_INT_LNKFAIL 0x4000 // 1=Link Not detected
#define PHY_INT_LOSSSYNC 0x2000 // 1=Descrambler has lost sync
#define PHY_INT_CWRD 0x1000 // 1=Invalid 4B5B code detected on rx
#define PHY_INT_SSD 0x0800 // 1=No Start Of Stream detected on rx
#define PHY_INT_ESD 0x0400 // 1=No End Of Stream detected on rx
#define PHY_INT_RPOL 0x0200 // 1=Reverse Polarity detected
#define PHY_INT_JAB 0x0100 // 1=Jabber detected
#define PHY_INT_SPDDET 0x0080 // 1=100Base-TX mode, 0=10Base-T mode
#define PHY_INT_DPLXDET 0x0040 // 1=Device in Full Duplex

// PHY Interrupt/Status Mask Register
#define PHY_MASK_REG 0x13 // Interrupt Mask
// Uses the same bit definitions as PHY_INT_REG


/*
 * SMC91C96 ethernet config and status registers.
 * These are in the "attribute" space.
 */
#define ECOR 0x8000
#define ECOR_RESET 0x80
#define ECOR_LEVEL_IRQ 0x40
#define ECOR_WR_ATTRIB 0x04
#define ECOR_ENABLE 0x01

#define ECSR 0x8002
#define ECSR_IOIS8 0x20
#define ECSR_PWRDWN 0x04
#define ECSR_INT 0x02

#define ATTRIB_SIZE ((64*1024) << SMC_IO_SHIFT)


/*
 * Macros to abstract register access according to the data bus
 * capabilities. Please use those and not the in/out primitives.
 * Note: the following macros do *not* select the bank -- this must
 * be done separately as needed in the main code. The SMC_REG() macro
 * only uses the bank argument for debugging purposes (when enabled).
 *
 * Note: despite inline functions being safer, everything leading to this
 * should preferably be macros to let BUG() display the line number in
 * the core source code since we're interested in the top call site
 * not in any inline function location.
 */

#if SMC_DEBUG > 0
#define SMC_REG(lp, reg, bank) \
	({ \
		int __b = SMC_CURRENT_BANK(lp); \
		if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
			pr_err("%s: bank reg screwed (0x%04x)\n", \
			       CARDNAME, __b); \
			BUG(); \
		} \
		reg<<SMC_IO_SHIFT; \
	})
#else
#define SMC_REG(lp, reg, bank) (reg<<SMC_IO_SHIFT)
#endif
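
/*
 * Typical usage (illustrative sketch only -- the real call sites live in
 * smc91x.c; "mask" is just a local in this example): select the bank
 * first, then use the accessors defined below, e.g.
 *
 *	SMC_SELECT_BANK(lp, 2);
 *	mask = SMC_GET_INT_MASK(lp);
 *	SMC_SET_INT_MASK(lp, mask | IM_RCV_INT);
 */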

/*
 * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
 * aligned to a 32 bit boundary. I tell you that does exist!
 * Fortunately the affected register accesses can be easily worked around
 * since we can write zeroes to the preceding 16 bits without adverse
 * effects and use a 32-bit access.
 *
 * Enforce it on any 32-bit capable setup for now.
 */
#define SMC_MUST_ALIGN_WRITE(lp) SMC_32BIT(lp)
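
/*
 * Example of the workaround (illustrative only): SMC_SET_PTR() below
 * becomes SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2)), i.e. a single
 * 32-bit write at the 4-byte-aligned offset, where the pointer value
 * lands in the upper halfword and harmless zeroes land in the preceding
 * halfword, exactly as described above.
 */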

#define SMC_GET_PN(lp) \
	(SMC_8BIT(lp) ? (SMC_inb(ioaddr, PN_REG(lp))) \
	 : (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))

#define SMC_SET_PN(lp, x) \
	do { \
		if (SMC_MUST_ALIGN_WRITE(lp)) \
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2)); \
		else if (SMC_8BIT(lp)) \
			SMC_outb(x, ioaddr, PN_REG(lp)); \
		else \
			SMC_outw(lp, x, ioaddr, PN_REG(lp)); \
	} while (0)

#define SMC_GET_AR(lp) \
	(SMC_8BIT(lp) ? (SMC_inb(ioaddr, AR_REG(lp))) \
	 : (SMC_inw(ioaddr, PN_REG(lp)) >> 8))

#define SMC_GET_TXFIFO(lp) \
	(SMC_8BIT(lp) ? (SMC_inb(ioaddr, TXFIFO_REG(lp))) \
	 : (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))

#define SMC_GET_RXFIFO(lp) \
	(SMC_8BIT(lp) ? (SMC_inb(ioaddr, RXFIFO_REG(lp))) \
	 : (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))

#define SMC_GET_INT(lp) \
	(SMC_8BIT(lp) ? (SMC_inb(ioaddr, INT_REG(lp))) \
	 : (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF))

#define SMC_ACK_INT(lp, x) \
	do { \
		if (SMC_8BIT(lp)) \
			SMC_outb(x, ioaddr, INT_REG(lp)); \
		else { \
			unsigned long __flags; \
			int __mask; \
			local_irq_save(__flags); \
			__mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \
			SMC_outw(lp, __mask | (x), ioaddr, INT_REG(lp)); \
			local_irq_restore(__flags); \
		} \
	} while (0)

#define SMC_GET_INT_MASK(lp) \
	(SMC_8BIT(lp) ? (SMC_inb(ioaddr, IM_REG(lp))) \
	 : (SMC_inw(ioaddr, INT_REG(lp)) >> 8))

#define SMC_SET_INT_MASK(lp, x) \
	do { \
		if (SMC_8BIT(lp)) \
			SMC_outb(x, ioaddr, IM_REG(lp)); \
		else \
			SMC_outw(lp, (x) << 8, ioaddr, INT_REG(lp)); \
	} while (0)

#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT)

#define SMC_SELECT_BANK(lp, x) \
	do { \
		if (SMC_MUST_ALIGN_WRITE(lp)) \
			SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
		else \
			SMC_outw(lp, x, ioaddr, BANK_SELECT); \
	} while (0)

#define SMC_GET_BASE(lp) SMC_inw(ioaddr, BASE_REG(lp))

#define SMC_SET_BASE(lp, x) SMC_outw(lp, x, ioaddr, BASE_REG(lp))

#define SMC_GET_CONFIG(lp) SMC_inw(ioaddr, CONFIG_REG(lp))

#define SMC_SET_CONFIG(lp, x) SMC_outw(lp, x, ioaddr, CONFIG_REG(lp))

#define SMC_GET_COUNTER(lp) SMC_inw(ioaddr, COUNTER_REG(lp))

#define SMC_GET_CTL(lp) SMC_inw(ioaddr, CTL_REG(lp))

#define SMC_SET_CTL(lp, x) SMC_outw(lp, x, ioaddr, CTL_REG(lp))

#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))

#define SMC_GET_GP(lp) SMC_inw(ioaddr, GP_REG(lp))

#define SMC_SET_GP(lp, x) \
	do { \
		if (SMC_MUST_ALIGN_WRITE(lp)) \
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1)); \
		else \
			SMC_outw(lp, x, ioaddr, GP_REG(lp)); \
	} while (0)

#define SMC_SET_MII(lp, x) SMC_outw(lp, x, ioaddr, MII_REG(lp))

#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))

#define SMC_SET_MIR(lp, x) SMC_outw(lp, x, ioaddr, MIR_REG(lp))

#define SMC_GET_MMU_CMD(lp) SMC_inw(ioaddr, MMU_CMD_REG(lp))

#define SMC_SET_MMU_CMD(lp, x) SMC_outw(lp, x, ioaddr, MMU_CMD_REG(lp))

#define SMC_GET_FIFO(lp) SMC_inw(ioaddr, FIFO_REG(lp))

#define SMC_GET_PTR(lp) SMC_inw(ioaddr, PTR_REG(lp))

#define SMC_SET_PTR(lp, x) \
	do { \
		if (SMC_MUST_ALIGN_WRITE(lp)) \
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2)); \
		else \
			SMC_outw(lp, x, ioaddr, PTR_REG(lp)); \
	} while (0)

#define SMC_GET_EPH_STATUS(lp) SMC_inw(ioaddr, EPH_STATUS_REG(lp))

#define SMC_GET_RCR(lp) SMC_inw(ioaddr, RCR_REG(lp))

#define SMC_SET_RCR(lp, x) SMC_outw(lp, x, ioaddr, RCR_REG(lp))

#define SMC_GET_REV(lp) SMC_inw(ioaddr, REV_REG(lp))

#define SMC_GET_RPC(lp) SMC_inw(ioaddr, RPC_REG(lp))

#define SMC_SET_RPC(lp, x) \
	do { \
		if (SMC_MUST_ALIGN_WRITE(lp)) \
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0)); \
		else \
			SMC_outw(lp, x, ioaddr, RPC_REG(lp)); \
	} while (0)

#define SMC_GET_TCR(lp) SMC_inw(ioaddr, TCR_REG(lp))

#define SMC_SET_TCR(lp, x) SMC_outw(lp, x, ioaddr, TCR_REG(lp))

#ifndef SMC_GET_MAC_ADDR
#define SMC_GET_MAC_ADDR(lp, addr) \
	do { \
		unsigned int __v; \
		__v = SMC_inw(ioaddr, ADDR0_REG(lp)); \
		addr[0] = __v; addr[1] = __v >> 8; \
		__v = SMC_inw(ioaddr, ADDR1_REG(lp)); \
		addr[2] = __v; addr[3] = __v >> 8; \
		__v = SMC_inw(ioaddr, ADDR2_REG(lp)); \
		addr[4] = __v; addr[5] = __v >> 8; \
	} while (0)
#endif

#define SMC_SET_MAC_ADDR(lp, addr) \
	do { \
		SMC_outw(lp, addr[0] | (addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
		SMC_outw(lp, addr[2] | (addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
		SMC_outw(lp, addr[4] | (addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
	} while (0)

#define SMC_SET_MCAST(lp, x) \
	do { \
		const unsigned char *mt = (x); \
		SMC_outw(lp, mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
		SMC_outw(lp, mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
		SMC_outw(lp, mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
		SMC_outw(lp, mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
	} while (0)

#define SMC_PUT_PKT_HDR(lp, status, length) \
	do { \
		if (SMC_32BIT(lp)) \
			SMC_outl((status) | (length)<<16, ioaddr, \
				 DATA_REG(lp)); \
		else { \
			SMC_outw(lp, status, ioaddr, DATA_REG(lp)); \
			SMC_outw(lp, length, ioaddr, DATA_REG(lp)); \
		} \
	} while (0)

#define SMC_GET_PKT_HDR(lp, status, length) \
	do { \
		if (SMC_32BIT(lp)) { \
			unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \
			(status) = __val & 0xffff; \
			(length) = __val >> 16; \
		} else { \
			(status) = SMC_inw(ioaddr, DATA_REG(lp)); \
			(length) = SMC_inw(ioaddr, DATA_REG(lp)); \
		} \
	} while (0)

#define SMC_PUSH_DATA(lp, p, l) \
	do { \
		if (SMC_32BIT(lp)) { \
			void *__ptr = (p); \
			int __len = (l); \
			void __iomem *__ioaddr = ioaddr; \
			if (__len >= 2 && (unsigned long)__ptr & 2) { \
				__len -= 2; \
				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
				__ptr += 2; \
			} \
			if (SMC_CAN_USE_DATACS && lp->datacs) \
				__ioaddr = lp->datacs; \
			SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
			if (__len & 2) { \
				__ptr += (__len & ~3); \
				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
			} \
		} else if (SMC_16BIT(lp)) \
			SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
		else if (SMC_8BIT(lp)) \
			SMC_outsb(ioaddr, DATA_REG(lp), p, l); \
	} while (0)

#define SMC_PULL_DATA(lp, p, l) \
	do { \
		if (SMC_32BIT(lp)) { \
			void *__ptr = (p); \
			int __len = (l); \
			void __iomem *__ioaddr = ioaddr; \
			if ((unsigned long)__ptr & 2) { \
				/* \
				 * We want 32bit alignment here. \
				 * Since some buses perform a full \
				 * 32bit fetch even for 16bit data \
				 * we can't use SMC_inw() here. \
				 * Back both source (on-chip) and \
				 * destination pointers of 2 bytes. \
				 * This is possible since the call to \
				 * SMC_GET_PKT_HDR() already advanced \
				 * the source pointer of 4 bytes, and \
				 * the skb_reserve(skb, 2) advanced \
				 * the destination pointer of 2 bytes. \
				 */ \
				__ptr -= 2; \
				__len += 2; \
				SMC_SET_PTR(lp, \
					    2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
			} \
			if (SMC_CAN_USE_DATACS && lp->datacs) \
				__ioaddr = lp->datacs; \
			__len += 2; \
			SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
		} else if (SMC_16BIT(lp)) \
			SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
		else if (SMC_8BIT(lp)) \
			SMC_insb(ioaddr, DATA_REG(lp), p, l); \
	} while (0)

#endif /* _SMC91X_H_ */