// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800
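
/*
 * Ring geometry implied by the definitions above: a single 2 KB transmit
 * buffer (N_TX_RING == 1) and eight 2 KB receive buffers. 0x800 bytes is
 * comfortably larger than a maximum-size Ethernet frame plus the status
 * header (struct mace_frame) found at the start of each receive buffer.
 */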

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

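/*
 * Both addresses are fixed physical locations in the Macintosh onboard
 * device I/O space (0x50Fxxxxx), hence the absence of any bus probing:
 * MACE_BASE points at the MACE register block and MACE_PROM at the
 * hardware address PROM.
 */
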
struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

/*
 * Layout of the status header that precedes the data of each received
 * frame in the receive ring. Each status byte apparently arrives padded
 * to a 16-bit word, hence the interleaved pad fields.
 */
struct mace_frame {
	u8	rcvcnt;	/* low 8 bits of the receive byte count */
	u8	pad1;
	u8	rcvsts;	/* receive status; low nibble = count bits 8-11 */
	u8	pad2;
	u8	rntpc;	/* runt packet count */
	u8	pad3;
	u8	rcvcc;	/* receive collision count */
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

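	/*
	 * The magic PSC command words below are inherited from the NetBSD
	 * driver and appear to be undocumented: 0x0100 seems to halt the
	 * channel and 0x9800 to (re)start it. 'set' selects one of the two
	 * PSC DMA register sets (0x00 or 0x10, cf. PSC_SET0/PSC_SET1).
	 */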
	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

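	/*
	 * Prime both PSC register sets with the ring base; the receive DMA
	 * engine ping-pongs between set 0 and set 1 (see mace_dma_intr(),
	 * where rx_slot is toggled by 0x10 once a set runs out of buffers).
	 */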
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
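	/*
	 * More magic PSC values inherited from NetBSD: 0x8800/0x1000 seem
	 * to halt the channel, and 0x1100 to cancel any pending command in
	 * both register sets.
	 */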
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	int err;

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);

	mp->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are reversed.
	 */

	addr = MACE_PROM;

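	/*
	 * Concretely: byte j of the PROM lives at addr[j << 4], i.e. at
	 * offsets 0x00, 0x10, ... 0x70. The first six bytes, bit-reversed,
	 * form the station address; all eight must XOR to 0xFF or the PROM
	 * (and hence the chip) is assumed absent.
	 */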
	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops = &mace_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n",
		dev->dev_addr, mp->chipid);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip, polling until SWRST reads back clear */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;	/* reading the interrupt register clears it */

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT;	/* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL)
		goto out1;

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL)
		goto out2;

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */

static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

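	/*
	 * Alternate between the two PSC write register sets for the next
	 * transmit; completion is acknowledged set by set in mace_dma_intr().
	 */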
	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

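		/*
		 * The MACE hashes multicast addresses into a 64-bit logical
		 * address filter: the top six bits of the little-endian
		 * CRC-32 of the address select one of the 64 filter bits.
		 */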
		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;		/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc;	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir;	/* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO)
			dev->stats.rx_fifo_errors++;
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		/* low nibble of the status byte holds bits 8-11 of the byte count */
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);	/* so that the IP header is longword aligned */
		skb_put_data(skb, mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY))
		;
	if (!(baka & 0x60000000))
		return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

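	/*
	 * The status bits are again undocumented magic: 0x2000 appears to
	 * signal a DMA error (handled with a full reset of the channel)
	 * and 0x0100 that one or more ring buffers have been completed.
	 */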
	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;
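
		/*
		 * The LEN register counts down as buffers are consumed, so
		 * 'head' indexes just past the last filled buffer.
		 */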

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *)(mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe	= mace_probe,
	.remove	= mac_mace_device_remove,
	.driver	= {
		.name	= mac_mace_string,
	},
};

module_platform_driver(mac_mace_driver);