// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/zorro.h>
#include <net/ax88796.h>
#include <asm/amigaints.h>

#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100 \
		ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x64, 0)

#define XS100_IRQSTATUS_BASE 0x40
#define XS100_8390_BASE 0x800

/* Longword-access area. Translated to 2 16-bit access cycles by the
 * X-Surf 100 FPGA
 */
#define XS100_8390_DATA32_BASE 0x8000
#define XS100_8390_DATA32_SIZE 0x2000
/* Sub-Areas for fast data register access; addresses relative to area begin */
#define XS100_8390_DATA_READ32_BASE 0x0880
#define XS100_8390_DATA_WRITE32_BASE 0x0C80
#define XS100_8390_DATA_AREA_SIZE 0x80

#define __NS8390_init ax_NS8390_init

/* force unsigned long back to 'void __iomem *' */
#define ax_convert_addr(_a) ((void __force __iomem *)(_a))

#define ei_inb(_a) z_readb(ax_convert_addr(_a))
#define ei_outb(_v, _a) z_writeb(_v, ax_convert_addr(_a))

#define ei_inw(_a) z_readw(ax_convert_addr(_a))
#define ei_outw(_v, _a) z_writew(_v, ax_convert_addr(_a))

#define ei_inb_p(_a) ei_inb(_a)
#define ei_outb_p(_v, _a) ei_outb(_v, _a)

/* define EI_SHIFT() to take into account our register offsets */
#define EI_SHIFT(x) (ei_local->reg_offset[(x)])

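/* On the X-Surf 100 the 8390 registers are spaced four bytes apart, so
 * EI_SHIFT() resolves each register index through reg_offset[], which
 * xsurf100_probe() fills with 4 * reg.
 */
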
/* Ensure we have our RCR base value */
#define AX88796_PLATFORM

static unsigned char version[] =
		"ax88796.c: Copyright 2005,2007 Simtec Electronics\n";

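/* Pull in the generic 8390 core by inclusion so it is compiled against
 * the Zorro ei_inb()/ei_outb() accessors and EI_SHIFT() defined above.
 */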
#include "lib8390.c"

/* from ne.c */
#define NE_CMD		EI_SHIFT(0x00)
#define NE_RESET	EI_SHIFT(0x1f)
#define NE_DATAPORT	EI_SHIFT(0x10)

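/* Platform data handed to the ax88796 core: the generic ax_plat_data
 * plus the two X-Surf 100 specific mappings, the FPGA control registers
 * and the 32-bit data access window.
 */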
struct xsurf100_ax_plat_data {
	struct ax_plat_data ax;
	void __iomem *base_regs;
	void __iomem *data_area;
};

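/* The card sits on the shared Amiga PORTS interrupt, so the ax88796
 * core calls this hook to check whether the X-Surf 100 actually raised
 * the IRQ before running its interrupt handler.
 */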
static int is_xsurf100_network_irq(struct platform_device *pdev)
{
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	return (readw(xs100->base_regs + XS100_IRQSTATUS_BASE) & 0xaaaa) != 0;
}

/* These functions guarantee that the iomem is accessed with 32 bit
 * cycles only. z_memcpy_fromio / z_memcpy_toio don't.
 */
static void z_memcpy_fromio32(void *dst, const void __iomem *src, size_t bytes)
{
	while (bytes > 32) {
		asm __volatile__
		   ("movem.l (%0)+,%%d0-%%d7\n"
		    "movem.l %%d0-%%d7,(%1)\n"
		    "adda.l #32,%1" : "=a"(src), "=a"(dst)
		    : "0"(src), "1"(dst) : "d0", "d1", "d2", "d3", "d4",
					   "d5", "d6", "d7", "memory");
		bytes -= 32;
	}
	while (bytes) {
		*(uint32_t *)dst = z_readl(src);
		src += 4;
		dst += 4;
		bytes -= 4;
	}
}

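/* The write side issues one 32-bit write per iteration and assumes a
 * byte count that is a multiple of four; its callers only pass whole
 * 0x80-byte blocks or "count & ~3", so no tail handling is needed.
 */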
static void z_memcpy_toio32(void __iomem *dst, const void *src, size_t bytes)
{
	while (bytes) {
		z_writel(*(const uint32_t *)src, dst);
		src += 4;
		dst += 4;
		bytes -= 4;
	}
}

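/* Fast transfer helpers for the 8390 remote DMA port: bulk data moves
 * through the 32-bit window in chunks of up to 0x80 bytes, and any
 * leftover word or byte goes through the regular data port.
 * xs100_read() below mirrors this for the receive direction.
 */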
static void xs100_write(struct net_device *dev, const void *src,
			unsigned int count)
{
	struct ei_device *ei_local = netdev_priv(dev);
	struct platform_device *pdev = to_platform_device(dev->dev.parent);
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	/* copy whole blocks */
	while (count > XS100_8390_DATA_AREA_SIZE) {
		z_memcpy_toio32(xs100->data_area +
				XS100_8390_DATA_WRITE32_BASE, src,
				XS100_8390_DATA_AREA_SIZE);
		src += XS100_8390_DATA_AREA_SIZE;
		count -= XS100_8390_DATA_AREA_SIZE;
	}
	/* copy whole dwords */
	z_memcpy_toio32(xs100->data_area + XS100_8390_DATA_WRITE32_BASE,
			src, count & ~3);
	src += count & ~3;
	if (count & 2) {
		ei_outw(*(uint16_t *)src, ei_local->mem + NE_DATAPORT);
		src += 2;
	}
	if (count & 1)
		ei_outb(*(uint8_t *)src, ei_local->mem + NE_DATAPORT);
}

static void xs100_read(struct net_device *dev, void *dst, unsigned int count)
{
	struct ei_device *ei_local = netdev_priv(dev);
	struct platform_device *pdev = to_platform_device(dev->dev.parent);
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	/* copy whole blocks */
	while (count > XS100_8390_DATA_AREA_SIZE) {
		z_memcpy_fromio32(dst, xs100->data_area +
				  XS100_8390_DATA_READ32_BASE,
				  XS100_8390_DATA_AREA_SIZE);
		dst += XS100_8390_DATA_AREA_SIZE;
		count -= XS100_8390_DATA_AREA_SIZE;
	}
	/* copy whole dwords */
	z_memcpy_fromio32(dst, xs100->data_area + XS100_8390_DATA_READ32_BASE,
			  count & ~3);
	dst += count & ~3;
	if (count & 2) {
		*(uint16_t *)dst = ei_inw(ei_local->mem + NE_DATAPORT);
		dst += 2;
	}
	if (count & 1)
		*(uint8_t *)dst = ei_inb(ei_local->mem + NE_DATAPORT);
}

/* Block input and output, similar to the Crynwr packet driver. If
 * you are porting to a new ethercard, look at the packet driver
 * source for hints. The NEx000 doesn't share the on-board packet
 * memory -- you have to put the packet out through the "remote DMA"
 * dataport using ei_outb.
 */
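/* Receive side: program the remote DMA byte count and start address,
 * start a remote read, then drain the data through xs100_read().
 */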
static void xs100_block_input(struct net_device *dev, int count,
			      struct sk_buff *skb, int ring_offset)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *nic_base = ei_local->mem;
	char *buf = skb->data;

	if (ei_local->dmaing) {
		netdev_err(dev,
			   "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
			   __func__,
			   ei_local->dmaing, ei_local->irqlock);
		return;
	}

	ei_local->dmaing |= 0x01;

	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
	ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
	ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
	ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD);

	xs100_read(dev, buf, count);

	ei_local->dmaing &= ~1;
}

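/* Transmit side: copy the packet into 8390 buffer memory with a remote
 * DMA write and wait for the remote DMA complete (RDC) status bit; if
 * it does not show up within roughly 20 ms, reset and reinitialise the
 * chip.
 */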
static void xs100_block_output(struct net_device *dev, int count,
			       const unsigned char *buf, const int start_page)
{
	struct ei_device *ei_local = netdev_priv(dev);
	void __iomem *nic_base = ei_local->mem;
	unsigned long dma_start;

	/* Round the count up for word writes. Do we need to do this?
	 * What effect will an odd byte count have on the 8390?  I
	 * should check someday.
	 */
	if (ei_local->word16 && (count & 0x01))
		count++;

	/* This *shouldn't* happen. If it does, it's the last thing
	 * you'll see
	 */
	if (ei_local->dmaing) {
		netdev_err(dev,
			   "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n",
			   __func__,
			   ei_local->dmaing, ei_local->irqlock);
		return;
	}

	ei_local->dmaing |= 0x01;
	/* We should already be in page 0, but to be safe... */
	ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, nic_base + NE_CMD);

	ei_outb(ENISR_RDC, nic_base + EN0_ISR);

	/* Now the normal output. */
	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
	ei_outb(0x00, nic_base + EN0_RSARLO);
	ei_outb(start_page, nic_base + EN0_RSARHI);

	ei_outb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);

	xs100_write(dev, buf, count);

	dma_start = jiffies;

	while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
		if (jiffies - dma_start > 2 * HZ / 100) {	/* 20ms */
			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
			ei_local->reset_8390(dev);
			ax_NS8390_init(dev, 1);
			break;
		}
	}

	ei_outb(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
	ei_local->dmaing &= ~0x01;
}

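/* Probe: reserve and ioremap the control-register and 32-bit data
 * regions (which the ax88796 core does not know about), then register a
 * child "ax88796" platform device carrying this platform data so the
 * generic ax88796 driver can bind to it.
 */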
static int xsurf100_probe(struct zorro_dev *zdev,
			  const struct zorro_device_id *ent)
{
	struct platform_device *pdev;
	struct xsurf100_ax_plat_data ax88796_data;
	struct resource res[2] = {
		DEFINE_RES_NAMED(IRQ_AMIGA_PORTS, 1, NULL,
				 IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE),
		DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE,
			       4 * 0x20)
	};
	int reg;
	/* This table is referenced in the device structure, so it must
	 * outlive the scope of xsurf100_probe.
	 */
	static u32 reg_offsets[32];
	int ret = 0;

	/* X-Surf 100 control and 32 bit ring buffer data access areas.
	 * These resources are not used by the ax88796 driver, so must
	 * be requested here and passed via platform data.
	 */

	if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) {
		dev_err(&zdev->dev, "cannot reserve X-Surf 100 control registers\n");
		return -ENXIO;
	}

	if (!request_mem_region(zdev->resource.start +
				XS100_8390_DATA32_BASE,
				XS100_8390_DATA32_SIZE,
				"X-Surf 100 32-bit data access")) {
		dev_err(&zdev->dev, "cannot reserve 32-bit area\n");
		ret = -ENXIO;
		goto exit_req;
	}

	for (reg = 0; reg < 0x20; reg++)
		reg_offsets[reg] = 4 * reg;

	memset(&ax88796_data, 0, sizeof(ax88796_data));
	ax88796_data.ax.flags = AXFLG_HAS_EEPROM;
	ax88796_data.ax.wordlength = 2;
	ax88796_data.ax.dcr_val = 0x48;
	ax88796_data.ax.rcr_val = 0x40;
	ax88796_data.ax.reg_offsets = reg_offsets;
	ax88796_data.ax.check_irq = is_xsurf100_network_irq;
	ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100);

	/* error handling for ioremap regs */
	if (!ax88796_data.base_regs) {
		dev_err(&zdev->dev, "Cannot ioremap area %pR (registers)\n",
			&zdev->resource);

		ret = -ENXIO;
		goto exit_req2;
	}

	ax88796_data.data_area = ioremap(zdev->resource.start +
			XS100_8390_DATA32_BASE, XS100_8390_DATA32_SIZE);

	/* error handling for ioremap data */
	if (!ax88796_data.data_area) {
		dev_err(&zdev->dev,
			"Cannot ioremap area %pR offset %x (32-bit access)\n",
			&zdev->resource, XS100_8390_DATA32_BASE);

		ret = -ENXIO;
		goto exit_mem;
	}

	ax88796_data.ax.block_output = xs100_block_output;
	ax88796_data.ax.block_input = xs100_block_input;

	pdev = platform_device_register_resndata(&zdev->dev, "ax88796",
						 zdev->slotaddr, res, 2,
						 &ax88796_data,
						 sizeof(ax88796_data));

	if (IS_ERR(pdev)) {
		dev_err(&zdev->dev, "cannot register platform device\n");
		ret = -ENXIO;
		goto exit_mem2;
	}

	zorro_set_drvdata(zdev, pdev);

	if (!ret)
		return 0;

 exit_mem2:
	iounmap(ax88796_data.data_area);

 exit_mem:
	iounmap(ax88796_data.base_regs);

 exit_req2:
	release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
			   XS100_8390_DATA32_SIZE);

 exit_req:
	release_mem_region(zdev->resource.start, 0x100);

	return ret;
}

static void xsurf100_remove(struct zorro_dev *zdev)
{
	struct platform_device *pdev = zorro_get_drvdata(zdev);
	struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev);

	platform_device_unregister(pdev);

	iounmap(xs100->base_regs);
	release_mem_region(zdev->resource.start, 0x100);
	iounmap(xs100->data_area);
	release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE,
			   XS100_8390_DATA32_SIZE);
}

static const struct zorro_device_id xsurf100_zorro_tbl[] = {
	{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100, },
	{ 0 }
};

MODULE_DEVICE_TABLE(zorro, xsurf100_zorro_tbl);

static struct zorro_driver xsurf100_driver = {
	.name           = "xsurf100",
	.id_table       = xsurf100_zorro_tbl,
	.probe          = xsurf100_probe,
	.remove         = xsurf100_remove,
};

module_driver(xsurf100_driver, zorro_register_driver, zorro_unregister_driver);

MODULE_DESCRIPTION("X-Surf 100 driver");
MODULE_AUTHOR("Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de>");
MODULE_LICENSE("GPL v2");