xref: /OK3568_Linux_fs/kernel/drivers/net/wan/sealevel.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *	Sealevel Systems 4021 driver.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *	(c) Copyright 1999, 2001 Alan Cox
6*4882a593Smuzhiyun  *	(c) Copyright 2001 Red Hat Inc.
7*4882a593Smuzhiyun  *	Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/mm.h>
15*4882a593Smuzhiyun #include <linux/net.h>
16*4882a593Smuzhiyun #include <linux/skbuff.h>
17*4882a593Smuzhiyun #include <linux/netdevice.h>
18*4882a593Smuzhiyun #include <linux/if_arp.h>
19*4882a593Smuzhiyun #include <linux/delay.h>
20*4882a593Smuzhiyun #include <linux/hdlc.h>
21*4882a593Smuzhiyun #include <linux/ioport.h>
22*4882a593Smuzhiyun #include <linux/init.h>
23*4882a593Smuzhiyun #include <linux/slab.h>
24*4882a593Smuzhiyun #include <net/arp.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include <asm/irq.h>
27*4882a593Smuzhiyun #include <asm/io.h>
28*4882a593Smuzhiyun #include <asm/dma.h>
29*4882a593Smuzhiyun #include <asm/byteorder.h>
30*4882a593Smuzhiyun #include "z85230.h"
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 
/* Per-channel state: one instance for each of the two Z8530 channels */
struct slvl_device
{
	struct z8530_channel *chan;	/* underlying Z8530 channel */
	int channel;			/* 0 = chanA (sync DMA), 1 = chanB (sync PIO) */
};
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 
/* One physical Sealevel 4021 board: two channel wrappers plus chip state */
struct slvl_board
{
	struct slvl_device dev[2];	/* per-channel wrappers (A, B) */
	struct z8530_dev board;		/* shared Z8530 device state */
	int iobase;			/* base I/O port (8 ports reserved) */
};
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun /*
48*4882a593Smuzhiyun  *	Network driver support routines
49*4882a593Smuzhiyun  */
50*4882a593Smuzhiyun 
dev_to_chan(struct net_device * dev)51*4882a593Smuzhiyun static inline struct slvl_device* dev_to_chan(struct net_device *dev)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	return (struct slvl_device *)dev_to_hdlc(dev)->priv;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /*
57*4882a593Smuzhiyun  *	Frame receive. Simple for our card as we do HDLC and there
58*4882a593Smuzhiyun  *	is no funny garbage involved
59*4882a593Smuzhiyun  */
60*4882a593Smuzhiyun 
sealevel_input(struct z8530_channel * c,struct sk_buff * skb)61*4882a593Smuzhiyun static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun 	/* Drop the CRC - it's not a good idea to try and negotiate it ;) */
64*4882a593Smuzhiyun 	skb_trim(skb, skb->len - 2);
65*4882a593Smuzhiyun 	skb->protocol = hdlc_type_trans(skb, c->netdevice);
66*4882a593Smuzhiyun 	skb_reset_mac_header(skb);
67*4882a593Smuzhiyun 	skb->dev = c->netdevice;
68*4882a593Smuzhiyun 	netif_rx(skb);
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun /*
72*4882a593Smuzhiyun  *	We've been placed in the UP state
73*4882a593Smuzhiyun  */
74*4882a593Smuzhiyun 
sealevel_open(struct net_device * d)75*4882a593Smuzhiyun static int sealevel_open(struct net_device *d)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun 	struct slvl_device *slvl = dev_to_chan(d);
78*4882a593Smuzhiyun 	int err = -1;
79*4882a593Smuzhiyun 	int unit = slvl->channel;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	/*
82*4882a593Smuzhiyun 	 *	Link layer up.
83*4882a593Smuzhiyun 	 */
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	switch (unit) {
86*4882a593Smuzhiyun 		case 0:
87*4882a593Smuzhiyun 			err = z8530_sync_dma_open(d, slvl->chan);
88*4882a593Smuzhiyun 			break;
89*4882a593Smuzhiyun 		case 1:
90*4882a593Smuzhiyun 			err = z8530_sync_open(d, slvl->chan);
91*4882a593Smuzhiyun 			break;
92*4882a593Smuzhiyun 	}
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	if (err)
95*4882a593Smuzhiyun 		return err;
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	err = hdlc_open(d);
98*4882a593Smuzhiyun 	if (err) {
99*4882a593Smuzhiyun 		switch (unit) {
100*4882a593Smuzhiyun 			case 0:
101*4882a593Smuzhiyun 				z8530_sync_dma_close(d, slvl->chan);
102*4882a593Smuzhiyun 				break;
103*4882a593Smuzhiyun 			case 1:
104*4882a593Smuzhiyun 				z8530_sync_close(d, slvl->chan);
105*4882a593Smuzhiyun 				break;
106*4882a593Smuzhiyun 		}
107*4882a593Smuzhiyun 		return err;
108*4882a593Smuzhiyun 	}
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	slvl->chan->rx_function = sealevel_input;
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	/*
113*4882a593Smuzhiyun 	 *	Go go go
114*4882a593Smuzhiyun 	 */
115*4882a593Smuzhiyun 	netif_start_queue(d);
116*4882a593Smuzhiyun 	return 0;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
sealevel_close(struct net_device * d)119*4882a593Smuzhiyun static int sealevel_close(struct net_device *d)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	struct slvl_device *slvl = dev_to_chan(d);
122*4882a593Smuzhiyun 	int unit = slvl->channel;
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	/*
125*4882a593Smuzhiyun 	 *	Discard new frames
126*4882a593Smuzhiyun 	 */
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	slvl->chan->rx_function = z8530_null_rx;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	hdlc_close(d);
131*4882a593Smuzhiyun 	netif_stop_queue(d);
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	switch (unit) {
134*4882a593Smuzhiyun 		case 0:
135*4882a593Smuzhiyun 			z8530_sync_dma_close(d, slvl->chan);
136*4882a593Smuzhiyun 			break;
137*4882a593Smuzhiyun 		case 1:
138*4882a593Smuzhiyun 			z8530_sync_close(d, slvl->chan);
139*4882a593Smuzhiyun 			break;
140*4882a593Smuzhiyun 	}
141*4882a593Smuzhiyun 	return 0;
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun 
/* Device ioctl: everything is delegated to the generic HDLC layer */
static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
	return hdlc_ioctl(d, ifr, cmd);
}
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun /*
152*4882a593Smuzhiyun  *	Passed network frames, fire them downwind.
153*4882a593Smuzhiyun  */
154*4882a593Smuzhiyun 
sealevel_queue_xmit(struct sk_buff * skb,struct net_device * d)155*4882a593Smuzhiyun static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
156*4882a593Smuzhiyun 					     struct net_device *d)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun 
sealevel_attach(struct net_device * dev,unsigned short encoding,unsigned short parity)161*4882a593Smuzhiyun static int sealevel_attach(struct net_device *dev, unsigned short encoding,
162*4882a593Smuzhiyun 			   unsigned short parity)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun 	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
165*4882a593Smuzhiyun 		return 0;
166*4882a593Smuzhiyun 	return -EINVAL;
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun 
/* Net device callbacks. Transmit goes through the generic HDLC entry
 * (hdlc_start_xmit); the driver's own transmit hook (sealevel_queue_xmit)
 * is installed on the HDLC device in slvl_setup().
 */
static const struct net_device_ops sealevel_ops = {
	.ndo_open       = sealevel_open,
	.ndo_stop       = sealevel_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = sealevel_ioctl,
};
175*4882a593Smuzhiyun 
slvl_setup(struct slvl_device * sv,int iobase,int irq)176*4882a593Smuzhiyun static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun 	struct net_device *dev = alloc_hdlcdev(sv);
179*4882a593Smuzhiyun 	if (!dev)
180*4882a593Smuzhiyun 		return -1;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	dev_to_hdlc(dev)->attach = sealevel_attach;
183*4882a593Smuzhiyun 	dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
184*4882a593Smuzhiyun 	dev->netdev_ops = &sealevel_ops;
185*4882a593Smuzhiyun 	dev->base_addr = iobase;
186*4882a593Smuzhiyun 	dev->irq = irq;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	if (register_hdlc_device(dev)) {
189*4882a593Smuzhiyun 		pr_err("unable to register HDLC device\n");
190*4882a593Smuzhiyun 		free_netdev(dev);
191*4882a593Smuzhiyun 		return -1;
192*4882a593Smuzhiyun 	}
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	sv->chan->netdevice = dev;
195*4882a593Smuzhiyun 	return 0;
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun /*
200*4882a593Smuzhiyun  *	Allocate and setup Sealevel board.
201*4882a593Smuzhiyun  */
202*4882a593Smuzhiyun 
/*
 *	Allocate and set up a Sealevel board: reserve the I/O region,
 *	wire up both Z8530 channels, claim IRQ and DMA resources, probe
 *	the chip, and register the two HDLC network devices.
 *
 *	Returns the board on success or NULL on any failure; partially
 *	acquired resources are released through the goto unwind chain.
 *
 *	NOTE(review): the txdma/rxdma parameters are accepted but never
 *	used — the DMA channels are hard-coded to 3 (TX) and 1 (RX)
 *	below. Confirm whether the module parameters were meant to be
 *	honoured here.
 */
static __init struct slvl_board *slvl_init(int iobase, int irq,
					   int txdma, int rxdma, int slow)
{
	struct z8530_dev *dev;
	struct slvl_board *b;

	/*
	 *	Get the needed I/O space
	 */

	if (!request_region(iobase, 8, "Sealevel 4021")) {
		pr_warn("I/O 0x%X already in use\n", iobase);
		return NULL;
	}

	b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
	if (!b)
		goto err_kzalloc;

	/* Channel 0 is the DMA-driven chanA, channel 1 the PIO chanB */
	b->dev[0].chan = &b->board.chanA;
	b->dev[0].channel = 0;

	b->dev[1].chan = &b->board.chanB;
	b->dev[1].channel = 1;

	dev = &b->board;

	/*
	 *	Stuff in the I/O addressing
	 */

	dev->active = 0;

	b->iobase = iobase;

	/*
	 *	Select 8530 delays for the old board.
	 *	NOTE(review): this ORs Z8530_PORT_SLEEP into the local
	 *	'iobase'; error paths below that reach release_region()
	 *	then pass the modified value — verify the flag lies
	 *	outside the port-number bits, or release b->iobase.
	 */

	if (slow)
		iobase |= Z8530_PORT_SLEEP;

	dev->chanA.ctrlio = iobase + 1;
	dev->chanA.dataio = iobase;
	dev->chanB.ctrlio = iobase + 3;
	dev->chanB.dataio = iobase + 2;

	/* No per-channel IRQ handlers until the chip is initialised */
	dev->chanA.irqs = &z8530_nop;
	dev->chanB.irqs = &z8530_nop;

	/*
	 *	Assert DTR enable DMA (card control port at iobase + 4)
	 */

	outb(3 | (1 << 7), b->iobase + 4);


	/* We want a fast IRQ for this device. Actually we'd like an even faster
	   IRQ ;) - This is one driver RtLinux is made for */

	if (request_irq(irq, z8530_interrupt, 0,
			"SeaLevel", dev) < 0) {
		pr_warn("IRQ %d already in use\n", irq);
		goto err_request_irq;
	}

	dev->irq = irq;
	dev->chanA.private = &b->dev[0];
	dev->chanB.private = &b->dev[1];
	dev->chanA.dev = dev;
	dev->chanB.dev = dev;

	/* NOTE(review): hard-coded DMA channels; see function comment */
	dev->chanA.txdma = 3;
	dev->chanA.rxdma = 1;
	if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
		goto err_dma_tx;

	if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
		goto err_dma_rx;

	/* Keep the line quiet while the chip is probed and programmed */
	disable_irq(irq);

	/*
	 *	Begin normal initialise
	 */

	if (z8530_init(dev) != 0) {
		pr_err("Z8530 series device not found\n");
		enable_irq(irq);
		goto free_hw;
	}
	/* Load the register preset matching the detected chip variant */
	if (dev->type == Z85C30) {
		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
	} else {
		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
		z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
	}

	/*
	 *	Now we can take the IRQ
	 */

	enable_irq(irq);

	if (slvl_setup(&b->dev[0], iobase, irq))
		goto free_hw;
	if (slvl_setup(&b->dev[1], iobase, irq))
		goto free_netdev0;

	z8530_describe(dev, "I/O", iobase);
	dev->active = 1;
	return b;

	/* Unwind in reverse order of acquisition */
free_netdev0:
	unregister_hdlc_device(b->dev[0].chan->netdevice);
	free_netdev(b->dev[0].chan->netdevice);
free_hw:
	free_dma(dev->chanA.rxdma);
err_dma_rx:
	free_dma(dev->chanA.txdma);
err_dma_tx:
	free_irq(irq, dev);
err_request_irq:
	kfree(b);
err_kzalloc:
	release_region(iobase, 8);
	return NULL;
}
332*4882a593Smuzhiyun 
/*
 *	Tear down a board: quiesce the Z8530 first, unregister and free
 *	both network devices, then release IRQ, DMA channels, and the
 *	I/O region (reverse of the acquisition order in slvl_init()).
 */
static void __exit slvl_shutdown(struct slvl_board *b)
{
	int u;

	z8530_shutdown(&b->board);

	for (u = 0; u < 2; u++) {
		struct net_device *d = b->dev[u].chan->netdevice;
		unregister_hdlc_device(d);
		free_netdev(d);
	}

	free_irq(b->board.irq, &b->board);
	free_dma(b->board.chanA.rxdma);
	free_dma(b->board.chanA.txdma);
	/* DMA off on the card, drop DTR */
	/* NOTE(review): init wrote the control value to iobase + 4, but
	 * this writes to iobase — confirm which port is the control port.
	 */
	outb(0, b->iobase);
	release_region(b->iobase, 8);
	kfree(b);
}
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun static int io=0x238;
356*4882a593Smuzhiyun static int txdma=1;
357*4882a593Smuzhiyun static int rxdma=3;
358*4882a593Smuzhiyun static int irq=5;
359*4882a593Smuzhiyun static bool slow=false;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun module_param_hw(io, int, ioport, 0);
362*4882a593Smuzhiyun MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
363*4882a593Smuzhiyun module_param_hw(txdma, int, dma, 0);
364*4882a593Smuzhiyun MODULE_PARM_DESC(txdma, "Transmit DMA channel");
365*4882a593Smuzhiyun module_param_hw(rxdma, int, dma, 0);
366*4882a593Smuzhiyun MODULE_PARM_DESC(rxdma, "Receive DMA channel");
367*4882a593Smuzhiyun module_param_hw(irq, int, irq, 0);
368*4882a593Smuzhiyun MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
369*4882a593Smuzhiyun module_param(slow, bool, 0);
370*4882a593Smuzhiyun MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun MODULE_AUTHOR("Alan Cox");
373*4882a593Smuzhiyun MODULE_LICENSE("GPL");
374*4882a593Smuzhiyun MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun static struct slvl_board *slvl_unit;
377*4882a593Smuzhiyun 
slvl_init_module(void)378*4882a593Smuzhiyun static int __init slvl_init_module(void)
379*4882a593Smuzhiyun {
380*4882a593Smuzhiyun 	slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	return slvl_unit ? 0 : -ENODEV;
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun 
slvl_cleanup_module(void)385*4882a593Smuzhiyun static void __exit slvl_cleanup_module(void)
386*4882a593Smuzhiyun {
387*4882a593Smuzhiyun 	if (slvl_unit)
388*4882a593Smuzhiyun 		slvl_shutdown(slvl_unit);
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun module_init(slvl_init_module);
392*4882a593Smuzhiyun module_exit(slvl_cleanup_module);
393