/*
 * Core maple bus functionality
 *
 * Copyright (C) 2007 - 2009 Adrian McMenamin
 * Copyright (C) 2001 - 2008 Paul Mundt
 * Copyright (C) 2000 - 2001 YAEGASHI Takeshi
 * Copyright (C) 2001 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

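/* per-port scan state: whether each port has been probed yet, whether
 * it reported itself empty, and the unit 0 device used for rescans */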
static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
	__raw_writel(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	__raw_writel(1, MAPLE_TRIGTYPE);
	/*
	 * Maple system register
	 * bits 31 - 16	timeout in units of 20nsec
	 * bit 12	hard trigger - set 0 to keep responding to VBLANK
	 * bits 9 - 8	set 00 for 2 Mbps, 01 for 1 Mbps
	 * bits 3 - 0	delay (in 1.3ms) between VBLANK and start of DMA
	 * max delay is 11
	 */
	__raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
	__raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
	__raw_writel(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
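 *
 * For example, a driver that polls its device at 50Hz could register
 * (my_handler here is a hypothetical driver callback):
 *	maple_getcond_callback(mdev, my_handler, HZ/50,
 *			       MAPLE_FUNC_CONTROLLER);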
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);

static int maple_dma_done(void)
{
	return (__raw_readl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	kmem_cache_free(maple_queue_cache, mq->recvbuf);
	kfree(mq);
	kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
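 *
 * The packet is only queued here; it is sent out on the next DMA run.
 * Returns 0 on success or -ENOMEM if the send buffer cannot be
 * allocated. A typical call, as used for polling below, looks like:
 *	maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
 *			 MAPLE_COMMAND_GETCOND, 1, NULL);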
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
		size_t length, void *data)
{
	int ret = 0;
	void *sendbuf = NULL;

	if (length) {
		sendbuf = kcalloc(length, 4, GFP_KERNEL);
		if (!sendbuf) {
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add_tail(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	INIT_LIST_HEAD(&mq->list);
	mq->dev = mdev;
	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbuf)
		goto failed_p2;
	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
		mdev->port, mdev->unit);
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	/* zero this out to avoid kobj subsystem
	 * thinking it has already been registered */

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;

	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	init_waitqueue_head(&mdev->maple_wait);
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
	kfree(mdev->mq);
	kfree(mdev);
}

/* process the command queue into a maple command block - the final
 * command in the block keeps bit 31 of its first long word set;
 * the flag is cleared on all earlier commands
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

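	/* each frame is a transfer descriptor (last-frame flag, port,
	 * length), then the physical address of the receive buffer,
	 * then the command word (command, recipient, sender, length) */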
	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!maple_dma_done())
		return;

	/* disable DMA */
	__raw_writel(0, MAPLE_ENABLE);

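	/* if replies to the last block are still outstanding, just
	 * rearm the DMA and wait */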
	if (!list_empty(&maple_sentq))
		goto finish;

	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq)) {
		mutex_unlock(&maple_wlist_lock);
		goto finish;
	}

	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;

	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_del_init(&mq->list);
		list_add_tail(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			__flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
					     PAGE_SIZE);
	}

finish:
	maple_dma_reset();
}

/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
				       void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, error;

	recvbuf = mdev->mq->recvbuf->buf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	function = be32_to_cpu(mdev->devinfo.function);

	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
		mdev->product_name, function, mdev->port, mdev->unit);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_unsupported_device;
		dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
	} else {
		matched =
			bus_for_each_drv(&maple_bus_type, NULL, mdev,
				maple_check_matching_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			dev_info(&mdev->dev, "no driver found\n");
			mdev->driver = &maple_unsupported_device;
		}
		dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
			     mdev->unit, function);
	}

	mdev->function = function;
	mdev->dev.release = &maple_release_device;

	atomic_set(&mdev->busy, 0);
	error = device_register(&mdev->dev);
	if (error) {
		dev_warn(&mdev->dev, "could not register device at"
			" (%d, %d), with error 0x%X\n", mdev->port,
			mdev->unit, error);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int check_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *mdev = to_maple_dev(device);

	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
	    time_after(jiffies, mdev->when)) {
		/* bounce if we cannot add */
		add = maple_add_packet(mdev,
			be32_to_cpu(mdev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			mdev->when = jiffies + mdev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* Ensure we don't have block reads and devinfo
			 * calls interfering with one another - so flag the
			 * device as busy */
			if (atomic_read(&mdev->busy) == 0) {
				atomic_set(&mdev->busy, 1);
				maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
			}
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	int x, locking;
	struct maple_device *mdev;

	if (!maple_dma_done())
		return;

	__raw_writel(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	/*
	 * Set up essential commands - to fetch data and
	 * check devices are still present
	 */
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
		setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time)) {
		/*
		 * Scan the empty ports - the bus is flaky and may have
		 * misreported emptiness
		 */
		for (x = 0; x < MAPLE_PORTS; x++) {
			if (checked[x] && empty[x]) {
				mdev = baseunits[x];
				if (!mdev)
					break;
				atomic_set(&mdev->busy, 1);
				locking = maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
				if (!locking)
					break;
			}
		}

		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	}

finish:
	maple_send();
}

/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     check_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			atomic_set(&mdev_add->busy, 1);
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

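	/* units 1-5 map to bits 0-4 of the subdevice map; unit 0,
	 * the port itself, is tracked with bit 5 */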
	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
	maple_clean_submap(mdev);

	if (likely(mdev->unit != 0)) {
		/*
		 * Block devices play up
		 * and give the impression they have
		 * been removed even when still in place or
		 * trip the mtd layer when they have
		 * really gone - this code traps that eventuality
		 * and ensures we aren't overloaded with useless
		 * error messages
		 */
		if (mdev->can_unload) {
			if (!mdev->can_unload(mdev)) {
				atomic_set(&mdev->busy, 2);
				wake_up(&mdev->maple_wait);
				return;
			}
		}

		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
			mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	} else {
		if (!started || !fullscan) {
			if (checked[mdev->port] == false) {
				checked[mdev->port] = true;
				empty[mdev->port] = true;
				dev_info(&mdev->dev, "no devices attached"
					" to port %d\n", mdev->port);
			}
			return;
		}
	}
	/* Some hardware devices generate false detach messages on unit 0 */
	atomic_set(&mdev->busy, 0);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
			if (mdev->unit == 0) {
				empty[mdev->port] = false;
				maple_attach_driver(mdev);
			}
		}
	}
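	/* a devinfo reply from unit 0 also carries a bitmap of attached
	 * subunits - map in any that have changed */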
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
	if (mdev->fileerr_handler) {
		mdev->fileerr_handler(mdev, recvbuf);
		return;
	}

	dev_warn(&mdev->dev, "device at (%d, %d) reports"
		" file error 0x%X\n", mdev->port, mdev->unit,
		((int *)recvbuf)[1]);
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *mdev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	__raw_writel(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			mdev = mq->dev;
			recvbuf = mq->recvbuf->buf;
			__flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
					0x400);
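			/* first byte of the reply is the response code */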
			code = recvbuf[0];
			kfree(mq->sendbuf);
			list_del_init(&mq->list);
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(mdev);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (mdev->callback)
					mdev->callback(mq);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_FILEERR:
				maple_response_fileerr(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				dev_warn(&mdev->dev, "non-fatal error"
					" 0x%X at (%d, %d)\n", code,
					mdev->port, mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				dev_notice(&mdev->dev, "extended"
					" device information request for (%d, %d)"
					" but call is not supported\n", mdev->port,
					mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_OK:
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		started = 1;
	}
	maple_send();
}

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int maple_match_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_unsupported_device = {
	.drv = {
		.name = "maple_unsupported_device",
		.bus = &maple_bus_type,
	},
};

/*
 * maple_bus_type - core maple bus structure
 */
struct bus_type maple_bus_type = {
	.name = "maple",
	.match = maple_match_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.init_name = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	__raw_writel(0, MAPLE_ENABLE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_unsupported_device.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		dev_err(&maple_bus, "failed to allocate DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab maple "
			"DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		empty[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			retval = -ENOMEM;
			goto cleanup_cache;
		}
		baseunits[i] = mdev[i];
		atomic_set(&mdev[i]->busy, 1);
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	maple_pnp_time = jiffies + HZ;
	/* prepare initial queue */
	maple_send();
	dev_info(&maple_bus, "bus core now registered\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, &maple_unsupported_device);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, &maple_unsupported_device);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_ERR "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);