// SPDX-License-Identifier: GPL-2.0
/*
 * Device driver for the Cuda and Egret system controllers found on PowerMacs
 * and 68k Macs.
 *
 * The Cuda or Egret is a 6805 microcontroller interfaced to the 6522 VIA.
 * This MCU controls system power, Parameter RAM, Real Time Clock and the
 * Apple Desktop Bus (ADB) that connects to the keyboard and mouse.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#ifdef CONFIG_PPC
#include <asm/prom.h>
#include <asm/machdep.h>
#else
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
#endif
#include <asm/io.h>
#include <linux/init.h>

static volatile unsigned char __iomem *via;
static DEFINE_SPINLOCK(cuda_lock);

/* VIA registers - spaced 0x200 bytes apart */
#define RS		0x200		/* skip between registers */
#define B		0		/* B-side data */
#define A		RS		/* A-side data */
#define DIRB		(2*RS)		/* B-side direction (1=output) */
#define DIRA		(3*RS)		/* A-side direction (1=output) */
#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
#define T2CL		(8*RS)		/* Timer 2 ctr/latch (low 8 bits) */
#define T2CH		(9*RS)		/* Timer 2 counter (high 8 bits) */
#define SR		(10*RS)		/* Shift register */
#define ACR		(11*RS)		/* Auxiliary control register */
#define PCR		(12*RS)		/* Peripheral control register */
#define IFR		(13*RS)		/* Interrupt flag register */
#define IER		(14*RS)		/* Interrupt enable register */
#define ANH		(15*RS)		/* A-side data, no handshake */

/*
 * When the Cuda design replaced the Egret, some signal names and
 * logic sense changed. They all serve the same purposes, however.
 *
 *   VIA pin       |  Egret pin
 * ----------------+------------------------------------------
 *   PB3 (input)   |  Transceiver session   (active low)
 *   PB4 (output)  |  VIA full              (active high)
 *   PB5 (output)  |  System session        (active high)
 *
 *   VIA pin       |  Cuda pin
 * ----------------+------------------------------------------
 *   PB3 (input)   |  Transfer request      (active low)
 *   PB4 (output)  |  Byte acknowledge      (active low)
 *   PB5 (output)  |  Transfer in progress  (active low)
 */
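
/*
 * Transfer handshake, as implemented by the state machine below: the host
 * opens a session by asserting TIP (and, on Egret, TACK), clocks each byte
 * through the VIA shift register, and toggles TACK to acknowledge it; the
 * MCU keeps TREQ asserted while it still has bytes to send and negates it
 * on the last byte. The assert/negate helpers below hide the Egret's
 * inverted signal polarity shown in the table above.
 */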

/* Bits in Port B data register */
#define TREQ		0x08		/* Transfer request */
#define TACK		0x10		/* Transfer acknowledge */
#define TIP		0x20		/* Transfer in progress */

/* Bits in ACR */
#define SR_CTRL		0x1c		/* Shift register control bits */
#define SR_EXT		0x0c		/* Shift on external clock */
#define SR_OUT		0x10		/* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET		0x80		/* set bits in IER */
#define IER_CLR		0		/* clear bits in IER */
#define SR_INT		0x04		/* Shift register full/empty */

/* Duration of byte acknowledgement pulse (us) */
#define EGRET_TACK_ASSERTED_DELAY	300
#define EGRET_TACK_NEGATED_DELAY	400

/* Interval from interrupt to start of session (us) */
#define EGRET_SESSION_DELAY		450

#ifdef CONFIG_PPC
#define mcu_is_egret	false
#else
static bool mcu_is_egret;
#endif

static inline bool TREQ_asserted(u8 portb)
{
	return !(portb & TREQ);
}

static inline void assert_TIP(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_SESSION_DELAY);
		out_8(&via[B], in_8(&via[B]) | TIP);
	} else
		out_8(&via[B], in_8(&via[B]) & ~TIP);
}

static inline void assert_TIP_and_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_SESSION_DELAY);
		out_8(&via[B], in_8(&via[B]) | TIP | TACK);
	} else
		out_8(&via[B], in_8(&via[B]) & ~(TIP | TACK));
}

static inline void assert_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_TACK_NEGATED_DELAY);
		out_8(&via[B], in_8(&via[B]) | TACK);
	} else
		out_8(&via[B], in_8(&via[B]) & ~TACK);
}

static inline void toggle_TACK(void)
{
	out_8(&via[B], in_8(&via[B]) ^ TACK);
}

static inline void negate_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_TACK_ASSERTED_DELAY);
		out_8(&via[B], in_8(&via[B]) & ~TACK);
	} else
		out_8(&via[B], in_8(&via[B]) | TACK);
}

static inline void negate_TIP_and_TACK(void)
{
	if (mcu_is_egret) {
		udelay(EGRET_TACK_ASSERTED_DELAY);
		out_8(&via[B], in_8(&via[B]) & ~(TIP | TACK));
	} else
		out_8(&via[B], in_8(&via[B]) | TIP | TACK);
}

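/*
 * Transmit path: idle -> sent_first_byte -> sending -> awaiting_reply
 * (or straight back to idle when no reply is expected); a collision in
 * sent_first_byte also drops back to idle. Receive path, used both for
 * replies and for unsolicited packets: reading -> read_done -> idle.
 * All transitions are driven from cuda_interrupt() below.
 */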
static enum cuda_state {
    idle,
    sent_first_byte,
    sending,
    reading,
    read_done,
    awaiting_reply
} cuda_state;

static struct adb_request *current_req;
static struct adb_request *last_req;
static unsigned char cuda_rbuf[16];
static unsigned char *reply_ptr;
static int reading_reply;
static int data_index;
static int cuda_irq;
#ifdef CONFIG_PPC
static struct device_node *vias;
#endif
static int cuda_fully_inited;

#ifdef CONFIG_ADB
static int cuda_probe(void);
static int cuda_send_request(struct adb_request *req, int sync);
static int cuda_adb_autopoll(int devs);
static int cuda_reset_adb_bus(void);
#endif /* CONFIG_ADB */

static int cuda_init_via(void);
static void cuda_start(void);
static irqreturn_t cuda_interrupt(int irq, void *arg);
static void cuda_input(unsigned char *buf, int nb);
void cuda_poll(void);
static int cuda_write(struct adb_request *req);

int cuda_request(struct adb_request *req,
		 void (*done)(struct adb_request *), int nbytes, ...);

#ifdef CONFIG_ADB
struct adb_driver via_cuda_driver = {
	.name         = "CUDA",
	.probe        = cuda_probe,
	.send_request = cuda_send_request,
	.autopoll     = cuda_adb_autopoll,
	.poll         = cuda_poll,
	.reset_bus    = cuda_reset_adb_bus,
};
#endif /* CONFIG_ADB */

#ifdef CONFIG_MAC
int __init find_via_cuda(void)
{
    struct adb_request req;
    int err;

    if (macintosh_config->adb_type != MAC_ADB_CUDA &&
        macintosh_config->adb_type != MAC_ADB_EGRET)
	return 0;

    via = via1;
    cuda_state = idle;
    mcu_is_egret = macintosh_config->adb_type == MAC_ADB_EGRET;

    err = cuda_init_via();
    if (err) {
	printk(KERN_ERR "cuda_init_via() failed\n");
	via = NULL;
	return 0;
    }

    /* enable autopoll */
    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
    while (!req.complete)
	cuda_poll();

    return 1;
}
#else
int __init find_via_cuda(void)
{
    struct adb_request req;
    phys_addr_t taddr;
    const u32 *reg;
    int err;

    if (vias != 0)
	return 1;
    vias = of_find_node_by_name(NULL, "via-cuda");
    if (vias == 0)
	return 0;

    reg = of_get_property(vias, "reg", NULL);
    if (reg == NULL) {
	    printk(KERN_ERR "via-cuda: No \"reg\" property !\n");
	    goto fail;
    }
    taddr = of_translate_address(vias, reg);
    if (taddr == 0) {
	    printk(KERN_ERR "via-cuda: Can't translate address !\n");
	    goto fail;
    }
    via = ioremap(taddr, 0x2000);
    if (via == NULL) {
	    printk(KERN_ERR "via-cuda: Can't map address !\n");
	    goto fail;
    }

    cuda_state = idle;
    sys_ctrler = SYS_CTRLER_CUDA;

    err = cuda_init_via();
    if (err) {
	printk(KERN_ERR "cuda_init_via() failed\n");
	via = NULL;
	return 0;
    }

    /* Clear and enable interrupts, but only on PPC. On 68K it's done */
    /* for us by the main VIA driver in arch/m68k/mac/via.c */

    out_8(&via[IFR], 0x7f);	/* clear interrupts by writing 1s */
    out_8(&via[IER], IER_SET|SR_INT); /* enable interrupt from SR */

    /* enable autopoll */
    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1);
    while (!req.complete)
	cuda_poll();

    return 1;

 fail:
    of_node_put(vias);
    vias = NULL;
    return 0;
}
#endif /* !defined CONFIG_MAC */

static int __init via_cuda_start(void)
{
    if (via == NULL)
	return -ENODEV;

#ifdef CONFIG_MAC
    cuda_irq = IRQ_MAC_ADB;
#else
    cuda_irq = irq_of_parse_and_map(vias, 0);
    if (!cuda_irq) {
	printk(KERN_ERR "via-cuda: can't map interrupts for %pOF\n",
	       vias);
	return -ENODEV;
    }
#endif

    if (request_irq(cuda_irq, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
	printk(KERN_ERR "via-cuda: can't request irq %d\n", cuda_irq);
	return -EAGAIN;
    }

    pr_info("Macintosh Cuda and Egret driver.\n");

    cuda_fully_inited = 1;
    return 0;
}

device_initcall(via_cuda_start);

#ifdef CONFIG_ADB
static int
cuda_probe(void)
{
#ifdef CONFIG_PPC
    if (sys_ctrler != SYS_CTRLER_CUDA)
	return -ENODEV;
#else
    if (macintosh_config->adb_type != MAC_ADB_CUDA &&
        macintosh_config->adb_type != MAC_ADB_EGRET)
	return -ENODEV;
#endif
    if (via == NULL)
	return -ENODEV;
    return 0;
}
#endif /* CONFIG_ADB */

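/*
 * Bring the VIA and the Egret to a known idle state: finish or abort any
 * transfer that was already in progress when the driver took over, then
 * clear a pending shift-register interrupt.
 */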
static int __init sync_egret(void)
{
	if (TREQ_asserted(in_8(&via[B]))) {
		/* Complete the inbound transfer */
		assert_TIP_and_TACK();
		while (1) {
			negate_TACK();
			mdelay(1);
			(void)in_8(&via[SR]);
			assert_TACK();
			if (!TREQ_asserted(in_8(&via[B])))
				break;
		}
		negate_TIP_and_TACK();
	} else if (in_8(&via[B]) & TIP) {
		/* Terminate the outbound transfer */
		negate_TACK();
		assert_TACK();
		mdelay(1);
		negate_TIP_and_TACK();
	}
	/* Clear shift register interrupt */
	if (in_8(&via[IFR]) & SR_INT)
		(void)in_8(&via[SR]);
	return 0;
}

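/* Poll for a condition, giving up after 1000 polls at 100 us each
 * (roughly 100 ms) and failing the caller with -ENXIO.
 */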
#define WAIT_FOR(cond, what)					\
    do {                                                        \
	int x;							\
	for (x = 1000; !(cond); --x) {				\
	    if (x == 0) {					\
		pr_err("Timeout waiting for " what "\n");	\
		return -ENXIO;					\
	    }							\
	    udelay(100);					\
	}							\
    } while (0)

static int
__init cuda_init_via(void)
{
#ifdef CONFIG_PPC
    out_8(&via[IER], 0x7f);					/* disable interrupts from VIA */
    (void)in_8(&via[IER]);
#else
    out_8(&via[IER], SR_INT);					/* disable SR interrupt from VIA */
#endif

    out_8(&via[DIRB], (in_8(&via[DIRB]) | TACK | TIP) & ~TREQ);	/* TACK & TIP out */
    out_8(&via[ACR], (in_8(&via[ACR]) & ~SR_CTRL) | SR_EXT);	/* SR data in */
    (void)in_8(&via[SR]);					/* clear any left-over data */

    if (mcu_is_egret)
	return sync_egret();

    negate_TIP_and_TACK();

    /* delay 4ms and then clear any pending interrupt */
    mdelay(4);
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);

    /* sync with the CUDA - assert TACK without TIP */
    assert_TACK();

    /* wait for the CUDA to assert TREQ in response */
    WAIT_FOR(TREQ_asserted(in_8(&via[B])), "CUDA response to sync");

    /* wait for the interrupt and then clear it */
    WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (2)");
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);

    /* finish the sync by negating TACK */
    negate_TACK();

    /* wait for the CUDA to negate TREQ and the corresponding interrupt */
    WAIT_FOR(!TREQ_asserted(in_8(&via[B])), "CUDA response to sync (3)");
    WAIT_FOR(in_8(&via[IFR]) & SR_INT, "CUDA response to sync (4)");
    (void)in_8(&via[SR]);
    out_8(&via[IFR], SR_INT);

    return 0;
}

#ifdef CONFIG_ADB
/* Send an ADB command */
static int
cuda_send_request(struct adb_request *req, int sync)
{
    int i;

    if ((via == NULL) || !cuda_fully_inited) {
	req->complete = 1;
	return -ENXIO;
    }

    req->reply_expected = 1;

    i = cuda_write(req);
    if (i)
	return i;

    if (sync) {
	while (!req->complete)
	    cuda_poll();
    }
    return 0;
}


/* Enable/disable autopolling */
static int
cuda_adb_autopoll(int devs)
{
    struct adb_request req;

    if ((via == NULL) || !cuda_fully_inited)
	return -ENXIO;

    cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, (devs? 1: 0));
    while (!req.complete)
	cuda_poll();
    return 0;
}

/* Reset adb bus - how do we do this?? */
static int
cuda_reset_adb_bus(void)
{
    struct adb_request req;

    if ((via == NULL) || !cuda_fully_inited)
	return -ENXIO;

    cuda_request(&req, NULL, 2, ADB_PACKET, 0);		/* maybe? */
    while (!req.complete)
	cuda_poll();
    return 0;
}
#endif /* CONFIG_ADB */

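/*
 * Typical caller pattern (a sketch; cuda_get_time() below is the in-file
 * example): build a request on the stack, pass a done callback or NULL,
 * and poll with cuda_poll() until req.complete is set:
 *
 *	struct adb_request req;
 *
 *	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME);
 *	while (!req.complete)
 *		cuda_poll();
 *	(the reply is then in req.reply[], length req.reply_len)
 */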
/* Construct and send a cuda request */
int
cuda_request(struct adb_request *req, void (*done)(struct adb_request *),
	     int nbytes, ...)
{
    va_list list;
    int i;

    if (via == NULL) {
	req->complete = 1;
	return -ENXIO;
    }

    req->nbytes = nbytes;
    req->done = done;
    va_start(list, nbytes);
    for (i = 0; i < nbytes; ++i)
	req->data[i] = va_arg(list, int);
    va_end(list);
    req->reply_expected = 1;
    return cuda_write(req);
}
EXPORT_SYMBOL(cuda_request);

static int
cuda_write(struct adb_request *req)
{
    unsigned long flags;

    if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) {
	req->complete = 1;
	return -EINVAL;
    }
    req->next = NULL;
    req->sent = 0;
    req->complete = 0;
    req->reply_len = 0;

    spin_lock_irqsave(&cuda_lock, flags);
    if (current_req != 0) {
	last_req->next = req;
	last_req = req;
    } else {
	current_req = req;
	last_req = req;
	if (cuda_state == idle)
	    cuda_start();
    }
    spin_unlock_irqrestore(&cuda_lock, flags);

    return 0;
}

static void
cuda_start(void)
{
    /* assert cuda_state == idle */
    if (current_req == NULL)
	return;
    data_index = 0;
    if (TREQ_asserted(in_8(&via[B])))
	return;			/* a byte is coming in from the CUDA */

    /* set the shift register to shift out and send a byte */
    out_8(&via[ACR], in_8(&via[ACR]) | SR_OUT);
    out_8(&via[SR], current_req->data[data_index++]);
    if (mcu_is_egret)
	assert_TIP_and_TACK();
    else
	assert_TIP();
    cuda_state = sent_first_byte;
}

void
cuda_poll(void)
{
	cuda_interrupt(0, NULL);
}
EXPORT_SYMBOL(cuda_poll);

#define ARRAY_FULL(a, p)	((p) - (a) == ARRAY_SIZE(a))

static irqreturn_t
cuda_interrupt(int irq, void *arg)
{
    unsigned long flags;
    u8 status;
    struct adb_request *req = NULL;
    unsigned char ibuf[16];
    int ibuf_len = 0;
    int complete = 0;
    bool full;

    spin_lock_irqsave(&cuda_lock, flags);

    /* On powermacs, this handler is registered for the VIA IRQ. But they use
     * just the shift register IRQ -- other VIA interrupt sources are disabled.
     * On m68k macs, the VIA IRQ sources are dispatched individually. Unless
     * we are polling, the shift register IRQ flag has already been cleared.
     */

#ifdef CONFIG_MAC
    if (!arg)
#endif
    {
        if ((in_8(&via[IFR]) & SR_INT) == 0) {
            spin_unlock_irqrestore(&cuda_lock, flags);
            return IRQ_NONE;
        } else {
            out_8(&via[IFR], SR_INT);
        }
    }

    status = in_8(&via[B]) & (TIP | TACK | TREQ);

    switch (cuda_state) {
    case idle:
	/* System controller has unsolicited data for us */
	(void)in_8(&via[SR]);
idle_state:
	assert_TIP();
	cuda_state = reading;
	reply_ptr = cuda_rbuf;
	reading_reply = 0;
	break;

    case awaiting_reply:
	/* System controller has reply data for us */
	(void)in_8(&via[SR]);
	assert_TIP();
	cuda_state = reading;
	reply_ptr = current_req->reply;
	reading_reply = 1;
	break;

    case sent_first_byte:
	if (TREQ_asserted(status)) {
	    /* collision */
	    out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
	    (void)in_8(&via[SR]);
	    negate_TIP_and_TACK();
	    cuda_state = idle;
	    /* Egret does not raise an "aborted" interrupt */
	    if (mcu_is_egret)
		goto idle_state;
	} else {
	    out_8(&via[SR], current_req->data[data_index++]);
	    toggle_TACK();
	    if (mcu_is_egret)
		assert_TACK();
	    cuda_state = sending;
	}
	break;

    case sending:
	req = current_req;
	if (data_index >= req->nbytes) {
	    out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
	    (void)in_8(&via[SR]);
	    negate_TIP_and_TACK();
	    req->sent = 1;
	    if (req->reply_expected) {
		cuda_state = awaiting_reply;
	    } else {
		current_req = req->next;
		complete = 1;
		/* not sure about this */
		cuda_state = idle;
		cuda_start();
	    }
	} else {
	    out_8(&via[SR], req->data[data_index++]);
	    toggle_TACK();
	    if (mcu_is_egret)
		assert_TACK();
	}
	break;

    case reading:
	full = reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
	                     : ARRAY_FULL(cuda_rbuf, reply_ptr);
	if (full)
	    (void)in_8(&via[SR]);
	else
	    *reply_ptr++ = in_8(&via[SR]);
	if (!TREQ_asserted(status) || full) {
	    if (mcu_is_egret)
		assert_TACK();
	    /* that's all folks */
	    negate_TIP_and_TACK();
	    cuda_state = read_done;
	    /* Egret does not raise a "read done" interrupt */
	    if (mcu_is_egret)
		goto read_done_state;
	} else {
	    toggle_TACK();
	    if (mcu_is_egret)
		negate_TACK();
	}
	break;

    case read_done:
	(void)in_8(&via[SR]);
read_done_state:
	if (reading_reply) {
	    req = current_req;
	    req->reply_len = reply_ptr - req->reply;
	    if (req->data[0] == ADB_PACKET) {
		/* Have to adjust the reply from ADB commands */
		if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) {
		    /* the 0x2 bit indicates no response */
		    req->reply_len = 0;
		} else {
		    /* leave just the command and result bytes in the reply */
		    req->reply_len -= 2;
		    memmove(req->reply, req->reply + 2, req->reply_len);
		}
	    }
	    current_req = req->next;
	    complete = 1;
	    reading_reply = 0;
	} else {
	    /* This is tricky. We must break the spinlock to call
	     * cuda_input. However, doing so means we might get
	     * re-entered from another CPU getting an interrupt
	     * or calling cuda_poll(). I ended up using the stack
	     * (it's only for 16 bytes) and moving the actual
	     * call to cuda_input to outside of the lock.
	     */
	    ibuf_len = reply_ptr - cuda_rbuf;
	    memcpy(ibuf, cuda_rbuf, ibuf_len);
	}
	reply_ptr = cuda_rbuf;
	cuda_state = idle;
	cuda_start();
	if (cuda_state == idle && TREQ_asserted(in_8(&via[B]))) {
	    assert_TIP();
	    cuda_state = reading;
	}
	break;

    default:
	pr_err("cuda_interrupt: unknown cuda_state %d?\n", cuda_state);
    }
    spin_unlock_irqrestore(&cuda_lock, flags);
    if (complete && req) {
	void (*done)(struct adb_request *) = req->done;
	mb();
	req->complete = 1;
	/* Here, we assume that if the request has a done member, the
	 * struct request will survive to setting req->complete to 1
	 */
	if (done)
		(*done)(req);
    }
    if (ibuf_len)
	cuda_input(ibuf, ibuf_len);
    return IRQ_HANDLED;
}

static void
cuda_input(unsigned char *buf, int nb)
{
    switch (buf[0]) {
    case ADB_PACKET:
#ifdef CONFIG_XMON
	if (nb == 5 && buf[2] == 0x2c) {
	    extern int xmon_wants_key, xmon_adb_keycode;
	    if (xmon_wants_key) {
		xmon_adb_keycode = buf[3];
		return;
	    }
	}
#endif /* CONFIG_XMON */
#ifdef CONFIG_ADB
	adb_input(buf+2, nb-2, buf[1] & 0x40);
#endif /* CONFIG_ADB */
	break;

    case TIMER_PACKET:
	/* Egret sends these periodically. Might be useful as a 'heartbeat'
	 * to trigger a recovery for the VIA shift register errata.
	 */
	break;

    default:
	print_hex_dump(KERN_INFO, "cuda_input: ", DUMP_PREFIX_NONE, 32, 1,
	               buf, nb, false);
    }
}

/* Offset between Unix time (1970-based) and Mac time (1904-based) */
#define RTC_OFFSET	2082844800
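/* (1970-01-01 is 24107 days after 1904-01-01; 24107 * 86400 = 2082844800.) */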

time64_t cuda_get_time(void)
{
	struct adb_request req;
	u32 now;

	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
		return 0;
	while (!req.complete)
		cuda_poll();
	if (req.reply_len != 7)
		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
	now = (req.reply[3] << 24) + (req.reply[4] << 16) +
	      (req.reply[5] << 8) + req.reply[6];
	return (time64_t)now - RTC_OFFSET;
}

int cuda_set_rtc_time(struct rtc_time *tm)
{
	u32 now;
	struct adb_request req;

	now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
	if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
	                 now >> 24, now >> 16, now >> 8, now) < 0)
		return -ENXIO;
	while (!req.complete)
		cuda_poll();
	if ((req.reply_len != 3) && (req.reply_len != 7))
		pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
	return 0;
}