xref: /OK3568_Linux_fs/kernel/drivers/block/xsysace.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Xilinx SystemACE device driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright 2007 Secret Lab Technologies Ltd.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun /*
9*4882a593Smuzhiyun  * The SystemACE chip is designed to configure FPGAs by loading an FPGA
10*4882a593Smuzhiyun  * bitstream from a file on a CF card and squirting it into FPGAs connected
11*4882a593Smuzhiyun  * to the SystemACE JTAG chain.  It also has the advantage of providing an
12*4882a593Smuzhiyun  * MPU interface which can be used to control the FPGA configuration process
13*4882a593Smuzhiyun  * and to use the attached CF card for general purpose storage.
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * This driver is a block device driver for the SystemACE.
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  * Initialization:
18*4882a593Smuzhiyun  *    The driver registers itself as a platform_device driver at module
19*4882a593Smuzhiyun  *    load time.  The platform bus will take care of calling the
20*4882a593Smuzhiyun  *    ace_probe() method for all SystemACE instances in the system.  Any
21*4882a593Smuzhiyun  *    number of SystemACE instances are supported.  ace_probe() calls
22*4882a593Smuzhiyun  *    ace_setup() which initializes all data structures, reads the CF
23*4882a593Smuzhiyun  *    id structure and registers the device.
24*4882a593Smuzhiyun  *
25*4882a593Smuzhiyun  * Processing:
26*4882a593Smuzhiyun  *    Just about all of the heavy lifting in this driver is performed by
27*4882a593Smuzhiyun  *    a Finite State Machine (FSM).  The driver needs to wait on a number
28*4882a593Smuzhiyun  *    of events; some raised by interrupts, some which need to be polled
29*4882a593Smuzhiyun  *    for.  Describing all of the behaviour in a FSM seems to be the
30*4882a593Smuzhiyun  *    easiest way to keep the complexity low and make it easy to
31*4882a593Smuzhiyun  *    understand what the driver is doing.  If the block ops or the
32*4882a593Smuzhiyun  *    request function need to interact with the hardware, then they
33*4882a593Smuzhiyun  *    simply need to flag the request and kick off FSM processing.
34*4882a593Smuzhiyun  *
35*4882a593Smuzhiyun  *    The FSM itself is atomic-safe code which can be run from any
36*4882a593Smuzhiyun  *    context.  The general process flow is:
37*4882a593Smuzhiyun  *    1. obtain the ace->lock spinlock.
38*4882a593Smuzhiyun  *    2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
39*4882a593Smuzhiyun  *       cleared.
40*4882a593Smuzhiyun  *    3. release the lock.
41*4882a593Smuzhiyun  *
42*4882a593Smuzhiyun  *    Individual states do not sleep in any way.  If a condition needs to
43*4882a593Smuzhiyun  *    be waited for then the state must clear the fsm_continue flag and
44*4882a593Smuzhiyun  *    either schedule the FSM to be run again at a later time, or expect
45*4882a593Smuzhiyun  *    an interrupt to call the FSM when the desired condition is met.
46*4882a593Smuzhiyun  *
47*4882a593Smuzhiyun  *    In normal operation, the FSM is processed at interrupt context
48*4882a593Smuzhiyun  *    either when the driver's tasklet is scheduled, or when an irq is
49*4882a593Smuzhiyun  *    raised by the hardware.  The tasklet can be scheduled at any time.
50*4882a593Smuzhiyun  *    The request method in particular schedules the tasklet when a new
51*4882a593Smuzhiyun  *    request has been indicated by the block layer.  Once started, the
52*4882a593Smuzhiyun  *    FSM proceeds as far as it can processing the request until it
53*4882a593Smuzhiyun  *    needs to wait on a hardware event.  At this point, it must yield execution.
54*4882a593Smuzhiyun  *
55*4882a593Smuzhiyun  *    A state has two options when yielding execution:
56*4882a593Smuzhiyun  *    1. ace_fsm_yield()
57*4882a593Smuzhiyun  *       - Call if need to poll for event.
58*4882a593Smuzhiyun  *       - clears the fsm_continue flag to exit the processing loop
59*4882a593Smuzhiyun  *       - reschedules the tasklet to run again as soon as possible
60*4882a593Smuzhiyun  *    2. ace_fsm_yieldirq()
61*4882a593Smuzhiyun  *       - Call if an irq is expected from the HW
62*4882a593Smuzhiyun  *       - clears the fsm_continue flag to exit the processing loop
63*4882a593Smuzhiyun  *       - does not reschedule the tasklet so the FSM will not be processed
64*4882a593Smuzhiyun  *         again until an irq is received.
65*4882a593Smuzhiyun  *    After calling a yield function, the state must return control back
66*4882a593Smuzhiyun  *    to the FSM main loop.
67*4882a593Smuzhiyun  *
68*4882a593Smuzhiyun  *    Additionally, the driver maintains a kernel timer which can process
69*4882a593Smuzhiyun  *    the FSM.  If the FSM gets stalled, typically due to a missed
70*4882a593Smuzhiyun  *    interrupt, then the kernel timer will expire and the driver can
71*4882a593Smuzhiyun  *    continue where it left off.
72*4882a593Smuzhiyun  *
73*4882a593Smuzhiyun  * To Do:
74*4882a593Smuzhiyun  *    - Add FPGA configuration control interface.
75*4882a593Smuzhiyun  *    - Request major number from lanana
76*4882a593Smuzhiyun  */
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun #undef DEBUG
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun #include <linux/module.h>
81*4882a593Smuzhiyun #include <linux/ctype.h>
82*4882a593Smuzhiyun #include <linux/init.h>
83*4882a593Smuzhiyun #include <linux/interrupt.h>
84*4882a593Smuzhiyun #include <linux/errno.h>
85*4882a593Smuzhiyun #include <linux/kernel.h>
86*4882a593Smuzhiyun #include <linux/delay.h>
87*4882a593Smuzhiyun #include <linux/slab.h>
88*4882a593Smuzhiyun #include <linux/blk-mq.h>
89*4882a593Smuzhiyun #include <linux/mutex.h>
90*4882a593Smuzhiyun #include <linux/ata.h>
91*4882a593Smuzhiyun #include <linux/hdreg.h>
92*4882a593Smuzhiyun #include <linux/platform_device.h>
93*4882a593Smuzhiyun #if defined(CONFIG_OF)
94*4882a593Smuzhiyun #include <linux/of_address.h>
95*4882a593Smuzhiyun #include <linux/of_device.h>
96*4882a593Smuzhiyun #include <linux/of_platform.h>
97*4882a593Smuzhiyun #endif
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
100*4882a593Smuzhiyun MODULE_DESCRIPTION("Xilinx SystemACE device driver");
101*4882a593Smuzhiyun MODULE_LICENSE("GPL");
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun /* SystemACE register definitions */
104*4882a593Smuzhiyun #define ACE_BUSMODE (0x00)
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun #define ACE_STATUS (0x04)
107*4882a593Smuzhiyun #define ACE_STATUS_CFGLOCK      (0x00000001)
108*4882a593Smuzhiyun #define ACE_STATUS_MPULOCK      (0x00000002)
109*4882a593Smuzhiyun #define ACE_STATUS_CFGERROR     (0x00000004)	/* config controller error */
110*4882a593Smuzhiyun #define ACE_STATUS_CFCERROR     (0x00000008)	/* CF controller error */
111*4882a593Smuzhiyun #define ACE_STATUS_CFDETECT     (0x00000010)
112*4882a593Smuzhiyun #define ACE_STATUS_DATABUFRDY   (0x00000020)
113*4882a593Smuzhiyun #define ACE_STATUS_DATABUFMODE  (0x00000040)
114*4882a593Smuzhiyun #define ACE_STATUS_CFGDONE      (0x00000080)
115*4882a593Smuzhiyun #define ACE_STATUS_RDYFORCFCMD  (0x00000100)
116*4882a593Smuzhiyun #define ACE_STATUS_CFGMODEPIN   (0x00000200)
117*4882a593Smuzhiyun #define ACE_STATUS_CFGADDR_MASK (0x0000e000)
118*4882a593Smuzhiyun #define ACE_STATUS_CFBSY        (0x00020000)
119*4882a593Smuzhiyun #define ACE_STATUS_CFRDY        (0x00040000)
120*4882a593Smuzhiyun #define ACE_STATUS_CFDWF        (0x00080000)
121*4882a593Smuzhiyun #define ACE_STATUS_CFDSC        (0x00100000)
122*4882a593Smuzhiyun #define ACE_STATUS_CFDRQ        (0x00200000)
123*4882a593Smuzhiyun #define ACE_STATUS_CFCORR       (0x00400000)
124*4882a593Smuzhiyun #define ACE_STATUS_CFERR        (0x00800000)
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun #define ACE_ERROR (0x08)
127*4882a593Smuzhiyun #define ACE_CFGLBA (0x0c)
128*4882a593Smuzhiyun #define ACE_MPULBA (0x10)
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun #define ACE_SECCNTCMD (0x14)
131*4882a593Smuzhiyun #define ACE_SECCNTCMD_RESET      (0x0100)
132*4882a593Smuzhiyun #define ACE_SECCNTCMD_IDENTIFY   (0x0200)
133*4882a593Smuzhiyun #define ACE_SECCNTCMD_READ_DATA  (0x0300)
134*4882a593Smuzhiyun #define ACE_SECCNTCMD_WRITE_DATA (0x0400)
135*4882a593Smuzhiyun #define ACE_SECCNTCMD_ABORT      (0x0600)
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun #define ACE_VERSION (0x16)
138*4882a593Smuzhiyun #define ACE_VERSION_REVISION_MASK (0x00FF)
139*4882a593Smuzhiyun #define ACE_VERSION_MINOR_MASK    (0x0F00)
140*4882a593Smuzhiyun #define ACE_VERSION_MAJOR_MASK    (0xF000)
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun #define ACE_CTRL (0x18)
143*4882a593Smuzhiyun #define ACE_CTRL_FORCELOCKREQ   (0x0001)
144*4882a593Smuzhiyun #define ACE_CTRL_LOCKREQ        (0x0002)
145*4882a593Smuzhiyun #define ACE_CTRL_FORCECFGADDR   (0x0004)
146*4882a593Smuzhiyun #define ACE_CTRL_FORCECFGMODE   (0x0008)
147*4882a593Smuzhiyun #define ACE_CTRL_CFGMODE        (0x0010)
148*4882a593Smuzhiyun #define ACE_CTRL_CFGSTART       (0x0020)
149*4882a593Smuzhiyun #define ACE_CTRL_CFGSEL         (0x0040)
150*4882a593Smuzhiyun #define ACE_CTRL_CFGRESET       (0x0080)
151*4882a593Smuzhiyun #define ACE_CTRL_DATABUFRDYIRQ  (0x0100)
152*4882a593Smuzhiyun #define ACE_CTRL_ERRORIRQ       (0x0200)
153*4882a593Smuzhiyun #define ACE_CTRL_CFGDONEIRQ     (0x0400)
154*4882a593Smuzhiyun #define ACE_CTRL_RESETIRQ       (0x0800)
155*4882a593Smuzhiyun #define ACE_CTRL_CFGPROG        (0x1000)
156*4882a593Smuzhiyun #define ACE_CTRL_CFGADDR_MASK   (0xe000)
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun #define ACE_FATSTAT (0x1c)
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun #define ACE_NUM_MINORS 16
161*4882a593Smuzhiyun #define ACE_SECTOR_SIZE (512)
162*4882a593Smuzhiyun #define ACE_FIFO_SIZE (32)
163*4882a593Smuzhiyun #define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun #define ACE_BUS_WIDTH_8  0
166*4882a593Smuzhiyun #define ACE_BUS_WIDTH_16 1
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun struct ace_reg_ops;
169*4882a593Smuzhiyun 
struct ace_device {
	/* driver state data */
	int id;			/* instance number of this SystemACE device */
	int media_change;	/* flag: CF card was removed or replaced */
	int users;		/* presumably an open count -- TODO confirm at open/release */
	struct list_head list;

	/* finite state machine data */
	struct tasklet_struct fsm_tasklet;	/* runs the FSM outside irq handlers */
	uint fsm_task;		/* Current activity (ACE_TASK_*) */
	uint fsm_state;		/* Current state (ACE_FSM_STATE_*) */
	uint fsm_continue_flag;	/* cleared to exit FSM mainloop */
	uint fsm_iter_num;	/* iteration counter, used in debug output */
	struct timer_list stall_timer;	/* restarts the FSM after a missed irq */

	/* Transfer state/result, use for both id and block request */
	struct request *req;	/* request being processed */
	void *data_ptr;		/* pointer to I/O buffer; advanced by datain/dataout */
	int data_count;		/* number of buffers remaining */
	int data_result;	/* Result of transfer; 0 := success */

	int id_req_count;	/* count of outstanding identify requests */
	int id_result;		/* result of identify; set to -EIO on CF removal */
	struct completion id_completion;	/* used when id req finishes */
	int in_irq;

	/* Details of hardware device */
	resource_size_t physaddr;	/* physical base of the register window */
	void __iomem *baseaddr;		/* mapped register base; data FIFO at +0x40 */
	int irq;		/* irq number; <= 0 means poll-only operation */
	int bus_width;		/* 0 := 8 bit; 1 := 16 bit */
	struct ace_reg_ops *reg_ops;	/* accessors matching the bus attachment */
	int lock_count;

	/* Block device data structures */
	spinlock_t lock;	/* protects FSM state and rq_list */
	struct device *dev;
	struct request_queue *queue;
	struct gendisk *gd;
	struct blk_mq_tag_set tag_set;
	struct list_head rq_list;	/* requests queued for the FSM to process */

	/* Inserted CF card parameters */
	u16 cf_id[ATA_ID_WORDS];	/* raw ATA IDENTIFY data read from the card */
};
215*4882a593Smuzhiyun 
/* Driver-wide mutex; presumably serializes block-device open/release/ioctl
 * paths -- TODO confirm against the (not shown) fops implementations */
static DEFINE_MUTEX(xsysace_mutex);
/* Block major number used for all instances; 0 presumably requests dynamic
 * allocation at registration time -- TODO confirm at register_blkdev() */
static int ace_major;
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun /* ---------------------------------------------------------------------
220*4882a593Smuzhiyun  * Low level register access
221*4882a593Smuzhiyun  */
222*4882a593Smuzhiyun 
/*
 * Register accessor vtable.  One implementation exists per bus attachment
 * (8 bit, 16 bit big endian, 16 bit little endian) so the rest of the
 * driver stays bus-width and endian agnostic.
 */
struct ace_reg_ops {
	u16(*in) (struct ace_device * ace, int reg);	/* read a 16-bit register */
	void (*out) (struct ace_device * ace, int reg, u16 val);	/* write a 16-bit register */
	void (*datain) (struct ace_device * ace);	/* read one FIFO buffer into data_ptr */
	void (*dataout) (struct ace_device * ace);	/* write one FIFO buffer from data_ptr */
};
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun /* 8 Bit bus width */
ace_in_8(struct ace_device * ace,int reg)231*4882a593Smuzhiyun static u16 ace_in_8(struct ace_device *ace, int reg)
232*4882a593Smuzhiyun {
233*4882a593Smuzhiyun 	void __iomem *r = ace->baseaddr + reg;
234*4882a593Smuzhiyun 	return in_8(r) | (in_8(r + 1) << 8);
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun 
static void ace_out_8(struct ace_device *ace, int reg, u16 val)
{
	/* Write the low byte first, then the high byte, mirroring ace_in_8() */
	void __iomem *base = ace->baseaddr + reg;

	out_8(base, val & 0xff);
	out_8(base + 1, (val >> 8) & 0xff);
}
243*4882a593Smuzhiyun 
ace_datain_8(struct ace_device * ace)244*4882a593Smuzhiyun static void ace_datain_8(struct ace_device *ace)
245*4882a593Smuzhiyun {
246*4882a593Smuzhiyun 	void __iomem *r = ace->baseaddr + 0x40;
247*4882a593Smuzhiyun 	u8 *dst = ace->data_ptr;
248*4882a593Smuzhiyun 	int i = ACE_FIFO_SIZE;
249*4882a593Smuzhiyun 	while (i--)
250*4882a593Smuzhiyun 		*dst++ = in_8(r++);
251*4882a593Smuzhiyun 	ace->data_ptr = dst;
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun 
ace_dataout_8(struct ace_device * ace)254*4882a593Smuzhiyun static void ace_dataout_8(struct ace_device *ace)
255*4882a593Smuzhiyun {
256*4882a593Smuzhiyun 	void __iomem *r = ace->baseaddr + 0x40;
257*4882a593Smuzhiyun 	u8 *src = ace->data_ptr;
258*4882a593Smuzhiyun 	int i = ACE_FIFO_SIZE;
259*4882a593Smuzhiyun 	while (i--)
260*4882a593Smuzhiyun 		out_8(r++, *src++);
261*4882a593Smuzhiyun 	ace->data_ptr = src;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
/* Accessor bundle for an 8-bit bus attachment */
static struct ace_reg_ops ace_reg_8_ops = {
	.in = ace_in_8,
	.out = ace_out_8,
	.datain = ace_datain_8,
	.dataout = ace_dataout_8,
};
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun /* 16 bit big endian bus attachment */
ace_in_be16(struct ace_device * ace,int reg)272*4882a593Smuzhiyun static u16 ace_in_be16(struct ace_device *ace, int reg)
273*4882a593Smuzhiyun {
274*4882a593Smuzhiyun 	return in_be16(ace->baseaddr + reg);
275*4882a593Smuzhiyun }
276*4882a593Smuzhiyun 
static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
{
	void __iomem *addr = ace->baseaddr + reg;

	out_be16(addr, val);
}
281*4882a593Smuzhiyun 
ace_datain_be16(struct ace_device * ace)282*4882a593Smuzhiyun static void ace_datain_be16(struct ace_device *ace)
283*4882a593Smuzhiyun {
284*4882a593Smuzhiyun 	int i = ACE_FIFO_SIZE / 2;
285*4882a593Smuzhiyun 	u16 *dst = ace->data_ptr;
286*4882a593Smuzhiyun 	while (i--)
287*4882a593Smuzhiyun 		*dst++ = in_le16(ace->baseaddr + 0x40);
288*4882a593Smuzhiyun 	ace->data_ptr = dst;
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun 
ace_dataout_be16(struct ace_device * ace)291*4882a593Smuzhiyun static void ace_dataout_be16(struct ace_device *ace)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	int i = ACE_FIFO_SIZE / 2;
294*4882a593Smuzhiyun 	u16 *src = ace->data_ptr;
295*4882a593Smuzhiyun 	while (i--)
296*4882a593Smuzhiyun 		out_le16(ace->baseaddr + 0x40, *src++);
297*4882a593Smuzhiyun 	ace->data_ptr = src;
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun /* 16 bit little endian bus attachment */
ace_in_le16(struct ace_device * ace,int reg)301*4882a593Smuzhiyun static u16 ace_in_le16(struct ace_device *ace, int reg)
302*4882a593Smuzhiyun {
303*4882a593Smuzhiyun 	return in_le16(ace->baseaddr + reg);
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun 
static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
{
	void __iomem *addr = ace->baseaddr + reg;

	out_le16(addr, val);
}
310*4882a593Smuzhiyun 
ace_datain_le16(struct ace_device * ace)311*4882a593Smuzhiyun static void ace_datain_le16(struct ace_device *ace)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun 	int i = ACE_FIFO_SIZE / 2;
314*4882a593Smuzhiyun 	u16 *dst = ace->data_ptr;
315*4882a593Smuzhiyun 	while (i--)
316*4882a593Smuzhiyun 		*dst++ = in_be16(ace->baseaddr + 0x40);
317*4882a593Smuzhiyun 	ace->data_ptr = dst;
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
ace_dataout_le16(struct ace_device * ace)320*4882a593Smuzhiyun static void ace_dataout_le16(struct ace_device *ace)
321*4882a593Smuzhiyun {
322*4882a593Smuzhiyun 	int i = ACE_FIFO_SIZE / 2;
323*4882a593Smuzhiyun 	u16 *src = ace->data_ptr;
324*4882a593Smuzhiyun 	while (i--)
325*4882a593Smuzhiyun 		out_be16(ace->baseaddr + 0x40, *src++);
326*4882a593Smuzhiyun 	ace->data_ptr = src;
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun 
/* Accessor bundle for a 16-bit big-endian bus attachment */
static struct ace_reg_ops ace_reg_be16_ops = {
	.in = ace_in_be16,
	.out = ace_out_be16,
	.datain = ace_datain_be16,
	.dataout = ace_dataout_be16,
};
335*4882a593Smuzhiyun 
/* Accessor bundle for a 16-bit little-endian bus attachment */
static struct ace_reg_ops ace_reg_le16_ops = {
	.in = ace_in_le16,
	.out = ace_out_le16,
	.datain = ace_datain_le16,
	.dataout = ace_dataout_le16,
};
342*4882a593Smuzhiyun 
ace_in(struct ace_device * ace,int reg)343*4882a593Smuzhiyun static inline u16 ace_in(struct ace_device *ace, int reg)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun 	return ace->reg_ops->in(ace, reg);
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun 
/* Read a 32-bit register as two 16-bit accesses: low half at 'reg', high
 * half at 'reg' + 2.  NOTE(review): C leaves the evaluation order of the
 * two ace_in() calls unspecified; assumed harmless for these registers --
 * confirm no register latches state on a partial read. */
static inline u32 ace_in32(struct ace_device *ace, int reg)
{
	return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
}
352*4882a593Smuzhiyun 
static inline void ace_out(struct ace_device *ace, int reg, u16 val)
{
	/* Dispatch through the bus-attachment specific accessor table */
	struct ace_reg_ops *ops = ace->reg_ops;

	ops->out(ace, reg, val);
}
357*4882a593Smuzhiyun 
static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
{
	/* Write the low halfword first, then the high halfword at reg + 2 */
	ace_out(ace, reg, val & 0xffff);
	ace_out(ace, reg + 2, (val >> 16) & 0xffff);
}
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun /* ---------------------------------------------------------------------
365*4882a593Smuzhiyun  * Debug support functions
366*4882a593Smuzhiyun  */
367*4882a593Smuzhiyun 
#if defined(DEBUG)
/*
 * Hex + ASCII dump of a memory region to the kernel log, 16 bytes per
 * line.  Fixes two defects in the previous version: it read a full
 * 16-byte row even when 'len' was not a multiple of 16 (out-of-bounds
 * read), and it passed a plain (possibly negative) char to isprint(),
 * which is undefined behaviour.
 */
static void ace_dump_mem(void *base, int len)
{
	const char *ptr = base;
	int i, j;

	for (i = 0; i < len; i += 16) {
		printk(KERN_INFO "%.8x:", i);
		for (j = 0; j < 16; j++) {
			if (!(j % 4))
				printk(" ");
			if (i + j < len)
				printk("%.2x", ptr[i + j]);
			else
				printk("  ");	/* pad short final row */
		}
		printk(" ");
		for (j = 0; j < 16 && i + j < len; j++)
			printk("%c", isprint((unsigned char)ptr[i + j]) ?
			       ptr[i + j] : '.');
		printk("\n");
	}
}
#else
/* No-op stub when DEBUG is not defined */
static inline void ace_dump_mem(void *base, int len)
{
}
#endif
392*4882a593Smuzhiyun 
/* Log a snapshot of the SystemACE register set, one call per snapshot so
 * the three lines cannot interleave with other printk output. */
static void ace_dump_regs(struct ace_device *ace)
{
	dev_info(ace->dev,
		 "    ctrl:  %.8x  seccnt/cmd: %.4x      ver:%.4x\n"
		 "    status:%.8x  mpu_lba:%.8x  busmode:%4x\n"
		 "    error: %.8x  cfg_lba:%.8x  fatstat:%.4x\n",
		 ace_in32(ace, ACE_CTRL),
		 ace_in(ace, ACE_SECCNTCMD),
		 ace_in(ace, ACE_VERSION),
		 ace_in32(ace, ACE_STATUS),
		 ace_in32(ace, ACE_MPULBA),
		 ace_in(ace, ACE_BUSMODE),
		 ace_in32(ace, ACE_ERROR),
		 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
}
408*4882a593Smuzhiyun 
static void ace_fix_driveid(u16 *id)
{
#if defined(__BIG_ENDIAN)
	int i;

	/* IDENTIFY words arrive little-endian; byte-swap each in place.
	 * On little-endian hosts the data is already in CPU order. */
	for (i = 0; i < ATA_ID_WORDS; i++)
		id[i] = le16_to_cpu(id[i]);
#endif
}
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun /* ---------------------------------------------------------------------
421*4882a593Smuzhiyun  * Finite State Machine (FSM) implementation
422*4882a593Smuzhiyun  */
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun /* FSM tasks; used to direct state transitions */
425*4882a593Smuzhiyun #define ACE_TASK_IDLE      0
426*4882a593Smuzhiyun #define ACE_TASK_IDENTIFY  1
427*4882a593Smuzhiyun #define ACE_TASK_READ      2
428*4882a593Smuzhiyun #define ACE_TASK_WRITE     3
429*4882a593Smuzhiyun #define ACE_FSM_NUM_TASKS  4
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun /* FSM state definitions */
432*4882a593Smuzhiyun #define ACE_FSM_STATE_IDLE               0
433*4882a593Smuzhiyun #define ACE_FSM_STATE_REQ_LOCK           1
434*4882a593Smuzhiyun #define ACE_FSM_STATE_WAIT_LOCK          2
435*4882a593Smuzhiyun #define ACE_FSM_STATE_WAIT_CFREADY       3
436*4882a593Smuzhiyun #define ACE_FSM_STATE_IDENTIFY_PREPARE   4
437*4882a593Smuzhiyun #define ACE_FSM_STATE_IDENTIFY_TRANSFER  5
438*4882a593Smuzhiyun #define ACE_FSM_STATE_IDENTIFY_COMPLETE  6
439*4882a593Smuzhiyun #define ACE_FSM_STATE_REQ_PREPARE        7
440*4882a593Smuzhiyun #define ACE_FSM_STATE_REQ_TRANSFER       8
441*4882a593Smuzhiyun #define ACE_FSM_STATE_REQ_COMPLETE       9
442*4882a593Smuzhiyun #define ACE_FSM_STATE_ERROR             10
443*4882a593Smuzhiyun #define ACE_FSM_NUM_STATES              11
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun /* Set flag to exit FSM loop and reschedule tasklet */
static inline void ace_fsm_yieldpoll(struct ace_device *ace)
{
	/* re-arm the tasklet so the FSM runs again as soon as possible */
	tasklet_schedule(&ace->fsm_tasklet);
	/* terminate the current ace_fsm_dostate() processing loop */
	ace->fsm_continue_flag = 0;
}
451*4882a593Smuzhiyun 
/* Yield because a condition must be polled for: exit the FSM loop but
 * reschedule immediately via the tasklet. */
static inline void ace_fsm_yield(struct ace_device *ace)
{
	dev_dbg(ace->dev, "%s()\n", __func__);
	ace_fsm_yieldpoll(ace);
}
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun /* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
ace_fsm_yieldirq(struct ace_device * ace)459*4882a593Smuzhiyun static inline void ace_fsm_yieldirq(struct ace_device *ace)
460*4882a593Smuzhiyun {
461*4882a593Smuzhiyun 	dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	if (ace->irq > 0)
464*4882a593Smuzhiyun 		ace->fsm_continue_flag = 0;
465*4882a593Smuzhiyun 	else
466*4882a593Smuzhiyun 		ace_fsm_yieldpoll(ace);
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun 
ace_has_next_request(struct request_queue * q)469*4882a593Smuzhiyun static bool ace_has_next_request(struct request_queue *q)
470*4882a593Smuzhiyun {
471*4882a593Smuzhiyun 	struct ace_device *ace = q->queuedata;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	return !list_empty(&ace->rq_list);
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun /* Get the next read/write request; ending requests that we don't handle */
ace_get_next_request(struct request_queue * q)477*4882a593Smuzhiyun static struct request *ace_get_next_request(struct request_queue *q)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun 	struct ace_device *ace = q->queuedata;
480*4882a593Smuzhiyun 	struct request *rq;
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist);
483*4882a593Smuzhiyun 	if (rq) {
484*4882a593Smuzhiyun 		list_del_init(&rq->queuelist);
485*4882a593Smuzhiyun 		blk_mq_start_request(rq);
486*4882a593Smuzhiyun 	}
487*4882a593Smuzhiyun 
488*4882a593Smuzhiyun 	return NULL;
489*4882a593Smuzhiyun }
490*4882a593Smuzhiyun 
ace_fsm_dostate(struct ace_device * ace)491*4882a593Smuzhiyun static void ace_fsm_dostate(struct ace_device *ace)
492*4882a593Smuzhiyun {
493*4882a593Smuzhiyun 	struct request *req;
494*4882a593Smuzhiyun 	u32 status;
495*4882a593Smuzhiyun 	u16 val;
496*4882a593Smuzhiyun 	int count;
497*4882a593Smuzhiyun 
498*4882a593Smuzhiyun #if defined(DEBUG)
499*4882a593Smuzhiyun 	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
500*4882a593Smuzhiyun 		ace->fsm_state, ace->id_req_count);
501*4882a593Smuzhiyun #endif
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun 	/* Verify that there is actually a CF in the slot. If not, then
504*4882a593Smuzhiyun 	 * bail out back to the idle state and wake up all the waiters */
505*4882a593Smuzhiyun 	status = ace_in32(ace, ACE_STATUS);
506*4882a593Smuzhiyun 	if ((status & ACE_STATUS_CFDETECT) == 0) {
507*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_IDLE;
508*4882a593Smuzhiyun 		ace->media_change = 1;
509*4882a593Smuzhiyun 		set_capacity(ace->gd, 0);
510*4882a593Smuzhiyun 		dev_info(ace->dev, "No CF in slot\n");
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 		/* Drop all in-flight and pending requests */
513*4882a593Smuzhiyun 		if (ace->req) {
514*4882a593Smuzhiyun 			blk_mq_end_request(ace->req, BLK_STS_IOERR);
515*4882a593Smuzhiyun 			ace->req = NULL;
516*4882a593Smuzhiyun 		}
517*4882a593Smuzhiyun 		while ((req = ace_get_next_request(ace->queue)) != NULL)
518*4882a593Smuzhiyun 			blk_mq_end_request(req, BLK_STS_IOERR);
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 		/* Drop back to IDLE state and notify waiters */
521*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_IDLE;
522*4882a593Smuzhiyun 		ace->id_result = -EIO;
523*4882a593Smuzhiyun 		while (ace->id_req_count) {
524*4882a593Smuzhiyun 			complete(&ace->id_completion);
525*4882a593Smuzhiyun 			ace->id_req_count--;
526*4882a593Smuzhiyun 		}
527*4882a593Smuzhiyun 	}
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	switch (ace->fsm_state) {
530*4882a593Smuzhiyun 	case ACE_FSM_STATE_IDLE:
531*4882a593Smuzhiyun 		/* See if there is anything to do */
532*4882a593Smuzhiyun 		if (ace->id_req_count || ace_has_next_request(ace->queue)) {
533*4882a593Smuzhiyun 			ace->fsm_iter_num++;
534*4882a593Smuzhiyun 			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
535*4882a593Smuzhiyun 			mod_timer(&ace->stall_timer, jiffies + HZ);
536*4882a593Smuzhiyun 			if (!timer_pending(&ace->stall_timer))
537*4882a593Smuzhiyun 				add_timer(&ace->stall_timer);
538*4882a593Smuzhiyun 			break;
539*4882a593Smuzhiyun 		}
540*4882a593Smuzhiyun 		del_timer(&ace->stall_timer);
541*4882a593Smuzhiyun 		ace->fsm_continue_flag = 0;
542*4882a593Smuzhiyun 		break;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	case ACE_FSM_STATE_REQ_LOCK:
545*4882a593Smuzhiyun 		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
546*4882a593Smuzhiyun 			/* Already have the lock, jump to next state */
547*4882a593Smuzhiyun 			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
548*4882a593Smuzhiyun 			break;
549*4882a593Smuzhiyun 		}
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 		/* Request the lock */
552*4882a593Smuzhiyun 		val = ace_in(ace, ACE_CTRL);
553*4882a593Smuzhiyun 		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
554*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
555*4882a593Smuzhiyun 		break;
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	case ACE_FSM_STATE_WAIT_LOCK:
558*4882a593Smuzhiyun 		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
559*4882a593Smuzhiyun 			/* got the lock; move to next state */
560*4882a593Smuzhiyun 			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
561*4882a593Smuzhiyun 			break;
562*4882a593Smuzhiyun 		}
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 		/* wait a bit for the lock */
565*4882a593Smuzhiyun 		ace_fsm_yield(ace);
566*4882a593Smuzhiyun 		break;
567*4882a593Smuzhiyun 
568*4882a593Smuzhiyun 	case ACE_FSM_STATE_WAIT_CFREADY:
569*4882a593Smuzhiyun 		status = ace_in32(ace, ACE_STATUS);
570*4882a593Smuzhiyun 		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
571*4882a593Smuzhiyun 		    (status & ACE_STATUS_CFBSY)) {
572*4882a593Smuzhiyun 			/* CF card isn't ready; it needs to be polled */
573*4882a593Smuzhiyun 			ace_fsm_yield(ace);
574*4882a593Smuzhiyun 			break;
575*4882a593Smuzhiyun 		}
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 		/* Device is ready for command; determine what to do next */
578*4882a593Smuzhiyun 		if (ace->id_req_count)
579*4882a593Smuzhiyun 			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
580*4882a593Smuzhiyun 		else
581*4882a593Smuzhiyun 			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
582*4882a593Smuzhiyun 		break;
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	case ACE_FSM_STATE_IDENTIFY_PREPARE:
585*4882a593Smuzhiyun 		/* Send identify command */
586*4882a593Smuzhiyun 		ace->fsm_task = ACE_TASK_IDENTIFY;
587*4882a593Smuzhiyun 		ace->data_ptr = ace->cf_id;
588*4882a593Smuzhiyun 		ace->data_count = ACE_BUF_PER_SECTOR;
589*4882a593Smuzhiyun 		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 		/* As per datasheet, put config controller in reset */
592*4882a593Smuzhiyun 		val = ace_in(ace, ACE_CTRL);
593*4882a593Smuzhiyun 		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 		/* irq handler takes over from this point; wait for the
596*4882a593Smuzhiyun 		 * transfer to complete */
597*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
598*4882a593Smuzhiyun 		ace_fsm_yieldirq(ace);
599*4882a593Smuzhiyun 		break;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
602*4882a593Smuzhiyun 		/* Check that the sysace is ready to receive data */
603*4882a593Smuzhiyun 		status = ace_in32(ace, ACE_STATUS);
604*4882a593Smuzhiyun 		if (status & ACE_STATUS_CFBSY) {
605*4882a593Smuzhiyun 			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
606*4882a593Smuzhiyun 				ace->fsm_task, ace->fsm_iter_num,
607*4882a593Smuzhiyun 				ace->data_count);
608*4882a593Smuzhiyun 			ace_fsm_yield(ace);
609*4882a593Smuzhiyun 			break;
610*4882a593Smuzhiyun 		}
611*4882a593Smuzhiyun 		if (!(status & ACE_STATUS_DATABUFRDY)) {
612*4882a593Smuzhiyun 			ace_fsm_yield(ace);
613*4882a593Smuzhiyun 			break;
614*4882a593Smuzhiyun 		}
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 		/* Transfer the next buffer */
617*4882a593Smuzhiyun 		ace->reg_ops->datain(ace);
618*4882a593Smuzhiyun 		ace->data_count--;
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun 		/* If there are still buffers to be transfers; jump out here */
621*4882a593Smuzhiyun 		if (ace->data_count != 0) {
622*4882a593Smuzhiyun 			ace_fsm_yieldirq(ace);
623*4882a593Smuzhiyun 			break;
624*4882a593Smuzhiyun 		}
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 		/* transfer finished; kick state machine */
627*4882a593Smuzhiyun 		dev_dbg(ace->dev, "identify finished\n");
628*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
629*4882a593Smuzhiyun 		break;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
632*4882a593Smuzhiyun 		ace_fix_driveid(ace->cf_id);
633*4882a593Smuzhiyun 		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 		if (ace->data_result) {
636*4882a593Smuzhiyun 			/* Error occurred, disable the disk */
637*4882a593Smuzhiyun 			ace->media_change = 1;
638*4882a593Smuzhiyun 			set_capacity(ace->gd, 0);
639*4882a593Smuzhiyun 			dev_err(ace->dev, "error fetching CF id (%i)\n",
640*4882a593Smuzhiyun 				ace->data_result);
641*4882a593Smuzhiyun 		} else {
642*4882a593Smuzhiyun 			ace->media_change = 0;
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 			/* Record disk parameters */
645*4882a593Smuzhiyun 			set_capacity(ace->gd,
646*4882a593Smuzhiyun 				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
647*4882a593Smuzhiyun 			dev_info(ace->dev, "capacity: %i sectors\n",
648*4882a593Smuzhiyun 				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
649*4882a593Smuzhiyun 		}
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 		/* We're done, drop to IDLE state and notify waiters */
652*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_IDLE;
653*4882a593Smuzhiyun 		ace->id_result = ace->data_result;
654*4882a593Smuzhiyun 		while (ace->id_req_count) {
655*4882a593Smuzhiyun 			complete(&ace->id_completion);
656*4882a593Smuzhiyun 			ace->id_req_count--;
657*4882a593Smuzhiyun 		}
658*4882a593Smuzhiyun 		break;
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	case ACE_FSM_STATE_REQ_PREPARE:
661*4882a593Smuzhiyun 		req = ace_get_next_request(ace->queue);
662*4882a593Smuzhiyun 		if (!req) {
663*4882a593Smuzhiyun 			ace->fsm_state = ACE_FSM_STATE_IDLE;
664*4882a593Smuzhiyun 			break;
665*4882a593Smuzhiyun 		}
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 		/* Okay, it's a data request, set it up for transfer */
668*4882a593Smuzhiyun 		dev_dbg(ace->dev,
669*4882a593Smuzhiyun 			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
670*4882a593Smuzhiyun 			(unsigned long long)blk_rq_pos(req),
671*4882a593Smuzhiyun 			blk_rq_sectors(req), blk_rq_cur_sectors(req),
672*4882a593Smuzhiyun 			rq_data_dir(req));
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 		ace->req = req;
675*4882a593Smuzhiyun 		ace->data_ptr = bio_data(req->bio);
676*4882a593Smuzhiyun 		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
677*4882a593Smuzhiyun 		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 		count = blk_rq_sectors(req);
680*4882a593Smuzhiyun 		if (rq_data_dir(req)) {
681*4882a593Smuzhiyun 			/* Kick off write request */
682*4882a593Smuzhiyun 			dev_dbg(ace->dev, "write data\n");
683*4882a593Smuzhiyun 			ace->fsm_task = ACE_TASK_WRITE;
684*4882a593Smuzhiyun 			ace_out(ace, ACE_SECCNTCMD,
685*4882a593Smuzhiyun 				count | ACE_SECCNTCMD_WRITE_DATA);
686*4882a593Smuzhiyun 		} else {
687*4882a593Smuzhiyun 			/* Kick off read request */
688*4882a593Smuzhiyun 			dev_dbg(ace->dev, "read data\n");
689*4882a593Smuzhiyun 			ace->fsm_task = ACE_TASK_READ;
690*4882a593Smuzhiyun 			ace_out(ace, ACE_SECCNTCMD,
691*4882a593Smuzhiyun 				count | ACE_SECCNTCMD_READ_DATA);
692*4882a593Smuzhiyun 		}
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 		/* As per datasheet, put config controller in reset */
695*4882a593Smuzhiyun 		val = ace_in(ace, ACE_CTRL);
696*4882a593Smuzhiyun 		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 		/* Move to the transfer state.  The systemace will raise
699*4882a593Smuzhiyun 		 * an interrupt once there is something to do
700*4882a593Smuzhiyun 		 */
701*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
702*4882a593Smuzhiyun 		if (ace->fsm_task == ACE_TASK_READ)
703*4882a593Smuzhiyun 			ace_fsm_yieldirq(ace);	/* wait for data ready */
704*4882a593Smuzhiyun 		break;
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	case ACE_FSM_STATE_REQ_TRANSFER:
707*4882a593Smuzhiyun 		/* Check that the sysace is ready to receive data */
708*4882a593Smuzhiyun 		status = ace_in32(ace, ACE_STATUS);
709*4882a593Smuzhiyun 		if (status & ACE_STATUS_CFBSY) {
710*4882a593Smuzhiyun 			dev_dbg(ace->dev,
711*4882a593Smuzhiyun 				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
712*4882a593Smuzhiyun 				ace->fsm_task, ace->fsm_iter_num,
713*4882a593Smuzhiyun 				blk_rq_cur_sectors(ace->req) * 16,
714*4882a593Smuzhiyun 				ace->data_count, ace->in_irq);
715*4882a593Smuzhiyun 			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
716*4882a593Smuzhiyun 			break;
717*4882a593Smuzhiyun 		}
718*4882a593Smuzhiyun 		if (!(status & ACE_STATUS_DATABUFRDY)) {
719*4882a593Smuzhiyun 			dev_dbg(ace->dev,
720*4882a593Smuzhiyun 				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
721*4882a593Smuzhiyun 				ace->fsm_task, ace->fsm_iter_num,
722*4882a593Smuzhiyun 				blk_rq_cur_sectors(ace->req) * 16,
723*4882a593Smuzhiyun 				ace->data_count, ace->in_irq);
724*4882a593Smuzhiyun 			ace_fsm_yieldirq(ace);
725*4882a593Smuzhiyun 			break;
726*4882a593Smuzhiyun 		}
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 		/* Transfer the next buffer */
729*4882a593Smuzhiyun 		if (ace->fsm_task == ACE_TASK_WRITE)
730*4882a593Smuzhiyun 			ace->reg_ops->dataout(ace);
731*4882a593Smuzhiyun 		else
732*4882a593Smuzhiyun 			ace->reg_ops->datain(ace);
733*4882a593Smuzhiyun 		ace->data_count--;
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 		/* If there are still buffers to be transfers; jump out here */
736*4882a593Smuzhiyun 		if (ace->data_count != 0) {
737*4882a593Smuzhiyun 			ace_fsm_yieldirq(ace);
738*4882a593Smuzhiyun 			break;
739*4882a593Smuzhiyun 		}
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 		/* bio finished; is there another one? */
742*4882a593Smuzhiyun 		if (blk_update_request(ace->req, BLK_STS_OK,
743*4882a593Smuzhiyun 		    blk_rq_cur_bytes(ace->req))) {
744*4882a593Smuzhiyun 			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
745*4882a593Smuzhiyun 			 *      blk_rq_sectors(ace->req),
746*4882a593Smuzhiyun 			 *      blk_rq_cur_sectors(ace->req));
747*4882a593Smuzhiyun 			 */
748*4882a593Smuzhiyun 			ace->data_ptr = bio_data(ace->req->bio);
749*4882a593Smuzhiyun 			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
750*4882a593Smuzhiyun 			ace_fsm_yieldirq(ace);
751*4882a593Smuzhiyun 			break;
752*4882a593Smuzhiyun 		}
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
755*4882a593Smuzhiyun 		break;
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	case ACE_FSM_STATE_REQ_COMPLETE:
758*4882a593Smuzhiyun 		ace->req = NULL;
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 		/* Finished request; go to idle state */
761*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_IDLE;
762*4882a593Smuzhiyun 		break;
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 	default:
765*4882a593Smuzhiyun 		ace->fsm_state = ACE_FSM_STATE_IDLE;
766*4882a593Smuzhiyun 		break;
767*4882a593Smuzhiyun 	}
768*4882a593Smuzhiyun }
769*4882a593Smuzhiyun 
ace_fsm_tasklet(unsigned long data)770*4882a593Smuzhiyun static void ace_fsm_tasklet(unsigned long data)
771*4882a593Smuzhiyun {
772*4882a593Smuzhiyun 	struct ace_device *ace = (void *)data;
773*4882a593Smuzhiyun 	unsigned long flags;
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	spin_lock_irqsave(&ace->lock, flags);
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	/* Loop over state machine until told to stop */
778*4882a593Smuzhiyun 	ace->fsm_continue_flag = 1;
779*4882a593Smuzhiyun 	while (ace->fsm_continue_flag)
780*4882a593Smuzhiyun 		ace_fsm_dostate(ace);
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ace->lock, flags);
783*4882a593Smuzhiyun }
784*4882a593Smuzhiyun 
ace_stall_timer(struct timer_list * t)785*4882a593Smuzhiyun static void ace_stall_timer(struct timer_list *t)
786*4882a593Smuzhiyun {
787*4882a593Smuzhiyun 	struct ace_device *ace = from_timer(ace, t, stall_timer);
788*4882a593Smuzhiyun 	unsigned long flags;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	dev_warn(ace->dev,
791*4882a593Smuzhiyun 		 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
792*4882a593Smuzhiyun 		 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
793*4882a593Smuzhiyun 		 ace->data_count);
794*4882a593Smuzhiyun 	spin_lock_irqsave(&ace->lock, flags);
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	/* Rearm the stall timer *before* entering FSM (which may then
797*4882a593Smuzhiyun 	 * delete the timer) */
798*4882a593Smuzhiyun 	mod_timer(&ace->stall_timer, jiffies + HZ);
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 	/* Loop over state machine until told to stop */
801*4882a593Smuzhiyun 	ace->fsm_continue_flag = 1;
802*4882a593Smuzhiyun 	while (ace->fsm_continue_flag)
803*4882a593Smuzhiyun 		ace_fsm_dostate(ace);
804*4882a593Smuzhiyun 
805*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ace->lock, flags);
806*4882a593Smuzhiyun }
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun /* ---------------------------------------------------------------------
809*4882a593Smuzhiyun  * Interrupt handling routines
810*4882a593Smuzhiyun  */
ace_interrupt_checkstate(struct ace_device * ace)811*4882a593Smuzhiyun static int ace_interrupt_checkstate(struct ace_device *ace)
812*4882a593Smuzhiyun {
813*4882a593Smuzhiyun 	u32 sreg = ace_in32(ace, ACE_STATUS);
814*4882a593Smuzhiyun 	u16 creg = ace_in(ace, ACE_CTRL);
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	/* Check for error occurrence */
817*4882a593Smuzhiyun 	if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
818*4882a593Smuzhiyun 	    (creg & ACE_CTRL_ERRORIRQ)) {
819*4882a593Smuzhiyun 		dev_err(ace->dev, "transfer failure\n");
820*4882a593Smuzhiyun 		ace_dump_regs(ace);
821*4882a593Smuzhiyun 		return -EIO;
822*4882a593Smuzhiyun 	}
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	return 0;
825*4882a593Smuzhiyun }
826*4882a593Smuzhiyun 
/*
 * SystemACE interrupt handler: acknowledge the interrupt, record any
 * error condition in ace->data_result, then run the FSM to continue
 * the in-flight transfer.  Always reports IRQ_HANDLED.
 */
static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	u16 creg;
	struct ace_device *ace = dev_id;

	/* be safe and get the lock */
	spin_lock(&ace->lock);
	ace->in_irq = 1;

	/* clear the interrupt: pulse the RESETIRQ bit high and then
	 * restore the original control register value */
	creg = ace_in(ace, ACE_CTRL);
	ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
	ace_out(ace, ACE_CTRL, creg);

	/* check for IO failures */
	if (ace_interrupt_checkstate(ace))
		ace->data_result = -EIO;

	/* An interrupt with no task in flight is unexpected; dump the
	 * register and FSM state to aid debugging, but carry on */
	if (ace->fsm_task == 0) {
		dev_err(ace->dev,
			"spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
			ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
			ace_in(ace, ACE_SECCNTCMD));
		dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
			ace->fsm_task, ace->fsm_state, ace->data_count);
	}

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	/* done with interrupt; drop the lock */
	ace->in_irq = 0;
	spin_unlock(&ace->lock);

	return IRQ_HANDLED;
}
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun /* ---------------------------------------------------------------------
867*4882a593Smuzhiyun  * Block ops
868*4882a593Smuzhiyun  */
ace_queue_rq(struct blk_mq_hw_ctx * hctx,const struct blk_mq_queue_data * bd)869*4882a593Smuzhiyun static blk_status_t ace_queue_rq(struct blk_mq_hw_ctx *hctx,
870*4882a593Smuzhiyun 				 const struct blk_mq_queue_data *bd)
871*4882a593Smuzhiyun {
872*4882a593Smuzhiyun 	struct ace_device *ace = hctx->queue->queuedata;
873*4882a593Smuzhiyun 	struct request *req = bd->rq;
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	if (blk_rq_is_passthrough(req)) {
876*4882a593Smuzhiyun 		blk_mq_start_request(req);
877*4882a593Smuzhiyun 		return BLK_STS_IOERR;
878*4882a593Smuzhiyun 	}
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun 	spin_lock_irq(&ace->lock);
881*4882a593Smuzhiyun 	list_add_tail(&req->queuelist, &ace->rq_list);
882*4882a593Smuzhiyun 	spin_unlock_irq(&ace->lock);
883*4882a593Smuzhiyun 
884*4882a593Smuzhiyun 	tasklet_schedule(&ace->fsm_tasklet);
885*4882a593Smuzhiyun 	return BLK_STS_OK;
886*4882a593Smuzhiyun }
887*4882a593Smuzhiyun 
ace_check_events(struct gendisk * gd,unsigned int clearing)888*4882a593Smuzhiyun static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
889*4882a593Smuzhiyun {
890*4882a593Smuzhiyun 	struct ace_device *ace = gd->private_data;
891*4882a593Smuzhiyun 	dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change);
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
894*4882a593Smuzhiyun }
895*4882a593Smuzhiyun 
/*
 * Request a fresh CF identify from the FSM and block until it is done.
 * Bumps id_req_count under the lock, schedules the FSM tasklet, then
 * sleeps on id_completion (signalled by the FSM's IDENTIFY_COMPLETE
 * state).  Must not be called from atomic context.
 */
static void ace_media_changed(struct ace_device *ace)
{
	unsigned long flags;

	dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");

	/* Queue one identify request for the FSM to service */
	spin_lock_irqsave(&ace->lock, flags);
	ace->id_req_count++;
	spin_unlock_irqrestore(&ace->lock, flags);

	/* Kick the FSM and wait for it to signal completion */
	tasklet_schedule(&ace->fsm_tasklet);
	wait_for_completion(&ace->id_completion);

	dev_dbg(ace->dev, "revalidate complete\n");
}
911*4882a593Smuzhiyun 
/*
 * Block device open(): count the new user and, if the media may have
 * changed since the last open, re-read the CF identify data.
 * Always succeeds.
 */
static int ace_open(struct block_device *bdev, fmode_t mode)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);

	mutex_lock(&xsysace_mutex);

	/* Account for the new opener under the device lock */
	spin_lock_irqsave(&ace->lock, flags);
	ace->users++;
	spin_unlock_irqrestore(&ace->lock, flags);

	/* Revalidate the card if a media change is pending */
	if (bdev_check_media_change(bdev) && ace->media_change)
		ace_media_changed(ace);

	mutex_unlock(&xsysace_mutex);

	return 0;
}
930*4882a593Smuzhiyun 
/*
 * Block device release(): drop one user reference.  When the last
 * opener goes away, clear the LOCKREQ bit in the control register.
 */
static void ace_release(struct gendisk *disk, fmode_t mode)
{
	struct ace_device *ace = disk->private_data;
	unsigned long flags;
	u16 val;

	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	if (--ace->users == 0) {
		/* Last user gone: drop the CF lock request */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
	}
	spin_unlock_irqrestore(&ace->lock, flags);
	mutex_unlock(&xsysace_mutex);
}
949*4882a593Smuzhiyun 
ace_getgeo(struct block_device * bdev,struct hd_geometry * geo)950*4882a593Smuzhiyun static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
951*4882a593Smuzhiyun {
952*4882a593Smuzhiyun 	struct ace_device *ace = bdev->bd_disk->private_data;
953*4882a593Smuzhiyun 	u16 *cf_id = ace->cf_id;
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	dev_dbg(ace->dev, "ace_getgeo()\n");
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 	geo->heads	= cf_id[ATA_ID_HEADS];
958*4882a593Smuzhiyun 	geo->sectors	= cf_id[ATA_ID_SECTORS];
959*4882a593Smuzhiyun 	geo->cylinders	= cf_id[ATA_ID_CYLS];
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 	return 0;
962*4882a593Smuzhiyun }
963*4882a593Smuzhiyun 
/* Block device operations exported to the block layer */
static const struct block_device_operations ace_fops = {
	.owner = THIS_MODULE,
	.open = ace_open,
	.release = ace_release,
	.check_events = ace_check_events,	/* media-change reporting */
	.getgeo = ace_getgeo,
};
971*4882a593Smuzhiyun 
/* blk-mq operations: requests queued here are serviced by the FSM tasklet */
static const struct blk_mq_ops ace_mq_ops = {
	.queue_rq	= ace_queue_rq,
};
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun /* --------------------------------------------------------------------
977*4882a593Smuzhiyun  * SystemACE device setup/teardown code
978*4882a593Smuzhiyun  */
/*
 * Bus-independent device setup: map the registers, create the blk-mq
 * queue and gendisk, detect the bus endianness, sanity-check the chip
 * version, hook up the irq (falling back to polled mode on failure),
 * read the initial CF identity and finally publish the disk.
 *
 * Returns 0 on success or -ENOMEM on any failure (resources acquired
 * so far are released via the goto-cleanup chain at the bottom).
 */
static int ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);
	INIT_LIST_HEAD(&ace->rq_list);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	timer_setup(&ace->stall_timer, ace_stall_timer, 0);

	/*
	 * Initialize the request queue (single hw queue, depth 2)
	 */
	ace->queue = blk_mq_init_sq_queue(&ace->tag_set, &ace_mq_ops, 2,
						BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(ace->queue)) {
		rc = PTR_ERR(ace->queue);
		ace->queue = NULL;
		goto err_blk_initq;
	}
	ace->queue->queuedata = ace;

	blk_queue_logical_block_size(ace->queue, 512);
	blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* 0x0101 should work regardless of endianess */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianess */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane; all-0s/all-1s suggests the
	 * device is absent or not responding */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Now we can hook up the irq handler */
	if (ace->irq > 0) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = rc;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	/* Force an initial identify so capacity is known before add_disk */
	ace->media_change = 1;
	ace_media_changed(ace);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);

	return 0;

err_read:
	/* prevent double queue cleanup */
	ace->gd->queue = NULL;
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
	blk_mq_free_tag_set(&ace->tag_set);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		 (unsigned long long) ace->physaddr);
	return -ENOMEM;
}
1103*4882a593Smuzhiyun 
/*
 * Undo ace_setup(): remove the gendisk and request queue first, then
 * stop the FSM tasklet, release the irq (if one was acquired) and
 * unmap the registers.
 */
static void ace_teardown(struct ace_device *ace)
{
	if (ace->gd) {
		del_gendisk(ace->gd);
		put_disk(ace->gd);
	}

	if (ace->queue) {
		blk_cleanup_queue(ace->queue);
		blk_mq_free_tag_set(&ace->tag_set);
	}

	tasklet_kill(&ace->fsm_tasklet);

	/* irq <= 0 means polled mode was in use; nothing to free */
	if (ace->irq > 0)
		free_irq(ace->irq, ace);

	iounmap(ace->baseaddr);
}
1123*4882a593Smuzhiyun 
/*
 * Allocate an ace_device, record the bus parameters and run the common
 * setup code.  On success the new device is stored as drvdata on @dev;
 * on failure everything is released and an errno is returned.
 */
static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
		     int irq, int bus_width)
{
	struct ace_device *ace;
	int rc;

	dev_dbg(dev, "ace_alloc(%p)\n", dev);

	ace = kzalloc(sizeof(*ace), GFP_KERNEL);
	if (!ace) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Record the bus parameters for ace_setup() */
	ace->dev = dev;
	ace->id = id;
	ace->physaddr = physaddr;
	ace->irq = irq;
	ace->bus_width = bus_width;

	rc = ace_setup(ace);
	if (rc)
		goto err_setup;

	dev_set_drvdata(dev, ace);
	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(ace);
err_alloc:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}
1159*4882a593Smuzhiyun 
ace_free(struct device * dev)1160*4882a593Smuzhiyun static void ace_free(struct device *dev)
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun 	struct ace_device *ace = dev_get_drvdata(dev);
1163*4882a593Smuzhiyun 	dev_dbg(dev, "ace_free(%p)\n", dev);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	if (ace) {
1166*4882a593Smuzhiyun 		ace_teardown(ace);
1167*4882a593Smuzhiyun 		dev_set_drvdata(dev, NULL);
1168*4882a593Smuzhiyun 		kfree(ace);
1169*4882a593Smuzhiyun 	}
1170*4882a593Smuzhiyun }
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun /* ---------------------------------------------------------------------
1173*4882a593Smuzhiyun  * Platform Bus Support
1174*4882a593Smuzhiyun  */
1175*4882a593Smuzhiyun 
ace_probe(struct platform_device * dev)1176*4882a593Smuzhiyun static int ace_probe(struct platform_device *dev)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun 	int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
1179*4882a593Smuzhiyun 	resource_size_t physaddr;
1180*4882a593Smuzhiyun 	struct resource *res;
1181*4882a593Smuzhiyun 	u32 id = dev->id;
1182*4882a593Smuzhiyun 	int irq;
1183*4882a593Smuzhiyun 	int i;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	/* device id and bus width */
1188*4882a593Smuzhiyun 	if (of_property_read_u32(dev->dev.of_node, "port-number", &id))
1189*4882a593Smuzhiyun 		id = 0;
1190*4882a593Smuzhiyun 	if (of_find_property(dev->dev.of_node, "8-bit", NULL))
1191*4882a593Smuzhiyun 		bus_width = ACE_BUS_WIDTH_8;
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1194*4882a593Smuzhiyun 	if (!res)
1195*4882a593Smuzhiyun 		return -EINVAL;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	physaddr = res->start;
1198*4882a593Smuzhiyun 	if (!physaddr)
1199*4882a593Smuzhiyun 		return -ENODEV;
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	irq = platform_get_irq_optional(dev, 0);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	/* Call the bus-independent setup code */
1204*4882a593Smuzhiyun 	return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun /*
1208*4882a593Smuzhiyun  * Platform bus remove() method
1209*4882a593Smuzhiyun  */
/* Release all resources bound to this platform device; always succeeds. */
static int ace_remove(struct platform_device *dev)
{
	ace_free(&dev->dev);
	return 0;
}
1215*4882a593Smuzhiyun 
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id ace_of_match[] = {
	{ .compatible = "xlnx,opb-sysace-1.00.b", },
	{ .compatible = "xlnx,opb-sysace-1.00.c", },
	{ .compatible = "xlnx,xps-sysace-1.00.a", },
	{ .compatible = "xlnx,sysace", },
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, ace_of_match);
#else /* CONFIG_OF */
#define ace_of_match NULL
#endif /* CONFIG_OF */
1229*4882a593Smuzhiyun 
/* Platform driver glue: probe/remove callbacks plus the OF match table */
static struct platform_driver ace_platform_driver = {
	.probe = ace_probe,
	.remove = ace_remove,
	.driver = {
		.name = "xsysace",
		.of_match_table = ace_of_match,
	},
};
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun /* ---------------------------------------------------------------------
1240*4882a593Smuzhiyun  * Module init/exit routines
1241*4882a593Smuzhiyun  */
ace_init(void)1242*4882a593Smuzhiyun static int __init ace_init(void)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun 	int rc;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	ace_major = register_blkdev(ace_major, "xsysace");
1247*4882a593Smuzhiyun 	if (ace_major <= 0) {
1248*4882a593Smuzhiyun 		rc = -ENOMEM;
1249*4882a593Smuzhiyun 		goto err_blk;
1250*4882a593Smuzhiyun 	}
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	rc = platform_driver_register(&ace_platform_driver);
1253*4882a593Smuzhiyun 	if (rc)
1254*4882a593Smuzhiyun 		goto err_plat;
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
1257*4882a593Smuzhiyun 	return 0;
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun err_plat:
1260*4882a593Smuzhiyun 	unregister_blkdev(ace_major, "xsysace");
1261*4882a593Smuzhiyun err_blk:
1262*4882a593Smuzhiyun 	printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
1263*4882a593Smuzhiyun 	return rc;
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun module_init(ace_init);
1266*4882a593Smuzhiyun 
/* Module exit: unregister the platform driver, then the block major. */
static void __exit ace_exit(void)
{
	pr_debug("Unregistering Xilinx SystemACE driver\n");
	platform_driver_unregister(&ace_platform_driver);
	unregister_blkdev(ace_major, "xsysace");
}
module_exit(ace_exit);
1274