// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the SWIM3 (Super Woz Integrated Machine 3)
 * floppy controller found on Power Macintoshes.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

/*
 * TODO:
 * handle 2 drives
 * handle GCR disks
 */

#undef DEBUG

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>

#define MAX_FLOPPIES	2

static DEFINE_MUTEX(swim3_mutex);
static struct gendisk *disks[MAX_FLOPPIES];

enum swim_state {
	idle,
	locating,
	seeking,
	settling,
	do_transfer,
	jogging,
	available,
	revalidating,
	ejecting
};

#define REG(x)	unsigned char x; char x ## _pad[15];
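
/*
 * Each REG(x) above expands to one register byte followed by 15 pad bytes,
 * so successive registers in struct swim3 below sit 16 bytes apart in the
 * memory-mapped register block.
 */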

/*
 * The names for these registers mostly represent speculation on my part.
 * It will be interesting to see how close they are to the names Apple uses.
 */
struct swim3 {
	REG(data);
	REG(timer);		/* counts down at 1MHz */
	REG(error);
	REG(mode);
	REG(select);		/* controls CA0, CA1, CA2 and LSTRB signals */
	REG(setup);
	REG(control);		/* writing bits clears them */
	REG(status);		/* writing bits sets them in control */
	REG(intr);
	REG(nseek);		/* # tracks to seek */
	REG(ctrack);		/* current track number */
	REG(csect);		/* current sector number */
	REG(gap3);		/* size of gap 3 in track format */
	REG(sector);		/* sector # to read or write */
	REG(nsect);		/* # sectors to read or write */
	REG(intr_enable);
};

#define control_bic	control
#define control_bis	status

/* Bits in select register */
#define CA_MASK		7
#define LSTRB		8

/* Bits in control register */
#define DO_SEEK		0x80
#define FORMAT		0x40
#define SELECT		0x20
#define WRITE_SECTORS	0x10
#define DO_ACTION	0x08
#define DRIVE2_ENABLE	0x04
#define DRIVE_ENABLE	0x02
#define INTR_ENABLE	0x01

/* Bits in status register */
#define FIFO_1BYTE	0x80
#define FIFO_2BYTE	0x40
#define ERROR		0x20
#define DATA		0x08
#define RDDATA		0x04
#define INTR_PENDING	0x02
#define MARK_BYTE	0x01

/* Bits in intr and intr_enable registers */
#define ERROR_INTR	0x20
#define DATA_CHANGED	0x10
#define TRANSFER_DONE	0x08
#define SEEN_SECTOR	0x04
#define SEEK_DONE	0x02
#define TIMER_DONE	0x01

/* Bits in error register */
#define ERR_DATA_CRC	0x80
#define ERR_ADDR_CRC	0x40
#define ERR_OVERRUN	0x04
#define ERR_UNDERRUN	0x01

/* Bits in setup register */
#define S_SW_RESET	0x80
#define S_GCR_WRITE	0x40
#define S_IBM_DRIVE	0x20
#define S_TEST_MODE	0x10
#define S_FCLK_DIV2	0x08
#define S_GCR		0x04
#define S_COPY_PROT	0x02
#define S_INV_WDATA	0x01

/* Select values for swim3_action */
#define SEEK_POSITIVE	0
#define SEEK_NEGATIVE	4
#define STEP		1
#define MOTOR_ON	2
#define MOTOR_OFF	6
#define INDEX		3
#define EJECT		7
#define SETMFM		9
#define SETGCR		13

/* Select values for swim3_select and swim3_readbit */
#define STEP_DIR	0
#define STEPPING	1
#define MOTOR_ON	2
#define RELAX		3	/* also eject in progress */
#define READ_DATA_0	4
#define ONEMEG_DRIVE	5
#define SINGLE_SIDED	6	/* drive or diskette is 4MB type? */
#define DRIVE_PRESENT	7
#define DISK_IN		8
#define WRITE_PROT	9
#define TRACK_ZERO	10
#define TACHO		11
#define READ_DATA_1	12
#define GCR_MODE	13
#define SEEK_COMPLETE	14
#define TWOMEG_MEDIA	15

/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE	0x99
#define GCR_SYNC_EXC	0x3f
#define GCR_SYNC_CONV	0x80
#define GCR_FIRST_MARK	0xd5
#define GCR_SECOND_MARK	0xaa
#define GCR_ADDR_MARK	"\xd5\xaa\x00"
#define GCR_DATA_MARK	"\xd5\xaa\x0b"
#define GCR_SLIP_BYTE	"\x27\xaa"
#define GCR_SELF_SYNC	"\x3f\xbf\x1e\x34\x3c\x3f"

#define DATA_99		"\x99\x99"
#define MFM_ADDR_MARK	"\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
#define MFM_INDEX_MARK	"\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
#define MFM_GAP_LEN	12

struct floppy_state {
	enum swim_state	state;
	struct swim3 __iomem *swim3;	/* hardware registers */
	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
	int	swim3_intr;	/* interrupt number for SWIM3 */
	int	dma_intr;	/* interrupt number for DMA channel */
	int	cur_cyl;	/* cylinder head is on, or -1 */
	int	cur_sector;	/* last sector we saw go past */
	int	req_cyl;	/* the cylinder for the current r/w request */
	int	head;		/* head number ditto */
	int	req_sector;	/* sector number ditto */
	int	scount;		/* # sectors we're transferring at present */
	int	retries;
	int	settle_time;
	int	secpercyl;	/* disk geometry information */
	int	secpertrack;
	int	total_secs;
	int	write_prot;	/* 1 if write-protected, 0 if not, -1 dunno */
	struct dbdma_cmd *dma_cmd;
	int	ref_count;
	int	expect_cyl;
	struct timer_list timeout;
	int	timeout_pending;
	int	ejected;
	wait_queue_head_t wait;
	int	wanted;
	struct macio_dev *mdev;
	char	dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
	int	index;
	struct request *cur_req;
	struct blk_mq_tag_set tag_set;
};

#define swim3_err(fmt, arg...)	dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_warn(fmt, arg...)	dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_info(fmt, arg...)	dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)

#ifdef DEBUG
#define swim3_dbg(fmt, arg...)	dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#else
#define swim3_dbg(fmt, arg...)	do { } while(0)
#endif

static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);

static unsigned short write_preamble[] = {
	0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e,	/* gap field */
	0, 0, 0, 0, 0, 0,			/* sync field */
	0x99a1, 0x99a1, 0x99a1, 0x99fb,		/* data address mark */
	0x990f					/* no escape for 512 bytes */
};

static unsigned short write_postamble[] = {
	0x9904,					/* insert CRC */
	0x4e4e, 0x4e4e,
	0x9908,					/* stop writing */
	0, 0, 0, 0, 0, 0
};
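
/*
 * The preamble above is streamed to the controller ahead of the 512 bytes
 * of sector data on a write and the postamble follows it; words prefixed
 * with DATA_ESCAPE (0x99) carry controller commands rather than literal
 * data, e.g. 0x9904 "insert CRC", 0x9908 "stop writing" and 0x990f
 * "no escape for the next 512 bytes".
 */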

static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
static void act(struct floppy_state *fs);
static void scan_timeout(struct timer_list *t);
static void seek_timeout(struct timer_list *t);
static void settle_timeout(struct timer_list *t);
static void xfer_timeout(struct timer_list *t);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
		      int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long param);
static int floppy_open(struct block_device *bdev, fmode_t mode);
static void floppy_release(struct gendisk *disk, fmode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);

static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
	struct request *req = fs->cur_req;

	swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
		  err, nr_bytes, req);

	if (err)
		nr_bytes = blk_rq_cur_bytes(req);
	if (blk_update_request(req, err, nr_bytes))
		return true;
	__blk_mq_end_request(req, err);
	fs->cur_req = NULL;
	return false;
}

static void swim3_select(struct floppy_state *fs, int sel)
{
	struct swim3 __iomem *sw = fs->swim3;

	out_8(&sw->select, RELAX);
	if (sel & 8)
		out_8(&sw->control_bis, SELECT);
	else
		out_8(&sw->control_bic, SELECT);
	out_8(&sw->select, sel & CA_MASK);
}

static void swim3_action(struct floppy_state *fs, int action)
{
	struct swim3 __iomem *sw = fs->swim3;

	swim3_select(fs, action);
	udelay(1);
	out_8(&sw->select, sw->select | LSTRB);
	udelay(2);
	out_8(&sw->select, sw->select & ~LSTRB);
	udelay(1);
}

static int swim3_readbit(struct floppy_state *fs, int bit)
{
	struct swim3 __iomem *sw = fs->swim3;
	int stat;

	swim3_select(fs, bit);
	udelay(1);
	stat = in_8(&sw->status);
	return (stat & DATA) == 0;
}
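
/*
 * The three helpers above implement the low-level drive protocol:
 * swim3_select() drives SELECT and CA0-CA2 to address one of the drive's
 * internal registers, swim3_action() then pulses LSTRB to latch a command
 * into the drive, and swim3_readbit() returns 1 when the DATA status line
 * reads low for the selected bit.
 */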

static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct floppy_state *fs = hctx->queue->queuedata;
	struct request *req = bd->rq;
	unsigned long x;

	spin_lock_irq(&swim3_lock);
	if (fs->cur_req || fs->state != idle) {
		spin_unlock_irq(&swim3_lock);
		return BLK_STS_DEV_RESOURCE;
	}
	blk_mq_start_request(req);
	fs->cur_req = req;
	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD) {
		swim3_dbg("%s", "  media bay absent, dropping req\n");
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		goto out;
	}
	if (fs->ejected) {
		swim3_dbg("%s", "  disk ejected\n");
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		goto out;
	}
	if (rq_data_dir(req) == WRITE) {
		if (fs->write_prot < 0)
			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
		if (fs->write_prot) {
			swim3_dbg("%s", "  try to write, disk write protected\n");
			swim3_end_request(fs, BLK_STS_IOERR, 0);
			goto out;
		}
	}

	/*
	 * Do not remove the cast. blk_rq_pos(req) is now a sector_t and can be
	 * 64 bits, but it will never go past 32 bits for this driver anyway, so
	 * we can safely cast it down and not have to do a 64/32 division
	 */
	fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
	x = ((long)blk_rq_pos(req)) % fs->secpercyl;
	fs->head = x / fs->secpertrack;
	fs->req_sector = x % fs->secpertrack + 1;
	fs->state = do_transfer;
	fs->retries = 0;

	act(fs);

out:
	spin_unlock_irq(&swim3_lock);
	return BLK_STS_OK;
}
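
/*
 * With the fixed 1.44MB geometry used here (secpercyl = 36, secpertrack = 18,
 * two heads), the linear sector number decomposes as, for example:
 * blk_rq_pos = 40  ->  req_cyl = 40 / 36 = 1, head = (40 % 36) / 18 = 0,
 * req_sector = (40 % 36) % 18 + 1 = 5.
 */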

static void set_timeout(struct floppy_state *fs, int nticks,
			void (*proc)(struct timer_list *t))
{
	if (fs->timeout_pending)
		del_timer(&fs->timeout);
	fs->timeout.expires = jiffies + nticks;
	fs->timeout.function = proc;
	add_timer(&fs->timeout);
	fs->timeout_pending = 1;
}

static inline void scan_track(struct floppy_state *fs)
{
	struct swim3 __iomem *sw = fs->swim3;

	swim3_select(fs, READ_DATA_0);
	in_8(&sw->intr);		/* clear SEEN_SECTOR bit */
	in_8(&sw->error);
	out_8(&sw->intr_enable, SEEN_SECTOR);
	out_8(&sw->control_bis, DO_ACTION);
	/* enable intr when track found */
	set_timeout(fs, HZ, scan_timeout);	/* enable timeout */
}

static inline void seek_track(struct floppy_state *fs, int n)
{
	struct swim3 __iomem *sw = fs->swim3;

	if (n >= 0) {
		swim3_action(fs, SEEK_POSITIVE);
		sw->nseek = n;
	} else {
		swim3_action(fs, SEEK_NEGATIVE);
		sw->nseek = -n;
	}
	fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
	swim3_select(fs, STEP);
	in_8(&sw->error);
	/* enable intr when seek finished */
	out_8(&sw->intr_enable, SEEK_DONE);
	out_8(&sw->control_bis, DO_SEEK);
	set_timeout(fs, 3*HZ, seek_timeout);	/* enable timeout */
	fs->settle_time = 0;
}

static inline void init_dma(struct dbdma_cmd *cp, int cmd,
			    void *buf, int count)
{
	cp->req_count = cpu_to_le16(count);
	cp->command = cpu_to_le16(cmd);
	cp->phy_addr = cpu_to_le32(virt_to_bus(buf));
	cp->xfer_status = 0;
}

static inline void setup_transfer(struct floppy_state *fs)
{
	int n;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_cmd *cp = fs->dma_cmd;
	struct dbdma_regs __iomem *dr = fs->dma;
	struct request *req = fs->cur_req;

	if (blk_rq_cur_sectors(req) <= 0) {
		swim3_warn("%s", "Transfer 0 sectors ?\n");
		return;
	}
	if (rq_data_dir(req) == WRITE)
		n = 1;
	else {
		n = fs->secpertrack - fs->req_sector + 1;
		if (n > blk_rq_cur_sectors(req))
			n = blk_rq_cur_sectors(req);
	}

	swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
		  fs->req_sector, fs->secpertrack, fs->head, n);

	fs->scount = n;
	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
	out_8(&sw->sector, fs->req_sector);
	out_8(&sw->nsect, n);
	out_8(&sw->gap3, 0);
	out_le32(&dr->cmdptr, virt_to_bus(cp));
	if (rq_data_dir(req) == WRITE) {
		/* Set up 3 dma commands: write preamble, data, postamble */
		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
		++cp;
		init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
		++cp;
		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
	} else {
		init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
	}
	++cp;
	out_le16(&cp->command, DBDMA_STOP);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	in_8(&sw->error);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	if (rq_data_dir(req) == WRITE)
		out_8(&sw->control_bis, WRITE_SECTORS);
	in_8(&sw->intr);
	out_le32(&dr->control, (RUN << 16) | RUN);
	/* enable intr when transfer complete */
	out_8(&sw->intr_enable, TRANSFER_DONE);
	out_8(&sw->control_bis, DO_ACTION);
	set_timeout(fs, 2*HZ, xfer_timeout);	/* enable timeout */
}
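
/*
 * The DBDMA command chain built in setup_transfer() above is, for a write,
 * OUTPUT_MORE (preamble) -> OUTPUT_MORE (one 512-byte sector) ->
 * OUTPUT_LAST (postamble) -> DBDMA_STOP, and for a read a single INPUT_LAST
 * covering up to the rest of the track, followed by DBDMA_STOP.  Writes
 * therefore go one sector at a time while reads can move several sectors
 * per request.
 */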

static void act(struct floppy_state *fs)
{
	for (;;) {
		swim3_dbg("  act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
			  fs->state, fs->req_cyl, fs->cur_cyl);

		switch (fs->state) {
		case idle:
			return;		/* XXX shouldn't get here */

		case locating:
			if (swim3_readbit(fs, TRACK_ZERO)) {
				swim3_dbg("%s", "    locate track 0\n");
				fs->cur_cyl = 0;
				if (fs->req_cyl == 0)
					fs->state = do_transfer;
				else
					fs->state = seeking;
				break;
			}
			scan_track(fs);
			return;

		case seeking:
			if (fs->cur_cyl < 0) {
				fs->expect_cyl = -1;
				fs->state = locating;
				break;
			}
			if (fs->req_cyl == fs->cur_cyl) {
				swim3_warn("%s", "Whoops, seeking 0\n");
				fs->state = do_transfer;
				break;
			}
			seek_track(fs, fs->req_cyl - fs->cur_cyl);
			return;

		case settling:
			/* check for SEEK_COMPLETE after 30ms */
			fs->settle_time = (HZ + 32) / 33;
			set_timeout(fs, fs->settle_time, settle_timeout);
			return;

		case do_transfer:
			if (fs->cur_cyl != fs->req_cyl) {
				if (fs->retries > 5) {
					swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
						  fs->req_cyl, fs->cur_cyl);
					swim3_end_request(fs, BLK_STS_IOERR, 0);
					fs->state = idle;
					return;
				}
				fs->state = seeking;
				break;
			}
			setup_transfer(fs);
			return;

		case jogging:
			seek_track(fs, -5);
			return;

		default:
			swim3_err("Unknown state %d\n", fs->state);
			return;
		}
	}
}
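
/*
 * act() above advances the per-drive state machine until it has started an
 * operation that completes via interrupt or timeout: locating finds the
 * current cylinder (or track zero), seeking steps the head towards req_cyl,
 * settling waits for SEEK_COMPLETE, do_transfer programs the controller and
 * DMA engine, and jogging steps back a few tracks as a recovery measure
 * before retrying.
 */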

static void scan_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* scan timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	out_8(&sw->select, RELAX);
	out_8(&sw->intr_enable, 0);
	fs->cur_cyl = -1;
	if (fs->retries > 5) {
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		fs->state = idle;
	} else {
		fs->state = jogging;
		act(fs);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void seek_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* seek timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_8(&sw->control_bic, DO_SEEK);
	out_8(&sw->select, RELAX);
	out_8(&sw->intr_enable, 0);
	swim3_err("%s", "Seek timeout\n");
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void settle_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* settle timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	if (swim3_readbit(fs, SEEK_COMPLETE)) {
		out_8(&sw->select, RELAX);
		fs->state = locating;
		act(fs);
		goto unlock;
	}
	out_8(&sw->select, RELAX);
	if (fs->settle_time < 2*HZ) {
		++fs->settle_time;
		set_timeout(fs, 1, settle_timeout);
		goto unlock;
	}
	swim3_err("%s", "Seek settle timeout\n");
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
 unlock:
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void xfer_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_regs __iomem *dr = fs->dma;
	unsigned long flags;
	int n;

	swim3_dbg("* xfer timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_le32(&dr->control, RUN << 16);
	/* We must wait a bit for dbdma to stop */
	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
		udelay(1);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	swim3_err("Timeout %sing sector %ld\n",
	       (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
	       (long)blk_rq_pos(fs->cur_req));
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);
}
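
/*
 * The four timeout handlers above share the single fs->timeout timer armed
 * by set_timeout() and run under swim3_lock, mirroring the locking in
 * swim3_interrupt(); each one quiesces the controller and then either
 * retries (scan, settle) or fails the current request (seek, xfer).
 */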

static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;
	unsigned long flags;
	struct request *req = fs->cur_req;

	swim3_dbg("* interrupt, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
			  fs->state, rq_data_dir(req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				swim3_err("%s", "Seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request(fs, BLK_STS_IOERR, 0);
					fs->state = idle;
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				swim3_err("Expected cyl %d, got %d\n",
					  fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here.  That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = le16_to_cpu(cp->xfer_status);
		resid = le16_to_cpu(cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				swim3_err("Error %sing block %ld (err=%x)\n",
				       rq_data_dir(req) == WRITE? "writ": "read",
				       (long)blk_rq_pos(req), err);
				swim3_end_request(fs, BLK_STS_IOERR, 0);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* musta been an error */
				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
				swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
					  fs->state, rq_data_dir(req), intr, err);
				swim3_end_request(fs, BLK_STS_IOERR, 0);
				fs->state = idle;
				break;
			}
			fs->retries = 0;
			if (swim3_end_request(fs, 0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		break;
	default:
		swim3_err("Don't know what to do in state %d\n", fs->state);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
	return IRQ_HANDLED;
}

/*
static void fd_dma_interrupt(int irq, void *dev_id)
{
}
*/

/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
		      int interruptible)
{
	unsigned long flags;

	swim3_dbg("%s", "-> grab drive\n");

	spin_lock_irqsave(&swim3_lock, flags);
	if (fs->state != idle && fs->state != available) {
		++fs->wanted;
		/* this will enable irqs in order to sleep */
		if (!interruptible)
			wait_event_lock_irq(fs->wait,
                                        fs->state == available,
                                        swim3_lock);
		else if (wait_event_interruptible_lock_irq(fs->wait,
					fs->state == available,
					swim3_lock)) {
			--fs->wanted;
			spin_unlock_irqrestore(&swim3_lock, flags);
			return -EINTR;
		}
		--fs->wanted;
	}
	fs->state = state;
	spin_unlock_irqrestore(&swim3_lock, flags);

	return 0;
}

static void release_drive(struct floppy_state *fs)
{
	struct request_queue *q = disks[fs->index]->queue;
	unsigned long flags;

	swim3_dbg("%s", "-> release drive\n");

	spin_lock_irqsave(&swim3_lock, flags);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}
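
/*
 * grab_drive()/release_drive() above serialise the ioctl and revalidate
 * paths against in-flight I/O: grab_drive() sleeps until the state machine
 * is idle or available and then claims it, while release_drive() returns
 * the drive to idle and briefly freezes and quiesces the request queue so
 * the block layer can re-dispatch any requests deferred while the drive
 * was busy.
 */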

static int fd_eject(struct floppy_state *fs)
{
	int err, n;

	err = grab_drive(fs, ejecting, 1);
	if (err)
		return err;
	swim3_action(fs, EJECT);
	for (n = 20; n > 0; --n) {
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
		swim3_select(fs, RELAX);
		schedule_timeout_interruptible(1);
		if (swim3_readbit(fs, DISK_IN) == 0)
			break;
	}
	swim3_select(fs, RELAX);
	udelay(150);
	fs->ejected = 1;
	release_drive(fs);
	return err;
}

static struct floppy_struct floppy_type =
	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/*  7 1.44MB 3.5"   */
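
/*
 * This matches entry 7 of the PC floppy_type table: 2880 sectors total,
 * 18 sectors per track, 2 heads, 80 tracks, i.e. a 1.44MB 3.5" diskette,
 * which is the only format the driver handles (GCR media are still a TODO).
 */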

static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long param)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
	int err;

	if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD)
		return -ENXIO;

	switch (cmd) {
	case FDEJECT:
		if (fs->ref_count != 1)
			return -EBUSY;
		err = fd_eject(fs);
		return err;
	case FDGETPRM:
	        if (copy_to_user((void __user *) param, &floppy_type,
				 sizeof(struct floppy_struct)))
			return -EFAULT;
		return 0;
	}
	return -ENOTTY;
}

static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
				 unsigned int cmd, unsigned long param)
{
	int ret;

	mutex_lock(&swim3_mutex);
	ret = floppy_locked_ioctl(bdev, mode, cmd, param);
	mutex_unlock(&swim3_mutex);

	return ret;
}

static int floppy_open(struct block_device *bdev, fmode_t mode)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;
	int n, err = 0;

	if (fs->ref_count == 0) {
		if (fs->mdev->media_bay &&
		    check_media_bay(fs->mdev->media_bay) != MB_FD)
			return -ENXIO;
		out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
		out_8(&sw->control_bic, 0xff);
		out_8(&sw->mode, 0x95);
		udelay(10);
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
		swim3_action(fs, MOTOR_ON);
		fs->write_prot = -1;
		fs->cur_cyl = -1;
		for (n = 0; n < 2 * HZ; ++n) {
			if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
				break;
			if (signal_pending(current)) {
				err = -EINTR;
				break;
			}
			swim3_select(fs, RELAX);
			schedule_timeout_interruptible(1);
		}
		if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
				 || swim3_readbit(fs, DISK_IN) == 0))
			err = -ENXIO;
		swim3_action(fs, SETMFM);
		swim3_select(fs, RELAX);

	} else if (fs->ref_count == -1 || mode & FMODE_EXCL)
		return -EBUSY;

	if (err == 0 && (mode & FMODE_NDELAY) == 0
	    && (mode & (FMODE_READ|FMODE_WRITE))) {
		if (bdev_check_media_change(bdev))
			floppy_revalidate(bdev->bd_disk);
		if (fs->ejected)
			err = -ENXIO;
	}

	if (err == 0 && (mode & FMODE_WRITE)) {
		if (fs->write_prot < 0)
			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
		if (fs->write_prot)
			err = -EROFS;
	}

	if (err) {
		if (fs->ref_count == 0) {
			swim3_action(fs, MOTOR_OFF);
			out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
			swim3_select(fs, RELAX);
		}
		return err;
	}

	if (mode & FMODE_EXCL)
		fs->ref_count = -1;
	else
		++fs->ref_count;

	return 0;
}

static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	mutex_lock(&swim3_mutex);
	ret = floppy_open(bdev, mode);
	mutex_unlock(&swim3_mutex);

	return ret;
}

static void floppy_release(struct gendisk *disk, fmode_t mode)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;

	mutex_lock(&swim3_mutex);
	if (fs->ref_count > 0)
		--fs->ref_count;
	else if (fs->ref_count == -1)
		fs->ref_count = 0;
	if (fs->ref_count == 0) {
		swim3_action(fs, MOTOR_OFF);
		out_8(&sw->control_bic, 0xff);
		swim3_select(fs, RELAX);
	}
	mutex_unlock(&swim3_mutex);
}

static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing)
{
	struct floppy_state *fs = disk->private_data;
	return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static int floppy_revalidate(struct gendisk *disk)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw;
	int ret, n;

	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD)
		return -ENXIO;

	sw = fs->swim3;
	grab_drive(fs, revalidating, 0);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bis, DRIVE_ENABLE);
	swim3_action(fs, MOTOR_ON);	/* necessary? */
	fs->write_prot = -1;
	fs->cur_cyl = -1;
	mdelay(1);
	for (n = HZ; n > 0; --n) {
		if (swim3_readbit(fs, SEEK_COMPLETE))
			break;
		if (signal_pending(current))
			break;
		swim3_select(fs, RELAX);
		schedule_timeout_interruptible(1);
	}
	ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
		|| swim3_readbit(fs, DISK_IN) == 0;
	if (ret)
		swim3_action(fs, MOTOR_OFF);
	else {
		fs->ejected = 0;
		swim3_action(fs, SETMFM);
	}
	swim3_select(fs, RELAX);

	release_drive(fs);
	return ret;
}
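
/*
 * floppy_revalidate() above spins up the drive, waits up to a second for
 * SEEK_COMPLETE, and returns non-zero if the seek never completed or no
 * diskette is present; on success it clears the ejected flag and switches
 * the drive back to MFM mode.
 */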

static const struct block_device_operations floppy_fops = {
	.open		= floppy_unlocked_open,
	.release	= floppy_release,
	.ioctl		= floppy_ioctl,
	.check_events	= floppy_check_events,
};

static const struct blk_mq_ops swim3_mq_ops = {
	.queue_rq = swim3_queue_rq,
};

static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
	struct floppy_state *fs = macio_get_drvdata(mdev);
	struct swim3 __iomem *sw;

	if (!fs)
		return;

	sw = fs->swim3;

	if (mb_state != MB_FD)
		return;

	/* Clear state */
	out_8(&sw->intr_enable, 0);
	in_8(&sw->intr);
	in_8(&sw->error);
}

static int swim3_add_device(struct macio_dev *mdev, int index)
{
	struct device_node *swim = mdev->ofdev.dev.of_node;
	struct floppy_state *fs = &floppy_states[index];
	int rc = -EBUSY;

	fs->mdev = mdev;
	fs->index = index;

	/* Check & Request resources */
	if (macio_resource_count(mdev) < 2) {
		swim3_err("%s", "No address in device-tree\n");
		return -ENXIO;
	}
	if (macio_irq_count(mdev) < 1) {
		swim3_err("%s", "No interrupt in device-tree\n");
		return -ENXIO;
	}
	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
		swim3_err("%s", "Can't request mmio resource\n");
		return -EBUSY;
	}
	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
		swim3_err("%s", "Can't request dma resource\n");
		macio_release_resource(mdev, 0);
		return -EBUSY;
	}
	dev_set_drvdata(&mdev->ofdev.dev, fs);

	if (mdev->media_bay == NULL)
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);

	fs->state = idle;
	fs->swim3 = (struct swim3 __iomem *)
		ioremap(macio_resource_start(mdev, 0), 0x200);
	if (fs->swim3 == NULL) {
		swim3_err("%s", "Couldn't map mmio registers\n");
		rc = -ENOMEM;
		goto out_release;
	}
	fs->dma = (struct dbdma_regs __iomem *)
		ioremap(macio_resource_start(mdev, 1), 0x200);
	if (fs->dma == NULL) {
		swim3_err("%s", "Couldn't map dma registers\n");
		iounmap(fs->swim3);
		rc = -ENOMEM;
		goto out_release;
	}
	fs->swim3_intr = macio_irq(mdev, 0);
	fs->dma_intr = macio_irq(mdev, 1);
	fs->cur_cyl = -1;
	fs->cur_sector = -1;
	fs->secpercyl = 36;
	fs->secpertrack = 18;
	fs->total_secs = 2880;
	init_waitqueue_head(&fs->wait);

	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
	fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);

	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
		swim3_mb_event(mdev, MB_FD);

	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
		swim3_err("%s", "Couldn't request interrupt\n");
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
		goto out_unmap;
	}

	timer_setup(&fs->timeout, NULL, 0);

	swim3_info("SWIM3 floppy controller %s\n",
		mdev->media_bay ? "in media bay" : "");

	return 0;

 out_unmap:
	iounmap(fs->dma);
	iounmap(fs->swim3);

 out_release:
	macio_release_resource(mdev, 0);
	macio_release_resource(mdev, 1);

	return rc;
}

static int swim3_attach(struct macio_dev *mdev,
			const struct of_device_id *match)
{
	struct floppy_state *fs;
	struct gendisk *disk;
	int rc;

	if (floppy_count >= MAX_FLOPPIES)
		return -ENXIO;

	if (floppy_count == 0) {
		rc = register_blkdev(FLOPPY_MAJOR, "fd");
		if (rc)
			return rc;
	}

	disk = alloc_disk(1);
	if (disk == NULL) {
		rc = -ENOMEM;
		goto out_unregister;
	}

	fs = &floppy_states[floppy_count];
	memset(fs, 0, sizeof(*fs));

	disk->queue = blk_mq_init_sq_queue(&fs->tag_set, &swim3_mq_ops, 2,
						BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(disk->queue)) {
		rc = PTR_ERR(disk->queue);
		disk->queue = NULL;
		goto out_put_disk;
	}
	blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
	disk->queue->queuedata = fs;

	rc = swim3_add_device(mdev, floppy_count);
	if (rc)
		goto out_cleanup_queue;

	disk->major = FLOPPY_MAJOR;
	disk->first_minor = floppy_count;
	disk->fops = &floppy_fops;
	disk->private_data = fs;
	disk->events = DISK_EVENT_MEDIA_CHANGE;
	disk->flags |= GENHD_FL_REMOVABLE;
	sprintf(disk->disk_name, "fd%d", floppy_count);
	set_capacity(disk, 2880);
	add_disk(disk);

	disks[floppy_count++] = disk;
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(disk->queue);
	disk->queue = NULL;
	blk_mq_free_tag_set(&fs->tag_set);
out_put_disk:
	put_disk(disk);
out_unregister:
	if (floppy_count == 0)
		unregister_blkdev(FLOPPY_MAJOR, "fd");
	return rc;
}

static const struct of_device_id swim3_match[] =
{
	{
	.name		= "swim3",
	},
	{
	.compatible	= "ohare-swim3"
	},
	{
	.compatible	= "swim3"
	},
	{ /* end of list */ }
};

static struct macio_driver swim3_driver =
{
	.driver = {
		.name 		= "swim3",
		.of_match_table	= swim3_match,
	},
	.probe		= swim3_attach,
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= swim3_mb_event,
#endif
#if 0
	.suspend	= swim3_suspend,
	.resume		= swim3_resume,
#endif
};


int swim3_init(void)
{
	macio_register_driver(&swim3_driver);
	return 0;
}

module_init(swim3_init)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Mackerras");
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);