// SPDX-License-Identifier: GPL-2.0
/*
 * tape device discipline for 3480/3490 tapes.
 *
 * Copyright IBM Corp. 2001, 2009
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *	      Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #define KMSG_COMPONENT "tape_34xx"
12*4882a593Smuzhiyun #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/init.h>
16*4882a593Smuzhiyun #include <linux/bio.h>
17*4882a593Smuzhiyun #include <linux/workqueue.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #define TAPE_DBF_AREA tape_34xx_dbf
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include "tape.h"
23*4882a593Smuzhiyun #include "tape_std.h"
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun * Pointer to debug area.
27*4882a593Smuzhiyun */
28*4882a593Smuzhiyun debug_info_t *TAPE_DBF_AREA = NULL;
29*4882a593Smuzhiyun EXPORT_SYMBOL(TAPE_DBF_AREA);
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #define TAPE34XX_FMT_3480 0
32*4882a593Smuzhiyun #define TAPE34XX_FMT_3480_2_XF 1
33*4882a593Smuzhiyun #define TAPE34XX_FMT_3480_XF 2
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun struct tape_34xx_block_id {
36*4882a593Smuzhiyun unsigned int wrap : 1;
37*4882a593Smuzhiyun unsigned int segment : 7;
38*4882a593Smuzhiyun unsigned int format : 2;
39*4882a593Smuzhiyun unsigned int block : 22;
40*4882a593Smuzhiyun };
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun /*
43*4882a593Smuzhiyun * A list of block ID's is used to faster seek blocks.
44*4882a593Smuzhiyun */
45*4882a593Smuzhiyun struct tape_34xx_sbid {
46*4882a593Smuzhiyun struct list_head list;
47*4882a593Smuzhiyun struct tape_34xx_block_id bid;
48*4882a593Smuzhiyun };
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun static void tape_34xx_delete_sbid_from(struct tape_device *, int);
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun /*
53*4882a593Smuzhiyun * Medium sense for 34xx tapes. There is no 'real' medium sense call.
54*4882a593Smuzhiyun * So we just do a normal sense.
55*4882a593Smuzhiyun */
__tape_34xx_medium_sense(struct tape_request * request)56*4882a593Smuzhiyun static void __tape_34xx_medium_sense(struct tape_request *request)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun struct tape_device *device = request->device;
59*4882a593Smuzhiyun unsigned char *sense;
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun if (request->rc == 0) {
62*4882a593Smuzhiyun sense = request->cpdata;
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun /*
65*4882a593Smuzhiyun * This isn't quite correct. But since INTERVENTION_REQUIRED
66*4882a593Smuzhiyun * means that the drive is 'neither ready nor on-line' it is
67*4882a593Smuzhiyun * only slightly inaccurate to say there is no tape loaded if
68*4882a593Smuzhiyun * the drive isn't online...
69*4882a593Smuzhiyun */
70*4882a593Smuzhiyun if (sense[0] & SENSE_INTERVENTION_REQUIRED)
71*4882a593Smuzhiyun tape_med_state_set(device, MS_UNLOADED);
72*4882a593Smuzhiyun else
73*4882a593Smuzhiyun tape_med_state_set(device, MS_LOADED);
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun if (sense[1] & SENSE_WRITE_PROTECT)
76*4882a593Smuzhiyun device->tape_generic_status |= GMT_WR_PROT(~0);
77*4882a593Smuzhiyun else
78*4882a593Smuzhiyun device->tape_generic_status &= ~GMT_WR_PROT(~0);
79*4882a593Smuzhiyun } else
80*4882a593Smuzhiyun DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
81*4882a593Smuzhiyun request->rc);
82*4882a593Smuzhiyun tape_free_request(request);
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun
tape_34xx_medium_sense(struct tape_device * device)85*4882a593Smuzhiyun static int tape_34xx_medium_sense(struct tape_device *device)
86*4882a593Smuzhiyun {
87*4882a593Smuzhiyun struct tape_request *request;
88*4882a593Smuzhiyun int rc;
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun request = tape_alloc_request(1, 32);
91*4882a593Smuzhiyun if (IS_ERR(request)) {
92*4882a593Smuzhiyun DBF_EXCEPTION(6, "MSEN fail\n");
93*4882a593Smuzhiyun return PTR_ERR(request);
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun request->op = TO_MSEN;
97*4882a593Smuzhiyun tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
98*4882a593Smuzhiyun rc = tape_do_io_interruptible(device, request);
99*4882a593Smuzhiyun __tape_34xx_medium_sense(request);
100*4882a593Smuzhiyun return rc;
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun
tape_34xx_medium_sense_async(struct tape_device * device)103*4882a593Smuzhiyun static void tape_34xx_medium_sense_async(struct tape_device *device)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun struct tape_request *request;
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun request = tape_alloc_request(1, 32);
108*4882a593Smuzhiyun if (IS_ERR(request)) {
109*4882a593Smuzhiyun DBF_EXCEPTION(6, "MSEN fail\n");
110*4882a593Smuzhiyun return;
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun request->op = TO_MSEN;
114*4882a593Smuzhiyun tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
115*4882a593Smuzhiyun request->callback = (void *) __tape_34xx_medium_sense;
116*4882a593Smuzhiyun request->callback_data = NULL;
117*4882a593Smuzhiyun tape_do_io_async(device, request);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun struct tape_34xx_work {
121*4882a593Smuzhiyun struct tape_device *device;
122*4882a593Smuzhiyun enum tape_op op;
123*4882a593Smuzhiyun struct work_struct work;
124*4882a593Smuzhiyun };
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun /*
127*4882a593Smuzhiyun * These functions are currently used only to schedule a medium_sense for
128*4882a593Smuzhiyun * later execution. This is because we get an interrupt whenever a medium
129*4882a593Smuzhiyun * is inserted but cannot call tape_do_io* from an interrupt context.
130*4882a593Smuzhiyun * Maybe that's useful for other actions we want to start from the
131*4882a593Smuzhiyun * interrupt handler.
132*4882a593Smuzhiyun * Note: the work handler is called by the system work queue. The tape
133*4882a593Smuzhiyun * commands started by the handler need to be asynchrounous, otherwise
134*4882a593Smuzhiyun * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
135*4882a593Smuzhiyun */
136*4882a593Smuzhiyun static void
tape_34xx_work_handler(struct work_struct * work)137*4882a593Smuzhiyun tape_34xx_work_handler(struct work_struct *work)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun struct tape_34xx_work *p =
140*4882a593Smuzhiyun container_of(work, struct tape_34xx_work, work);
141*4882a593Smuzhiyun struct tape_device *device = p->device;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun switch(p->op) {
144*4882a593Smuzhiyun case TO_MSEN:
145*4882a593Smuzhiyun tape_34xx_medium_sense_async(device);
146*4882a593Smuzhiyun break;
147*4882a593Smuzhiyun default:
148*4882a593Smuzhiyun DBF_EVENT(3, "T34XX: internal error: unknown work\n");
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun tape_put_device(device);
151*4882a593Smuzhiyun kfree(p);
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun static int
tape_34xx_schedule_work(struct tape_device * device,enum tape_op op)155*4882a593Smuzhiyun tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun struct tape_34xx_work *p;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
160*4882a593Smuzhiyun return -ENOMEM;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun INIT_WORK(&p->work, tape_34xx_work_handler);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun p->device = tape_get_device(device);
165*4882a593Smuzhiyun p->op = op;
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun schedule_work(&p->work);
168*4882a593Smuzhiyun return 0;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun /*
172*4882a593Smuzhiyun * Done Handler is called when dev stat = DEVICE-END (successful operation)
173*4882a593Smuzhiyun */
174*4882a593Smuzhiyun static inline int
tape_34xx_done(struct tape_request * request)175*4882a593Smuzhiyun tape_34xx_done(struct tape_request *request)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun switch (request->op) {
180*4882a593Smuzhiyun case TO_DSE:
181*4882a593Smuzhiyun case TO_RUN:
182*4882a593Smuzhiyun case TO_WRI:
183*4882a593Smuzhiyun case TO_WTM:
184*4882a593Smuzhiyun case TO_ASSIGN:
185*4882a593Smuzhiyun case TO_UNASSIGN:
186*4882a593Smuzhiyun tape_34xx_delete_sbid_from(request->device, 0);
187*4882a593Smuzhiyun break;
188*4882a593Smuzhiyun default:
189*4882a593Smuzhiyun ;
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun return TAPE_IO_SUCCESS;
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun static inline int
tape_34xx_erp_failed(struct tape_request * request,int rc)195*4882a593Smuzhiyun tape_34xx_erp_failed(struct tape_request *request, int rc)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
198*4882a593Smuzhiyun tape_op_verbose[request->op], rc);
199*4882a593Smuzhiyun return rc;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun static inline int
tape_34xx_erp_succeeded(struct tape_request * request)203*4882a593Smuzhiyun tape_34xx_erp_succeeded(struct tape_request *request)
204*4882a593Smuzhiyun {
205*4882a593Smuzhiyun DBF_EVENT(3, "Error Recovery successful for %s\n",
206*4882a593Smuzhiyun tape_op_verbose[request->op]);
207*4882a593Smuzhiyun return tape_34xx_done(request);
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun static inline int
tape_34xx_erp_retry(struct tape_request * request)211*4882a593Smuzhiyun tape_34xx_erp_retry(struct tape_request *request)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
214*4882a593Smuzhiyun return TAPE_IO_RETRY;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun /*
218*4882a593Smuzhiyun * This function is called, when no request is outstanding and we get an
219*4882a593Smuzhiyun * interrupt
220*4882a593Smuzhiyun */
221*4882a593Smuzhiyun static int
tape_34xx_unsolicited_irq(struct tape_device * device,struct irb * irb)222*4882a593Smuzhiyun tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun if (irb->scsw.cmd.dstat == 0x85) { /* READY */
225*4882a593Smuzhiyun /* A medium was inserted in the drive. */
226*4882a593Smuzhiyun DBF_EVENT(6, "xuud med\n");
227*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
228*4882a593Smuzhiyun tape_34xx_schedule_work(device, TO_MSEN);
229*4882a593Smuzhiyun } else {
230*4882a593Smuzhiyun DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
231*4882a593Smuzhiyun tape_dump_sense_dbf(device, NULL, irb);
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun return TAPE_IO_SUCCESS;
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun /*
237*4882a593Smuzhiyun * Read Opposite Error Recovery Function:
238*4882a593Smuzhiyun * Used, when Read Forward does not work
239*4882a593Smuzhiyun */
240*4882a593Smuzhiyun static int
tape_34xx_erp_read_opposite(struct tape_device * device,struct tape_request * request)241*4882a593Smuzhiyun tape_34xx_erp_read_opposite(struct tape_device *device,
242*4882a593Smuzhiyun struct tape_request *request)
243*4882a593Smuzhiyun {
244*4882a593Smuzhiyun if (request->op == TO_RFO) {
245*4882a593Smuzhiyun /*
246*4882a593Smuzhiyun * We did read forward, but the data could not be read
247*4882a593Smuzhiyun * *correctly*. We transform the request to a read backward
248*4882a593Smuzhiyun * and try again.
249*4882a593Smuzhiyun */
250*4882a593Smuzhiyun tape_std_read_backward(device, request);
251*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun /*
255*4882a593Smuzhiyun * We tried to read forward and backward, but hat no
256*4882a593Smuzhiyun * success -> failed.
257*4882a593Smuzhiyun */
258*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun static int
tape_34xx_erp_bug(struct tape_device * device,struct tape_request * request,struct irb * irb,int no)262*4882a593Smuzhiyun tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
263*4882a593Smuzhiyun struct irb *irb, int no)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun if (request->op != TO_ASSIGN) {
266*4882a593Smuzhiyun dev_err(&device->cdev->dev, "An unexpected condition %d "
267*4882a593Smuzhiyun "occurred in tape error recovery\n", no);
268*4882a593Smuzhiyun tape_dump_sense_dbf(device, request, irb);
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
271*4882a593Smuzhiyun }
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun /*
274*4882a593Smuzhiyun * Handle data overrun between cu and drive. The channel speed might
275*4882a593Smuzhiyun * be too slow.
276*4882a593Smuzhiyun */
277*4882a593Smuzhiyun static int
tape_34xx_erp_overrun(struct tape_device * device,struct tape_request * request,struct irb * irb)278*4882a593Smuzhiyun tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
279*4882a593Smuzhiyun struct irb *irb)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun if (irb->ecw[3] == 0x40) {
282*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "A data overrun occurred between"
283*4882a593Smuzhiyun " the control unit and tape unit\n");
284*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, -1);
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun /*
290*4882a593Smuzhiyun * Handle record sequence error.
291*4882a593Smuzhiyun */
292*4882a593Smuzhiyun static int
tape_34xx_erp_sequence(struct tape_device * device,struct tape_request * request,struct irb * irb)293*4882a593Smuzhiyun tape_34xx_erp_sequence(struct tape_device *device,
294*4882a593Smuzhiyun struct tape_request *request, struct irb *irb)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun if (irb->ecw[3] == 0x41) {
297*4882a593Smuzhiyun /*
298*4882a593Smuzhiyun * cu detected incorrect block-id sequence on tape.
299*4882a593Smuzhiyun */
300*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The block ID sequence on the "
301*4882a593Smuzhiyun "tape is incorrect\n");
302*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun /*
305*4882a593Smuzhiyun * Record sequence error bit is set, but erpa does not
306*4882a593Smuzhiyun * show record sequence error.
307*4882a593Smuzhiyun */
308*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, -2);
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun /*
312*4882a593Smuzhiyun * This function analyses the tape's sense-data in case of a unit-check.
313*4882a593Smuzhiyun * If possible, it tries to recover from the error. Else the user is
314*4882a593Smuzhiyun * informed about the problem.
315*4882a593Smuzhiyun */
316*4882a593Smuzhiyun static int
tape_34xx_unit_check(struct tape_device * device,struct tape_request * request,struct irb * irb)317*4882a593Smuzhiyun tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
318*4882a593Smuzhiyun struct irb *irb)
319*4882a593Smuzhiyun {
320*4882a593Smuzhiyun int inhibit_cu_recovery;
321*4882a593Smuzhiyun __u8* sense;
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
324*4882a593Smuzhiyun sense = irb->ecw;
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun if (
327*4882a593Smuzhiyun sense[0] & SENSE_COMMAND_REJECT &&
328*4882a593Smuzhiyun sense[1] & SENSE_WRITE_PROTECT
329*4882a593Smuzhiyun ) {
330*4882a593Smuzhiyun if (
331*4882a593Smuzhiyun request->op == TO_DSE ||
332*4882a593Smuzhiyun request->op == TO_WRI ||
333*4882a593Smuzhiyun request->op == TO_WTM
334*4882a593Smuzhiyun ) {
335*4882a593Smuzhiyun /* medium is write protected */
336*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EACCES);
337*4882a593Smuzhiyun } else {
338*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, -3);
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun }
341*4882a593Smuzhiyun
342*4882a593Smuzhiyun /*
343*4882a593Smuzhiyun * Special cases for various tape-states when reaching
344*4882a593Smuzhiyun * end of recorded area
345*4882a593Smuzhiyun *
346*4882a593Smuzhiyun * FIXME: Maybe a special case of the special case:
347*4882a593Smuzhiyun * sense[0] == SENSE_EQUIPMENT_CHECK &&
348*4882a593Smuzhiyun * sense[1] == SENSE_DRIVE_ONLINE &&
349*4882a593Smuzhiyun * sense[3] == 0x47 (Volume Fenced)
350*4882a593Smuzhiyun *
351*4882a593Smuzhiyun * This was caused by continued FSF or FSR after an
352*4882a593Smuzhiyun * 'End Of Data'.
353*4882a593Smuzhiyun */
354*4882a593Smuzhiyun if ((
355*4882a593Smuzhiyun sense[0] == SENSE_DATA_CHECK ||
356*4882a593Smuzhiyun sense[0] == SENSE_EQUIPMENT_CHECK ||
357*4882a593Smuzhiyun sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
358*4882a593Smuzhiyun ) && (
359*4882a593Smuzhiyun sense[1] == SENSE_DRIVE_ONLINE ||
360*4882a593Smuzhiyun sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
361*4882a593Smuzhiyun )) {
362*4882a593Smuzhiyun switch (request->op) {
363*4882a593Smuzhiyun /*
364*4882a593Smuzhiyun * sense[0] == SENSE_DATA_CHECK &&
365*4882a593Smuzhiyun * sense[1] == SENSE_DRIVE_ONLINE
366*4882a593Smuzhiyun * sense[3] == 0x36 (End Of Data)
367*4882a593Smuzhiyun *
368*4882a593Smuzhiyun * Further seeks might return a 'Volume Fenced'.
369*4882a593Smuzhiyun */
370*4882a593Smuzhiyun case TO_FSF:
371*4882a593Smuzhiyun case TO_FSB:
372*4882a593Smuzhiyun /* Trying to seek beyond end of recorded area */
373*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -ENOSPC);
374*4882a593Smuzhiyun case TO_BSB:
375*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun /*
378*4882a593Smuzhiyun * sense[0] == SENSE_DATA_CHECK &&
379*4882a593Smuzhiyun * sense[1] == SENSE_DRIVE_ONLINE &&
380*4882a593Smuzhiyun * sense[3] == 0x36 (End Of Data)
381*4882a593Smuzhiyun */
382*4882a593Smuzhiyun case TO_LBL:
383*4882a593Smuzhiyun /* Block could not be located. */
384*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
385*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun case TO_RFO:
388*4882a593Smuzhiyun /* Read beyond end of recorded area -> 0 bytes read */
389*4882a593Smuzhiyun return tape_34xx_erp_failed(request, 0);
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun /*
392*4882a593Smuzhiyun * sense[0] == SENSE_EQUIPMENT_CHECK &&
393*4882a593Smuzhiyun * sense[1] == SENSE_DRIVE_ONLINE &&
394*4882a593Smuzhiyun * sense[3] == 0x38 (Physical End Of Volume)
395*4882a593Smuzhiyun */
396*4882a593Smuzhiyun case TO_WRI:
397*4882a593Smuzhiyun /* Writing at physical end of volume */
398*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -ENOSPC);
399*4882a593Smuzhiyun default:
400*4882a593Smuzhiyun return tape_34xx_erp_failed(request, 0);
401*4882a593Smuzhiyun }
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun /* Sensing special bits */
405*4882a593Smuzhiyun if (sense[0] & SENSE_BUS_OUT_CHECK)
406*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
407*4882a593Smuzhiyun
408*4882a593Smuzhiyun if (sense[0] & SENSE_DATA_CHECK) {
409*4882a593Smuzhiyun /*
410*4882a593Smuzhiyun * hardware failure, damaged tape or improper
411*4882a593Smuzhiyun * operating conditions
412*4882a593Smuzhiyun */
413*4882a593Smuzhiyun switch (sense[3]) {
414*4882a593Smuzhiyun case 0x23:
415*4882a593Smuzhiyun /* a read data check occurred */
416*4882a593Smuzhiyun if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
417*4882a593Smuzhiyun inhibit_cu_recovery)
418*4882a593Smuzhiyun // data check is not permanent, may be
419*4882a593Smuzhiyun // recovered. We always use async-mode with
420*4882a593Smuzhiyun // cu-recovery, so this should *never* happen.
421*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request,
422*4882a593Smuzhiyun irb, -4);
423*4882a593Smuzhiyun
424*4882a593Smuzhiyun /* data check is permanent, CU recovery has failed */
425*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "A read error occurred "
426*4882a593Smuzhiyun "that cannot be recovered\n");
427*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
428*4882a593Smuzhiyun case 0x25:
429*4882a593Smuzhiyun // a write data check occurred
430*4882a593Smuzhiyun if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
431*4882a593Smuzhiyun inhibit_cu_recovery)
432*4882a593Smuzhiyun // data check is not permanent, may be
433*4882a593Smuzhiyun // recovered. We always use async-mode with
434*4882a593Smuzhiyun // cu-recovery, so this should *never* happen.
435*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request,
436*4882a593Smuzhiyun irb, -5);
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun // data check is permanent, cu-recovery has failed
439*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "A write error on the "
440*4882a593Smuzhiyun "tape cannot be recovered\n");
441*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
442*4882a593Smuzhiyun case 0x26:
443*4882a593Smuzhiyun /* Data Check (read opposite) occurred. */
444*4882a593Smuzhiyun return tape_34xx_erp_read_opposite(device, request);
445*4882a593Smuzhiyun case 0x28:
446*4882a593Smuzhiyun /* ID-Mark at tape start couldn't be written */
447*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "Writing the ID-mark "
448*4882a593Smuzhiyun "failed\n");
449*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
450*4882a593Smuzhiyun case 0x31:
451*4882a593Smuzhiyun /* Tape void. Tried to read beyond end of device. */
452*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "Reading the tape beyond"
453*4882a593Smuzhiyun " the end of the recorded area failed\n");
454*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -ENOSPC);
455*4882a593Smuzhiyun case 0x41:
456*4882a593Smuzhiyun /* Record sequence error. */
457*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape contains an "
458*4882a593Smuzhiyun "incorrect block ID sequence\n");
459*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
460*4882a593Smuzhiyun default:
461*4882a593Smuzhiyun /* all data checks for 3480 should result in one of
462*4882a593Smuzhiyun * the above erpa-codes. For 3490, other data-check
463*4882a593Smuzhiyun * conditions do exist. */
464*4882a593Smuzhiyun if (device->cdev->id.driver_info == tape_3480)
465*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request,
466*4882a593Smuzhiyun irb, -6);
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun }
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun if (sense[0] & SENSE_OVERRUN)
471*4882a593Smuzhiyun return tape_34xx_erp_overrun(device, request, irb);
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
474*4882a593Smuzhiyun return tape_34xx_erp_sequence(device, request, irb);
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun /* Sensing erpa codes */
477*4882a593Smuzhiyun switch (sense[3]) {
478*4882a593Smuzhiyun case 0x00:
479*4882a593Smuzhiyun /* Unit check with erpa code 0. Report and ignore. */
480*4882a593Smuzhiyun return TAPE_IO_SUCCESS;
481*4882a593Smuzhiyun case 0x21:
482*4882a593Smuzhiyun /*
483*4882a593Smuzhiyun * Data streaming not operational. CU will switch to
484*4882a593Smuzhiyun * interlock mode. Reissue the command.
485*4882a593Smuzhiyun */
486*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
487*4882a593Smuzhiyun case 0x22:
488*4882a593Smuzhiyun /*
489*4882a593Smuzhiyun * Path equipment check. Might be drive adapter error, buffer
490*4882a593Smuzhiyun * error on the lower interface, internal path not usable,
491*4882a593Smuzhiyun * or error during cartridge load.
492*4882a593Smuzhiyun */
493*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "A path equipment check occurred"
494*4882a593Smuzhiyun " for the tape device\n");
495*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
496*4882a593Smuzhiyun case 0x24:
497*4882a593Smuzhiyun /*
498*4882a593Smuzhiyun * Load display check. Load display was command was issued,
499*4882a593Smuzhiyun * but the drive is displaying a drive check message. Can
500*4882a593Smuzhiyun * be threated as "device end".
501*4882a593Smuzhiyun */
502*4882a593Smuzhiyun return tape_34xx_erp_succeeded(request);
503*4882a593Smuzhiyun case 0x27:
504*4882a593Smuzhiyun /*
505*4882a593Smuzhiyun * Command reject. May indicate illegal channel program or
506*4882a593Smuzhiyun * buffer over/underrun. Since all channel programs are
507*4882a593Smuzhiyun * issued by this driver and ought be correct, we assume a
508*4882a593Smuzhiyun * over/underrun situation and retry the channel program.
509*4882a593Smuzhiyun */
510*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
511*4882a593Smuzhiyun case 0x29:
512*4882a593Smuzhiyun /*
513*4882a593Smuzhiyun * Function incompatible. Either the tape is idrc compressed
514*4882a593Smuzhiyun * but the hardware isn't capable to do idrc, or a perform
515*4882a593Smuzhiyun * subsystem func is issued and the CU is not on-line.
516*4882a593Smuzhiyun */
517*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
518*4882a593Smuzhiyun case 0x2a:
519*4882a593Smuzhiyun /*
520*4882a593Smuzhiyun * Unsolicited environmental data. An internal counter
521*4882a593Smuzhiyun * overflows, we can ignore this and reissue the cmd.
522*4882a593Smuzhiyun */
523*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
524*4882a593Smuzhiyun case 0x2b:
525*4882a593Smuzhiyun /*
526*4882a593Smuzhiyun * Environmental data present. Indicates either unload
527*4882a593Smuzhiyun * completed ok or read buffered log command completed ok.
528*4882a593Smuzhiyun */
529*4882a593Smuzhiyun if (request->op == TO_RUN) {
530*4882a593Smuzhiyun /* Rewind unload completed ok. */
531*4882a593Smuzhiyun tape_med_state_set(device, MS_UNLOADED);
532*4882a593Smuzhiyun return tape_34xx_erp_succeeded(request);
533*4882a593Smuzhiyun }
534*4882a593Smuzhiyun /* tape_34xx doesn't use read buffered log commands. */
535*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
536*4882a593Smuzhiyun case 0x2c:
537*4882a593Smuzhiyun /*
538*4882a593Smuzhiyun * Permanent equipment check. CU has tried recovery, but
539*4882a593Smuzhiyun * did not succeed.
540*4882a593Smuzhiyun */
541*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
542*4882a593Smuzhiyun case 0x2d:
543*4882a593Smuzhiyun /* Data security erase failure. */
544*4882a593Smuzhiyun if (request->op == TO_DSE)
545*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
546*4882a593Smuzhiyun /* Data security erase failure, but no such command issued. */
547*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
548*4882a593Smuzhiyun case 0x2e:
549*4882a593Smuzhiyun /*
550*4882a593Smuzhiyun * Not capable. This indicates either that the drive fails
551*4882a593Smuzhiyun * reading the format id mark or that that format specified
552*4882a593Smuzhiyun * is not supported by the drive.
553*4882a593Smuzhiyun */
554*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit cannot process "
555*4882a593Smuzhiyun "the tape format\n");
556*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
557*4882a593Smuzhiyun case 0x30:
558*4882a593Smuzhiyun /* The medium is write protected. */
559*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape medium is write-"
560*4882a593Smuzhiyun "protected\n");
561*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EACCES);
562*4882a593Smuzhiyun case 0x32:
563*4882a593Smuzhiyun // Tension loss. We cannot recover this, it's an I/O error.
564*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape does not have the "
565*4882a593Smuzhiyun "required tape tension\n");
566*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
567*4882a593Smuzhiyun case 0x33:
568*4882a593Smuzhiyun /*
569*4882a593Smuzhiyun * Load Failure. The cartridge was not inserted correctly or
570*4882a593Smuzhiyun * the tape is not threaded correctly.
571*4882a593Smuzhiyun */
572*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit failed to load"
573*4882a593Smuzhiyun " the cartridge\n");
574*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
575*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
576*4882a593Smuzhiyun case 0x34:
577*4882a593Smuzhiyun /*
578*4882a593Smuzhiyun * Unload failure. The drive cannot maintain tape tension
579*4882a593Smuzhiyun * and control tape movement during an unload operation.
580*4882a593Smuzhiyun */
581*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
582*4882a593Smuzhiyun " cartridge failed\n");
583*4882a593Smuzhiyun if (request->op == TO_RUN)
584*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
585*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
586*4882a593Smuzhiyun case 0x35:
587*4882a593Smuzhiyun /*
588*4882a593Smuzhiyun * Drive equipment check. One of the following:
589*4882a593Smuzhiyun * - cu cannot recover from a drive detected error
590*4882a593Smuzhiyun * - a check code message is shown on drive display
591*4882a593Smuzhiyun * - the cartridge loader does not respond correctly
592*4882a593Smuzhiyun * - a failure occurs during an index, load, or unload cycle
593*4882a593Smuzhiyun */
594*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "An equipment check has occurred"
595*4882a593Smuzhiyun " on the tape unit\n");
596*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
597*4882a593Smuzhiyun case 0x36:
598*4882a593Smuzhiyun if (device->cdev->id.driver_info == tape_3490)
599*4882a593Smuzhiyun /* End of data. */
600*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
601*4882a593Smuzhiyun /* This erpa is reserved for 3480 */
602*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
603*4882a593Smuzhiyun case 0x37:
604*4882a593Smuzhiyun /*
605*4882a593Smuzhiyun * Tape length error. The tape is shorter than reported in
606*4882a593Smuzhiyun * the beginning-of-tape data.
607*4882a593Smuzhiyun */
608*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape information states an"
609*4882a593Smuzhiyun " incorrect length\n");
610*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
611*4882a593Smuzhiyun case 0x38:
612*4882a593Smuzhiyun /*
613*4882a593Smuzhiyun * Physical end of tape. A read/write operation reached
614*4882a593Smuzhiyun * the physical end of tape.
615*4882a593Smuzhiyun */
616*4882a593Smuzhiyun if (request->op==TO_WRI ||
617*4882a593Smuzhiyun request->op==TO_DSE ||
618*4882a593Smuzhiyun request->op==TO_WTM)
619*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -ENOSPC);
620*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
621*4882a593Smuzhiyun case 0x39:
622*4882a593Smuzhiyun /* Backward at Beginning of tape. */
623*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
624*4882a593Smuzhiyun case 0x3a:
625*4882a593Smuzhiyun /* Drive switched to not ready. */
626*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
627*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
628*4882a593Smuzhiyun case 0x3b:
629*4882a593Smuzhiyun /* Manual rewind or unload. This causes an I/O error. */
630*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape medium has been "
631*4882a593Smuzhiyun "rewound or unloaded manually\n");
632*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
633*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
634*4882a593Smuzhiyun case 0x42:
635*4882a593Smuzhiyun /*
636*4882a593Smuzhiyun * Degraded mode. A condition that can cause degraded
637*4882a593Smuzhiyun * performance is detected.
638*4882a593Smuzhiyun */
639*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape subsystem is running "
640*4882a593Smuzhiyun "in degraded mode\n");
641*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
642*4882a593Smuzhiyun case 0x43:
643*4882a593Smuzhiyun /* Drive not ready. */
644*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
645*4882a593Smuzhiyun tape_med_state_set(device, MS_UNLOADED);
646*4882a593Smuzhiyun /* Some commands commands are successful even in this case */
647*4882a593Smuzhiyun if (sense[1] & SENSE_DRIVE_ONLINE) {
648*4882a593Smuzhiyun switch(request->op) {
649*4882a593Smuzhiyun case TO_ASSIGN:
650*4882a593Smuzhiyun case TO_UNASSIGN:
651*4882a593Smuzhiyun case TO_DIS:
652*4882a593Smuzhiyun case TO_NOP:
653*4882a593Smuzhiyun return tape_34xx_done(request);
654*4882a593Smuzhiyun break;
655*4882a593Smuzhiyun default:
656*4882a593Smuzhiyun break;
657*4882a593Smuzhiyun }
658*4882a593Smuzhiyun }
659*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -ENOMEDIUM);
660*4882a593Smuzhiyun case 0x44:
661*4882a593Smuzhiyun /* Locate Block unsuccessful. */
662*4882a593Smuzhiyun if (request->op != TO_BLOCK && request->op != TO_LBL)
663*4882a593Smuzhiyun /* No locate block was issued. */
664*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request,
665*4882a593Smuzhiyun irb, sense[3]);
666*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
667*4882a593Smuzhiyun case 0x45:
668*4882a593Smuzhiyun /* The drive is assigned to a different channel path. */
669*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit is already "
670*4882a593Smuzhiyun "assigned\n");
671*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
672*4882a593Smuzhiyun case 0x46:
673*4882a593Smuzhiyun /*
674*4882a593Smuzhiyun * Drive not on-line. Drive may be switched offline,
675*4882a593Smuzhiyun * the power supply may be switched off or
676*4882a593Smuzhiyun * the drive address may not be set correctly.
677*4882a593Smuzhiyun */
678*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit is not online\n");
679*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
680*4882a593Smuzhiyun case 0x47:
681*4882a593Smuzhiyun /* Volume fenced. CU reports volume integrity is lost. */
682*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The control unit has fenced "
683*4882a593Smuzhiyun "access to the tape volume\n");
684*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
685*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
686*4882a593Smuzhiyun case 0x48:
687*4882a593Smuzhiyun /* Log sense data and retry request. */
688*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
689*4882a593Smuzhiyun case 0x49:
690*4882a593Smuzhiyun /* Bus out check. A parity check error on the bus was found. */
691*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "A parity error occurred on the "
692*4882a593Smuzhiyun "tape bus\n");
693*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
694*4882a593Smuzhiyun case 0x4a:
695*4882a593Smuzhiyun /* Control unit erp failed. */
696*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "I/O error recovery failed on "
697*4882a593Smuzhiyun "the tape control unit\n");
698*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
699*4882a593Smuzhiyun case 0x4b:
700*4882a593Smuzhiyun /*
701*4882a593Smuzhiyun * CU and drive incompatible. The drive requests micro-program
702*4882a593Smuzhiyun * patches, which are not available on the CU.
703*4882a593Smuzhiyun */
704*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit requires a "
705*4882a593Smuzhiyun "firmware update\n");
706*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
707*4882a593Smuzhiyun case 0x4c:
708*4882a593Smuzhiyun /*
709*4882a593Smuzhiyun * Recovered Check-One failure. Cu develops a hardware error,
710*4882a593Smuzhiyun * but is able to recover.
711*4882a593Smuzhiyun */
712*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
713*4882a593Smuzhiyun case 0x4d:
714*4882a593Smuzhiyun if (device->cdev->id.driver_info == tape_3490)
715*4882a593Smuzhiyun /*
716*4882a593Smuzhiyun * Resetting event received. Since the driver does
717*4882a593Smuzhiyun * not support resetting event recovery (which has to
718*4882a593Smuzhiyun * be handled by the I/O Layer), retry our command.
719*4882a593Smuzhiyun */
720*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
721*4882a593Smuzhiyun /* This erpa is reserved for 3480. */
722*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
723*4882a593Smuzhiyun case 0x4e:
724*4882a593Smuzhiyun if (device->cdev->id.driver_info == tape_3490) {
725*4882a593Smuzhiyun /*
726*4882a593Smuzhiyun * Maximum block size exceeded. This indicates, that
727*4882a593Smuzhiyun * the block to be written is larger than allowed for
728*4882a593Smuzhiyun * buffered mode.
729*4882a593Smuzhiyun */
730*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The maximum block size"
731*4882a593Smuzhiyun " for buffered mode is exceeded\n");
732*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -ENOBUFS);
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun /* This erpa is reserved for 3480. */
735*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
736*4882a593Smuzhiyun case 0x50:
737*4882a593Smuzhiyun /*
738*4882a593Smuzhiyun * Read buffered log (Overflow). CU is running in extended
739*4882a593Smuzhiyun * buffered log mode, and a counter overflows. This should
740*4882a593Smuzhiyun * never happen, since we're never running in extended
741*4882a593Smuzhiyun * buffered log mode.
742*4882a593Smuzhiyun */
743*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
744*4882a593Smuzhiyun case 0x51:
745*4882a593Smuzhiyun /*
746*4882a593Smuzhiyun * Read buffered log (EOV). EOF processing occurs while the
747*4882a593Smuzhiyun * CU is in extended buffered log mode. This should never
748*4882a593Smuzhiyun * happen, since we're never running in extended buffered
749*4882a593Smuzhiyun * log mode.
750*4882a593Smuzhiyun */
751*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
752*4882a593Smuzhiyun case 0x52:
753*4882a593Smuzhiyun /* End of Volume complete. Rewind unload completed ok. */
754*4882a593Smuzhiyun if (request->op == TO_RUN) {
755*4882a593Smuzhiyun tape_med_state_set(device, MS_UNLOADED);
756*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
757*4882a593Smuzhiyun return tape_34xx_erp_succeeded(request);
758*4882a593Smuzhiyun }
759*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
760*4882a593Smuzhiyun case 0x53:
761*4882a593Smuzhiyun /* Global command intercept. */
762*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
763*4882a593Smuzhiyun case 0x54:
764*4882a593Smuzhiyun /* Channel interface recovery (temporary). */
765*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
766*4882a593Smuzhiyun case 0x55:
767*4882a593Smuzhiyun /* Channel interface recovery (permanent). */
768*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "A channel interface error cannot be"
769*4882a593Smuzhiyun " recovered\n");
770*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
771*4882a593Smuzhiyun case 0x56:
772*4882a593Smuzhiyun /* Channel protocol error. */
773*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "A channel protocol error "
774*4882a593Smuzhiyun "occurred\n");
775*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
776*4882a593Smuzhiyun case 0x57:
777*4882a593Smuzhiyun /*
778*4882a593Smuzhiyun * 3480: Attention intercept.
779*4882a593Smuzhiyun * 3490: Global status intercept.
780*4882a593Smuzhiyun */
781*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
782*4882a593Smuzhiyun case 0x5a:
783*4882a593Smuzhiyun /*
784*4882a593Smuzhiyun * Tape length incompatible. The tape inserted is too long,
785*4882a593Smuzhiyun * which could cause damage to the tape or the drive.
786*4882a593Smuzhiyun */
787*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit does not support "
788*4882a593Smuzhiyun "the tape length\n");
789*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
790*4882a593Smuzhiyun case 0x5b:
791*4882a593Smuzhiyun /* Format 3480 XF incompatible */
792*4882a593Smuzhiyun if (sense[1] & SENSE_BEGINNING_OF_TAPE)
793*4882a593Smuzhiyun /* The tape will get overwritten. */
794*4882a593Smuzhiyun return tape_34xx_erp_retry(request);
795*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit does not support"
796*4882a593Smuzhiyun " format 3480 XF\n");
797*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
798*4882a593Smuzhiyun case 0x5c:
799*4882a593Smuzhiyun /* Format 3480-2 XF incompatible */
800*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit does not support tape "
801*4882a593Smuzhiyun "format 3480-2 XF\n");
802*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EIO);
803*4882a593Smuzhiyun case 0x5d:
804*4882a593Smuzhiyun /* Tape length violation. */
805*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit does not support"
806*4882a593Smuzhiyun " the current tape length\n");
807*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
808*4882a593Smuzhiyun case 0x5e:
809*4882a593Smuzhiyun /* Compaction algorithm incompatible. */
810*4882a593Smuzhiyun dev_warn (&device->cdev->dev, "The tape unit does not support"
811*4882a593Smuzhiyun " the compaction algorithm\n");
812*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun /* The following erpas should have been covered earlier. */
815*4882a593Smuzhiyun case 0x23: /* Read data check. */
816*4882a593Smuzhiyun case 0x25: /* Write data check. */
817*4882a593Smuzhiyun case 0x26: /* Data check (read opposite). */
818*4882a593Smuzhiyun case 0x28: /* Write id mark check. */
819*4882a593Smuzhiyun case 0x31: /* Tape void. */
820*4882a593Smuzhiyun case 0x40: /* Overrun error. */
821*4882a593Smuzhiyun case 0x41: /* Record sequence error. */
822*4882a593Smuzhiyun /* All other erpas are reserved for future use. */
823*4882a593Smuzhiyun default:
824*4882a593Smuzhiyun return tape_34xx_erp_bug(device, request, irb, sense[3]);
825*4882a593Smuzhiyun }
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun
828*4882a593Smuzhiyun /*
829*4882a593Smuzhiyun * 3480/3490 interrupt handler
830*4882a593Smuzhiyun */
831*4882a593Smuzhiyun static int
tape_34xx_irq(struct tape_device * device,struct tape_request * request,struct irb * irb)832*4882a593Smuzhiyun tape_34xx_irq(struct tape_device *device, struct tape_request *request,
833*4882a593Smuzhiyun struct irb *irb)
834*4882a593Smuzhiyun {
835*4882a593Smuzhiyun if (request == NULL)
836*4882a593Smuzhiyun return tape_34xx_unsolicited_irq(device, irb);
837*4882a593Smuzhiyun
838*4882a593Smuzhiyun if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
839*4882a593Smuzhiyun (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
840*4882a593Smuzhiyun (request->op == TO_WRI)) {
841*4882a593Smuzhiyun /* Write at end of volume */
842*4882a593Smuzhiyun return tape_34xx_erp_failed(request, -ENOSPC);
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
846*4882a593Smuzhiyun return tape_34xx_unit_check(device, request, irb);
847*4882a593Smuzhiyun
848*4882a593Smuzhiyun if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
849*4882a593Smuzhiyun /*
850*4882a593Smuzhiyun * A unit exception occurs on skipping over a tapemark block.
851*4882a593Smuzhiyun */
852*4882a593Smuzhiyun if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
853*4882a593Smuzhiyun if (request->op == TO_BSB || request->op == TO_FSB)
854*4882a593Smuzhiyun request->rescnt++;
855*4882a593Smuzhiyun else
856*4882a593Smuzhiyun DBF_EVENT(5, "Unit Exception!\n");
857*4882a593Smuzhiyun }
858*4882a593Smuzhiyun return tape_34xx_done(request);
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun DBF_EVENT(6, "xunknownirq\n");
862*4882a593Smuzhiyun tape_dump_sense_dbf(device, request, irb);
863*4882a593Smuzhiyun return TAPE_IO_STOP;
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun /*
867*4882a593Smuzhiyun * ioctl_overload
868*4882a593Smuzhiyun */
869*4882a593Smuzhiyun static int
tape_34xx_ioctl(struct tape_device * device,unsigned int cmd,unsigned long arg)870*4882a593Smuzhiyun tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
871*4882a593Smuzhiyun {
872*4882a593Smuzhiyun if (cmd == TAPE390_DISPLAY) {
873*4882a593Smuzhiyun struct display_struct disp;
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
876*4882a593Smuzhiyun return -EFAULT;
877*4882a593Smuzhiyun
878*4882a593Smuzhiyun return tape_std_display(device, &disp);
879*4882a593Smuzhiyun } else
880*4882a593Smuzhiyun return -EINVAL;
881*4882a593Smuzhiyun }
882*4882a593Smuzhiyun
883*4882a593Smuzhiyun static inline void
tape_34xx_append_new_sbid(struct tape_34xx_block_id bid,struct list_head * l)884*4882a593Smuzhiyun tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
885*4882a593Smuzhiyun {
886*4882a593Smuzhiyun struct tape_34xx_sbid * new_sbid;
887*4882a593Smuzhiyun
888*4882a593Smuzhiyun new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
889*4882a593Smuzhiyun if (!new_sbid)
890*4882a593Smuzhiyun return;
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun new_sbid->bid = bid;
893*4882a593Smuzhiyun list_add(&new_sbid->list, l);
894*4882a593Smuzhiyun }
895*4882a593Smuzhiyun
/*
 * Build up the search block ID list. The block ID consists of a logical
 * block number and a hardware specific part. The hardware specific part
 * helps the tape drive to speed up searching for a specific block.
 */
static void
tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
{
	struct list_head *	sbid_list;
	struct tape_34xx_sbid *	sbid;
	struct list_head *	l;

	/*
	 * immediately return if there is no list at all or the block to add
	 * is located in segment 1 of wrap 0 because this position is used
	 * if no hardware position data is supplied.
	 */
	sbid_list = (struct list_head *) device->discdata;
	if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
		return;

	/*
	 * Search the position where to insert the new entry. Hardware
	 * acceleration uses only the segment and wrap number. So we
	 * need only one entry for a specific wrap/segment combination.
	 * If there is a block with a lower number but the same hard-
	 * ware position data we just update the block number in the
	 * existing entry.
	 */
	list_for_each(l, sbid_list) {
		sbid = list_entry(l, struct tape_34xx_sbid, list);

		if (
			(sbid->bid.segment == bid.segment) &&
			(sbid->bid.wrap    == bid.wrap)
		) {
			/* Same hardware position: keep the lower block no. */
			if (bid.block < sbid->bid.block)
				sbid->bid = bid;
			else return;
			break;
		}

		/* Sort in according to logical block number. */
		if (bid.block < sbid->bid.block) {
			/* Insert before 'l' (i.e. after its predecessor). */
			tape_34xx_append_new_sbid(bid, l->prev);
			break;
		}
	}
	/* List empty or new block bigger than last entry. */
	/* (l == sbid_list only when the loop ran to completion.) */
	if (l == sbid_list)
		tape_34xx_append_new_sbid(bid, l->prev);

	DBF_LH(4, "Current list is:\n");
	list_for_each(l, sbid_list) {
		sbid = list_entry(l, struct tape_34xx_sbid, list);
		DBF_LH(4, "%d:%03d@%05d\n",
			sbid->bid.wrap,
			sbid->bid.segment,
			sbid->bid.block
		);
	}
}
958*4882a593Smuzhiyun
959*4882a593Smuzhiyun /*
960*4882a593Smuzhiyun * Delete all entries from the search block ID list that belong to tape blocks
961*4882a593Smuzhiyun * equal or higher than the given number.
962*4882a593Smuzhiyun */
963*4882a593Smuzhiyun static void
tape_34xx_delete_sbid_from(struct tape_device * device,int from)964*4882a593Smuzhiyun tape_34xx_delete_sbid_from(struct tape_device *device, int from)
965*4882a593Smuzhiyun {
966*4882a593Smuzhiyun struct list_head * sbid_list;
967*4882a593Smuzhiyun struct tape_34xx_sbid * sbid;
968*4882a593Smuzhiyun struct list_head * l;
969*4882a593Smuzhiyun struct list_head * n;
970*4882a593Smuzhiyun
971*4882a593Smuzhiyun sbid_list = (struct list_head *) device->discdata;
972*4882a593Smuzhiyun if (!sbid_list)
973*4882a593Smuzhiyun return;
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun list_for_each_safe(l, n, sbid_list) {
976*4882a593Smuzhiyun sbid = list_entry(l, struct tape_34xx_sbid, list);
977*4882a593Smuzhiyun if (sbid->bid.block >= from) {
978*4882a593Smuzhiyun DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
979*4882a593Smuzhiyun sbid->bid.wrap,
980*4882a593Smuzhiyun sbid->bid.segment,
981*4882a593Smuzhiyun sbid->bid.block
982*4882a593Smuzhiyun );
983*4882a593Smuzhiyun list_del(l);
984*4882a593Smuzhiyun kfree(sbid);
985*4882a593Smuzhiyun }
986*4882a593Smuzhiyun }
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun /*
990*4882a593Smuzhiyun * Merge hardware position data into a block id.
991*4882a593Smuzhiyun */
992*4882a593Smuzhiyun static void
tape_34xx_merge_sbid(struct tape_device * device,struct tape_34xx_block_id * bid)993*4882a593Smuzhiyun tape_34xx_merge_sbid(
994*4882a593Smuzhiyun struct tape_device * device,
995*4882a593Smuzhiyun struct tape_34xx_block_id * bid
996*4882a593Smuzhiyun ) {
997*4882a593Smuzhiyun struct tape_34xx_sbid * sbid;
998*4882a593Smuzhiyun struct tape_34xx_sbid * sbid_to_use;
999*4882a593Smuzhiyun struct list_head * sbid_list;
1000*4882a593Smuzhiyun struct list_head * l;
1001*4882a593Smuzhiyun
1002*4882a593Smuzhiyun sbid_list = (struct list_head *) device->discdata;
1003*4882a593Smuzhiyun bid->wrap = 0;
1004*4882a593Smuzhiyun bid->segment = 1;
1005*4882a593Smuzhiyun
1006*4882a593Smuzhiyun if (!sbid_list || list_empty(sbid_list))
1007*4882a593Smuzhiyun return;
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun sbid_to_use = NULL;
1010*4882a593Smuzhiyun list_for_each(l, sbid_list) {
1011*4882a593Smuzhiyun sbid = list_entry(l, struct tape_34xx_sbid, list);
1012*4882a593Smuzhiyun
1013*4882a593Smuzhiyun if (sbid->bid.block >= bid->block)
1014*4882a593Smuzhiyun break;
1015*4882a593Smuzhiyun sbid_to_use = sbid;
1016*4882a593Smuzhiyun }
1017*4882a593Smuzhiyun if (sbid_to_use) {
1018*4882a593Smuzhiyun bid->wrap = sbid_to_use->bid.wrap;
1019*4882a593Smuzhiyun bid->segment = sbid_to_use->bid.segment;
1020*4882a593Smuzhiyun DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
1021*4882a593Smuzhiyun sbid_to_use->bid.wrap,
1022*4882a593Smuzhiyun sbid_to_use->bid.segment,
1023*4882a593Smuzhiyun sbid_to_use->bid.block,
1024*4882a593Smuzhiyun bid->block
1025*4882a593Smuzhiyun );
1026*4882a593Smuzhiyun }
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun static int
tape_34xx_setup_device(struct tape_device * device)1030*4882a593Smuzhiyun tape_34xx_setup_device(struct tape_device * device)
1031*4882a593Smuzhiyun {
1032*4882a593Smuzhiyun int rc;
1033*4882a593Smuzhiyun struct list_head * discdata;
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun DBF_EVENT(6, "34xx device setup\n");
1036*4882a593Smuzhiyun if ((rc = tape_std_assign(device)) == 0) {
1037*4882a593Smuzhiyun if ((rc = tape_34xx_medium_sense(device)) != 0) {
1038*4882a593Smuzhiyun DBF_LH(3, "34xx medium sense returned %d\n", rc);
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun }
1041*4882a593Smuzhiyun discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1042*4882a593Smuzhiyun if (discdata) {
1043*4882a593Smuzhiyun INIT_LIST_HEAD(discdata);
1044*4882a593Smuzhiyun device->discdata = discdata;
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun return rc;
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun static void
tape_34xx_cleanup_device(struct tape_device * device)1051*4882a593Smuzhiyun tape_34xx_cleanup_device(struct tape_device *device)
1052*4882a593Smuzhiyun {
1053*4882a593Smuzhiyun tape_std_unassign(device);
1054*4882a593Smuzhiyun
1055*4882a593Smuzhiyun if (device->discdata) {
1056*4882a593Smuzhiyun tape_34xx_delete_sbid_from(device, 0);
1057*4882a593Smuzhiyun kfree(device->discdata);
1058*4882a593Smuzhiyun device->discdata = NULL;
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun /*
1064*4882a593Smuzhiyun * MTTELL: Tell block. Return the number of block relative to current file.
1065*4882a593Smuzhiyun */
1066*4882a593Smuzhiyun static int
tape_34xx_mttell(struct tape_device * device,int mt_count)1067*4882a593Smuzhiyun tape_34xx_mttell(struct tape_device *device, int mt_count)
1068*4882a593Smuzhiyun {
1069*4882a593Smuzhiyun struct {
1070*4882a593Smuzhiyun struct tape_34xx_block_id cbid;
1071*4882a593Smuzhiyun struct tape_34xx_block_id dbid;
1072*4882a593Smuzhiyun } __attribute__ ((packed)) block_id;
1073*4882a593Smuzhiyun int rc;
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun rc = tape_std_read_block_id(device, (__u64 *) &block_id);
1076*4882a593Smuzhiyun if (rc)
1077*4882a593Smuzhiyun return rc;
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun tape_34xx_add_sbid(device, block_id.cbid);
1080*4882a593Smuzhiyun return block_id.cbid.block;
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun
1083*4882a593Smuzhiyun /*
1084*4882a593Smuzhiyun * MTSEEK: seek to the specified block.
1085*4882a593Smuzhiyun */
1086*4882a593Smuzhiyun static int
tape_34xx_mtseek(struct tape_device * device,int mt_count)1087*4882a593Smuzhiyun tape_34xx_mtseek(struct tape_device *device, int mt_count)
1088*4882a593Smuzhiyun {
1089*4882a593Smuzhiyun struct tape_request *request;
1090*4882a593Smuzhiyun struct tape_34xx_block_id * bid;
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun if (mt_count > 0x3fffff) {
1093*4882a593Smuzhiyun DBF_EXCEPTION(6, "xsee parm\n");
1094*4882a593Smuzhiyun return -EINVAL;
1095*4882a593Smuzhiyun }
1096*4882a593Smuzhiyun request = tape_alloc_request(3, 4);
1097*4882a593Smuzhiyun if (IS_ERR(request))
1098*4882a593Smuzhiyun return PTR_ERR(request);
1099*4882a593Smuzhiyun
1100*4882a593Smuzhiyun /* setup ccws */
1101*4882a593Smuzhiyun request->op = TO_LBL;
1102*4882a593Smuzhiyun bid = (struct tape_34xx_block_id *) request->cpdata;
1103*4882a593Smuzhiyun bid->format = (*device->modeset_byte & 0x08) ?
1104*4882a593Smuzhiyun TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
1105*4882a593Smuzhiyun bid->block = mt_count;
1106*4882a593Smuzhiyun tape_34xx_merge_sbid(device, bid);
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
1109*4882a593Smuzhiyun tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
1110*4882a593Smuzhiyun tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
1111*4882a593Smuzhiyun
1112*4882a593Smuzhiyun /* execute it */
1113*4882a593Smuzhiyun return tape_do_io_free(device, request);
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun
/*
 * List of 3480/3490 magnetic tape commands.
 * NULL entries mark operations this discipline does not support.
 */
static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
	[MTRESET]	 = tape_std_mtreset,
	[MTFSF]		 = tape_std_mtfsf,
	[MTBSF]		 = tape_std_mtbsf,
	[MTFSR]		 = tape_std_mtfsr,
	[MTBSR]		 = tape_std_mtbsr,
	[MTWEOF]	 = tape_std_mtweof,
	[MTREW]		 = tape_std_mtrew,
	[MTOFFL]	 = tape_std_mtoffl,
	[MTNOP]		 = tape_std_mtnop,
	[MTRETEN]	 = tape_std_mtreten,
	[MTBSFM]	 = tape_std_mtbsfm,
	[MTFSFM]	 = tape_std_mtfsfm,
	[MTEOM]		 = tape_std_mteom,
	[MTERASE]	 = tape_std_mterase,
	[MTRAS1]	 = NULL,
	[MTRAS2]	 = NULL,
	[MTRAS3]	 = NULL,
	[MTSETBLK]	 = tape_std_mtsetblk,
	[MTSETDENSITY]	 = NULL,
	/* Seek/tell use the 34xx block ID cache (see above). */
	[MTSEEK]	 = tape_34xx_mtseek,
	[MTTELL]	 = tape_34xx_mttell,
	[MTSETDRVBUFFER] = NULL,
	[MTFSS]		 = NULL,
	[MTBSS]		 = NULL,
	[MTWSM]		 = NULL,
	[MTLOCK]	 = NULL,
	[MTUNLOCK]	 = NULL,
	[MTLOAD]	 = tape_std_mtload,
	[MTUNLOAD]	 = tape_std_mtunload,
	[MTCOMPRESSION]	 = tape_std_mtcompression,
	[MTSETPART]	 = NULL,
	[MTMKPART]	 = NULL
};
1153*4882a593Smuzhiyun
/*
 * Tape discipline structure for 3480 and 3490.
 * Hooked into the generic tape layer via tape_generic_online().
 */
static struct tape_discipline tape_discipline_34xx = {
	.owner = THIS_MODULE,
	.setup_device = tape_34xx_setup_device,
	.cleanup_device = tape_34xx_cleanup_device,
	.process_eov = tape_std_process_eov,
	.irq = tape_34xx_irq,
	.read_block = tape_std_read_block,
	.write_block = tape_std_write_block,
	.ioctl_fn = tape_34xx_ioctl,
	.mtop_array = tape_34xx_mtop
};
1168*4882a593Smuzhiyun
/* CCW device IDs this driver binds to: 3480 and 3490 control unit/device. */
static struct ccw_device_id tape_34xx_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480},
	{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490},
	{ /* end of list */ },
};
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun static int
tape_34xx_online(struct ccw_device * cdev)1176*4882a593Smuzhiyun tape_34xx_online(struct ccw_device *cdev)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun return tape_generic_online(
1179*4882a593Smuzhiyun dev_get_drvdata(&cdev->dev),
1180*4882a593Smuzhiyun &tape_discipline_34xx
1181*4882a593Smuzhiyun );
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun
/* CCW driver glue: generic tape layer handles probe/remove/offline. */
static struct ccw_driver tape_34xx_driver = {
	.driver = {
		.name = "tape_34xx",
		.owner = THIS_MODULE,
	},
	.ids = tape_34xx_ids,
	.probe = tape_generic_probe,
	.remove = tape_generic_remove,
	.set_online = tape_34xx_online,
	.set_offline = tape_generic_offline,
	.freeze = tape_generic_pm_suspend,
	.int_class = IRQIO_TAP,
};
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun static int
tape_34xx_init(void)1199*4882a593Smuzhiyun tape_34xx_init (void)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun int rc;
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun TAPE_DBF_AREA = debug_register ( "tape_34xx", 2, 2, 4*sizeof(long));
1204*4882a593Smuzhiyun debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
1205*4882a593Smuzhiyun #ifdef DBF_LIKE_HELL
1206*4882a593Smuzhiyun debug_set_level(TAPE_DBF_AREA, 6);
1207*4882a593Smuzhiyun #endif
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun DBF_EVENT(3, "34xx init\n");
1210*4882a593Smuzhiyun /* Register driver for 3480/3490 tapes. */
1211*4882a593Smuzhiyun rc = ccw_driver_register(&tape_34xx_driver);
1212*4882a593Smuzhiyun if (rc)
1213*4882a593Smuzhiyun DBF_EVENT(3, "34xx init failed\n");
1214*4882a593Smuzhiyun else
1215*4882a593Smuzhiyun DBF_EVENT(3, "34xx registered\n");
1216*4882a593Smuzhiyun return rc;
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun
/* Module exit: unregister the CCW driver and tear down the debug area. */
static void
tape_34xx_exit(void)
{
	ccw_driver_unregister(&tape_34xx_driver);

	debug_unregister(TAPE_DBF_AREA);
}
1226*4882a593Smuzhiyun
/* Module metadata and entry points. */
MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_34xx_init);
module_exit(tape_34xx_exit);
1234