1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * finite state machine for device handling
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright IBM Corp. 2002, 2008
6*4882a593Smuzhiyun * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
7*4882a593Smuzhiyun * Martin Schwidefsky (schwidefsky@de.ibm.com)
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/init.h>
12*4882a593Smuzhiyun #include <linux/jiffies.h>
13*4882a593Smuzhiyun #include <linux/string.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <asm/ccwdev.h>
16*4882a593Smuzhiyun #include <asm/cio.h>
17*4882a593Smuzhiyun #include <asm/chpid.h>
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include "cio.h"
20*4882a593Smuzhiyun #include "cio_debug.h"
21*4882a593Smuzhiyun #include "css.h"
22*4882a593Smuzhiyun #include "device.h"
23*4882a593Smuzhiyun #include "chsc.h"
24*4882a593Smuzhiyun #include "ioasm.h"
25*4882a593Smuzhiyun #include "chp.h"
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun static int timeout_log_enabled;
28*4882a593Smuzhiyun
/* Parse the "ccw_timeout_log" kernel parameter: turn on timeout logging. */
static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;	/* 1 == parameter handled (__setup convention) */
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
36*4882a593Smuzhiyun
/*
 * Dump diagnostic state (ORB, SCHIB, last channel program, device flags)
 * to the kernel log when a ccw device timeout occurs.  Only called when
 * the "ccw_timeout_log" kernel parameter was given.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	/* Fetch the current subchannel status; cc is reported below. */
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		/* Distinguish internally built channel programs from
		 * driver-supplied ones by comparing the cpa address. */
		if ((void *)(addr_t)orb->cmd.cpa ==
		    &private->dma_area->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
93*4882a593Smuzhiyun
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	/* The FSM event must be delivered with the ccw device lock held. */
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun /*
111*4882a593Smuzhiyun * Set timeout
112*4882a593Smuzhiyun */
113*4882a593Smuzhiyun void
ccw_device_set_timeout(struct ccw_device * cdev,int expires)114*4882a593Smuzhiyun ccw_device_set_timeout(struct ccw_device *cdev, int expires)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun if (expires == 0) {
117*4882a593Smuzhiyun del_timer(&cdev->private->timer);
118*4882a593Smuzhiyun return;
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun if (timer_pending(&cdev->private->timer)) {
121*4882a593Smuzhiyun if (mod_timer(&cdev->private->timer, jiffies + expires))
122*4882a593Smuzhiyun return;
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun cdev->private->timer.expires = jiffies + expires;
125*4882a593Smuzhiyun add_timer(&cdev->private->timer);
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun int
ccw_device_cancel_halt_clear(struct ccw_device * cdev)129*4882a593Smuzhiyun ccw_device_cancel_halt_clear(struct ccw_device *cdev)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun struct subchannel *sch;
132*4882a593Smuzhiyun int ret;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun sch = to_subchannel(cdev->dev.parent);
135*4882a593Smuzhiyun ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun if (ret == -EIO)
138*4882a593Smuzhiyun CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
139*4882a593Smuzhiyun cdev->private->dev_id.ssid,
140*4882a593Smuzhiyun cdev->private->dev_id.devno);
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun return ret;
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
/* Copy the device identification from the last SENSE ID into cdev->id. */
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
	cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
	cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
	cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}
153*4882a593Smuzhiyun
ccw_device_test_sense_data(struct ccw_device * cdev)154*4882a593Smuzhiyun int ccw_device_test_sense_data(struct ccw_device *cdev)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun return cdev->id.cu_type ==
157*4882a593Smuzhiyun cdev->private->dma_area->senseid.cu_type &&
158*4882a593Smuzhiyun cdev->id.cu_model ==
159*4882a593Smuzhiyun cdev->private->dma_area->senseid.cu_model &&
160*4882a593Smuzhiyun cdev->id.dev_type ==
161*4882a593Smuzhiyun cdev->private->dma_area->senseid.dev_type &&
162*4882a593Smuzhiyun cdev->id.dev_model ==
163*4882a593Smuzhiyun cdev->private->dma_area->senseid.dev_model;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun /*
167*4882a593Smuzhiyun * The machine won't give us any notification by machine check if a chpid has
168*4882a593Smuzhiyun * been varied online on the SE so we have to find out by magic (i. e. driving
169*4882a593Smuzhiyun * the channel subsystem to device selection and updating our path masks).
170*4882a593Smuzhiyun */
171*4882a593Smuzhiyun static void
__recover_lost_chpids(struct subchannel * sch,int old_lpm)172*4882a593Smuzhiyun __recover_lost_chpids(struct subchannel *sch, int old_lpm)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun int mask, i;
175*4882a593Smuzhiyun struct chp_id chpid;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun chp_id_init(&chpid);
178*4882a593Smuzhiyun for (i = 0; i<8; i++) {
179*4882a593Smuzhiyun mask = 0x80 >> i;
180*4882a593Smuzhiyun if (!(sch->lpm & mask))
181*4882a593Smuzhiyun continue;
182*4882a593Smuzhiyun if (old_lpm & mask)
183*4882a593Smuzhiyun continue;
184*4882a593Smuzhiyun chpid.id = sch->schib.pmcw.chpid[i];
185*4882a593Smuzhiyun if (!chp_is_registered(chpid))
186*4882a593Smuzhiyun css_schedule_eval_all();
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun
/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	/* Recognition is over; disable the subchannel again. */
	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	/* A disconnected device that is still unusable stays disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	if (cdev->private->flags.resuming) {
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			/* Same device as before - bring it back online. */
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			/* Sense data changed - schedule driver rebind. */
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun /*
267*4882a593Smuzhiyun * Function called from device_id.c after sense id has completed.
268*4882a593Smuzhiyun */
269*4882a593Smuzhiyun void
ccw_device_sense_id_done(struct ccw_device * cdev,int err)270*4882a593Smuzhiyun ccw_device_sense_id_done(struct ccw_device *cdev, int err)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun switch (err) {
273*4882a593Smuzhiyun case 0:
274*4882a593Smuzhiyun ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
275*4882a593Smuzhiyun break;
276*4882a593Smuzhiyun case -ETIME: /* Sense id stopped by timeout. */
277*4882a593Smuzhiyun ccw_device_recog_done(cdev, DEV_STATE_BOXED);
278*4882a593Smuzhiyun break;
279*4882a593Smuzhiyun default:
280*4882a593Smuzhiyun ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
281*4882a593Smuzhiyun break;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun /**
286*4882a593Smuzhiyun * ccw_device_notify() - inform the device's driver about an event
287*4882a593Smuzhiyun * @cdev: device for which an event occurred
288*4882a593Smuzhiyun * @event: event that occurred
289*4882a593Smuzhiyun *
290*4882a593Smuzhiyun * Returns:
291*4882a593Smuzhiyun * -%EINVAL if the device is offline or has no driver.
292*4882a593Smuzhiyun * -%EOPNOTSUPP if the device's driver has no notifier registered.
293*4882a593Smuzhiyun * %NOTIFY_OK if the driver wants to keep the device.
294*4882a593Smuzhiyun * %NOTIFY_BAD if the driver doesn't want to keep the device.
295*4882a593Smuzhiyun */
ccw_device_notify(struct ccw_device * cdev,int event)296*4882a593Smuzhiyun int ccw_device_notify(struct ccw_device *cdev, int event)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun int ret = -EINVAL;
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun if (!cdev->drv)
301*4882a593Smuzhiyun goto out;
302*4882a593Smuzhiyun if (!cdev->online)
303*4882a593Smuzhiyun goto out;
304*4882a593Smuzhiyun CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
305*4882a593Smuzhiyun cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
306*4882a593Smuzhiyun event);
307*4882a593Smuzhiyun if (!cdev->drv->notify) {
308*4882a593Smuzhiyun ret = -EOPNOTSUPP;
309*4882a593Smuzhiyun goto out;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun if (cdev->drv->notify(cdev, event))
312*4882a593Smuzhiyun ret = NOTIFY_OK;
313*4882a593Smuzhiyun else
314*4882a593Smuzhiyun ret = NOTIFY_BAD;
315*4882a593Smuzhiyun out:
316*4882a593Smuzhiyun return ret;
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun
ccw_device_oper_notify(struct ccw_device * cdev)319*4882a593Smuzhiyun static void ccw_device_oper_notify(struct ccw_device *cdev)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun struct subchannel *sch = to_subchannel(cdev->dev.parent);
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
324*4882a593Smuzhiyun /* Reenable channel measurements, if needed. */
325*4882a593Smuzhiyun ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
326*4882a593Smuzhiyun /* Save indication for new paths. */
327*4882a593Smuzhiyun cdev->private->path_new_mask = sch->vpm;
328*4882a593Smuzhiyun return;
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun /* Driver doesn't want device back. */
331*4882a593Smuzhiyun ccw_device_set_notoper(cdev);
332*4882a593Smuzhiyun ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	/* No further timeouts wanted after completion. */
	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		/* Unregister if an online driver refuses the boxed device. */
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	/* Deliver the deferred "operational again" notification, if any. */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}
394*4882a593Smuzhiyun
/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	/* The subchannel must be enabled before I/O can be started on it. */
	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun /*
420*4882a593Smuzhiyun * Handle events for states that use the ccw request infrastructure.
421*4882a593Smuzhiyun */
ccw_device_request_event(struct ccw_device * cdev,enum dev_event e)422*4882a593Smuzhiyun static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
423*4882a593Smuzhiyun {
424*4882a593Smuzhiyun switch (e) {
425*4882a593Smuzhiyun case DEV_EVENT_NOTOPER:
426*4882a593Smuzhiyun ccw_request_notoper(cdev);
427*4882a593Smuzhiyun break;
428*4882a593Smuzhiyun case DEV_EVENT_INTERRUPT:
429*4882a593Smuzhiyun ccw_request_handler(cdev);
430*4882a593Smuzhiyun break;
431*4882a593Smuzhiyun case DEV_EVENT_TIMEOUT:
432*4882a593Smuzhiyun ccw_request_timeout(cdev);
433*4882a593Smuzhiyun break;
434*4882a593Smuzhiyun default:
435*4882a593Smuzhiyun break;
436*4882a593Smuzhiyun }
437*4882a593Smuzhiyun }
438*4882a593Smuzhiyun
ccw_device_report_path_events(struct ccw_device * cdev)439*4882a593Smuzhiyun static void ccw_device_report_path_events(struct ccw_device *cdev)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun struct subchannel *sch = to_subchannel(cdev->dev.parent);
442*4882a593Smuzhiyun int path_event[8];
443*4882a593Smuzhiyun int chp, mask;
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
446*4882a593Smuzhiyun path_event[chp] = PE_NONE;
447*4882a593Smuzhiyun if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
448*4882a593Smuzhiyun path_event[chp] |= PE_PATH_GONE;
449*4882a593Smuzhiyun if (mask & cdev->private->path_new_mask & sch->vpm)
450*4882a593Smuzhiyun path_event[chp] |= PE_PATH_AVAILABLE;
451*4882a593Smuzhiyun if (mask & cdev->private->pgid_reset_mask & sch->vpm)
452*4882a593Smuzhiyun path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
453*4882a593Smuzhiyun }
454*4882a593Smuzhiyun if (cdev->online && cdev->drv->path_event)
455*4882a593Smuzhiyun cdev->drv->path_event(cdev, path_event);
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun
/* Clear all accumulated path-event masks after they have been reported. */
static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
	cdev->private->path_gone_mask = 0;
	cdev->private->path_new_mask = 0;
	cdev->private->pgid_reset_mask = 0;
}
464*4882a593Smuzhiyun
create_fake_irb(struct irb * irb,int type)465*4882a593Smuzhiyun static void create_fake_irb(struct irb *irb, int type)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun memset(irb, 0, sizeof(*irb));
468*4882a593Smuzhiyun if (type == FAKE_CMD_IRB) {
469*4882a593Smuzhiyun struct cmd_scsw *scsw = &irb->scsw.cmd;
470*4882a593Smuzhiyun scsw->cc = 1;
471*4882a593Smuzhiyun scsw->fctl = SCSW_FCTL_START_FUNC;
472*4882a593Smuzhiyun scsw->actl = SCSW_ACTL_START_PEND;
473*4882a593Smuzhiyun scsw->stctl = SCSW_STCTL_STATUS_PEND;
474*4882a593Smuzhiyun } else if (type == FAKE_TM_IRB) {
475*4882a593Smuzhiyun struct tm_scsw *scsw = &irb->scsw.tm;
476*4882a593Smuzhiyun scsw->x = 1;
477*4882a593Smuzhiyun scsw->cc = 1;
478*4882a593Smuzhiyun scsw->fctl = SCSW_FCTL_START_FUNC;
479*4882a593Smuzhiyun scsw->actl = SCSW_ACTL_START_PEND;
480*4882a593Smuzhiyun scsw->stctl = SCSW_STCTL_STATUS_PEND;
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun }
483*4882a593Smuzhiyun
ccw_device_handle_broken_paths(struct ccw_device * cdev)484*4882a593Smuzhiyun static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
485*4882a593Smuzhiyun {
486*4882a593Smuzhiyun struct subchannel *sch = to_subchannel(cdev->dev.parent);
487*4882a593Smuzhiyun u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
490*4882a593Smuzhiyun ccw_device_schedule_recovery();
491*4882a593Smuzhiyun
492*4882a593Smuzhiyun cdev->private->path_broken_mask = broken_paths;
493*4882a593Smuzhiyun }
494*4882a593Smuzhiyun
/*
 * Callback invoked when path verification completes: finish online
 * processing according to the verification result in @err.
 */
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->dma_area->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->dma_area->irb);
			memset(&cdev->private->dma_area->irb, 0,
			       sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	/* The accumulated masks have been consumed - start fresh. */
	ccw_device_reset_path_events(cdev);
}
549*4882a593Smuzhiyun
/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	/* Only offline or boxed devices may be brought online. */
	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}
575*4882a593Smuzhiyun
576*4882a593Smuzhiyun void
ccw_device_disband_done(struct ccw_device * cdev,int err)577*4882a593Smuzhiyun ccw_device_disband_done(struct ccw_device *cdev, int err)
578*4882a593Smuzhiyun {
579*4882a593Smuzhiyun switch (err) {
580*4882a593Smuzhiyun case 0:
581*4882a593Smuzhiyun ccw_device_done(cdev, DEV_STATE_OFFLINE);
582*4882a593Smuzhiyun break;
583*4882a593Smuzhiyun case -ETIME:
584*4882a593Smuzhiyun ccw_device_done(cdev, DEV_STATE_BOXED);
585*4882a593Smuzhiyun break;
586*4882a593Smuzhiyun default:
587*4882a593Smuzhiyun cdev->private->flags.donotify = 0;
588*4882a593Smuzhiyun ccw_device_done(cdev, DEV_STATE_NOT_OPER);
589*4882a593Smuzhiyun break;
590*4882a593Smuzhiyun }
591*4882a593Smuzhiyun }
592*4882a593Smuzhiyun
/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	/* Orphaned devices have no subchannel I/O to wind down. */
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	/* Refuse while I/O is still active on the subchannel. */
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
634*4882a593Smuzhiyun
635*4882a593Smuzhiyun /*
636*4882a593Smuzhiyun * Handle not operational event in non-special state.
637*4882a593Smuzhiyun */
ccw_device_generic_notoper(struct ccw_device * cdev,enum dev_event dev_event)638*4882a593Smuzhiyun static void ccw_device_generic_notoper(struct ccw_device *cdev,
639*4882a593Smuzhiyun enum dev_event dev_event)
640*4882a593Smuzhiyun {
641*4882a593Smuzhiyun if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
642*4882a593Smuzhiyun ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
643*4882a593Smuzhiyun else
644*4882a593Smuzhiyun ccw_device_set_disconnected(cdev);
645*4882a593Smuzhiyun }
646*4882a593Smuzhiyun
/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/* Defer to the css evaluation worker; nothing to verify while offline. */
	css_schedule_eval(sch->schid);
}
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun /*
659*4882a593Smuzhiyun * Handle path verification event.
660*4882a593Smuzhiyun */
661*4882a593Smuzhiyun static void
ccw_device_online_verify(struct ccw_device * cdev,enum dev_event dev_event)662*4882a593Smuzhiyun ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
663*4882a593Smuzhiyun {
664*4882a593Smuzhiyun struct subchannel *sch;
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun if (cdev->private->state == DEV_STATE_W4SENSE) {
667*4882a593Smuzhiyun cdev->private->flags.doverify = 1;
668*4882a593Smuzhiyun return;
669*4882a593Smuzhiyun }
670*4882a593Smuzhiyun sch = to_subchannel(cdev->dev.parent);
671*4882a593Smuzhiyun /*
672*4882a593Smuzhiyun * Since we might not just be coming from an interrupt from the
673*4882a593Smuzhiyun * subchannel we have to update the schib.
674*4882a593Smuzhiyun */
675*4882a593Smuzhiyun if (cio_update_schib(sch)) {
676*4882a593Smuzhiyun ccw_device_verify_done(cdev, -ENODEV);
677*4882a593Smuzhiyun return;
678*4882a593Smuzhiyun }
679*4882a593Smuzhiyun
680*4882a593Smuzhiyun if (scsw_actl(&sch->schib.scsw) != 0 ||
681*4882a593Smuzhiyun (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
682*4882a593Smuzhiyun (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
683*4882a593Smuzhiyun SCSW_STCTL_STATUS_PEND)) {
684*4882a593Smuzhiyun /*
685*4882a593Smuzhiyun * No final status yet or final status not yet delivered
686*4882a593Smuzhiyun * to the device driver. Can't do path verification now,
687*4882a593Smuzhiyun * delay until final status was delivered.
688*4882a593Smuzhiyun */
689*4882a593Smuzhiyun cdev->private->flags.doverify = 1;
690*4882a593Smuzhiyun return;
691*4882a593Smuzhiyun }
692*4882a593Smuzhiyun /* Device is idle, we can do the path verification. */
693*4882a593Smuzhiyun cdev->private->state = DEV_STATE_VERIFY;
694*4882a593Smuzhiyun ccw_device_verify_start(cdev);
695*4882a593Smuzhiyun }
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun /*
698*4882a593Smuzhiyun * Handle path verification event in boxed state.
699*4882a593Smuzhiyun */
ccw_device_boxed_verify(struct ccw_device * cdev,enum dev_event dev_event)700*4882a593Smuzhiyun static void ccw_device_boxed_verify(struct ccw_device *cdev,
701*4882a593Smuzhiyun enum dev_event dev_event)
702*4882a593Smuzhiyun {
703*4882a593Smuzhiyun struct subchannel *sch = to_subchannel(cdev->dev.parent);
704*4882a593Smuzhiyun
705*4882a593Smuzhiyun if (cdev->online) {
706*4882a593Smuzhiyun if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
707*4882a593Smuzhiyun ccw_device_done(cdev, DEV_STATE_NOT_OPER);
708*4882a593Smuzhiyun else
709*4882a593Smuzhiyun ccw_device_online_verify(cdev, dev_event);
710*4882a593Smuzhiyun } else
711*4882a593Smuzhiyun css_schedule_eval(sch->schid);
712*4882a593Smuzhiyun }
713*4882a593Smuzhiyun
714*4882a593Smuzhiyun /*
715*4882a593Smuzhiyun * Pass interrupt to device driver.
716*4882a593Smuzhiyun */
ccw_device_call_handler(struct ccw_device * cdev)717*4882a593Smuzhiyun static int ccw_device_call_handler(struct ccw_device *cdev)
718*4882a593Smuzhiyun {
719*4882a593Smuzhiyun unsigned int stctl;
720*4882a593Smuzhiyun int ending_status;
721*4882a593Smuzhiyun
722*4882a593Smuzhiyun /*
723*4882a593Smuzhiyun * we allow for the device action handler if .
724*4882a593Smuzhiyun * - we received ending status
725*4882a593Smuzhiyun * - the action handler requested to see all interrupts
726*4882a593Smuzhiyun * - we received an intermediate status
727*4882a593Smuzhiyun * - fast notification was requested (primary status)
728*4882a593Smuzhiyun * - unsolicited interrupts
729*4882a593Smuzhiyun */
730*4882a593Smuzhiyun stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
731*4882a593Smuzhiyun ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
732*4882a593Smuzhiyun (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
733*4882a593Smuzhiyun (stctl == SCSW_STCTL_STATUS_PEND);
734*4882a593Smuzhiyun if (!ending_status &&
735*4882a593Smuzhiyun !cdev->private->options.repall &&
736*4882a593Smuzhiyun !(stctl & SCSW_STCTL_INTER_STATUS) &&
737*4882a593Smuzhiyun !(cdev->private->options.fast &&
738*4882a593Smuzhiyun (stctl & SCSW_STCTL_PRIM_STATUS)))
739*4882a593Smuzhiyun return 0;
740*4882a593Smuzhiyun
741*4882a593Smuzhiyun if (ending_status)
742*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 0);
743*4882a593Smuzhiyun
744*4882a593Smuzhiyun if (cdev->handler)
745*4882a593Smuzhiyun cdev->handler(cdev, cdev->private->intparm,
746*4882a593Smuzhiyun &cdev->private->dma_area->irb);
747*4882a593Smuzhiyun
748*4882a593Smuzhiyun memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
749*4882a593Smuzhiyun return 1;
750*4882a593Smuzhiyun }
751*4882a593Smuzhiyun
752*4882a593Smuzhiyun /*
753*4882a593Smuzhiyun * Got an interrupt for a normal io (state online).
754*4882a593Smuzhiyun */
755*4882a593Smuzhiyun static void
ccw_device_irq(struct ccw_device * cdev,enum dev_event dev_event)756*4882a593Smuzhiyun ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
757*4882a593Smuzhiyun {
758*4882a593Smuzhiyun struct irb *irb;
759*4882a593Smuzhiyun int is_cmd;
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun irb = this_cpu_ptr(&cio_irb);
762*4882a593Smuzhiyun is_cmd = !scsw_is_tm(&irb->scsw);
763*4882a593Smuzhiyun /* Check for unsolicited interrupt. */
764*4882a593Smuzhiyun if (!scsw_is_solicited(&irb->scsw)) {
765*4882a593Smuzhiyun if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
766*4882a593Smuzhiyun !irb->esw.esw0.erw.cons) {
767*4882a593Smuzhiyun /* Unit check but no sense data. Need basic sense. */
768*4882a593Smuzhiyun if (ccw_device_do_sense(cdev, irb) != 0)
769*4882a593Smuzhiyun goto call_handler_unsol;
770*4882a593Smuzhiyun memcpy(&cdev->private->dma_area->irb, irb,
771*4882a593Smuzhiyun sizeof(struct irb));
772*4882a593Smuzhiyun cdev->private->state = DEV_STATE_W4SENSE;
773*4882a593Smuzhiyun cdev->private->intparm = 0;
774*4882a593Smuzhiyun return;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun call_handler_unsol:
777*4882a593Smuzhiyun if (cdev->handler)
778*4882a593Smuzhiyun cdev->handler (cdev, 0, irb);
779*4882a593Smuzhiyun if (cdev->private->flags.doverify)
780*4882a593Smuzhiyun ccw_device_online_verify(cdev, 0);
781*4882a593Smuzhiyun return;
782*4882a593Smuzhiyun }
783*4882a593Smuzhiyun /* Accumulate status and find out if a basic sense is needed. */
784*4882a593Smuzhiyun ccw_device_accumulate_irb(cdev, irb);
785*4882a593Smuzhiyun if (is_cmd && cdev->private->flags.dosense) {
786*4882a593Smuzhiyun if (ccw_device_do_sense(cdev, irb) == 0) {
787*4882a593Smuzhiyun cdev->private->state = DEV_STATE_W4SENSE;
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun return;
790*4882a593Smuzhiyun }
791*4882a593Smuzhiyun /* Call the handler. */
792*4882a593Smuzhiyun if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
793*4882a593Smuzhiyun /* Start delayed path verification. */
794*4882a593Smuzhiyun ccw_device_online_verify(cdev, 0);
795*4882a593Smuzhiyun }
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun /*
798*4882a593Smuzhiyun * Got an timeout in online state.
799*4882a593Smuzhiyun */
800*4882a593Smuzhiyun static void
ccw_device_online_timeout(struct ccw_device * cdev,enum dev_event dev_event)801*4882a593Smuzhiyun ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
802*4882a593Smuzhiyun {
803*4882a593Smuzhiyun int ret;
804*4882a593Smuzhiyun
805*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 0);
806*4882a593Smuzhiyun cdev->private->iretry = 255;
807*4882a593Smuzhiyun cdev->private->async_kill_io_rc = -ETIMEDOUT;
808*4882a593Smuzhiyun ret = ccw_device_cancel_halt_clear(cdev);
809*4882a593Smuzhiyun if (ret == -EBUSY) {
810*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 3*HZ);
811*4882a593Smuzhiyun cdev->private->state = DEV_STATE_TIMEOUT_KILL;
812*4882a593Smuzhiyun return;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun if (ret)
815*4882a593Smuzhiyun dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
816*4882a593Smuzhiyun else if (cdev->handler)
817*4882a593Smuzhiyun cdev->handler(cdev, cdev->private->intparm,
818*4882a593Smuzhiyun ERR_PTR(-ETIMEDOUT));
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun /*
822*4882a593Smuzhiyun * Got an interrupt for a basic sense.
823*4882a593Smuzhiyun */
824*4882a593Smuzhiyun static void
ccw_device_w4sense(struct ccw_device * cdev,enum dev_event dev_event)825*4882a593Smuzhiyun ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
826*4882a593Smuzhiyun {
827*4882a593Smuzhiyun struct irb *irb;
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun irb = this_cpu_ptr(&cio_irb);
830*4882a593Smuzhiyun /* Check for unsolicited interrupt. */
831*4882a593Smuzhiyun if (scsw_stctl(&irb->scsw) ==
832*4882a593Smuzhiyun (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
833*4882a593Smuzhiyun if (scsw_cc(&irb->scsw) == 1)
834*4882a593Smuzhiyun /* Basic sense hasn't started. Try again. */
835*4882a593Smuzhiyun ccw_device_do_sense(cdev, irb);
836*4882a593Smuzhiyun else {
837*4882a593Smuzhiyun CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
838*4882a593Smuzhiyun "interrupt during w4sense...\n",
839*4882a593Smuzhiyun cdev->private->dev_id.ssid,
840*4882a593Smuzhiyun cdev->private->dev_id.devno);
841*4882a593Smuzhiyun if (cdev->handler)
842*4882a593Smuzhiyun cdev->handler (cdev, 0, irb);
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun return;
845*4882a593Smuzhiyun }
846*4882a593Smuzhiyun /*
847*4882a593Smuzhiyun * Check if a halt or clear has been issued in the meanwhile. If yes,
848*4882a593Smuzhiyun * only deliver the halt/clear interrupt to the device driver as if it
849*4882a593Smuzhiyun * had killed the original request.
850*4882a593Smuzhiyun */
851*4882a593Smuzhiyun if (scsw_fctl(&irb->scsw) &
852*4882a593Smuzhiyun (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
853*4882a593Smuzhiyun cdev->private->flags.dosense = 0;
854*4882a593Smuzhiyun memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
855*4882a593Smuzhiyun ccw_device_accumulate_irb(cdev, irb);
856*4882a593Smuzhiyun goto call_handler;
857*4882a593Smuzhiyun }
858*4882a593Smuzhiyun /* Add basic sense info to irb. */
859*4882a593Smuzhiyun ccw_device_accumulate_basic_sense(cdev, irb);
860*4882a593Smuzhiyun if (cdev->private->flags.dosense) {
861*4882a593Smuzhiyun /* Another basic sense is needed. */
862*4882a593Smuzhiyun ccw_device_do_sense(cdev, irb);
863*4882a593Smuzhiyun return;
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun call_handler:
866*4882a593Smuzhiyun cdev->private->state = DEV_STATE_ONLINE;
867*4882a593Smuzhiyun /* In case sensing interfered with setting the device online */
868*4882a593Smuzhiyun wake_up(&cdev->private->wait_q);
869*4882a593Smuzhiyun /* Call the handler. */
870*4882a593Smuzhiyun if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
871*4882a593Smuzhiyun /* Start delayed path verification. */
872*4882a593Smuzhiyun ccw_device_online_verify(cdev, 0);
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun static void
ccw_device_killing_irq(struct ccw_device * cdev,enum dev_event dev_event)876*4882a593Smuzhiyun ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
877*4882a593Smuzhiyun {
878*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 0);
879*4882a593Smuzhiyun /* Start delayed path verification. */
880*4882a593Smuzhiyun ccw_device_online_verify(cdev, 0);
881*4882a593Smuzhiyun /* OK, i/o is dead now. Call interrupt handler. */
882*4882a593Smuzhiyun if (cdev->handler)
883*4882a593Smuzhiyun cdev->handler(cdev, cdev->private->intparm,
884*4882a593Smuzhiyun ERR_PTR(cdev->private->async_kill_io_rc));
885*4882a593Smuzhiyun }
886*4882a593Smuzhiyun
887*4882a593Smuzhiyun static void
ccw_device_killing_timeout(struct ccw_device * cdev,enum dev_event dev_event)888*4882a593Smuzhiyun ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
889*4882a593Smuzhiyun {
890*4882a593Smuzhiyun int ret;
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun ret = ccw_device_cancel_halt_clear(cdev);
893*4882a593Smuzhiyun if (ret == -EBUSY) {
894*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 3*HZ);
895*4882a593Smuzhiyun return;
896*4882a593Smuzhiyun }
897*4882a593Smuzhiyun /* Start delayed path verification. */
898*4882a593Smuzhiyun ccw_device_online_verify(cdev, 0);
899*4882a593Smuzhiyun if (cdev->handler)
900*4882a593Smuzhiyun cdev->handler(cdev, cdev->private->intparm,
901*4882a593Smuzhiyun ERR_PTR(cdev->private->async_kill_io_rc));
902*4882a593Smuzhiyun }
903*4882a593Smuzhiyun
ccw_device_kill_io(struct ccw_device * cdev)904*4882a593Smuzhiyun void ccw_device_kill_io(struct ccw_device *cdev)
905*4882a593Smuzhiyun {
906*4882a593Smuzhiyun int ret;
907*4882a593Smuzhiyun
908*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 0);
909*4882a593Smuzhiyun cdev->private->iretry = 255;
910*4882a593Smuzhiyun cdev->private->async_kill_io_rc = -EIO;
911*4882a593Smuzhiyun ret = ccw_device_cancel_halt_clear(cdev);
912*4882a593Smuzhiyun if (ret == -EBUSY) {
913*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 3*HZ);
914*4882a593Smuzhiyun cdev->private->state = DEV_STATE_TIMEOUT_KILL;
915*4882a593Smuzhiyun return;
916*4882a593Smuzhiyun }
917*4882a593Smuzhiyun /* Start delayed path verification. */
918*4882a593Smuzhiyun ccw_device_online_verify(cdev, 0);
919*4882a593Smuzhiyun if (cdev->handler)
920*4882a593Smuzhiyun cdev->handler(cdev, cdev->private->intparm,
921*4882a593Smuzhiyun ERR_PTR(-EIO));
922*4882a593Smuzhiyun }
923*4882a593Smuzhiyun
924*4882a593Smuzhiyun static void
ccw_device_delay_verify(struct ccw_device * cdev,enum dev_event dev_event)925*4882a593Smuzhiyun ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
926*4882a593Smuzhiyun {
927*4882a593Smuzhiyun /* Start verification after current task finished. */
928*4882a593Smuzhiyun cdev->private->flags.doverify = 1;
929*4882a593Smuzhiyun }
930*4882a593Smuzhiyun
931*4882a593Smuzhiyun static void
ccw_device_start_id(struct ccw_device * cdev,enum dev_event dev_event)932*4882a593Smuzhiyun ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
933*4882a593Smuzhiyun {
934*4882a593Smuzhiyun struct subchannel *sch;
935*4882a593Smuzhiyun
936*4882a593Smuzhiyun sch = to_subchannel(cdev->dev.parent);
937*4882a593Smuzhiyun if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
938*4882a593Smuzhiyun /* Couldn't enable the subchannel for i/o. Sick device. */
939*4882a593Smuzhiyun return;
940*4882a593Smuzhiyun cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
941*4882a593Smuzhiyun ccw_device_sense_id_start(cdev);
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun
/* Re-probe a disconnected device to see if it (or a new one) came back. */
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}
975*4882a593Smuzhiyun
ccw_device_disabled_irq(struct ccw_device * cdev,enum dev_event dev_event)976*4882a593Smuzhiyun static void ccw_device_disabled_irq(struct ccw_device *cdev,
977*4882a593Smuzhiyun enum dev_event dev_event)
978*4882a593Smuzhiyun {
979*4882a593Smuzhiyun struct subchannel *sch;
980*4882a593Smuzhiyun
981*4882a593Smuzhiyun sch = to_subchannel(cdev->dev.parent);
982*4882a593Smuzhiyun /*
983*4882a593Smuzhiyun * An interrupt in a disabled state means a previous disable was not
984*4882a593Smuzhiyun * successful - should not happen, but we try to disable again.
985*4882a593Smuzhiyun */
986*4882a593Smuzhiyun cio_disable_subchannel(sch);
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun static void
ccw_device_change_cmfstate(struct ccw_device * cdev,enum dev_event dev_event)990*4882a593Smuzhiyun ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun retry_set_schib(cdev);
993*4882a593Smuzhiyun cdev->private->state = DEV_STATE_ONLINE;
994*4882a593Smuzhiyun dev_fsm_event(cdev, dev_event);
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun
ccw_device_update_cmfblock(struct ccw_device * cdev,enum dev_event dev_event)997*4882a593Smuzhiyun static void ccw_device_update_cmfblock(struct ccw_device *cdev,
998*4882a593Smuzhiyun enum dev_event dev_event)
999*4882a593Smuzhiyun {
1000*4882a593Smuzhiyun cmf_retry_copy_block(cdev);
1001*4882a593Smuzhiyun cdev->private->state = DEV_STATE_ONLINE;
1002*4882a593Smuzhiyun dev_fsm_event(cdev, dev_event);
1003*4882a593Smuzhiyun }
1004*4882a593Smuzhiyun
1005*4882a593Smuzhiyun static void
ccw_device_quiesce_done(struct ccw_device * cdev,enum dev_event dev_event)1006*4882a593Smuzhiyun ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1007*4882a593Smuzhiyun {
1008*4882a593Smuzhiyun ccw_device_set_timeout(cdev, 0);
1009*4882a593Smuzhiyun cdev->private->state = DEV_STATE_NOT_OPER;
1010*4882a593Smuzhiyun wake_up(&cdev->private->wait_q);
1011*4882a593Smuzhiyun }
1012*4882a593Smuzhiyun
1013*4882a593Smuzhiyun static void
ccw_device_quiesce_timeout(struct ccw_device * cdev,enum dev_event dev_event)1014*4882a593Smuzhiyun ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1015*4882a593Smuzhiyun {
1016*4882a593Smuzhiyun int ret;
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun ret = ccw_device_cancel_halt_clear(cdev);
1019*4882a593Smuzhiyun if (ret == -EBUSY) {
1020*4882a593Smuzhiyun ccw_device_set_timeout(cdev, HZ/10);
1021*4882a593Smuzhiyun } else {
1022*4882a593Smuzhiyun cdev->private->state = DEV_STATE_NOT_OPER;
1023*4882a593Smuzhiyun wake_up(&cdev->private->wait_q);
1024*4882a593Smuzhiyun }
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun
1027*4882a593Smuzhiyun /*
1028*4882a593Smuzhiyun * No operation action. This is used e.g. to ignore a timeout event in
1029*4882a593Smuzhiyun * state offline.
1030*4882a593Smuzhiyun */
1031*4882a593Smuzhiyun static void
ccw_device_nop(struct ccw_device * cdev,enum dev_event dev_event)1032*4882a593Smuzhiyun ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun /*
1037*4882a593Smuzhiyun * device statemachine
1038*4882a593Smuzhiyun */
1039*4882a593Smuzhiyun fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1040*4882a593Smuzhiyun [DEV_STATE_NOT_OPER] = {
1041*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_nop,
1042*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1043*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1044*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_nop,
1045*4882a593Smuzhiyun },
1046*4882a593Smuzhiyun [DEV_STATE_SENSE_ID] = {
1047*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1048*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1049*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1050*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_nop,
1051*4882a593Smuzhiyun },
1052*4882a593Smuzhiyun [DEV_STATE_OFFLINE] = {
1053*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1054*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1055*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1056*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
1057*4882a593Smuzhiyun },
1058*4882a593Smuzhiyun [DEV_STATE_VERIFY] = {
1059*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1060*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1061*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1062*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1063*4882a593Smuzhiyun },
1064*4882a593Smuzhiyun [DEV_STATE_ONLINE] = {
1065*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1066*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_irq,
1067*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
1068*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1069*4882a593Smuzhiyun },
1070*4882a593Smuzhiyun [DEV_STATE_W4SENSE] = {
1071*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1072*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
1073*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1074*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1075*4882a593Smuzhiyun },
1076*4882a593Smuzhiyun [DEV_STATE_DISBAND_PGID] = {
1077*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1078*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1079*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1080*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_nop,
1081*4882a593Smuzhiyun },
1082*4882a593Smuzhiyun [DEV_STATE_BOXED] = {
1083*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1084*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_nop,
1085*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1086*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
1087*4882a593Smuzhiyun },
1088*4882a593Smuzhiyun /* states to wait for i/o completion before doing something */
1089*4882a593Smuzhiyun [DEV_STATE_TIMEOUT_KILL] = {
1090*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1091*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
1092*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
1093*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
1094*4882a593Smuzhiyun },
1095*4882a593Smuzhiyun [DEV_STATE_QUIESCE] = {
1096*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
1097*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
1098*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
1099*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_nop,
1100*4882a593Smuzhiyun },
1101*4882a593Smuzhiyun /* special states for devices gone not operational */
1102*4882a593Smuzhiyun [DEV_STATE_DISCONNECTED] = {
1103*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_nop,
1104*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1105*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1106*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_start_id,
1107*4882a593Smuzhiyun },
1108*4882a593Smuzhiyun [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1109*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1110*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1111*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1112*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_nop,
1113*4882a593Smuzhiyun },
1114*4882a593Smuzhiyun [DEV_STATE_CMFCHANGE] = {
1115*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
1116*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
1117*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
1118*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
1119*4882a593Smuzhiyun },
1120*4882a593Smuzhiyun [DEV_STATE_CMFUPDATE] = {
1121*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
1122*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
1123*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
1124*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
1125*4882a593Smuzhiyun },
1126*4882a593Smuzhiyun [DEV_STATE_STEAL_LOCK] = {
1127*4882a593Smuzhiyun [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1128*4882a593Smuzhiyun [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1129*4882a593Smuzhiyun [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1130*4882a593Smuzhiyun [DEV_EVENT_VERIFY] = ccw_device_nop,
1131*4882a593Smuzhiyun },
1132*4882a593Smuzhiyun };
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
1135