// SPDX-License-Identifier: GPL-2.0
/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009, 2011
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
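 *
 * Example: lpm_adjust(0x40, 0x11) shifts 0x40 -> 0x20 -> 0x10 and
 * returns 0x10, the first bit that @lpm and @mask have in common.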
 */
int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;
	return lpm;
}

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

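	/*
	 * Multipath requests are started on all paths at once (see
	 * ccw_request_start()), so there is no alternative path left
	 * to try.
	 */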
	if (!req->singlepath) {
		req->mask = 0;
		goto out;
	}
	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask >> 1, req->lpm);
out:
	return req->mask;
}

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	if (req->done)
		return;
	req->done = 1;
	ccw_device_set_timeout(cdev, 0);
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
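	/*
	 * A return code stored in req->drc (e.g. -ETIME set by the
	 * timeout handler) takes precedence over any error other than
	 * -ENODEV.
	 */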
	if (rc && rc != -ENODEV && req->drc)
		rc = req->drc;
	req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status. */
		rc = cio_clear(sch);
		if (rc)
			break;
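		/*
		 * Wait for the interrupt generated by cio_clear();
		 * ccw_request_handler() will restart the request.
		 */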
		return;
	}
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	if (req->singlepath) {
		/* Try all paths twice to counter link flapping. */
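		/*
		 * Each path bit appears in both bytes of the mask; since
		 * cio_start() only uses the low byte and ccwreq_next_path()
		 * shifts the mask right, the high byte yields a second
		 * round of attempts once the first is exhausted.
		 */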
		req->mask = 0x8080;
	} else
		req->mask = req->lpm;

	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask, req->lpm);
	req->drc = 0;
	req->done = 0;
	req->cancel = 0;
	if (!req->mask)
		goto out_nopath;
	ccwreq_do(cdev);
	return;

out_nopath:
	ccwreq_stop(cdev, -EACCES);
}

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (req->done)
		return 1;
	req->cancel = 1;
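	/*
	 * The resulting clear interrupt is delivered to
	 * ccw_request_handler(), which sees req->cancel set and ends the
	 * request with -EIO.
	 */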
	rc = cio_clear(sch);
	if (rc)
		ccwreq_stop(cdev, rc);
	return 0;
}

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
	struct irb *irb = &cdev->private->dma_area->irb;
	struct cmd_scsw *scsw = &irb->scsw.cmd;
	enum uc_todo todo;

	/* Perform BASIC SENSE if needed. */
	if (ccw_device_accumulate_and_sense(cdev, lcirb))
		return IO_RUNNING;
	/* Check for halt/clear interrupt. */
	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return IO_KILLED;
	/* Check for path error. */
	if (scsw->cc == 3 || scsw->pno)
		return IO_PATH_ERROR;
	/* Handle BASIC SENSE data. */
	if (irb->esw.esw0.erw.cons) {
		CIO_TRACE_EVENT(2, "sensedata");
		CIO_HEX_EVENT(2, &cdev->private->dev_id,
			      sizeof(struct ccw_dev_id));
		CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
			      SENSE_MAX_COUNT);
		/* Check for command reject. */
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return IO_REJECTED;
		/* Ask the driver what to do */
		if (cdev->drv && cdev->drv->uc_handler) {
			todo = cdev->drv->uc_handler(cdev, lcirb);
			CIO_TRACE_EVENT(2, "uc_response");
			CIO_HEX_EVENT(2, &todo, sizeof(todo));
			switch (todo) {
			case UC_TODO_RETRY:
				return IO_STATUS_ERROR;
			case UC_TODO_RETRY_ON_NEW_PATH:
				return IO_PATH_ERROR;
			case UC_TODO_STOP:
				return IO_REJECTED;
			default:
				return IO_STATUS_ERROR;
			}
		}
		/* Assume that unexpected SENSE data implies an error. */
		return IO_STATUS_ERROR;
	}
	/* Check for channel errors. */
	if (scsw->cstat != 0)
		return IO_STATUS_ERROR;
	/* Check for device errors. */
	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return IO_STATUS_ERROR;
	/* Check for final state. */
	if (!(scsw->dstat & DEV_STAT_DEV_END))
		return IO_RUNNING;
	/* Check for other improper status. */
	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
		return IO_STATUS_ERROR;
	return IO_DONE;
}

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
	struct ccw_request *req = &cdev->private->req;
	struct {
		struct ccw_dev_id dev_id;
		u16 retries;
		u8 lpm;
		u8 status;
	} __attribute__ ((packed)) data;
	data.dev_id = cdev->private->dev_id;
	data.retries = req->retries;
	data.lpm = (u8) req->mask;
	data.status = (u8) status;
	CIO_TRACE_EVENT(2, "reqstat");
	CIO_HEX_EVENT(2, &data, sizeof(data));
}

/**
 * ccw_request_handler - interrupt handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
	struct irb *irb = this_cpu_ptr(&cio_irb);
	struct ccw_request *req = &cdev->private->req;
	enum io_status status;
	int rc = -EOPNOTSUPP;

	/* Check status of I/O request. */
	status = ccwreq_status(cdev, irb);
	if (req->filter)
		status = req->filter(cdev, req->data, irb, status);
	if (status != IO_RUNNING)
		ccw_device_set_timeout(cdev, 0);
	if (status != IO_DONE && status != IO_RUNNING)
		ccwreq_log_status(cdev, status);
	switch (status) {
	case IO_DONE:
		break;
	case IO_RUNNING:
		return;
	case IO_REJECTED:
		goto err;
	case IO_PATH_ERROR:
		goto out_next_path;
	case IO_STATUS_ERROR:
		goto out_restart;
	case IO_KILLED:
		/* Check if request was cancelled on purpose. */
		if (req->cancel) {
			rc = -EIO;
			goto err;
		}
		goto out_restart;
	}
	/* Check back with request initiator. */
	if (!req->check)
		goto out;
	switch (req->check(cdev, req->data)) {
	case 0:
		break;
	case -EAGAIN:
		goto out_restart;
	case -EACCES:
		goto out_next_path;
	default:
		goto err;
	}
out:
	ccwreq_stop(cdev, 0);
	return;

out_next_path:
	/* Try next path and restart I/O. */
	if (!ccwreq_next_path(cdev)) {
		rc = -EACCES;
		goto err;
	}
out_restart:
	/* Restart. */
	ccwreq_do(cdev);
	return;
err:
	ccwreq_stop(cdev, rc);
}


/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc = -ENODEV, chp;

	if (cio_update_schib(sch))
		goto err;

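	/*
	 * Log the timeout against each channel path set in the
	 * last-path-used mask (lpum).
	 */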
	for (chp = 0; chp < 8; chp++) {
		if ((0x80 >> chp) & sch->schib.pmcw.lpum)
			pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
				dev_name(&cdev->dev), req->timeout / HZ,
				scsw_cstat(&sch->schib.scsw),
				scsw_dstat(&sch->schib.scsw),
				sch->schid.cssid,
				sch->schib.pmcw.chpid[chp]);
	}

	if (!ccwreq_next_path(cdev)) {
		/* set the final return code for this request */
		req->drc = -ETIME;
	}
	rc = cio_clear(sch);
	if (rc)
		goto err;
	return;

err:
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
	ccwreq_stop(cdev, -ENODEV);
}