// SPDX-License-Identifier: GPL-2.0
/*
 * CCW device PGID and path verification I/O handling.
 *
 * Copyright IBM Corp. 2002, 2009
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

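/* Retry and timeout limits applied to each single-path request issued below. */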
#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);
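
/*
 * Path verification proceeds in single-path steps: with path grouping
 * enabled, SENSE PGID (snid_*) is issued on each available path to read
 * the current path-group state, and SET PGID (spid_*) is then issued on
 * the paths that still need to be grouped or resigned.  With path
 * grouping disabled, a NOP (nop_*) is issued on each path merely to
 * check whether it is operational.
 */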

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
		      "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
		      sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->dma_area->pgid[i];

	pgid->inf.fc = fn;
	cp->cmd_code = CCW_CMD_SET_PGID;
	cp->cda = (u32) (addr_t) pgid;
	cp->count = sizeof(*pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leave unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam;
	req->callback = pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}

static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	req->callback = spid_callback;
	spid_do(cdev);
}
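
/*
 * Note that both pgid_is_reset() and pgid_cmp() skip the first byte of
 * struct pgid, which holds the SENSE PGID path state (or the SET PGID
 * function code) rather than the path-group ID itself.
 */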
static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->dma_area->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}

static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->dma_area->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->dma_area->pgid[i], pgid,
		       sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
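		/*
		 * Paths whose PGID already shows the desired grouping state
		 * are verified right away; they and any paths that did not
		 * respond are removed from the set still to be grouped.
		 */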
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code = CCW_CMD_SENSE_PGID;
	cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
	cp->count = sizeof(struct pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
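	/*
	 * All paths have been sensed: success if at least one path returned
	 * PGID data, otherwise report timeout or "not operational".
	 */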
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->dma_area->pgid, 0,
	       sizeof(cdev->private->dma_area->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback = snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter = nop_filter;
		req->callback = nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->singlepath = 1;
	req->callback = disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

struct stlck_data {
	struct completion done;
	int rc;
};
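
/*
 * Build a channel program that performs an unconditional reserve
 * (steal lock) chained directly to a release, using the two 32-byte
 * buffers provided by the caller.
 */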
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}

static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->data = data;
	req->callback = stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
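	/* One 64-byte DMA buffer, split into two 32-byte areas for the CCWs. */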
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}