xref: /OK3568_Linux_fs/kernel/drivers/s390/cio/device_status.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2002
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Status accumulation and basic sense functions.
 */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/init.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <asm/ccwdev.h>
14*4882a593Smuzhiyun #include <asm/cio.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include "cio.h"
17*4882a593Smuzhiyun #include "cio_debug.h"
18*4882a593Smuzhiyun #include "css.h"
19*4882a593Smuzhiyun #include "device.h"
20*4882a593Smuzhiyun #include "ioasm.h"
21*4882a593Smuzhiyun #include "io_sch.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun /*
24*4882a593Smuzhiyun  * Check for any kind of channel or interface control check but don't
25*4882a593Smuzhiyun  * issue the message for the console device
26*4882a593Smuzhiyun  */
27*4882a593Smuzhiyun static void
ccw_device_msg_control_check(struct ccw_device * cdev,struct irb * irb)28*4882a593Smuzhiyun ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
29*4882a593Smuzhiyun {
30*4882a593Smuzhiyun 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
31*4882a593Smuzhiyun 	char dbf_text[15];
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	if (!scsw_is_valid_cstat(&irb->scsw) ||
34*4882a593Smuzhiyun 	    !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
35*4882a593Smuzhiyun 	      SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
36*4882a593Smuzhiyun 		return;
37*4882a593Smuzhiyun 	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
38*4882a593Smuzhiyun 		      "received"
39*4882a593Smuzhiyun 		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
40*4882a593Smuzhiyun 		      ": %02X sch_stat : %02X\n",
41*4882a593Smuzhiyun 		      cdev->private->dev_id.devno, sch->schid.ssid,
42*4882a593Smuzhiyun 		      sch->schid.sch_no,
43*4882a593Smuzhiyun 		      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
44*4882a593Smuzhiyun 	sprintf(dbf_text, "chk%x", sch->schid.sch_no);
45*4882a593Smuzhiyun 	CIO_TRACE_EVENT(0, dbf_text);
46*4882a593Smuzhiyun 	CIO_HEX_EVENT(0, irb, sizeof(struct irb));
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun /*
50*4882a593Smuzhiyun  * Some paths became not operational (pno bit in scsw is set).
51*4882a593Smuzhiyun  */
52*4882a593Smuzhiyun static void
ccw_device_path_notoper(struct ccw_device * cdev)53*4882a593Smuzhiyun ccw_device_path_notoper(struct ccw_device *cdev)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun 	struct subchannel *sch;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	sch = to_subchannel(cdev->dev.parent);
58*4882a593Smuzhiyun 	if (cio_update_schib(sch))
59*4882a593Smuzhiyun 		goto doverify;
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
62*4882a593Smuzhiyun 		      "not operational \n", __func__,
63*4882a593Smuzhiyun 		      sch->schid.ssid, sch->schid.sch_no,
64*4882a593Smuzhiyun 		      sch->schib.pmcw.pnom);
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	sch->lpm &= ~sch->schib.pmcw.pnom;
67*4882a593Smuzhiyun doverify:
68*4882a593Smuzhiyun 	cdev->private->flags.doverify = 1;
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun /*
72*4882a593Smuzhiyun  * Copy valid bits from the extended control word to device irb.
73*4882a593Smuzhiyun  */
74*4882a593Smuzhiyun static void
ccw_device_accumulate_ecw(struct ccw_device * cdev,struct irb * irb)75*4882a593Smuzhiyun ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun 	/*
78*4882a593Smuzhiyun 	 * Copy extended control bit if it is valid... yes there
79*4882a593Smuzhiyun 	 * are condition that have to be met for the extended control
80*4882a593Smuzhiyun 	 * bit to have meaning. Sick.
81*4882a593Smuzhiyun 	 */
82*4882a593Smuzhiyun 	cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
83*4882a593Smuzhiyun 	if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
84*4882a593Smuzhiyun 	    !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
85*4882a593Smuzhiyun 		cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
86*4882a593Smuzhiyun 	/* Check if extended control word is valid. */
87*4882a593Smuzhiyun 	if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
88*4882a593Smuzhiyun 		return;
89*4882a593Smuzhiyun 	/* Copy concurrent sense / model dependent information. */
90*4882a593Smuzhiyun 	memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun /*
94*4882a593Smuzhiyun  * Check if extended status word is valid.
95*4882a593Smuzhiyun  */
96*4882a593Smuzhiyun static int
ccw_device_accumulate_esw_valid(struct irb * irb)97*4882a593Smuzhiyun ccw_device_accumulate_esw_valid(struct irb *irb)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun 	if (!irb->scsw.cmd.eswf &&
100*4882a593Smuzhiyun 	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
101*4882a593Smuzhiyun 		return 0;
102*4882a593Smuzhiyun 	if (irb->scsw.cmd.stctl ==
103*4882a593Smuzhiyun 			(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
104*4882a593Smuzhiyun 	    !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
105*4882a593Smuzhiyun 		return 0;
106*4882a593Smuzhiyun 	return 1;
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun /*
110*4882a593Smuzhiyun  * Copy valid bits from the extended status word to device irb.
111*4882a593Smuzhiyun  */
112*4882a593Smuzhiyun static void
ccw_device_accumulate_esw(struct ccw_device * cdev,struct irb * irb)113*4882a593Smuzhiyun ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun 	struct irb *cdev_irb;
116*4882a593Smuzhiyun 	struct sublog *cdev_sublog, *sublog;
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	if (!ccw_device_accumulate_esw_valid(irb))
119*4882a593Smuzhiyun 		return;
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 	cdev_irb = &cdev->private->dma_area->irb;
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	/* Copy last path used mask. */
124*4882a593Smuzhiyun 	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	/* Copy subchannel logout information if esw is of format 0. */
127*4882a593Smuzhiyun 	if (irb->scsw.cmd.eswf) {
128*4882a593Smuzhiyun 		cdev_sublog = &cdev_irb->esw.esw0.sublog;
129*4882a593Smuzhiyun 		sublog = &irb->esw.esw0.sublog;
130*4882a593Smuzhiyun 		/* Copy extended status flags. */
131*4882a593Smuzhiyun 		cdev_sublog->esf = sublog->esf;
132*4882a593Smuzhiyun 		/*
133*4882a593Smuzhiyun 		 * Copy fields that have a meaning for channel data check
134*4882a593Smuzhiyun 		 * channel control check and interface control check.
135*4882a593Smuzhiyun 		 */
136*4882a593Smuzhiyun 		if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
137*4882a593Smuzhiyun 				       SCHN_STAT_CHN_CTRL_CHK |
138*4882a593Smuzhiyun 				       SCHN_STAT_INTF_CTRL_CHK)) {
139*4882a593Smuzhiyun 			/* Copy ancillary report bit. */
140*4882a593Smuzhiyun 			cdev_sublog->arep = sublog->arep;
141*4882a593Smuzhiyun 			/* Copy field-validity-flags. */
142*4882a593Smuzhiyun 			cdev_sublog->fvf = sublog->fvf;
143*4882a593Smuzhiyun 			/* Copy storage access code. */
144*4882a593Smuzhiyun 			cdev_sublog->sacc = sublog->sacc;
145*4882a593Smuzhiyun 			/* Copy termination code. */
146*4882a593Smuzhiyun 			cdev_sublog->termc = sublog->termc;
147*4882a593Smuzhiyun 			/* Copy sequence code. */
148*4882a593Smuzhiyun 			cdev_sublog->seqc = sublog->seqc;
149*4882a593Smuzhiyun 		}
150*4882a593Smuzhiyun 		/* Copy device status check. */
151*4882a593Smuzhiyun 		cdev_sublog->devsc = sublog->devsc;
152*4882a593Smuzhiyun 		/* Copy secondary error. */
153*4882a593Smuzhiyun 		cdev_sublog->serr = sublog->serr;
154*4882a593Smuzhiyun 		/* Copy i/o-error alert. */
155*4882a593Smuzhiyun 		cdev_sublog->ioerr = sublog->ioerr;
156*4882a593Smuzhiyun 		/* Copy channel path timeout bit. */
157*4882a593Smuzhiyun 		if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
158*4882a593Smuzhiyun 			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
159*4882a593Smuzhiyun 		/* Copy failing storage address validity flag. */
160*4882a593Smuzhiyun 		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
161*4882a593Smuzhiyun 		if (cdev_irb->esw.esw0.erw.fsavf) {
162*4882a593Smuzhiyun 			/* ... and copy the failing storage address. */
163*4882a593Smuzhiyun 			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
164*4882a593Smuzhiyun 			       sizeof (irb->esw.esw0.faddr));
165*4882a593Smuzhiyun 			/* ... and copy the failing storage address format. */
166*4882a593Smuzhiyun 			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
167*4882a593Smuzhiyun 		}
168*4882a593Smuzhiyun 		/* Copy secondary ccw address validity bit. */
169*4882a593Smuzhiyun 		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
170*4882a593Smuzhiyun 		if (irb->esw.esw0.erw.scavf)
171*4882a593Smuzhiyun 			/* ... and copy the secondary ccw address. */
172*4882a593Smuzhiyun 			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	}
175*4882a593Smuzhiyun 	/* FIXME: DCTI for format 2? */
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	/* Copy authorization bit. */
178*4882a593Smuzhiyun 	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
179*4882a593Smuzhiyun 	/* Copy path verification required flag. */
180*4882a593Smuzhiyun 	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
181*4882a593Smuzhiyun 	if (irb->esw.esw0.erw.pvrf)
182*4882a593Smuzhiyun 		cdev->private->flags.doverify = 1;
183*4882a593Smuzhiyun 	/* Copy concurrent sense bit. */
184*4882a593Smuzhiyun 	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
185*4882a593Smuzhiyun 	if (irb->esw.esw0.erw.cons)
186*4882a593Smuzhiyun 		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun /*
190*4882a593Smuzhiyun  * Accumulate status from irb to devstat.
191*4882a593Smuzhiyun  */
192*4882a593Smuzhiyun void
ccw_device_accumulate_irb(struct ccw_device * cdev,struct irb * irb)193*4882a593Smuzhiyun ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	struct irb *cdev_irb;
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	/*
198*4882a593Smuzhiyun 	 * Check if the status pending bit is set in stctl.
199*4882a593Smuzhiyun 	 * If not, the remaining bit have no meaning and we must ignore them.
200*4882a593Smuzhiyun 	 * The esw is not meaningful as well...
201*4882a593Smuzhiyun 	 */
202*4882a593Smuzhiyun 	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
203*4882a593Smuzhiyun 		return;
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	/* Check for channel checks and interface control checks. */
206*4882a593Smuzhiyun 	ccw_device_msg_control_check(cdev, irb);
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	/* Check for path not operational. */
209*4882a593Smuzhiyun 	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
210*4882a593Smuzhiyun 		ccw_device_path_notoper(cdev);
211*4882a593Smuzhiyun 	/* No irb accumulation for transport mode irbs. */
212*4882a593Smuzhiyun 	if (scsw_is_tm(&irb->scsw)) {
213*4882a593Smuzhiyun 		memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
214*4882a593Smuzhiyun 		return;
215*4882a593Smuzhiyun 	}
216*4882a593Smuzhiyun 	/*
217*4882a593Smuzhiyun 	 * Don't accumulate unsolicited interrupts.
218*4882a593Smuzhiyun 	 */
219*4882a593Smuzhiyun 	if (!scsw_is_solicited(&irb->scsw))
220*4882a593Smuzhiyun 		return;
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	cdev_irb = &cdev->private->dma_area->irb;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	/*
225*4882a593Smuzhiyun 	 * If the clear function had been performed, all formerly pending
226*4882a593Smuzhiyun 	 * status at the subchannel has been cleared and we must not pass
227*4882a593Smuzhiyun 	 * intermediate accumulated status to the device driver.
228*4882a593Smuzhiyun 	 */
229*4882a593Smuzhiyun 	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
230*4882a593Smuzhiyun 		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	/* Copy bits which are valid only for the start function. */
233*4882a593Smuzhiyun 	if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
234*4882a593Smuzhiyun 		/* Copy key. */
235*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
236*4882a593Smuzhiyun 		/* Copy suspend control bit. */
237*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
238*4882a593Smuzhiyun 		/* Accumulate deferred condition code. */
239*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
240*4882a593Smuzhiyun 		/* Copy ccw format bit. */
241*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
242*4882a593Smuzhiyun 		/* Copy prefetch bit. */
243*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
244*4882a593Smuzhiyun 		/* Copy initial-status-interruption-control. */
245*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
246*4882a593Smuzhiyun 		/* Copy address limit checking control. */
247*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
248*4882a593Smuzhiyun 		/* Copy suppress suspend bit. */
249*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
250*4882a593Smuzhiyun 	}
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	/* Take care of the extended control bit and extended control word. */
253*4882a593Smuzhiyun 	ccw_device_accumulate_ecw(cdev, irb);
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	/* Accumulate function control. */
256*4882a593Smuzhiyun 	cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
257*4882a593Smuzhiyun 	/* Copy activity control. */
258*4882a593Smuzhiyun 	cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
259*4882a593Smuzhiyun 	/* Accumulate status control. */
260*4882a593Smuzhiyun 	cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
261*4882a593Smuzhiyun 	/*
262*4882a593Smuzhiyun 	 * Copy ccw address if it is valid. This is a bit simplified
263*4882a593Smuzhiyun 	 * but should be close enough for all practical purposes.
264*4882a593Smuzhiyun 	 */
265*4882a593Smuzhiyun 	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
266*4882a593Smuzhiyun 	    ((irb->scsw.cmd.stctl ==
267*4882a593Smuzhiyun 	      (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
268*4882a593Smuzhiyun 	     (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
269*4882a593Smuzhiyun 	     (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
270*4882a593Smuzhiyun 	    (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
271*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
272*4882a593Smuzhiyun 	/* Accumulate device status, but not the device busy flag. */
273*4882a593Smuzhiyun 	cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
274*4882a593Smuzhiyun 	/* dstat is not always valid. */
275*4882a593Smuzhiyun 	if (irb->scsw.cmd.stctl &
276*4882a593Smuzhiyun 	    (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
277*4882a593Smuzhiyun 	     | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
278*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
279*4882a593Smuzhiyun 	/* Accumulate subchannel status. */
280*4882a593Smuzhiyun 	cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
281*4882a593Smuzhiyun 	/* Copy residual count if it is valid. */
282*4882a593Smuzhiyun 	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
283*4882a593Smuzhiyun 	    (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
284*4882a593Smuzhiyun 	     == 0)
285*4882a593Smuzhiyun 		cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	/* Take care of bits in the extended status word. */
288*4882a593Smuzhiyun 	ccw_device_accumulate_esw(cdev, irb);
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun 	/*
291*4882a593Smuzhiyun 	 * Check whether we must issue a SENSE CCW ourselves if there is no
292*4882a593Smuzhiyun 	 * concurrent sense facility installed for the subchannel.
293*4882a593Smuzhiyun 	 * No sense is required if no delayed sense is pending
294*4882a593Smuzhiyun 	 * and we did not get a unit check without sense information.
295*4882a593Smuzhiyun 	 *
296*4882a593Smuzhiyun 	 * Note: We should check for ioinfo[irq]->flags.consns but VM
297*4882a593Smuzhiyun 	 *	 violates the ESA/390 architecture and doesn't present an
298*4882a593Smuzhiyun 	 *	 operand exception for virtual devices without concurrent
299*4882a593Smuzhiyun 	 *	 sense facility available/supported when enabling the
300*4882a593Smuzhiyun 	 *	 concurrent sense facility.
301*4882a593Smuzhiyun 	 */
302*4882a593Smuzhiyun 	if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
303*4882a593Smuzhiyun 	    !(cdev_irb->esw.esw0.erw.cons))
304*4882a593Smuzhiyun 		cdev->private->flags.dosense = 1;
305*4882a593Smuzhiyun }
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun /*
308*4882a593Smuzhiyun  * Do a basic sense.
309*4882a593Smuzhiyun  */
310*4882a593Smuzhiyun int
ccw_device_do_sense(struct ccw_device * cdev,struct irb * irb)311*4882a593Smuzhiyun ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun 	struct subchannel *sch;
314*4882a593Smuzhiyun 	struct ccw1 *sense_ccw;
315*4882a593Smuzhiyun 	int rc;
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	sch = to_subchannel(cdev->dev.parent);
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun 	/* A sense is required, can we do it now ? */
320*4882a593Smuzhiyun 	if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
321*4882a593Smuzhiyun 		/*
322*4882a593Smuzhiyun 		 * we received an Unit Check but we have no final
323*4882a593Smuzhiyun 		 *  status yet, therefore we must delay the SENSE
324*4882a593Smuzhiyun 		 *  processing. We must not report this intermediate
325*4882a593Smuzhiyun 		 *  status to the device interrupt handler.
326*4882a593Smuzhiyun 		 */
327*4882a593Smuzhiyun 		return -EBUSY;
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	/*
330*4882a593Smuzhiyun 	 * We have ending status but no sense information. Do a basic sense.
331*4882a593Smuzhiyun 	 */
332*4882a593Smuzhiyun 	sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
333*4882a593Smuzhiyun 	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
334*4882a593Smuzhiyun 	sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
335*4882a593Smuzhiyun 	sense_ccw->count = SENSE_MAX_COUNT;
336*4882a593Smuzhiyun 	sense_ccw->flags = CCW_FLAG_SLI;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	rc = cio_start(sch, sense_ccw, 0xff);
339*4882a593Smuzhiyun 	if (rc == -ENODEV || rc == -EACCES)
340*4882a593Smuzhiyun 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
341*4882a593Smuzhiyun 	return rc;
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun /*
345*4882a593Smuzhiyun  * Add information from basic sense to devstat.
346*4882a593Smuzhiyun  */
347*4882a593Smuzhiyun void
ccw_device_accumulate_basic_sense(struct ccw_device * cdev,struct irb * irb)348*4882a593Smuzhiyun ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
349*4882a593Smuzhiyun {
350*4882a593Smuzhiyun 	/*
351*4882a593Smuzhiyun 	 * Check if the status pending bit is set in stctl.
352*4882a593Smuzhiyun 	 * If not, the remaining bit have no meaning and we must ignore them.
353*4882a593Smuzhiyun 	 * The esw is not meaningful as well...
354*4882a593Smuzhiyun 	 */
355*4882a593Smuzhiyun 	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
356*4882a593Smuzhiyun 		return;
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	/* Check for channel checks and interface control checks. */
359*4882a593Smuzhiyun 	ccw_device_msg_control_check(cdev, irb);
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	/* Check for path not operational. */
362*4882a593Smuzhiyun 	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
363*4882a593Smuzhiyun 		ccw_device_path_notoper(cdev);
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
366*4882a593Smuzhiyun 	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
367*4882a593Smuzhiyun 		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
368*4882a593Smuzhiyun 		cdev->private->flags.dosense = 0;
369*4882a593Smuzhiyun 	}
370*4882a593Smuzhiyun 	/* Check if path verification is required. */
371*4882a593Smuzhiyun 	if (ccw_device_accumulate_esw_valid(irb) &&
372*4882a593Smuzhiyun 	    irb->esw.esw0.erw.pvrf)
373*4882a593Smuzhiyun 		cdev->private->flags.doverify = 1;
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun /*
377*4882a593Smuzhiyun  * This function accumulates the status into the private devstat and
378*4882a593Smuzhiyun  * starts a basic sense if one is needed.
379*4882a593Smuzhiyun  */
380*4882a593Smuzhiyun int
ccw_device_accumulate_and_sense(struct ccw_device * cdev,struct irb * irb)381*4882a593Smuzhiyun ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
382*4882a593Smuzhiyun {
383*4882a593Smuzhiyun 	ccw_device_accumulate_irb(cdev, irb);
384*4882a593Smuzhiyun 	if ((irb->scsw.cmd.actl  & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
385*4882a593Smuzhiyun 		return -EBUSY;
386*4882a593Smuzhiyun 	/* Check for basic sense. */
387*4882a593Smuzhiyun 	if (cdev->private->flags.dosense &&
388*4882a593Smuzhiyun 	    !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
389*4882a593Smuzhiyun 		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
390*4882a593Smuzhiyun 		cdev->private->flags.dosense = 0;
391*4882a593Smuzhiyun 		return 0;
392*4882a593Smuzhiyun 	}
393*4882a593Smuzhiyun 	if (cdev->private->flags.dosense) {
394*4882a593Smuzhiyun 		ccw_device_do_sense(cdev, irb);
395*4882a593Smuzhiyun 		return -EBUSY;
396*4882a593Smuzhiyun 	}
397*4882a593Smuzhiyun 	return 0;
398*4882a593Smuzhiyun }
399*4882a593Smuzhiyun 
400