// SPDX-License-Identifier: GPL-2.0
/*
 * Character device driver for extended error reporting.
 *
 * Copyright IBM Corp. 2005
 * extended error reporting for DASD ECKD devices
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif		/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"
/*
 * SECTION: the internal buffer
 */

/*
 * The internal buffer is meant to store opaque blobs of data, so it does
 * not know of higher level concepts like triggers.
 * It consists of a number of pages that are used as a ringbuffer. Each data
 * blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
 *
 * To allow for multiple independent readers we create one internal buffer
 * each time the device is opened and destroy the buffer when the file is
 * closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
 *
 * One record can be written to a buffer by using the functions
 * - dasd_eer_start_record (once per record, to write the size to the
 *			    buffer and reserve the space for the data)
 * - dasd_eer_write_buffer (one or more times per record, to write the data)
 * The data can be written in several steps, but you will have to compute
 * the total size up front for the invocation of dasd_eer_start_record.
 * If the ringbuffer is full, dasd_eer_start_record will remove the required
 * number of old records.
 *
 * A record is typically read in two steps: first read the integer that
 * specifies the size of the following data, then read the data.
 * Both can be done by
 * - dasd_eer_read_buffer
 *
 * For all mentioned functions you need to take the bufferlock first and keep
 * it until a complete record is written or read (see the sketch below).
 *
 * All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
 * the private_data field of that file. To be able to write data to all
 * existing buffers, each buffer is also added to the bufferlist.
 * If the user does not want to read a complete record in one go, we have to
 * keep track of the rest of the record. residual stores the number of bytes
 * that are still to be delivered. If the rest of the record is invalidated
 * between two reads, residual is set to -1 so that the next read will fail.
 * All entries in the eerbuffer structure are protected by the bufferlock.
 * To avoid races between writing to a buffer on the one side and creating
 * and destroying buffers on the other side, the bufferlock must also be used
 * to protect the bufferlist.
 */
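
/*
 * A minimal usage sketch of the record writing protocol described above
 * (hypothetical caller; eerb, data, len and flags are assumed to exist):
 *
 *	spin_lock_irqsave(&bufferlock, flags);
 *	if (!dasd_eer_start_record(eerb, len))
 *		dasd_eer_write_buffer(eerb, data, len);
 *	spin_unlock_irqrestore(&bufferlock, flags);
 *
 * This mirrors what dasd_eer_write_standard_trigger below does for each
 * buffer on the bufferlist.
 */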

static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

struct eerbuffer {
	struct list_head list;
	char **buffer;
	int buffersize;
	int buffer_page_count;
	int head;
	int tail;
	int residual;
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);

/*
 * How many free bytes are available in the buffer.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
	if (eerb->head < eerb->tail)
		return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}

/*
 * How many bytes of buffer space are used.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
	if (eerb->head >= eerb->tail)
		return eerb->head - eerb->tail;
	return eerb->buffersize - eerb->tail + eerb->head;
}
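
/*
 * Worked example (hypothetical numbers, one page, buffersize = 4096):
 * with head = 100 and tail = 40 the buffer holds 100 - 40 = 60 bytes and
 * has 4096 - 100 + 40 - 1 = 4035 bytes free. One byte is always left
 * unused, so that a full buffer (head one byte behind tail) can be told
 * apart from an empty one (head == tail).
 */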

/*
 * The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Make sure to call dasd_eer_start_record first, to
 * make sure that enough free space is available.
 * Needs to be called with bufferlock held.
 */
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
				  char *data, int count)
{
	unsigned long headindex, localhead;
	unsigned long rest, len;
	char *nextdata;

	nextdata = data;
	rest = count;
	while (rest > 0) {
		headindex = eerb->head / PAGE_SIZE;
		localhead = eerb->head % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localhead);
		memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
		nextdata += len;
		rest -= len;
		eerb->head += len;
		if (eerb->head == eerb->buffersize)
			eerb->head = 0; /* wrap around */
		BUG_ON(eerb->head > eerb->buffersize);
	}
}

/*
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long tailindex, localtail;
	unsigned long rest, len, finalcount;
	char *nextdata;

	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
	nextdata = data;
	rest = finalcount;
	while (rest > 0) {
		tailindex = eerb->tail / PAGE_SIZE;
		localtail = eerb->tail % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localtail);
		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
		nextdata += len;
		rest -= len;
		eerb->tail += len;
		if (eerb->tail == eerb->buffersize)
			eerb->tail = 0; /* wrap around */
		BUG_ON(eerb->tail > eerb->buffersize);
	}
	return finalcount;
}

/*
 * Whenever you want to write a blob of data to the internal buffer you
 * have to start by using this function first. It writes the number of
 * bytes that will follow to the buffer and, if necessary, removes old
 * records to make room for the new one.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
	int tailcount;

	if (count + sizeof(count) > eerb->buffersize)
		return -ENOMEM;
	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
		if (eerb->residual > 0) {
			eerb->tail += eerb->residual;
			if (eerb->tail >= eerb->buffersize)
				eerb->tail -= eerb->buffersize;
			eerb->residual = -1;
		}
		dasd_eer_read_buffer(eerb, (char *) &tailcount,
				     sizeof(tailcount));
		eerb->tail += tailcount;
		if (eerb->tail >= eerb->buffersize)
			eerb->tail -= eerb->buffersize;
	}
	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

	return 0;
}
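
/*
 * Eviction example (hypothetical numbers, buffersize = 4096): if the
 * buffer already holds a 2000-byte and a 1000-byte record (2004 and 1004
 * bytes including their size integers), only 4096 - 3008 - 1 = 1087 bytes
 * are free. dasd_eer_start_record(eerb, 1500) needs 1504 bytes, so the
 * loop above drops the oldest (2000-byte) record before writing the new
 * size integer, which leaves 3091 free bytes.
 */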

/*
 * Release pages that are not used anymore.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++)
		free_page((unsigned long) buf[i]);
}

/*
 * Allocate a new set of memory pages.
 */
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++) {
		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
		if (!buf[i]) {
			dasd_eer_free_buffer_pages(buf, i);
			return -ENOMEM;
		}
	}
	return 0;
}

/*
 * SECTION: The extended error reporting functionality
 */

/*
 * When a DASD device driver wants to report an error, it calls the
 * function dasd_eer_write and gives the respective trigger ID as
 * parameter. Currently there are five kinds of triggers:
 *
 * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
 * DASD_EER_PPRCSUSPEND: PPRC was suspended
 * DASD_EER_NOPATH: There is no path to the device left.
 * DASD_EER_NOSPC: There is no space left on the device.
 * DASD_EER_STATECHANGE: The state of the device has changed.
 *
 * For all triggers except DASD_EER_STATECHANGE the required information can
 * be supplied by the caller. For these triggers a record is written by the
 * function dasd_eer_write_standard_trigger.
 *
 * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status ccw needs to be executed to gather the necessary sense data first.
 * The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
 * trigger.
 *
 * To avoid memory allocations at runtime, the necessary memory is allocated
 * when the extended error reporting is enabled for a device (by
 * dasd_eer_probe). There is one sense subsystem status request for each
 * eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the snss request
 * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
 * that the cqr is currently in use, dasd_eer_snss cannot start a second
 * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
 * the SNSS request will check the bit and call dasd_eer_snss again.
 */

#define SNSS_DATA_SIZE 44

#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
	__u32 total_size;
	__u32 trigger;
	__u64 tv_sec;
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
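
/*
 * Layout sketch of one record as delivered to a reader of the character
 * device (hypothetical example with two 32-byte sense data sets; the
 * leading size integer of the internal buffer is consumed by
 * dasd_eer_read and never reaches user space):
 *
 *	struct dasd_eer_header	34 bytes (total_size = 34 + 64 + 4 = 102)
 *	sense data		64 bytes
 *	"EOR"			 4 bytes (including the trailing '\0')
 */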

/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called.
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the trigger's data.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
	char *sense;

	/* go through cqr chain and count the valid sense data sets */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (dasd_get_sense(&temp_cqr->irb))
			data_size += 32;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
			sense = dasd_get_sense(&temp_cqr->irb);
			if (sense)
				dasd_eer_write_buffer(eerb, sense, 32);
		}
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function writes a DASD_EER_STATECHANGE trigger.
 */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
					struct dasd_ccw_req *cqr,
					int trigger)
{
	int data_size;
	int snss_rc;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function is called for all triggers. It calls the appropriate
 * function that writes the actual trigger records.
 */
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
		    unsigned int id)
{
	if (!device->eer_cqr)
		return;
	switch (id) {
	case DASD_EER_FATALERROR:
	case DASD_EER_PPRCSUSPEND:
		dasd_eer_write_standard_trigger(device, cqr, id);
		break;
	case DASD_EER_NOPATH:
	case DASD_EER_NOSPC:
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	case DASD_EER_STATECHANGE:
		dasd_eer_write_snss_trigger(device, cqr, id);
		break;
	default: /* unknown trigger, so we write it without any sense data */
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	}
}
EXPORT_SYMBOL(dasd_eer_write);
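
/*
 * A typical call site in the DASD core might report a fatal I/O error
 * like this (sketch; "device" and the failed request "cqr" are assumed):
 *
 *	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
 *
 * The call is a no-op unless extended error reporting has been enabled
 * on the device, i.e. unless device->eer_cqr is set.
 */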

/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)	/* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* Sense subsystem status request in use. */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* cdev is already locked, can't use dasd_add_request_head */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}

/*
 * Callback function for use with sense subsystem status request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error recovery has been switched off while
		 * the SNSS request was running. It could even have
		 * been switched off and on again in which case there
		 * is a new ccw in device->eer_cqr. Free the "old"
		 * snss request now.
		 */
		dasd_sfree_request(cqr, device);
}

/*
 * Enable error reporting on a given device.
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr = NULL;
	unsigned long flags;
	struct ccw1 *ccw;
	int rc = 0;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr)
		goto out;
	else if (!device->discipline ||
		 strcmp(device->discipline->name, "ECKD"))
		rc = -EMEDIUMTYPE;
	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
		rc = -EBUSY;

	if (rc)
		goto out;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device, NULL);
	if (IS_ERR(cqr)) {
		rc = -ENOMEM;
		cqr = NULL;
		goto out;
	}

	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}

out:
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	if (cqr)
		dasd_sfree_request(cqr, device);

	return rc;
}

/*
 * Disable error reporting on a given device.
 */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr && !in_use)
		dasd_sfree_request(cqr, device);
}

/*
 * SECTION: the device operations
 */

/*
 * On the one side we need a lock to access our internal buffer, on the
 * other side a copy_to_user can sleep. So we need to copy the data we
 * have to transfer into an intermediate readbuffer, which is protected
 * by the readbuffer_mutex.
 */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);

static int dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	eerb->buffer_page_count = eer_pages;
	if (eerb->buffer_page_count < 1 ||
	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
		kfree(eerb);
		DBF_EVENT(DBF_WARNING, "can't open device since module "
			  "parameter eer_pages is smaller than 1 or"
			  " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
		return -EINVAL;
	}
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
				     GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		return -ENOMEM;
	}
	filp->private_data = eerb;
	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);

	return nonseekable_open(inp, filp);
}

static int dasd_eer_close(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = (struct eerbuffer *) filp->private_data;
	spin_lock_irqsave(&bufferlock, flags);
	list_del(&eerb->list);
	spin_unlock_irqrestore(&bufferlock, flags);
	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
	kfree(eerb->buffer);
	kfree(eerb);

	return 0;
}

static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) {
		/* the remainder of this record has been deleted */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK, we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}

	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}
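
/*
 * Read semantics in practice (hypothetical numbers): for a 102-byte
 * record, a read() with a 64-byte buffer returns 64 bytes and leaves
 * residual = 38; the next read() delivers the remaining 38 bytes. If
 * the tail of the record is evicted between the two calls, residual is
 * set to -1 and the second read() fails with -EIO instead.
 */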

static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	__poll_t mask;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->head != eerb->tail)
		mask = EPOLLIN | EPOLLRDNORM;
	else
		mask = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}

static const struct file_operations dasd_eer_fops = {
	.open		= &dasd_eer_open,
	.release	= &dasd_eer_close,
	.read		= &dasd_eer_read,
	.poll		= &dasd_eer_poll,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice *dasd_eer_dev;

int __init dasd_eer_init(void)
{
	int rc;

	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
	if (!dasd_eer_dev)
		return -ENOMEM;

	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
	dasd_eer_dev->name = "dasd_eer";
	dasd_eer_dev->fops = &dasd_eer_fops;

	rc = misc_register(dasd_eer_dev);
	if (rc) {
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
			  "register misc device");
		return rc;
	}

	return 0;
}

void dasd_eer_exit(void)
{
	if (dasd_eer_dev) {
		misc_deregister(dasd_eer_dev);
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
	}
}