// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD "dasd_diag_mod"
static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
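
/*
 * Usage sketch for the two module parameters above (the module name
 * dasd_mod is an assumption about how this driver is packaged):
 *
 *	modprobe dasd_mod queue_depth=64 nr_hw_queues=8
 *
 * Both parameters are 0444, i.e. read-only via sysfs at runtime; they
 * only serve as defaults applied when a block queue is allocated for a
 * newly set up DASD device.
 */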

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}
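
/*
 * For reference, the state ladder walked by dasd_increase_state() and
 * dasd_decrease_state() above, as implemented by the transition helpers
 * in this file:
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *
 * DASD_STATE_UNFMT is a side state: it is entered from BASIC when the
 * initial analysis fails, can only be left downwards to BASIC, and any
 * attempt to raise the state beyond UNFMT yields -EPERM.
 */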

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device via the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device via the
 * kernel event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device via the
 * kernel event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);

	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device (set the target state to online) and wait until
 * the state change has completed.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			    \
{								    \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						    \
}
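
/*
 * Worked example for the bucketing above: the loop stops at the first
 * index for which value >> (2 + index) becomes zero, i.e. index =
 * floor(log2(value)) - 1 for value >= 4, with values 0..3 mapping to
 * bucket 0 and everything from 2^32 upwards capped at bucket 31. So a
 * request of 4..7 sectors lands in bucket 1, 8..15 in bucket 2, etc.
 */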

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}
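
/*
 * Typical call pattern for the helper above (a sketch; it mirrors the
 * use in dasd_stats_write() below):
 *
 *	buffer = dasd_get_user_string(user_buf, user_len);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	... parse the NUL-terminated, linefeed-stripped string ...
 *	vfree(buffer);
 *
 * The caller owns the returned buffer and must release it with vfree().
 */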

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}
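
/*
 * The "statistics" debugfs files created by dasd_profile_init() below
 * accept the keywords parsed above. A usage sketch from user space
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo on    > /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/global/statistics
 *	echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * Per-device and per-block files live in sibling directories named
 * after the ccw device and the gendisk, respectively.
 */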

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;

	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

dasd_profile_init(struct dasd_profile * profile,struct dentry * base_dentry)1098*4882a593Smuzhiyun static void dasd_profile_init(struct dasd_profile *profile,
1099*4882a593Smuzhiyun struct dentry *base_dentry)
1100*4882a593Smuzhiyun {
1101*4882a593Smuzhiyun umode_t mode;
1102*4882a593Smuzhiyun struct dentry *pde;
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun if (!base_dentry)
1105*4882a593Smuzhiyun return;
1106*4882a593Smuzhiyun profile->dentry = NULL;
1107*4882a593Smuzhiyun profile->data = NULL;
1108*4882a593Smuzhiyun mode = (S_IRUSR | S_IWUSR | S_IFREG);
1109*4882a593Smuzhiyun pde = debugfs_create_file("statistics", mode, base_dentry,
1110*4882a593Smuzhiyun profile, &dasd_stats_raw_fops);
1111*4882a593Smuzhiyun if (pde && !IS_ERR(pde))
1112*4882a593Smuzhiyun profile->dentry = pde;
1113*4882a593Smuzhiyun return;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun
dasd_profile_exit(struct dasd_profile * profile)1116*4882a593Smuzhiyun static void dasd_profile_exit(struct dasd_profile *profile)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun dasd_profile_off(profile);
1119*4882a593Smuzhiyun debugfs_remove(profile->dentry);
1120*4882a593Smuzhiyun profile->dentry = NULL;
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun
dasd_statistics_removeroot(void)1123*4882a593Smuzhiyun static void dasd_statistics_removeroot(void)
1124*4882a593Smuzhiyun {
1125*4882a593Smuzhiyun dasd_global_profile_level = DASD_PROFILE_OFF;
1126*4882a593Smuzhiyun dasd_profile_exit(&dasd_global_profile);
1127*4882a593Smuzhiyun debugfs_remove(dasd_debugfs_global_entry);
1128*4882a593Smuzhiyun debugfs_remove(dasd_debugfs_root_entry);
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun
dasd_statistics_createroot(void)1131*4882a593Smuzhiyun static void dasd_statistics_createroot(void)
1132*4882a593Smuzhiyun {
1133*4882a593Smuzhiyun struct dentry *pde;
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun dasd_debugfs_root_entry = NULL;
1136*4882a593Smuzhiyun pde = debugfs_create_dir("dasd", NULL);
1137*4882a593Smuzhiyun if (!pde || IS_ERR(pde))
1138*4882a593Smuzhiyun goto error;
1139*4882a593Smuzhiyun dasd_debugfs_root_entry = pde;
1140*4882a593Smuzhiyun pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
1141*4882a593Smuzhiyun if (!pde || IS_ERR(pde))
1142*4882a593Smuzhiyun goto error;
1143*4882a593Smuzhiyun dasd_debugfs_global_entry = pde;
1144*4882a593Smuzhiyun dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1145*4882a593Smuzhiyun return;
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun error:
1148*4882a593Smuzhiyun DBF_EVENT(DBF_ERR, "%s",
1149*4882a593Smuzhiyun "Creation of the dasd debugfs interface failed");
1150*4882a593Smuzhiyun dasd_statistics_removeroot();
1151*4882a593Smuzhiyun return;
1152*4882a593Smuzhiyun }
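
/*
 * After dasd_statistics_createroot() succeeds, the debugfs tree looks
 * like dasd/global/statistics; per-device directories created elsewhere
 * in this file hang off the same "dasd" root.  On any failure the
 * half-built tree is torn down again via dasd_statistics_removeroot().
 */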

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;
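	/*
	 * (x + 7L) & -8L rounds x up to the next multiple of 8, since -8L
	 * is the mask ~7L: e.g. a 100-byte struct yields (100 + 7) & ~7 =
	 * 104.  This keeps the CCW area that follows the embedded
	 * dasd_ccw_req doubleword aligned, as the channel subsystem
	 * requires.
	 */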

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);
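
/*
 * Hypothetical caller sketch (names chosen for illustration only): a
 * discipline that needs one CCW plus a 64-byte parameter block would do
 * something like
 *
 *	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 64, device, NULL);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	ccw = cqr->cpaddr;
 *	ccw->cmd_code = ...;	// fill in the channel program
 *	ccw->cda = (__u32)(addr_t) cqr->data;
 *
 * and release the request with dasd_sfree_request() once the I/O has
 * completed.
 */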

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);
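
/*
 * dasd_fmalloc_request() is the twin of dasd_smalloc_request(), but it
 * draws from the separate ese_chunks pool.  A plausible reading of the
 * split is that on-the-fly format requests for thin-provisioned (ESE)
 * volumes can still be built even when the regular ccw_chunks pool is
 * exhausted by the very requests that triggered the format.  Requests
 * allocated here must be freed with dasd_ffree_request(), not
 * dasd_sfree_request().
 */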

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
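
/*
 * The magic is the discipline name packed into four EBCDIC bytes (the
 * ebcname member), so the strncmp() above effectively verifies that the
 * request was built by the same discipline (e.g. ECKD or FBA) that owns
 * the start device; the debug message prints the ASCII name as the
 * second value.
 */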

/*
 * Terminate the current i/o and set the request to clear_pending.
 * The timer keeps the device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0: /* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);
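
/*
 * Note that dasd_term_IO() retries the clear subchannel up to five times
 * while the request is still DASD_CQR_IN_IO: a successful clear only
 * moves the request to DASD_CQR_CLEAR_PENDING, and the final transition
 * to DASD_CQR_CLEARED happens in the interrupt handler once the clear
 * function is confirmed by the channel subsystem.
 */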

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
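	/*
	 * lpm and opm are 8-bit channel path masks, one bit per channel
	 * path: lpm names the paths this request may use, opm the paths
	 * currently operational for the device.  The masking above
	 * confines a normal request to operational paths and, if that
	 * would leave no path at all, falls back to all operational
	 * paths; path verification requests keep their hand-picked mask.
	 */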
	/*
	 * remember the number of formatted tracks to prevent double format on
	 * ESE devices
	 */
	if (cqr->block)
		cqr->trkcount = atomic_read(&cqr->block->trkcount);

	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);
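
/*
 * Callers pass the delay in jiffies: dasd_device_set_timer(device,
 * 5 * HZ), for example, arms dasd_device_timeout() to fire in five
 * seconds, while an expires value of 0 cancels a pending timer.
 * mod_timer() already handles re-arming an active timer, so
 * back-to-back calls simply push the deadline out.
 */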

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}
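
/*
 * dasd_ese_oos_cond() matches the sense pattern raised when the extent
 * pool behind a thin-provisioned volume has run out of backing space:
 * equipment check in sense byte 0, permanent error plus write inhibited
 * in byte 1, and the value 0x01 in byte 25.  All four conditions must
 * hold, which keeps ordinary write-inhibit errors from being misread as
 * out-of-space.
 */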

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	struct request *req;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		req = dasd_get_callback_data(cqr);
		if (!req) {
			cqr->status = DASD_CQR_ERROR;
			return;
		}
		if (rq_data_dir(req) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else { /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue requests that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

static void __dasd_process_cqr(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	char errorstring[ERRORLENGTH];

	switch (cqr->status) {
	case DASD_CQR_SUCCESS:
		cqr->status = DASD_CQR_DONE;
		break;
	case DASD_CQR_ERROR:
		cqr->status = DASD_CQR_NEED_ERP;
		break;
	case DASD_CQR_CLEARED:
		cqr->status = DASD_CQR_TERMINATED;
		break;
	default:
		/* internal error 12 - wrong cqr status*/
		snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
	}
	if (cqr->callback)
		cqr->callback(cqr, cqr->callback_data);
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		if (!block) {
			__dasd_process_cqr(device, cqr);
		} else {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_cqr(device, cqr);
			spin_unlock_bh(&block->queue_lock);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}

/*
 * return 1 when device is not eligible for IO
 */
static int __dasd_device_is_unusable(struct dasd_device *device,
				     struct dasd_ccw_req *cqr)
{
	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC);

	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/*
		 * dasd is being set offline
		 * but it is no safe offline where we have to allow I/O
		 */
		return 1;
	}
	if (device->stopped) {
		if (device->stopped & mask) {
			/* stopped and CQR will not change that. */
			return 1;
		}
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			/* CQR is not able to change device to
			 * operational. */
			return 1;
		}
		/* CQR required to get device operational. */
	}
	return 0;
}
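
/*
 * The mask above inverts the stop bits that a path-verification request
 * is allowed to bypass: if the device is stopped only because of a
 * disconnected wait, an unresumed PM state, or a full extent pool, a CQR
 * flagged DASD_CQR_VERIFY_PATH may still be started, since it is exactly
 * the kind of request that can bring the device back to an operational
 * state.  Any other stop reason blocks all I/O.
 */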

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* if device is not usable return request to upper layer */
	if (__dasd_device_is_unusable(device, cqr)) {
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

static void __dasd_device_check_path_events(struct dasd_device *device)
{
	int rc;

	if (!dasd_path_get_tbvpm(device))
		return;

	if (device->stopped &
	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
		return;
	rc = device->discipline->verify_path(device,
					     dasd_path_get_tbvpm(device));
	if (rc)
		dasd_device_set_timer(device, 50);
	else
		dasd_path_clear_all_verify(device);
};

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed
 * beforehand via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_tod_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(unsigned long data)
{
	struct dasd_device *device = (struct dasd_device *) data;
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	__dasd_device_check_path_events(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(device);
}

/*
 * Schedule a run of dasd_device_tasklet on the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_device_bh);
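
/*
 * The atomic_cmpxchg() above makes scheduling idempotent: only the
 * caller that flips tasklet_scheduled from 0 to 1 takes a device
 * reference and queues the tasklet, and dasd_device_tasklet() clears
 * the flag and drops that reference when it runs.  Redundant wake-ups
 * from interrupt context therefore cost no more than one atomic
 * operation.
 */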
2240*4882a593Smuzhiyun
dasd_device_set_stop_bits(struct dasd_device * device,int bits)2241*4882a593Smuzhiyun void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2242*4882a593Smuzhiyun {
2243*4882a593Smuzhiyun device->stopped |= bits;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2246*4882a593Smuzhiyun
dasd_device_remove_stop_bits(struct dasd_device * device,int bits)2247*4882a593Smuzhiyun void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2248*4882a593Smuzhiyun {
2249*4882a593Smuzhiyun device->stopped &= ~bits;
2250*4882a593Smuzhiyun if (!device->stopped)
2251*4882a593Smuzhiyun wake_up(&generic_waitq);
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_head);

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        unsigned long flags;

        device = cqr->startdev;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->devlist, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_tail);

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
        spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
        cqr->callback_data = DASD_SLEEPON_END_TAG;
        spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
        wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->startdev;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}

/*
 * Checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        dasd_erp_fn_t erp_fn;

        if (cqr->status == DASD_CQR_FILLED)
                return 0;
        device = cqr->startdev;
        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
                if (cqr->status == DASD_CQR_TERMINATED) {
                        device->discipline->handle_terminated_request(cqr);
                        return 1;
                }
                if (cqr->status == DASD_CQR_NEED_ERP) {
                        erp_fn = device->discipline->erp_action(cqr);
                        erp_fn(cqr);
                        return 1;
                }
                if (cqr->status == DASD_CQR_FAILED)
                        dasd_log_sense(cqr, &cqr->irb);
                if (cqr->refers) {
                        __dasd_process_erp(device, cqr);
                        return 1;
                }
        }
        return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
        if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
                if (cqr->refers) /* erp is not done yet */
                        return 1;
                return ((cqr->status != DASD_CQR_DONE) &&
                        (cqr->status != DASD_CQR_FAILED));
        } else
                return (cqr->status == DASD_CQR_FILLED);
}

static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
        struct dasd_device *device;
        int rc;
        struct list_head ccw_queue;
        struct dasd_ccw_req *cqr;

        INIT_LIST_HEAD(&ccw_queue);
        maincqr->status = DASD_CQR_FILLED;
        device = maincqr->startdev;
        list_add(&maincqr->blocklist, &ccw_queue);
        for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
             cqr = list_first_entry(&ccw_queue,
                                    struct dasd_ccw_req, blocklist)) {

                if (__dasd_sleep_on_erp(cqr))
                        continue;
                if (cqr->status != DASD_CQR_FILLED) /* could be failed */
                        continue;
                if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -EPERM;
                        continue;
                }
                /* Non-temporary stop condition will trigger fail fast */
                if (device->stopped & ~DASD_STOPPED_PENDING &&
                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
                    (!dasd_eer_enabled(device))) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -ENOLINK;
                        continue;
                }
                /*
                 * Don't try to start requests if device is in
                 * offline processing, it might wait forever
                 */
                if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -ENODEV;
                        continue;
                }
                /*
                 * Don't try to start requests if device is stopped
                 * except path verification requests
                 */
                if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
                        if (interruptible) {
                                rc = wait_event_interruptible(
                                        generic_waitq, !(device->stopped));
                                if (rc == -ERESTARTSYS) {
                                        cqr->status = DASD_CQR_FAILED;
                                        maincqr->intrc = rc;
                                        continue;
                                }
                        } else
                                wait_event(generic_waitq, !(device->stopped));
                }
                if (!cqr->callback)
                        cqr->callback = dasd_wakeup_cb;

                cqr->callback_data = DASD_SLEEPON_START_TAG;
                dasd_add_request_tail(cqr);
                if (interruptible) {
                        rc = wait_event_interruptible(
                                generic_waitq, _wait_for_wakeup(cqr));
                        if (rc == -ERESTARTSYS) {
                                dasd_cancel_req(cqr);
                                /* wait (non-interruptible) for final status */
                                wait_event(generic_waitq,
                                           _wait_for_wakeup(cqr));
                                cqr->status = DASD_CQR_FAILED;
                                maincqr->intrc = rc;
                                continue;
                        }
                } else
                        wait_event(generic_waitq, _wait_for_wakeup(cqr));
        }

        maincqr->endclk = get_tod_clock();
        if ((maincqr->status != DASD_CQR_DONE) &&
            (maincqr->intrc != -ERESTARTSYS))
                dasd_log_sense(maincqr, &maincqr->irb);
        if (maincqr->status == DASD_CQR_DONE)
                rc = 0;
        else if (maincqr->intrc)
                rc = maincqr->intrc;
        else
                rc = -EIO;
        return rc;
}

static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
{
        struct dasd_ccw_req *cqr;

        list_for_each_entry(cqr, ccw_queue, blocklist) {
                if (cqr->callback_data != DASD_SLEEPON_END_TAG)
                        return 0;
        }

        return 1;
}

static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
        struct dasd_device *device;
        struct dasd_ccw_req *cqr, *n;
        u8 *sense = NULL;
        int rc;

retry:
        list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
                device = cqr->startdev;
                if (cqr->status != DASD_CQR_FILLED) /* could be failed */
                        continue;

                if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -EPERM;
                        continue;
                }
                /* Non-temporary stop condition will trigger fail fast */
                if (device->stopped & ~DASD_STOPPED_PENDING &&
                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
                    !dasd_eer_enabled(device)) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -EAGAIN;
                        continue;
                }

                /* Don't try to start requests if device is stopped */
                if (interruptible) {
                        rc = wait_event_interruptible(
                                generic_waitq, !device->stopped);
                        if (rc == -ERESTARTSYS) {
                                cqr->status = DASD_CQR_FAILED;
                                cqr->intrc = rc;
                                continue;
                        }
                } else
                        wait_event(generic_waitq, !(device->stopped));

                if (!cqr->callback)
                        cqr->callback = dasd_wakeup_cb;
                cqr->callback_data = DASD_SLEEPON_START_TAG;
                dasd_add_request_tail(cqr);
        }

        wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));

        rc = 0;
        list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
                /*
                 * In some cases the 'File Protected' or 'Incorrect Length'
                 * error might be expected and error recovery would be
                 * unnecessary in these cases. Check if the corresponding
                 * suppress bit is set.
                 */
                sense = dasd_get_sense(&cqr->irb);
                if (sense && sense[1] & SNS1_FILE_PROTECTED &&
                    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
                        continue;
                if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
                    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
                        continue;

                /*
                 * for alias devices simplify error recovery and
                 * return to upper layer
                 * do not skip ERP requests
                 */
                if (cqr->startdev != cqr->basedev && !cqr->refers &&
                    (cqr->status == DASD_CQR_TERMINATED ||
                     cqr->status == DASD_CQR_NEED_ERP))
                        return -EAGAIN;

                /* normal recovery for basedev IO */
                if (__dasd_sleep_on_erp(cqr))
                        /* handle erp first */
                        goto retry;
        }

        return 0;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
        return _dasd_sleep_on(cqr, 0);
}
EXPORT_SYMBOL(dasd_sleep_on);
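
/*
 * Illustrative sketch of the synchronous pattern built on dasd_sleep_on()
 * (hypothetical caller; how the cqr is built and freed depends on the
 * discipline, so those steps are elided):
 *
 *      struct dasd_ccw_req *cqr;
 *      int rc;
 *
 *      cqr = ...;                      // discipline-specific cqr setup
 *      cqr->startdev = device;         // device that executes the request
 *      rc = dasd_sleep_on(cqr);        // 0 on DASD_CQR_DONE, else
 *                                      // cqr->intrc or -EIO
 *      ...                             // evaluate rc, then free the cqr
 */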

/*
 * Start requests from a ccw_queue and wait for their completion.
 */
int dasd_sleep_on_queue(struct list_head *ccw_queue)
{
        return _dasd_sleep_on_queue(ccw_queue, 0);
}
EXPORT_SYMBOL(dasd_sleep_on_queue);

/*
 * Start requests from a ccw_queue and wait interruptible for their completion.
 */
int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
{
        return _dasd_sleep_on_queue(ccw_queue, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
        return _dasd_sleep_on(cqr, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_interruptible);

/*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return 0;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        rc = device->discipline->term_IO(cqr);
        if (!rc)
                /*
                 * CQR terminated because a more important request is pending.
                 * Undo decreasing of retry counter because this is
                 * not an error case.
                 */
                cqr->retries++;
        return rc;
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->startdev;
        if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
            !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
                cqr->status = DASD_CQR_FAILED;
                cqr->intrc = -EPERM;
                return -EIO;
        }
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = _dasd_term_running_cqr(device);
        if (rc) {
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                return rc;
        }
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = DASD_SLEEPON_START_TAG;
        cqr->status = DASD_CQR_QUEUED;
        /*
         * add new request as second
         * first the terminated cqr needs to be finished
         */
        list_add(&cqr->devlist, device->ccw_queue.next);

        /* let the bh start the request to keep them in order */
        dasd_schedule_device_bh(device);

        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        wait_event(generic_waitq, _wait_for_wakeup(cqr));

        if (cqr->status == DASD_CQR_DONE)
                rc = 0;
        else if (cqr->intrc)
                rc = cqr->intrc;
        else
                rc = -EIO;

        /* kick tasklets */
        dasd_schedule_device_bh(device);
        if (device->block)
                dasd_schedule_block_bh(device->block);

        return rc;
}
EXPORT_SYMBOL(dasd_sleep_on_immediatly);

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful for timing out requests. The request will be
 * terminated if it is currently in I/O.
 * Returns 0 if request termination was successful
 *         negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device = cqr->startdev;
        int rc = 0;

        switch (cqr->status) {
        case DASD_CQR_QUEUED:
                /* request was not started - just set to cleared */
                cqr->status = DASD_CQR_CLEARED;
                break;
        case DASD_CQR_IN_IO:
                /* request in IO - terminate IO and release again */
                rc = device->discipline->term_IO(cqr);
                if (rc) {
                        dev_err(&device->cdev->dev,
                                "Cancelling request %p failed with rc=%d\n",
                                cqr, rc);
                } else {
                        cqr->stopclk = get_tod_clock();
                }
                break;
        default: /* already finished or clear pending - do nothing */
                break;
        }
        dasd_schedule_device_bh(device);
        return rc;
}

int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device = cqr->startdev;
        unsigned long flags;
        int rc;

        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        rc = __dasd_cancel_req(cqr);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        return rc;
}
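
/*
 * Because cancellation is asynchronous, a caller that needs synchronous
 * semantics has to pair dasd_cancel_req() with a wait for the final
 * callback, exactly as _dasd_sleep_on() does on -ERESTARTSYS. A minimal
 * sketch, assuming the cqr uses dasd_wakeup_cb() as its callback:
 *
 *      dasd_cancel_req(cqr);
 *      // wait (non-interruptible) until the request is returned
 *      wait_event(generic_waitq, _wait_for_wakeup(cqr));
 */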

/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(struct timer_list *t)
{
        unsigned long flags;
        struct dasd_block *block;

        block = from_timer(block, t, timer);
        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
        /* re-activate request queue */
        dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
        spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
        dasd_schedule_block_bh(block);
        blk_mq_run_hw_queues(block->request_queue, true);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
        if (expires == 0)
                del_timer(&block->timer);
        else
                mod_timer(&block->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_block_set_timer);
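
/*
 * The expires argument is a relative delay in jiffies, so callers
 * typically scale from seconds with HZ. Hypothetical examples:
 *
 *      dasd_block_set_timer(block, HZ / 2);    // fire in half a second
 *      dasd_block_set_timer(block, 0);         // cancel a pending timeout
 */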

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
        del_timer(&block->timer);
}
EXPORT_SYMBOL(dasd_block_clear_timer);

/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
                               struct dasd_ccw_req *cqr)
{
        dasd_erp_fn_t erp_fn;

        if (cqr->status == DASD_CQR_DONE)
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
        else
                dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
        erp_fn = device->discipline->erp_postaction(cqr);
        erp_fn(cqr);
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
        struct request *req;
        blk_status_t error = BLK_STS_OK;
        unsigned int proc_bytes;
        int status;

        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);

        proc_bytes = cqr->proc_bytes;
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status < 0)
                error = errno_to_blk_status(status);
        else if (status == 0) {
                switch (cqr->intrc) {
                case -EPERM:
                        error = BLK_STS_NEXUS;
                        break;
                case -ENOLINK:
                        error = BLK_STS_TRANSPORT;
                        break;
                case -ETIMEDOUT:
                        error = BLK_STS_TIMEOUT;
                        break;
                default:
                        error = BLK_STS_IOERR;
                        break;
                }
        }

        /*
         * We need to take care for ETIMEDOUT errors here since the
         * complete callback does not get called in this case.
         * Take care of all errors here and avoid additional code to
         * transfer the error value to the complete callback.
         */
        if (error) {
                blk_mq_end_request(req, error);
                blk_mq_run_hw_queues(req->q, true);
        } else {
                /*
                 * Partial completed requests can happen with ESE devices.
                 * During read we might have gotten a NRF error and have to
                 * complete a request partially.
                 */
                if (proc_bytes) {
                        blk_update_request(req, BLK_STS_OK, proc_bytes);
                        blk_mq_requeue_request(req, true);
                } else if (likely(!blk_should_fake_timeout(req->q))) {
                        blk_mq_complete_request(req);
                }
        }
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
                                           struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        dasd_erp_fn_t erp_fn;
        unsigned long flags;
        struct dasd_device *base = block->base;

restart:
        /* Process request with final status. */
        list_for_each_safe(l, n, &block->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED &&
                    cqr->status != DASD_CQR_NEED_ERP &&
                    cqr->status != DASD_CQR_TERMINATED)
                        continue;

                if (cqr->status == DASD_CQR_TERMINATED) {
                        base->discipline->handle_terminated_request(cqr);
                        goto restart;
                }

                /* Process requests that may be recovered */
                if (cqr->status == DASD_CQR_NEED_ERP) {
                        erp_fn = base->discipline->erp_action(cqr);
                        if (IS_ERR(erp_fn(cqr)))
                                continue;
                        goto restart;
                }

                /* log sense for fatal error */
                if (cqr->status == DASD_CQR_FAILED) {
                        dasd_log_sense(cqr, &cqr->irb);
                }

                /* First of all call extended error reporting. */
                if (dasd_eer_enabled(base) &&
                    cqr->status == DASD_CQR_FAILED) {
                        dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

                        /* restart request */
                        cqr->status = DASD_CQR_FILLED;
                        cqr->retries = 255;
                        spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
                        dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
                        spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
                                               flags);
                        goto restart;
                }

                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_process_erp(base, cqr);
                        goto restart;
                }

                /* Rechain finished requests to final queue */
                cqr->endclk = get_tod_clock();
                list_move_tail(&cqr->blocklist, final_queue);
        }
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
        dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&block->ccw_queue))
                return;
        /* We always begin with the first request on the queue, as some
         * of the previously started requests have to be enqueued on a
         * dasd_device again for error recovery.
         */
        list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
                if (cqr->status != DASD_CQR_FILLED)
                        continue;
                if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
                    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -EPERM;
                        dasd_schedule_block_bh(block);
                        continue;
                }
                /* Non-temporary stop condition will trigger fail fast */
                if (block->base->stopped & ~DASD_STOPPED_PENDING &&
                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
                    (!dasd_eer_enabled(block->base))) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->intrc = -ENOLINK;
                        dasd_schedule_block_bh(block);
                        continue;
                }
                /* Don't try to start requests if device is stopped */
                if (block->base->stopped)
                        return;

                /* just a fail safe check, should not happen */
                if (!cqr->startdev)
                        cqr->startdev = block->base;

                /* make sure that the requests we submit find their way back */
                cqr->callback = dasd_return_cqr_cb;

                dasd_add_request_tail(cqr);
        }
}

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(unsigned long data)
{
        struct dasd_block *block = (struct dasd_block *) data;
        struct list_head final_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_queue *dq;

        atomic_set(&block->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock_irq(&block->queue_lock);
        /* Finish off requests on ccw queue */
        __dasd_process_block_ccw_queue(block, &final_queue);
        spin_unlock_irq(&block->queue_lock);

        /* Now call the callback function of requests with final status */
        list_for_each_safe(l, n, &final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                dq = cqr->dq;
                spin_lock_irq(&dq->lock);
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
                spin_unlock_irq(&dq->lock);
        }

        spin_lock_irq(&block->queue_lock);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock_irq(&block->queue_lock);

        if (waitqueue_active(&shutdown_waitq))
                wake_up(&shutdown_waitq);
        dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up(&dasd_flush_wq);
}

/*
 * Requeue a request back to the block request queue
 * only works for block requests
 */
static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
        struct dasd_block *block = cqr->block;
        struct request *req;

        if (!block)
                return -EINVAL;
        /*
         * If the request is an ERP request there is nothing to requeue.
         * This will be done with the remaining original request.
         */
        if (cqr->refers)
                return 0;
        spin_lock_irq(&cqr->dq->lock);
        req = (struct request *) cqr->callback_data;
        blk_mq_requeue_request(req, false);
        spin_unlock_irq(&cqr->dq->lock);

        return 0;
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr, *n;
        int rc, i;
        struct list_head flush_queue;
        unsigned long flags;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_bh(&block->queue_lock);
        rc = 0;
restart:
        list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
                /* if this request is currently owned by a dasd_device cancel it */
                if (cqr->status >= DASD_CQR_QUEUED)
                        rc = dasd_cancel_req(cqr);
                if (rc < 0)
                        break;
                /* Rechain request (including erp chain) so it won't be
                 * touched by the dasd_block_tasklet anymore.
                 * Replace the callback so we notice when the request
                 * is returned from the dasd_device layer.
                 */
                cqr->callback = _dasd_wake_block_flush_cb;
                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
                        list_move_tail(&cqr->blocklist, &flush_queue);
                if (i > 1)
                        /* moved more than one request - need to restart */
                        goto restart;
        }
        spin_unlock_bh(&block->queue_lock);
        /* Now call the callback function of flushed requests */
restart_cb:
        list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
                /* Process finished ERP request. */
                if (cqr->refers) {
                        spin_lock_bh(&block->queue_lock);
                        __dasd_process_erp(block->base, cqr);
                        spin_unlock_bh(&block->queue_lock);
                        /* restart list_for_xx loop since dasd_process_erp
                         * might remove multiple elements */
                        goto restart_cb;
                }
                /* call the callback function */
                spin_lock_irqsave(&cqr->dq->lock, flags);
                cqr->endclk = get_tod_clock();
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
                spin_unlock_irqrestore(&cqr->dq->lock, flags);
        }
        return rc;
}

/*
 * Schedule a run of dasd_block_tasklet on the block tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
                return;
        /* life cycle of block is bound to its base device */
        dasd_get_device(block->base);
        tasklet_hi_schedule(&block->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_block_bh);

/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * DASD request queue function, called from the block layer (blk-mq).
 */
static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *qd)
{
        struct dasd_block *block = hctx->queue->queuedata;
        struct dasd_queue *dq = hctx->driver_data;
        struct request *req = qd->rq;
        struct dasd_device *basedev;
        struct dasd_ccw_req *cqr;
        blk_status_t rc = BLK_STS_OK;

        basedev = block->base;
        spin_lock_irq(&dq->lock);
        if (basedev->state < DASD_STATE_READY ||
            test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
                DBF_DEV_EVENT(DBF_ERR, basedev,
                              "device not ready for request %p", req);
                rc = BLK_STS_IOERR;
                goto out;
        }

        /*
         * if device is stopped do not fetch new requests
         * except failfast is active which will let requests fail
         * immediately in __dasd_block_start_head()
         */
        if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
                DBF_DEV_EVENT(DBF_ERR, basedev,
                              "device stopped request %p", req);
                rc = BLK_STS_RESOURCE;
                goto out;
        }

        if (basedev->features & DASD_FEATURE_READONLY &&
            rq_data_dir(req) == WRITE) {
                DBF_DEV_EVENT(DBF_ERR, basedev,
                              "Rejecting write request %p", req);
                rc = BLK_STS_IOERR;
                goto out;
        }

        if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
            (basedev->features & DASD_FEATURE_FAILFAST ||
             blk_noretry_request(req))) {
                DBF_DEV_EVENT(DBF_ERR, basedev,
                              "Rejecting failfast request %p", req);
                rc = BLK_STS_IOERR;
                goto out;
        }

        cqr = basedev->discipline->build_cp(basedev, block, req);
        if (IS_ERR(cqr)) {
                if (PTR_ERR(cqr) == -EBUSY ||
                    PTR_ERR(cqr) == -ENOMEM ||
                    PTR_ERR(cqr) == -EAGAIN) {
                        rc = BLK_STS_RESOURCE;
                        goto out;
                }
                DBF_DEV_EVENT(DBF_ERR, basedev,
                              "CCW creation failed (rc=%ld) on request %p",
                              PTR_ERR(cqr), req);
                rc = BLK_STS_IOERR;
                goto out;
        }
        /*
         * Note: callback is set to dasd_return_cqr_cb in
         * __dasd_block_start_head to cover erp requests as well
         */
        cqr->callback_data = req;
        cqr->status = DASD_CQR_FILLED;
        cqr->dq = dq;

        blk_mq_start_request(req);
        spin_lock(&block->queue_lock);
        list_add_tail(&cqr->blocklist, &block->ccw_queue);
        INIT_LIST_HEAD(&cqr->devlist);
        dasd_profile_start(block, cqr, req);
        dasd_schedule_block_bh(block);
        spin_unlock(&block->queue_lock);

out:
        spin_unlock_irq(&dq->lock);
        return rc;
}

/*
 * Block timeout callback, called from the block layer
 *
 * Return values:
 * BLK_EH_RESET_TIMER if the request should be left running
 * BLK_EH_DONE if the request is handled or terminated
 *             by the driver.
 */
enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
{
        struct dasd_block *block = req->q->queuedata;
        struct dasd_device *device;
        struct dasd_ccw_req *cqr;
        unsigned long flags;
        int rc = 0;

        cqr = blk_mq_rq_to_pdu(req);
        if (!cqr)
                return BLK_EH_DONE;

        spin_lock_irqsave(&cqr->dq->lock, flags);
        device = cqr->startdev ? cqr->startdev : block->base;
        if (!device->blk_timeout) {
                spin_unlock_irqrestore(&cqr->dq->lock, flags);
                return BLK_EH_RESET_TIMER;
        }
        DBF_DEV_EVENT(DBF_WARNING, device,
                      " dasd_times_out cqr %p status %x",
                      cqr, cqr->status);

        spin_lock(&block->queue_lock);
        spin_lock(get_ccwdev_lock(device->cdev));
        cqr->retries = -1;
        cqr->intrc = -ETIMEDOUT;
        if (cqr->status >= DASD_CQR_QUEUED) {
                rc = __dasd_cancel_req(cqr);
        } else if (cqr->status == DASD_CQR_FILLED ||
                   cqr->status == DASD_CQR_NEED_ERP) {
                cqr->status = DASD_CQR_TERMINATED;
        } else if (cqr->status == DASD_CQR_IN_ERP) {
                struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;

                list_for_each_entry_safe(searchcqr, nextcqr,
                                         &block->ccw_queue, blocklist) {
                        tmpcqr = searchcqr;
                        while (tmpcqr->refers)
                                tmpcqr = tmpcqr->refers;
                        if (tmpcqr != cqr)
                                continue;
                        /* searchcqr is an ERP request for cqr */
                        searchcqr->retries = -1;
                        searchcqr->intrc = -ETIMEDOUT;
                        if (searchcqr->status >= DASD_CQR_QUEUED) {
                                rc = __dasd_cancel_req(searchcqr);
                        } else if ((searchcqr->status == DASD_CQR_FILLED) ||
                                   (searchcqr->status == DASD_CQR_NEED_ERP)) {
                                searchcqr->status = DASD_CQR_TERMINATED;
                                rc = 0;
                        } else if (searchcqr->status == DASD_CQR_IN_ERP) {
                                /*
                                 * Shouldn't happen; most recent ERP
                                 * request is at the front of queue
                                 */
                                continue;
                        }
                        break;
                }
        }
        spin_unlock(get_ccwdev_lock(device->cdev));
        dasd_schedule_block_bh(block);
        spin_unlock(&block->queue_lock);
        spin_unlock_irqrestore(&cqr->dq->lock, flags);

        return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
}

static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int idx)
{
        struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);

        if (!dq)
                return -ENOMEM;

        spin_lock_init(&dq->lock);
        hctx->driver_data = dq;

        return 0;
}

static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
        kfree(hctx->driver_data);
        hctx->driver_data = NULL;
}

static void dasd_request_done(struct request *req)
{
        blk_mq_end_request(req, 0);
        blk_mq_run_hw_queues(req->q, true);
}

static struct blk_mq_ops dasd_mq_ops = {
        .queue_rq = do_dasd_request,
        .complete = dasd_request_done,
        .timeout = dasd_times_out,
        .init_hctx = dasd_init_hctx,
        .exit_hctx = dasd_exit_hctx,
};

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
        int rc;

        block->tag_set.ops = &dasd_mq_ops;
        block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
        block->tag_set.nr_hw_queues = nr_hw_queues;
        block->tag_set.queue_depth = queue_depth;
        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        block->tag_set.numa_node = NUMA_NO_NODE;

        rc = blk_mq_alloc_tag_set(&block->tag_set);
        if (rc)
                return rc;

        block->request_queue = blk_mq_init_queue(&block->tag_set);
        if (IS_ERR(block->request_queue))
                return PTR_ERR(block->request_queue);

        block->request_queue->queuedata = block;

        return 0;
}

/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
        if (block->request_queue) {
                blk_cleanup_queue(block->request_queue);
                blk_mq_free_tag_set(&block->tag_set);
                block->request_queue = NULL;
        }
}

static int dasd_open(struct block_device *bdev, fmode_t mode)
{
        struct dasd_device *base;
        int rc;

        base = dasd_device_from_gendisk(bdev->bd_disk);
        if (!base)
                return -ENODEV;

        atomic_inc(&base->block->open_count);
        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
                rc = -ENODEV;
                goto unlock;
        }

        if (!try_module_get(base->discipline->owner)) {
                rc = -EINVAL;
                goto unlock;
        }

        if (dasd_probeonly) {
                dev_info(&base->cdev->dev,
                         "Accessing the DASD failed because it is in "
                         "probeonly mode\n");
                rc = -EPERM;
                goto out;
        }

        if (base->state <= DASD_STATE_BASIC) {
                DBF_DEV_EVENT(DBF_ERR, base, " %s",
                              " Cannot open unrecognized device");
                rc = -ENODEV;
                goto out;
        }

        if ((mode & FMODE_WRITE) &&
            (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
             (base->features & DASD_FEATURE_READONLY))) {
                rc = -EROFS;
                goto out;
        }

        dasd_put_device(base);
        return 0;

out:
        module_put(base->discipline->owner);
unlock:
        atomic_dec(&base->block->open_count);
        dasd_put_device(base);
        return rc;
}

static void dasd_release(struct gendisk *disk, fmode_t mode)
{
        struct dasd_device *base = dasd_device_from_gendisk(disk);

        if (base) {
                atomic_dec(&base->block->open_count);
                module_put(base->discipline->owner);
                dasd_put_device(base);
        }
}

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct dasd_device *base;

        base = dasd_device_from_gendisk(bdev->bd_disk);
        if (!base)
                return -ENODEV;

        if (!base->discipline ||
            !base->discipline->fill_geometry) {
                dasd_put_device(base);
                return -EINVAL;
        }
        base->discipline->fill_geometry(base->block, geo);
        geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
        dasd_put_device(base);
        return 0;
}
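
/*
 * s2b_shift converts between 512-byte Linux sectors and device blocks,
 * which is why the partition offset from get_start_sect() is shifted
 * before being reported. A worked example, assuming a 4096-byte block
 * size (s2b_shift == 3): a partition starting at Linux sector 24576
 * yields geo->start == 24576 >> 3 == 3072 device blocks.
 */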

const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
};
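/*
 * Illustrative sketch (not part of this file): the gendisk setup code
 * in dasd_gendisk.c attaches this operations table when it allocates
 * the gendisk for a block device, roughly along the lines of
 *
 *	gdp->fops = &dasd_device_operations;
 *
 * so the open/release/ioctl/getgeo callbacks above are invoked by the
 * block layer for every DASD disk. The variable name gdp is used here
 * for illustration only.
 */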

/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	kmem_cache_destroy(dasd_page_cache);
	dasd_page_cache = NULL;
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
	dasd_statistics_removeroot();
}

/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	if (rc == 0 || rc == 2) {
		return diag_data.vrdcvfla & 0x80;
	} else {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);

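/*
 * Async callback scheduled by dasd_generic_probe() to bring a device
 * online without blocking the probe path.
 */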
static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
			dev_name(&cdev->dev), ret);
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_probe);

void dasd_generic_free_discipline(struct dasd_device *device)
{
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
		device->discipline = NULL;
	}
	if (device->base_discipline) {
		module_put(device->base_discipline->owner);
		device->base_discipline = NULL;
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device)) {
		dasd_remove_sysfs_files(cdev);
		return;
	}
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		dasd_remove_sysfs_files(cdev);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	cdev->handler = NULL;
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);

	dasd_remove_sysfs_files(cdev);
}
EXPORT_SYMBOL_GPL(dasd_generic_remove);

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			/* Try to load the required module. */
			rc = request_module(DASD_DIAG_MOD);
			if (rc) {
				pr_warn("%s Setting the DASD online failed "
					"because the required module %s "
					"could not be loaded (rc=%d)\n",
					dev_name(&cdev->dev), DASD_DIAG_MOD,
					rc);
				dasd_delete_device(device);
				return -ENODEV;
			}
		}
		/* Module init could have failed, so check again here after
		 * request_module(). */
		if (!dasd_diag_discipline_pointer) {
			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
				dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
			dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
			dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_online);

int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count, rc;
	unsigned long flags;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return PTR_ERR(device);
	}

	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener; that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
					dev_name(&cdev->dev), open_count);
			else
				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
					dev_name(&cdev->dev));
			rc = -EBUSY;
			goto out_err;
		}
	}

	/*
	 * Test if the offline processing is already running and exit if so.
	 * If a safe offline is being processed, this can only be a normal
	 * offline that is allowed to overtake the safe offline and cancel
	 * any I/O we no longer want to wait for.
	 */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
				  &device->flags);
		} else {
			rc = -EBUSY;
			goto out_err;
		}
	}
	set_bit(DASD_FLAG_OFFLINE, &device->flags);

	/*
	 * If safe_offline was requested, set the safe_offline_running flag
	 * and clear safe_offline so that a subsequent normal offline can
	 * overtake the safe offline processing.
	 */
	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* need to unlock here to wait for outstanding I/O */
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		/*
		 * If we want to set the device safe offline, all I/O
		 * operations should be finished before continuing the
		 * offline process, so sync the bdev first and then wait
		 * for our queues to become empty.
		 */
		if (device->block) {
			rc = fsync_bdev(device->block->bdev);
			if (rc != 0)
				goto interrupted;
		}
		dasd_schedule_device_bh(device);
		rc = wait_event_interruptible(shutdown_waitq,
					      _wait_for_empty_queues(device));
		if (rc != 0)
			goto interrupted;

		/*
		 * Check if a normal offline process overtook the offline
		 * processing. In this case simply do nothing besides
		 * returning that we got interrupted; otherwise mark safe
		 * offline as not running any longer and continue with
		 * normal offline.
		 */
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			rc = -ERESTARTSYS;
			goto out_err;
		}
		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);

	return 0;

interrupted:
	/* interrupted by signal */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
out_err:
	dasd_put_device(device);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);

int dasd_generic_last_path_gone(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	dev_warn(&device->cdev->dev, "No operational channel path is left "
		 "for the device\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
	/* First of all call extended error reporting. */
	dasd_eer_write(device, NULL, DASD_EER_NOPATH);

	if (device->state < DASD_STATE_BASIC)
		return 0;
	/* Device is active. We want to keep it. */
	list_for_each_entry(cqr, &device->ccw_queue, devlist)
		if ((cqr->status == DASD_CQR_IN_IO) ||
		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries++;
		}
	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);

int dasd_generic_path_operational(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "A channel path to the device has become "
		 "operational\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
	if (device->stopped & DASD_UNRESUMED_PM) {
		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
		dasd_restore_device(device);
		return 1;
	}
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}

	if (!device->stopped)
		wake_up(&generic_waitq);

	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);

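/*
 * CCW driver notify callback: translate channel events (path gone,
 * device boxed, path operational) into DASD path bookkeeping.
 */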
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		dasd_path_no_path(device);
		ret = dasd_generic_last_path_gone(device);
		break;
	case CIO_OPER:
		ret = 1;
		if (dasd_path_get_opm(device))
			ret = dasd_generic_path_operational(device);
		break;
	}
	dasd_put_device(device);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_notify);

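/*
 * Evaluate per-CHPID path events reported by the CIO layer. path_event
 * holds one event mask for each of the up to eight channel paths of
 * the device.
 */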
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE) {
			dasd_path_notoper(device, chp);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * We cannot establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first.
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * The device has no operational paths, but at least one
		 * path is disabled due to HPF errors. Disable HPF
		 * entirely and use the path(s) again.
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one
		 * path is disabled due to IFCC errors. Trigger path
		 * verification on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

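/*
 * Add verified paths to the operational path mask. If no path was
 * operational before, the device is put back into operation via
 * dasd_generic_path_operational().
 */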
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

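/*
 * The extent pool backing the device ran out of space. Report the
 * condition via extended error reporting, requeue the active request,
 * and stop the device until space becomes available again.
 */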
void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);

/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to requeue_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * Requeueing to the block layer only works for block
		 * device requests.
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, so there is no need to check again. All
		 * ERP requests (cqr->refers) have a cqr->block pointer
		 * copied from the original cqr.
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * If requests remain, they are internal requests and go back
	 * to the device queue.
	 */
	if (!list_empty(&requeue_queue)) {
		/* splice the remaining requests back onto the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}

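/*
 * Worker function behind dasd_schedule_requeue(): requeue all requests
 * and drop the device reference taken when the work was scheduled.
 */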
static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		device->discipline->freeze(device);

	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	return dasd_generic_requeue_all_requests(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * Call the discipline restore function. If the device is stopped
	 * (e.g. a disconnected device), do nothing.
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

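/*
 * Build a channel program with a single Read Device Characteristics
 * (RDC) CCW that reads rdc_buffer_size bytes into the request's data
 * area.
 */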
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

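/*
 * Read the device characteristics synchronously: build the RDC request,
 * run it with dasd_sleep_on(), and copy the result to rdc_buffer.
 */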
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
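/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * discipline would typically call this helper during device setup,
 * roughly like
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 &private->rdc_data,
 *					 sizeof(private->rdc_data));
 *	if (rc)
 *		return rc;
 *
 * where DASD_ECKD_MAGIC and private->rdc_data are discipline-specific
 * names used here for illustration only.
 */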

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1: /* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
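/*
 * Illustrative sketch (not taken from this file): interrupt handlers
 * use the unified accessor roughly like
 *
 *	char *sense = dasd_get_sense(irb);
 *
 *	if (sense && (sense[1] & SNS1_NO_REC_FOUND))
 *		handle_no_record_found();
 *
 * The SNS1_NO_REC_FOUND check and handle_no_record_found() are example
 * names used here for illustration only.
 */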

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

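/*
 * Module initialization: set up the wait queues, the debug area,
 * statistics, the devmap, gendisk handling, kernel parameter parsing,
 * extended error reporting, and the /proc interface. Any failure
 * unwinds through dasd_exit().
 */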
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);