1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * PAV alias management for the DASD ECKD discipline
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright IBM Corp. 2007
6*4882a593Smuzhiyun * Author(s): Stefan Weinhuber <wein@de.ibm.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #define KMSG_COMPONENT "dasd-eckd"
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/list.h>
12*4882a593Smuzhiyun #include <linux/slab.h>
13*4882a593Smuzhiyun #include <asm/ebcdic.h>
14*4882a593Smuzhiyun #include "dasd_int.h"
15*4882a593Smuzhiyun #include "dasd_eckd.h"
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #ifdef PRINTK_HEADER
18*4882a593Smuzhiyun #undef PRINTK_HEADER
19*4882a593Smuzhiyun #endif /* PRINTK_HEADER */
20*4882a593Smuzhiyun #define PRINTK_HEADER "dasd(eckd):"
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun /*
24*4882a593Smuzhiyun * General concept of alias management:
25*4882a593Smuzhiyun * - PAV and DASD alias management is specific to the eckd discipline.
26*4882a593Smuzhiyun * - A device is connected to an lcu as long as the device exists.
 * dasd_alias_make_device_known_to_lcu will be called when the
28*4882a593Smuzhiyun * device is checked by the eckd discipline and
29*4882a593Smuzhiyun * dasd_alias_disconnect_device_from_lcu will be called
30*4882a593Smuzhiyun * before the device is deleted.
31*4882a593Smuzhiyun * - The dasd_alias_add_device / dasd_alias_remove_device
32*4882a593Smuzhiyun * functions mark the point when a device is 'ready for service'.
33*4882a593Smuzhiyun * - A summary unit check is a rare occasion, but it is mandatory to
34*4882a593Smuzhiyun * support it. It requires some complex recovery actions before the
35*4882a593Smuzhiyun * devices can be used again (see dasd_alias_handle_summary_unit_check).
36*4882a593Smuzhiyun * - dasd_alias_get_start_dev will find an alias device that can be used
37*4882a593Smuzhiyun * instead of the base device and does some (very simple) load balancing.
38*4882a593Smuzhiyun * This is the function that gets called for each I/O, so when improving
39*4882a593Smuzhiyun * something, this function should get faster or better, the rest has just
40*4882a593Smuzhiyun * to be correct.
41*4882a593Smuzhiyun */
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun
/* Workers and helper for deferred LCU maintenance (defined below). */
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

/*
 * Root of the alias tree: the list of all known storage servers,
 * protected by its own spinlock.  Each server holds a list of LCUs.
 */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
52*4882a593Smuzhiyun
_find_server(struct dasd_uid * uid)53*4882a593Smuzhiyun static struct alias_server *_find_server(struct dasd_uid *uid)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun struct alias_server *pos;
56*4882a593Smuzhiyun list_for_each_entry(pos, &aliastree.serverlist, server) {
57*4882a593Smuzhiyun if (!strncmp(pos->uid.vendor, uid->vendor,
58*4882a593Smuzhiyun sizeof(uid->vendor))
59*4882a593Smuzhiyun && !strncmp(pos->uid.serial, uid->serial,
60*4882a593Smuzhiyun sizeof(uid->serial)))
61*4882a593Smuzhiyun return pos;
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun return NULL;
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun
_find_lcu(struct alias_server * server,struct dasd_uid * uid)66*4882a593Smuzhiyun static struct alias_lcu *_find_lcu(struct alias_server *server,
67*4882a593Smuzhiyun struct dasd_uid *uid)
68*4882a593Smuzhiyun {
69*4882a593Smuzhiyun struct alias_lcu *pos;
70*4882a593Smuzhiyun list_for_each_entry(pos, &server->lculist, lcu) {
71*4882a593Smuzhiyun if (pos->uid.ssid == uid->ssid)
72*4882a593Smuzhiyun return pos;
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun return NULL;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun
_find_group(struct alias_lcu * lcu,struct dasd_uid * uid)77*4882a593Smuzhiyun static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
78*4882a593Smuzhiyun struct dasd_uid *uid)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun struct alias_pav_group *pos;
81*4882a593Smuzhiyun __u8 search_unit_addr;
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /* for hyper pav there is only one group */
84*4882a593Smuzhiyun if (lcu->pav == HYPER_PAV) {
85*4882a593Smuzhiyun if (list_empty(&lcu->grouplist))
86*4882a593Smuzhiyun return NULL;
87*4882a593Smuzhiyun else
88*4882a593Smuzhiyun return list_first_entry(&lcu->grouplist,
89*4882a593Smuzhiyun struct alias_pav_group, group);
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun /* for base pav we have to find the group that matches the base */
93*4882a593Smuzhiyun if (uid->type == UA_BASE_DEVICE)
94*4882a593Smuzhiyun search_unit_addr = uid->real_unit_addr;
95*4882a593Smuzhiyun else
96*4882a593Smuzhiyun search_unit_addr = uid->base_unit_addr;
97*4882a593Smuzhiyun list_for_each_entry(pos, &lcu->grouplist, group) {
98*4882a593Smuzhiyun if (pos->uid.base_unit_addr == search_unit_addr &&
99*4882a593Smuzhiyun !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
100*4882a593Smuzhiyun return pos;
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun return NULL;
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
_allocate_server(struct dasd_uid * uid)105*4882a593Smuzhiyun static struct alias_server *_allocate_server(struct dasd_uid *uid)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun struct alias_server *server;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun server = kzalloc(sizeof(*server), GFP_KERNEL);
110*4882a593Smuzhiyun if (!server)
111*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
112*4882a593Smuzhiyun memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
113*4882a593Smuzhiyun memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
114*4882a593Smuzhiyun INIT_LIST_HEAD(&server->server);
115*4882a593Smuzhiyun INIT_LIST_HEAD(&server->lculist);
116*4882a593Smuzhiyun return server;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun
/* Release an alias_server previously obtained from _allocate_server(). */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
123*4882a593Smuzhiyun
/*
 * Allocate a new LCU structure including the buffer for the unit
 * address configuration data (uac) and the pre-built reset summary
 * unit check request (rsu_cqr).  The channel-program buffers are
 * allocated with GFP_DMA since they are accessed by the channel
 * subsystem.  Returns ERR_PTR(-ENOMEM) if any allocation fails;
 * partial allocations are unwound via the goto chain.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	/* 16 bytes of payload for the reset-summary-unit-check request */
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	/* a fresh LCU needs its uac data read before it can be used */
	lcu->pav = NO_PAV;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
170*4882a593Smuzhiyun
/* Release an LCU and all buffers allocated by _allocate_lcu(). */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun /*
181*4882a593Smuzhiyun * This is the function that will allocate all the server and lcu data,
182*4882a593Smuzhiyun * so this function must be called first for a new device.
 * Returns 0 on success.
 * A negative return code indicates that something went wrong (e.g. -ENOMEM)
186*4882a593Smuzhiyun */
/*
 * Connect a device to the server/lcu structures matching its uid,
 * creating them if they do not exist yet.  The device is put on the
 * lcu's inactive list; it becomes usable via dasd_alias_add_device().
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/*
		 * Allocation may sleep, so drop the lock and re-check
		 * afterwards whether someone else created the server
		 * in the meantime.
		 */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		/* same drop-lock/allocate/re-check pattern as above */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	/* new devices start out inactive until dasd_alias_add_device */
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun /*
240*4882a593Smuzhiyun * This function removes a device from the scope of alias management.
241*4882a593Smuzhiyun * The complicated part is to make sure that it is not in use by
242*4882a593Smuzhiyun * any of the workers. If necessary cancel the work.
243*4882a593Smuzhiyun */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * Make sure that the workers don't use this device.  The lock
	 * must be dropped around cancel_*_sync, so ownership has to be
	 * re-checked after it is re-acquired.
	 */
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			/* drop the reference taken for the worker */
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	list_del_init(&device->alias_list);
	/* free the lcu if this was its last device */
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* a cancelled pending update must be rescheduled */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	/* the server is freed too once its last lcu is gone */
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun /*
307*4882a593Smuzhiyun * This function assumes that the unit address configuration stored
308*4882a593Smuzhiyun * in the lcu is up to date and will update the device uid before
309*4882a593Smuzhiyun * adding it to a pav group.
310*4882a593Smuzhiyun */
311*4882a593Smuzhiyun
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{

	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;
	struct dasd_uid uid;

	/* refresh uid type and base address from the current uac data */
	spin_lock(get_ccwdev_lock(device->cdev));
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	spin_unlock(get_ccwdev_lock(device->cdev));
	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}
	group = _find_group(lcu, &uid);
	if (!group) {
		/* GFP_ATOMIC: callers hold lcu->lock */
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		/* the group is keyed by the base device's unit address */
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	/* sort the device into the base or alias list of its group */
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
};
357*4882a593Smuzhiyun
/*
 * Move a device back to the lcu's inactive list and detach it from
 * its PAV group; an emptied group is freed.  Caller holds lcu->lock.
 */
static void _remove_device_from_lcu(struct alias_lcu *lcu,
				    struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;

	list_move(&device->alias_list, &lcu->inactive_devices);
	group = private->pavgroup;
	if (!group)
		return;
	private->pavgroup = NULL;
	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
		list_del(&group->group);
		kfree(group);
		return;
	}
	/* clear the cached next-device pointer if it refers to this device */
	if (group->next == device)
		group->next = NULL;
};
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun static int
suborder_not_supported(struct dasd_ccw_req * cqr)379*4882a593Smuzhiyun suborder_not_supported(struct dasd_ccw_req *cqr)
380*4882a593Smuzhiyun {
381*4882a593Smuzhiyun char *sense;
382*4882a593Smuzhiyun char reason;
383*4882a593Smuzhiyun char msg_format;
384*4882a593Smuzhiyun char msg_no;
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun /*
387*4882a593Smuzhiyun * intrc values ENODEV, ENOLINK and EPERM
388*4882a593Smuzhiyun * will be optained from sleep_on to indicate that no
389*4882a593Smuzhiyun * IO operation can be started
390*4882a593Smuzhiyun */
391*4882a593Smuzhiyun if (cqr->intrc == -ENODEV)
392*4882a593Smuzhiyun return 1;
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun if (cqr->intrc == -ENOLINK)
395*4882a593Smuzhiyun return 1;
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun if (cqr->intrc == -EPERM)
398*4882a593Smuzhiyun return 1;
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun sense = dasd_get_sense(&cqr->irb);
401*4882a593Smuzhiyun if (!sense)
402*4882a593Smuzhiyun return 0;
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun reason = sense[0];
405*4882a593Smuzhiyun msg_format = (sense[7] & 0xF0);
406*4882a593Smuzhiyun msg_no = (sense[7] & 0x0F);
407*4882a593Smuzhiyun
408*4882a593Smuzhiyun /* command reject, Format 0 MSG 4 - invalid parameter */
409*4882a593Smuzhiyun if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
410*4882a593Smuzhiyun return 1;
411*4882a593Smuzhiyun
412*4882a593Smuzhiyun return 0;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun
/*
 * Issue a PSF/RSSD channel program (suborder 0x0e) on the given device
 * to read the unit address configuration of the lcu into lcu->uac.
 * Returns 0 on success, -EOPNOTSUPP when the suborder is not supported
 * or the device is unusable for I/O, and another negative error code
 * for failures that should be retried (NEED_UAC_UPDATE is re-set).
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	/* one CCW for the PSF order plus one for the RSSD read */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		goto out;

	if (suborder_not_supported(cqr)) {
		/* suborder not supported or device unusable for IO */
		rc = -EOPNOTSUPP;
	} else {
		/* IO failed but should be retried */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
out:
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
481*4882a593Smuzhiyun
/*
 * Re-read the unit address configuration of the lcu (using refdev for
 * the I/O) and rebuild the PAV groups from scratch: all groups are
 * dissolved first and the devices are sorted back into groups after
 * the fresh data has arrived.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	/* move all devices out of their groups and delete the groups */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * there is another update needed skip the remaining handling
	 * the data might already be outdated
	 * but especially do not add the device to an LCU with pending
	 * update
	 */
	if (lcu->flags & NEED_UAC_UPDATE)
		goto out;
	/* derive the lcu's PAV mode from the first PAV-capable unit */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	/* sort the active devices back into groups */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
out:
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
544*4882a593Smuzhiyun
/*
 * Delayed-work handler that performs the actual lcu update.  On
 * failure (other than -EOPNOTSUPP) or if another update was requested
 * in the meantime, the work is rescheduled with a 30s delay.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		/* if rescheduling failed, drop the device reference now */
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
575*4882a593Smuzhiyun
/*
 * Schedule a deferred update of the unit address configuration.
 * Prefers the given device for the I/O; otherwise falls back to any
 * base, alias or active device of the lcu.  Caller holds lcu->lock.
 * Returns 0 on success and -EINVAL if no usable device was found.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	/* only use the given device if it is still part of the lcu */
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	/* hold a reference for the worker; dropped in lcu_update_work */
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}
618*4882a593Smuzhiyun
/*
 * Activate a device for alias management: sort it into its PAV group
 * based on the cached unit address configuration, or trigger a
 * deferred lcu update when the cached data looks outdated.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	__u8 uaddr = private->uid.real_unit_addr;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * Check if device and lcu type differ. If so, the uac data may be
	 * outdated and needs to be updated.
	 */
	if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
		lcu->flags |= UPDATE_PENDING;
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "uid type mismatch - trigger rescan");
	}
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		/* park the device on the active list until the update ran */
		list_move(&device->alias_list, &lcu->active_devices);
		private->pavgroup = NULL;
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}
651*4882a593Smuzhiyun
dasd_alias_update_add_device(struct dasd_device * device)652*4882a593Smuzhiyun int dasd_alias_update_add_device(struct dasd_device *device)
653*4882a593Smuzhiyun {
654*4882a593Smuzhiyun struct dasd_eckd_private *private = device->private;
655*4882a593Smuzhiyun
656*4882a593Smuzhiyun private->lcu->flags |= UPDATE_PENDING;
657*4882a593Smuzhiyun return dasd_alias_add_device(device);
658*4882a593Smuzhiyun }
659*4882a593Smuzhiyun
dasd_alias_remove_device(struct dasd_device * device)660*4882a593Smuzhiyun int dasd_alias_remove_device(struct dasd_device *device)
661*4882a593Smuzhiyun {
662*4882a593Smuzhiyun struct dasd_eckd_private *private = device->private;
663*4882a593Smuzhiyun struct alias_lcu *lcu = private->lcu;
664*4882a593Smuzhiyun unsigned long flags;
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun /* nothing to do if already removed */
667*4882a593Smuzhiyun if (!lcu)
668*4882a593Smuzhiyun return 0;
669*4882a593Smuzhiyun spin_lock_irqsave(&lcu->lock, flags);
670*4882a593Smuzhiyun _remove_device_from_lcu(lcu, device);
671*4882a593Smuzhiyun spin_unlock_irqrestore(&lcu->lock, flags);
672*4882a593Smuzhiyun return 0;
673*4882a593Smuzhiyun }
674*4882a593Smuzhiyun
/*
 * Select an alias device that can be used to start I/O instead of the
 * given base device, doing simple round-robin load balancing over the
 * aliases of the base device's pav group.
 * Returns the chosen alias device, or NULL when the caller must use the
 * base device itself (no PAV, update pending, no group/aliases, or the
 * chosen alias is busier/stopped/offline).
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	unsigned long flags;

	if (!lcu)
		return NULL;
	/* don't hand out aliases while the configuration is in flux */
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	group = private->pavgroup;
	if (!group) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		return NULL;
	}
	/* group->next remembers where the last round-robin pick stopped */
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin cursor, wrapping at the end of the list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	/* only use the alias if it is less loaded than the base and usable */
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun /*
732*4882a593Smuzhiyun * Summary unit check handling depends on the way alias devices
733*4882a593Smuzhiyun * are handled so it is done here rather then in dasd_eckd.c
734*4882a593Smuzhiyun */
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather then in dasd_eckd.c
 */
/*
 * Build and synchronously run a Reset Summary Unit Check (RSCK) CCW
 * request on @device, using the preallocated request of the lcu.
 * @reason is placed in the first data byte of the channel program.
 * Returns the result of dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	/* magic is the EBCDIC encoding of "ECKD" */
	memcpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
765*4882a593Smuzhiyun
_restart_all_base_devices_on_lcu(struct alias_lcu * lcu)766*4882a593Smuzhiyun static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun struct alias_pav_group *pavgroup;
769*4882a593Smuzhiyun struct dasd_device *device;
770*4882a593Smuzhiyun struct dasd_eckd_private *private;
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun /* active and inactive list can contain alias as well as base devices */
773*4882a593Smuzhiyun list_for_each_entry(device, &lcu->active_devices, alias_list) {
774*4882a593Smuzhiyun private = device->private;
775*4882a593Smuzhiyun if (private->uid.type != UA_BASE_DEVICE)
776*4882a593Smuzhiyun continue;
777*4882a593Smuzhiyun dasd_schedule_block_bh(device->block);
778*4882a593Smuzhiyun dasd_schedule_device_bh(device);
779*4882a593Smuzhiyun }
780*4882a593Smuzhiyun list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
781*4882a593Smuzhiyun private = device->private;
782*4882a593Smuzhiyun if (private->uid.type != UA_BASE_DEVICE)
783*4882a593Smuzhiyun continue;
784*4882a593Smuzhiyun dasd_schedule_block_bh(device->block);
785*4882a593Smuzhiyun dasd_schedule_device_bh(device);
786*4882a593Smuzhiyun }
787*4882a593Smuzhiyun list_for_each_entry(pavgroup, &lcu->grouplist, group) {
788*4882a593Smuzhiyun list_for_each_entry(device, &pavgroup->baselist, alias_list) {
789*4882a593Smuzhiyun dasd_schedule_block_bh(device->block);
790*4882a593Smuzhiyun dasd_schedule_device_bh(device);
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun }
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun
/*
 * Flush the request queues of all alias devices on the lcu and move the
 * aliases back to the active_devices list. The lcu lock must NOT be held
 * by the caller; it is taken and temporarily dropped internally.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here ist that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	/* collect aliases from active_devices; base devices stay put */
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	/* all members of a group's aliaslist are aliases, take them all */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		/* flushing may sleep, so the lcu lock must be dropped here */
		spin_unlock_irqrestore(&lcu->lock, flags);
		dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
845*4882a593Smuzhiyun
_stop_all_devices_on_lcu(struct alias_lcu * lcu)846*4882a593Smuzhiyun static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
847*4882a593Smuzhiyun {
848*4882a593Smuzhiyun struct alias_pav_group *pavgroup;
849*4882a593Smuzhiyun struct dasd_device *device;
850*4882a593Smuzhiyun
851*4882a593Smuzhiyun list_for_each_entry(device, &lcu->active_devices, alias_list) {
852*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
853*4882a593Smuzhiyun dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
854*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
855*4882a593Smuzhiyun }
856*4882a593Smuzhiyun list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
857*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
858*4882a593Smuzhiyun dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
859*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun list_for_each_entry(pavgroup, &lcu->grouplist, group) {
862*4882a593Smuzhiyun list_for_each_entry(device, &pavgroup->baselist, alias_list) {
863*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
864*4882a593Smuzhiyun dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
865*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
866*4882a593Smuzhiyun }
867*4882a593Smuzhiyun list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
868*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
869*4882a593Smuzhiyun dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
870*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
871*4882a593Smuzhiyun }
872*4882a593Smuzhiyun }
873*4882a593Smuzhiyun }
874*4882a593Smuzhiyun
_unstop_all_devices_on_lcu(struct alias_lcu * lcu)875*4882a593Smuzhiyun static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
876*4882a593Smuzhiyun {
877*4882a593Smuzhiyun struct alias_pav_group *pavgroup;
878*4882a593Smuzhiyun struct dasd_device *device;
879*4882a593Smuzhiyun
880*4882a593Smuzhiyun list_for_each_entry(device, &lcu->active_devices, alias_list) {
881*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
882*4882a593Smuzhiyun dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
883*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
884*4882a593Smuzhiyun }
885*4882a593Smuzhiyun list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
886*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
887*4882a593Smuzhiyun dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
888*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun list_for_each_entry(pavgroup, &lcu->grouplist, group) {
891*4882a593Smuzhiyun list_for_each_entry(device, &pavgroup->baselist, alias_list) {
892*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
893*4882a593Smuzhiyun dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
894*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
895*4882a593Smuzhiyun }
896*4882a593Smuzhiyun list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
897*4882a593Smuzhiyun spin_lock(get_ccwdev_lock(device->cdev));
898*4882a593Smuzhiyun dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
899*4882a593Smuzhiyun spin_unlock(get_ccwdev_lock(device->cdev));
900*4882a593Smuzhiyun }
901*4882a593Smuzhiyun }
902*4882a593Smuzhiyun }
903*4882a593Smuzhiyun
/*
 * Worker function that performs the actual summary unit check recovery:
 * flush all aliases, reset the summary unit check on the reporting
 * device, resume I/O, and schedule a re-read of the alias configuration.
 * Runs from the lcu's suc_data worker; drops the device reference taken
 * when the work was scheduled.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* the reporting device itself must be startable for the RSCK I/O */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* clearing suc_data.device allows the next summary unit check in */
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}
935*4882a593Smuzhiyun
dasd_alias_handle_summary_unit_check(struct work_struct * work)936*4882a593Smuzhiyun void dasd_alias_handle_summary_unit_check(struct work_struct *work)
937*4882a593Smuzhiyun {
938*4882a593Smuzhiyun struct dasd_device *device = container_of(work, struct dasd_device,
939*4882a593Smuzhiyun suc_work);
940*4882a593Smuzhiyun struct dasd_eckd_private *private = device->private;
941*4882a593Smuzhiyun struct alias_lcu *lcu;
942*4882a593Smuzhiyun unsigned long flags;
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun lcu = private->lcu;
945*4882a593Smuzhiyun if (!lcu) {
946*4882a593Smuzhiyun DBF_DEV_EVENT(DBF_WARNING, device, "%s",
947*4882a593Smuzhiyun "device not ready to handle summary"
948*4882a593Smuzhiyun " unit check (no lcu structure)");
949*4882a593Smuzhiyun goto out;
950*4882a593Smuzhiyun }
951*4882a593Smuzhiyun spin_lock_irqsave(&lcu->lock, flags);
952*4882a593Smuzhiyun /* If this device is about to be removed just return and wait for
953*4882a593Smuzhiyun * the next interrupt on a different device
954*4882a593Smuzhiyun */
955*4882a593Smuzhiyun if (list_empty(&device->alias_list)) {
956*4882a593Smuzhiyun DBF_DEV_EVENT(DBF_WARNING, device, "%s",
957*4882a593Smuzhiyun "device is in offline processing,"
958*4882a593Smuzhiyun " don't do summary unit check handling");
959*4882a593Smuzhiyun goto out_unlock;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun if (lcu->suc_data.device) {
962*4882a593Smuzhiyun /* already scheduled or running */
963*4882a593Smuzhiyun DBF_DEV_EVENT(DBF_WARNING, device, "%s",
964*4882a593Smuzhiyun "previous instance of summary unit check worker"
965*4882a593Smuzhiyun " still pending");
966*4882a593Smuzhiyun goto out_unlock;
967*4882a593Smuzhiyun }
968*4882a593Smuzhiyun _stop_all_devices_on_lcu(lcu);
969*4882a593Smuzhiyun /* prepare for lcu_update */
970*4882a593Smuzhiyun lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
971*4882a593Smuzhiyun lcu->suc_data.reason = private->suc_reason;
972*4882a593Smuzhiyun lcu->suc_data.device = device;
973*4882a593Smuzhiyun dasd_get_device(device);
974*4882a593Smuzhiyun if (!schedule_work(&lcu->suc_data.worker))
975*4882a593Smuzhiyun dasd_put_device(device);
976*4882a593Smuzhiyun out_unlock:
977*4882a593Smuzhiyun spin_unlock_irqrestore(&lcu->lock, flags);
978*4882a593Smuzhiyun out:
979*4882a593Smuzhiyun clear_bit(DASD_FLAG_SUC, &device->flags);
980*4882a593Smuzhiyun dasd_put_device(device);
981*4882a593Smuzhiyun };
982