// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

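/*
 * Iterate over all possible subchannel IDs in all subchannel sets up to
 * max_ssid and call @fn for each of them until it returns non-zero.
 */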
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

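/**
 * for_each_subchannel_staged - iterate over registered and unregistered
 *				subchannels
 * @fn_known: function to call for each registered subchannel
 * @fn_unknown: function to call for each unregistered subchannel ID
 * @data: opaque pointer passed through to the callbacks
 *
 * Registered subchannels are processed first; the remaining subchannel
 * IDs are then handed to @fn_unknown. Iteration stops on the first
 * non-zero return value.
 */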
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

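/*
 * Check whether a subchannel is worth registering: I/O and message
 * subchannels must be valid and their device must not be blacklisted;
 * all other subchannel types are accepted as-is.
 */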
static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

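/*
 * Allocate and initialize a new struct subchannel for @schid, based on
 * the information in @schib. Returns the subchannel or an ERR_PTR().
 */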
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses of some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	sch->dma_mask = DMA_BIT_MASK(64);
	sch->dev.dma_mask = &sch->dma_mask;
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

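/*
 * Refresh the subchannel's channel-path description, preferring CHSC
 * data and falling back to the PMCW, and register any channel paths
 * found in it.
 */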
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

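/*
 * Evaluate a single subchannel ID: hand known subchannels to their
 * driver's event handler, probe unknown ones, and reschedule on the
 * slow path if either returns -EAGAIN.
 */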
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time on platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}


static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

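/**
 * css_schedule_eval - schedule evaluation of a single subchannel
 * @schid: subchannel ID to be evaluated
 *
 * Add @schid to the slow subchannel set and kick off the slow path
 * worker.
 */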
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

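/* Schedule evaluation of all subchannels on the slow path. */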
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

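/*
 * Schedule evaluation of all subchannels that are not currently
 * registered, after an optional delay.
 */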
void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

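/*
 * Allocate and register the channel subsystem device with index @nr,
 * including its "defunct" pseudo subchannel.
 */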
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
	css->device.dma_mask = &css->device.coherent_dma_mask;

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;

}
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

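/*
 * Create a gen_pool backed by @nr_pages of coherent DMA memory
 * allocated on behalf of @dma_dev. The pool hands out 8-byte aligned
 * chunks (minimum allocation order 3).
 */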
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

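/*
 * Allocate zeroed DMA memory from @gp_dma, growing the pool by whole
 * pages from @dma_dev whenever the current chunks cannot satisfy the
 * request.
 */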
cio_gp_dma_zalloc(struct gen_pool * gp_dma,struct device * dma_dev,size_t size)1175*4882a593Smuzhiyun void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
1176*4882a593Smuzhiyun size_t size)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun dma_addr_t dma_addr;
1179*4882a593Smuzhiyun unsigned long addr;
1180*4882a593Smuzhiyun size_t chunk_size;
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun if (!gp_dma)
1183*4882a593Smuzhiyun return NULL;
1184*4882a593Smuzhiyun addr = gen_pool_alloc(gp_dma, size);
1185*4882a593Smuzhiyun while (!addr) {
1186*4882a593Smuzhiyun chunk_size = round_up(size, PAGE_SIZE);
1187*4882a593Smuzhiyun addr = (unsigned long) dma_alloc_coherent(dma_dev,
1188*4882a593Smuzhiyun chunk_size, &dma_addr, CIO_DMA_GFP);
1189*4882a593Smuzhiyun if (!addr)
1190*4882a593Smuzhiyun return NULL;
1191*4882a593Smuzhiyun gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
1192*4882a593Smuzhiyun addr = gen_pool_alloc(gp_dma, size);
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun return (void *) addr;
1195*4882a593Smuzhiyun }

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}
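
/*
 * Illustrative sketch, not part of the driver flow: how a caller might
 * manage a private DMA pool with the helpers above. The device pointer,
 * the single initial page and the 32-byte size are placeholder example
 * values, not taken from this driver.
 */
static void __maybe_unused cio_gp_dma_usage_sketch(struct device *dma_dev)
{
	struct gen_pool *pool;
	void *buf;

	pool = cio_gp_dma_create(dma_dev, 1);	/* start with one page */
	if (!pool)
		return;
	buf = cio_gp_dma_zalloc(pool, dma_dev, 32);
	if (buf) {
		/* ... use the zeroed, DMA-capable 32-byte buffer ... */
		cio_gp_dma_free(pool, buf, 32);
	}
	cio_gp_dma_destroy(pool, dma_dev);	/* releases the backing memory */
}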

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early callers such as the console, which may
 * run before the pool is initialized.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
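
/*
 * Illustrative sketch, not part of the driver flow: a typical use of the
 * global css DMA pool. The 64-byte size is an arbitrary example value.
 */
static void __maybe_unused cio_dma_usage_sketch(void)
{
	void *buf;

	buf = cio_dma_zalloc(64);
	if (!buf)
		return;
	/* ... hand the zeroed, DMA-capable buffer to the hardware ... */
	cio_dma_free(buf, 64);
}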

/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret)
		goto out_unregister_rn;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_pmn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_pmn:
	unregister_pm_notifier(&css_power_notifier);
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish. */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup before the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRWs. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
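
/*
 * Usage sketch: from user space, any write to /proc/cio_settle (for example
 * "echo 1 > /proc/cio_settle") blocks until pending channel report words have
 * been handled and subchannel evaluation has settled; the written data itself
 * is ignored.
 */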

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets the bus_type
 * in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
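
/*
 * Sketch of how a subchannel-type driver might hook into this bus. The
 * names foo_driver, foo_probe, foo_remove, the match_flags value and the
 * subchannel type number are placeholders, not part of this file:
 *
 *	static struct css_device_id foo_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = 0 },
 *		{ },	// sentinel: match_flags == 0 ends the table
 *	};
 *
 *	static struct css_driver foo_driver = {
 *		.drv = {
 *			.name  = "foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = foo_subchannel_ids,
 *		.probe  = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 * The driver is then registered with css_driver_register(&foo_driver) and
 * torn down again with css_driver_unregister(&foo_driver).
 */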