xref: /OK3568_Linux_fs/kernel/drivers/s390/cio/eadm_sch.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for s390 eadm subchannels
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>

#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"

MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");

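/*
 * All eadm subchannels known to this driver are kept on eadm_list,
 * protected by list_lock. Requests that do not complete within
 * EADM_TIMEOUT are terminated via the clear function.
 */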
#define EADM_TIMEOUT (7 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);

static debug_info_t *eadm_debug;

#define EADM_LOG(imp, txt) do {					\
		debug_text_event(eadm_debug, imp, txt);		\
	} while (0)

static void EADM_LOG_HEX(int level, void *data, int length)
{
	debug_event(eadm_debug, level, data, length);
}

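/* Initialize the ORB for an eadm request: fmt, x and both compat bits set. */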
static void orb_init(union orb *orb)
{
	memset(orb, 0, sizeof(union orb));
	orb->eadm.compat1 = 1;
	orb->eadm.compat2 = 1;
	orb->eadm.fmt = 1;
	orb->eadm.x = 1;
}

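/*
 * Start an eadm operation: point the ORB at the AOB, use the subchannel
 * address as interruption parameter and issue ssch. The condition code
 * is translated into 0, -EBUSY or -ENODEV.
 */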
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
	union orb *orb = &get_eadm_private(sch)->orb;
	int cc;

	orb_init(orb);
	orb->eadm.aob = (u32)__pa(aob);
	orb->eadm.intparm = (u32)(addr_t)sch;
	orb->eadm.key = PAGE_DEFAULT_KEY >> 4;

	EADM_LOG(6, "start");
	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		break;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* not operational */
		return -ENODEV;
	}
	return 0;
}

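/*
 * Terminate the current operation by issuing csch. Any non-zero
 * condition code is reported as -ENODEV.
 */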
static int eadm_subchannel_clear(struct subchannel *sch)
{
	int cc;

	cc = csch(sch->schid);
	if (cc)
		return -ENODEV;

	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
	return 0;
}

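/*
 * Timer callback: the request did not complete within EADM_TIMEOUT,
 * so try to terminate it with the clear function.
 */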
static void eadm_subchannel_timeout(struct timer_list *t)
{
	struct eadm_private *private = from_timer(private, t, timer);
	struct subchannel *sch = private->sch;

	spin_lock_irq(sch->lock);
	EADM_LOG(1, "timeout");
	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
	if (eadm_subchannel_clear(sch))
		EADM_LOG(0, "clear failed");
	spin_unlock_irq(sch->lock);
}

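/*
 * Arm, update or (for expires == 0) cancel the per-subchannel timer.
 */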
static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
	struct eadm_private *private = get_eadm_private(sch);

	if (expires == 0) {
		del_timer(&private->timer);
		return;
	}
	if (timer_pending(&private->timer)) {
		if (mod_timer(&private->timer, jiffies + expires))
			return;
	}
	private->timer.expires = jiffies + expires;
	add_timer(&private->timer);
}

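/*
 * Interrupt handler: derive a blk_status_t from the subchannel status
 * (I/O error on alert/pending status with extended status, timeout if
 * the clear function was performed), hand the completed AOB to
 * scm_irq_handler() and mark the subchannel idle again. Unsolicited
 * interrupts set the state to EADM_NOT_OPER and schedule an evaluation.
 */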
static void eadm_subchannel_irq(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
	struct irb *irb = this_cpu_ptr(&cio_irb);
	blk_status_t error = BLK_STS_OK;

	EADM_LOG(6, "irq");
	EADM_LOG_HEX(6, irb, sizeof(*irb));

	inc_irq_stat(IRQIO_ADM);

	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
		error = BLK_STS_IOERR;

	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
		error = BLK_STS_TIMEOUT;

	eadm_subchannel_set_timeout(sch, 0);

	if (private->state != EADM_BUSY) {
		EADM_LOG(1, "irq unsol");
		EADM_LOG_HEX(1, irb, sizeof(*irb));
		private->state = EADM_NOT_OPER;
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
		return;
	}
	scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
	private->state = EADM_IDLE;

	if (private->completion)
		complete(private->completion);
}

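/*
 * Find an idle eadm subchannel, mark it busy and move it to the tail of
 * eadm_list so the subchannels are used round-robin. Returns NULL if
 * none is idle.
 */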
static struct subchannel *eadm_get_idle_sch(void)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry(private, &eadm_list, head) {
		sch = private->sch;
		spin_lock(sch->lock);
		if (private->state == EADM_IDLE) {
			private->state = EADM_BUSY;
			list_move_tail(&private->head, &eadm_list);
			spin_unlock(sch->lock);
			spin_unlock_irqrestore(&list_lock, flags);

			return sch;
		}
		spin_unlock(sch->lock);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	return NULL;
}

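/*
 * eadm_start_aob - start an asynchronous operation
 * @aob: fully initialized asynchronous operation block
 *
 * Pick an idle eadm subchannel and start the operation described by the
 * AOB on it. Completion is reported asynchronously through
 * scm_irq_handler(). Returns 0 on success, -EBUSY if no idle subchannel
 * is available or the start is rejected as busy, and -ENODEV if the
 * subchannel is not operational.
 */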
int eadm_start_aob(struct aob *aob)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;
	int ret;

	sch = eadm_get_idle_sch();
	if (!sch)
		return -EBUSY;

	spin_lock_irqsave(sch->lock, flags);
	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
	ret = eadm_subchannel_start(sch, aob);
	if (!ret)
		goto out_unlock;

	/* Handle start subchannel failure. */
	eadm_subchannel_set_timeout(sch, 0);
	private = get_eadm_private(sch);
	private->state = EADM_NOT_OPER;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);

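/*
 * Probe callback: allocate the per-subchannel private data, enable the
 * subchannel on the eadm interruption subclass and add it to eadm_list.
 * A deferred ADD uevent is delivered once setup succeeded.
 */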
static int eadm_subchannel_probe(struct subchannel *sch)
{
	struct eadm_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	INIT_LIST_HEAD(&private->head);
	timer_setup(&private->timer, eadm_subchannel_timeout, 0);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, private);
	private->state = EADM_IDLE;
	private->sch = sch;
	sch->isc = EADM_SCH_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		set_eadm_private(sch, NULL);
		spin_unlock_irq(sch->lock);
		kfree(private);
		goto out;
	}
	spin_unlock_irq(sch->lock);

	spin_lock_irq(&list_lock);
	list_add(&private->head, &eadm_list);
	spin_unlock_irq(&list_lock);

	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
out:
	return ret;
}

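/*
 * Quiesce a subchannel: terminate a busy operation with the clear
 * function, wait for the resulting interrupt and disable the
 * subchannel. Used from the remove and shutdown callbacks.
 */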
static void eadm_quiesce(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	spin_lock_irq(sch->lock);
	if (private->state != EADM_BUSY)
		goto disable;

	if (eadm_subchannel_clear(sch))
		goto disable;

	private->completion = &completion;
	spin_unlock_irq(sch->lock);

	wait_for_completion_io(&completion);

	spin_lock_irq(sch->lock);
	private->completion = NULL;

disable:
	eadm_subchannel_set_timeout(sch, 0);
	do {
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	spin_unlock_irq(sch->lock);
}

static int eadm_subchannel_remove(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	spin_lock_irq(&list_lock);
	list_del(&private->head);
	spin_unlock_irq(&list_lock);

	eadm_quiesce(sch);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, NULL);
	spin_unlock_irq(sch->lock);

	kfree(private);

	return 0;
}

static void eadm_subchannel_shutdown(struct subchannel *sch)
{
	eadm_quiesce(sch);
}

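/*
 * Hibernation callbacks: the subchannel is disabled on freeze and
 * re-enabled on thaw/restore.
 */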
static int eadm_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}

static int eadm_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}

/**
 * eadm_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
	struct eadm_private *private;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		goto out_unlock;
	}
	private = get_eadm_private(sch);
	if (private->state == EADM_NOT_OPER)
		private->state = EADM_IDLE;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return 0;
}

static struct css_device_id eadm_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);

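/* Subchannel driver callbacks, registered with the css core on init. */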
static struct css_driver eadm_subchannel_driver = {
	.drv = {
		.name = "eadm_subchannel",
		.owner = THIS_MODULE,
	},
	.subchannel_type = eadm_subchannel_ids,
	.irq = eadm_subchannel_irq,
	.probe = eadm_subchannel_probe,
	.remove = eadm_subchannel_remove,
	.shutdown = eadm_subchannel_shutdown,
	.sch_event = eadm_subchannel_sch_event,
	.freeze = eadm_subchannel_freeze,
	.thaw = eadm_subchannel_restore,
	.restore = eadm_subchannel_restore,
};

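/*
 * Module init: bail out if the machine lacks the eadm facility, set up
 * the s390 debug log, register the eadm interruption subclass and
 * register the driver with the css core.
 */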
static int __init eadm_sch_init(void)
{
	int ret;

	if (!css_general_characteristics.eadm)
		return -ENXIO;

	eadm_debug = debug_register("eadm_log", 16, 1, 16);
	if (!eadm_debug)
		return -ENOMEM;

	debug_register_view(eadm_debug, &debug_hex_ascii_view);
	debug_set_level(eadm_debug, 2);

	isc_register(EADM_SCH_ISC);
	ret = css_driver_register(&eadm_subchannel_driver);
	if (ret)
		goto cleanup;

	return ret;

cleanup:
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
	return ret;
}

static void __exit eadm_sch_exit(void)
{
	css_driver_unregister(&eadm_subchannel_driver);
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);