xref: /OK3568_Linux_fs/kernel/drivers/s390/cio/device.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef S390_DEVICE_H
3*4882a593Smuzhiyun #define S390_DEVICE_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <asm/ccwdev.h>
6*4882a593Smuzhiyun #include <linux/atomic.h>
7*4882a593Smuzhiyun #include <linux/timer.h>
8*4882a593Smuzhiyun #include <linux/wait.h>
9*4882a593Smuzhiyun #include <linux/notifier.h>
10*4882a593Smuzhiyun #include <linux/kernel_stat.h>
11*4882a593Smuzhiyun #include "io_sch.h"
12*4882a593Smuzhiyun 
/*
 * States of the ccw-device statemachine.
 *
 * The state value indexes the first dimension of dev_jumptable[], so the
 * order of the enumerators is part of the FSM's contract and NR_DEV_STATES
 * must remain the last entry.
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_ID,
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,
	DEV_STATE_ONLINE,
	DEV_STATE_W4SENSE,
	DEV_STATE_DISBAND_PGID,
	DEV_STATE_BOXED,
	/* states to wait for i/o completion before doing something */
	DEV_STATE_TIMEOUT_KILL,
	DEV_STATE_QUIESCE,
	/* special states for devices gone not operational */
	DEV_STATE_DISCONNECTED,
	DEV_STATE_DISCONNECTED_SENSE_ID,
	/* channel-measurement states; exempt from IRQ accounting (see
	 * dev_fsm_event()) */
	DEV_STATE_CMFCHANGE,
	DEV_STATE_CMFUPDATE,
	DEV_STATE_STEAL_LOCK,
	/* last element! */
	NR_DEV_STATES
};
37*4882a593Smuzhiyun 
/*
 * Asynchronous events fed into the device statemachine.
 *
 * The event value indexes the second dimension of dev_jumptable[];
 * NR_DEV_EVENTS must remain the last entry.
 */
enum dev_event {
	DEV_EVENT_NOTOPER,
	DEV_EVENT_INTERRUPT,
	DEV_EVENT_TIMEOUT,
	DEV_EVENT_VERIFY,
	/* last element! */
	NR_DEV_EVENTS
};
49*4882a593Smuzhiyun 
struct ccw_device;

/*
 * FSM action handler, called through the jumptable below.
 *
 * dev_jumptable is indexed as [current state][incoming event]; every
 * (state, event) slot must hold a valid handler, since dev_fsm_event()
 * dispatches through it unconditionally.
 */
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun static inline void
dev_fsm_event(struct ccw_device * cdev,enum dev_event dev_event)59*4882a593Smuzhiyun dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	int state = cdev->private->state;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	if (dev_event == DEV_EVENT_INTERRUPT) {
64*4882a593Smuzhiyun 		if (state == DEV_STATE_ONLINE)
65*4882a593Smuzhiyun 			inc_irq_stat(cdev->private->int_class);
66*4882a593Smuzhiyun 		else if (state != DEV_STATE_CMFCHANGE &&
67*4882a593Smuzhiyun 			 state != DEV_STATE_CMFUPDATE)
68*4882a593Smuzhiyun 			inc_irq_stat(IRQIO_CIO);
69*4882a593Smuzhiyun 	}
70*4882a593Smuzhiyun 	dev_jumptable[state][dev_event](cdev, dev_event);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun  * Delivers 1 if the device state is final.
75*4882a593Smuzhiyun  */
76*4882a593Smuzhiyun static inline int
dev_fsm_final_state(struct ccw_device * cdev)77*4882a593Smuzhiyun dev_fsm_final_state(struct ccw_device *cdev)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	return (cdev->private->state == DEV_STATE_NOT_OPER ||
80*4882a593Smuzhiyun 		cdev->private->state == DEV_STATE_OFFLINE ||
81*4882a593Smuzhiyun 		cdev->private->state == DEV_STATE_ONLINE ||
82*4882a593Smuzhiyun 		cdev->private->state == DEV_STATE_BOXED);
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun 
/* Subchannel driver setup and device-recognition entry points. */
int __init io_subchannel_init(void);

void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);

int ccw_device_cancel_halt_clear(struct ccw_device *);

int ccw_device_is_orphan(struct ccw_device *);

/* Online/offline state handling and sense-data management. */
void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);

/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);

/* Function prototypes for internal request handling. */
int lpm_adjust(int lpm, int mask);
void ccw_request_start(struct ccw_device *);
int ccw_request_cancel(struct ccw_device *cdev);
void ccw_request_handler(struct ccw_device *cdev);
void ccw_request_timeout(struct ccw_device *cdev);
void ccw_request_notoper(struct ccw_device *cdev);

/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_done(struct ccw_device *, int);

/* Function prototypes for path grouping stuff. */
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_done(struct ccw_device *, int);

void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_done(struct ccw_device *, int);

int ccw_device_stlck(struct ccw_device *);

/* Helper functions for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);

/* Timeout and recovery handling. */
void ccw_device_timeout(struct timer_list *t);
void ccw_device_set_timeout(struct ccw_device *, int);
void ccw_device_schedule_recovery(void);

/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
void cmf_reactivate(void);
int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
148*4882a593Smuzhiyun #endif
149