xref: /OK3568_Linux_fs/kernel/drivers/s390/cio/io_sch.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H

#include <linux/types.h>
#include <asm/schid.h>
#include <asm/ccwdev.h>
#include <asm/irq.h>
#include "css.h"
#include "orb.h"

struct io_subchannel_dma_area {
	struct ccw1 sense_ccw;	/* static ccw for sense command */
};

struct io_subchannel_private {
	union orb orb;		/* operation request block */
	struct ccw_device *cdev; /* pointer to the child ccw device */
	struct {
		unsigned int suspend:1;	/* allow suspend */
		unsigned int prefetch:1; /* deny prefetch */
		unsigned int inter:1;	/* suppress intermediate interrupts */
	} __packed options;
	struct io_subchannel_dma_area *dma_area;
	dma_addr_t dma_area_dma;
} __aligned(8);

#define to_io_private(n) ((struct io_subchannel_private *) \
			  dev_get_drvdata(&(n)->dev))
#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))

static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
{
	struct io_subchannel_private *priv = to_io_private(sch);

	return priv ? priv->cdev : NULL;
}

static inline void sch_set_cdev(struct subchannel *sch,
				struct ccw_device *cdev)
{
	struct io_subchannel_private *priv = to_io_private(sch);

	if (priv)
		priv->cdev = cdev;
}
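
/*
 * Illustrative sketch, not part of the original header: how the accessors
 * above are typically paired when a subchannel's private data is set up,
 * in the spirit of io_subchannel_probe(). The function name and the
 * CIO_IO_SCH_EXAMPLES guard are hypothetical; linux/slab.h and
 * linux/dma-mapping.h are assumed to be available.
 */
#ifdef CIO_IO_SCH_EXAMPLES
static int example_attach_io_private(struct subchannel *sch,
				     struct ccw_device *cdev)
{
	struct io_subchannel_private *io_priv;

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		return -ENOMEM;
	/* The DMA area backs the static sense CCW declared above. */
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
					       sizeof(*io_priv->dma_area),
					       &io_priv->dma_area_dma,
					       GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		return -ENOMEM;
	}
	set_io_private(sch, io_priv);
	sch_set_cdev(sch, cdev);
	return 0;
}
#endif /* CIO_IO_SCH_EXAMPLES */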

#define MAX_CIWS 8

/*
 * Possible status values for a CCW request's I/O.
 */
enum io_status {
	IO_DONE,		/* request finished */
	IO_RUNNING,		/* request still in progress */
	IO_STATUS_ERROR,	/* channel or device status indicates an error */
	IO_PATH_ERROR,		/* path is not operational */
	IO_REJECTED,		/* device rejected the command */
	IO_KILLED		/* request was terminated (cancel/timeout) */
};

/**
 * struct ccw_request - Internal CCW request.
 * @cp: channel program to start
 * @timeout: maximum allowable time in jiffies between start I/O and interrupt
 * @maxretries: number of retries per I/O operation and path
 * @lpm: mask of paths to use
 * @check: optional callback that determines if results are final
 * @filter: optional callback to adjust request status based on IRB data
 * @callback: final callback
 * @data: user-defined pointer passed to all callbacks
 * @singlepath: if set, use only one path from @lpm per start I/O
 * @cancel: non-zero if request was cancelled
 * @done: non-zero if request was finished
 * @mask: current path mask
 * @retries: current number of retries
 * @drc: delayed return code
 */
struct ccw_request {
	struct ccw1 *cp;
	unsigned long timeout;
	u16 maxretries;
	u8 lpm;
	int (*check)(struct ccw_device *, void *);
	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
				 enum io_status);
	void (*callback)(struct ccw_device *, void *, int);
	void *data;
	unsigned int singlepath:1;
	/* These fields are used internally. */
	unsigned int cancel:1;
	unsigned int done:1;
	u16 mask;
	u16 retries;
	int drc;
} __attribute__((packed));
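
/*
 * Illustrative sketch, not part of the original header: filling in the
 * public fields of a ccw_request before handing it to the cio-internal
 * ccw_request_start() helper, following the pattern used by the sense-id
 * and path-grouping code. Names, timeout and retry values are made up;
 * the CIO_IO_SCH_EXAMPLES guard is hypothetical.
 */
#ifdef CIO_IO_SCH_EXAMPLES
static void example_callback(struct ccw_device *cdev, void *data, int rc)
{
	/* rc carries the final result: 0 on success or a negative errno. */
}

static void example_start_request(struct ccw_device *cdev,
				  struct ccw_request *req, struct ccw1 *cp)
{
	/* req must point at cdev->private->req (see below); clearing it
	 * also resets the internal cancel/done/mask/retries fields. */
	memset(req, 0, sizeof(*req));
	req->cp = cp;			/* channel program to execute */
	req->timeout = 10 * HZ;		/* example: give up after 10s */
	req->maxretries = 5;		/* example: 5 retries per path */
	req->lpm = 0x80;		/* example: first path only */
	req->singlepath = 1;
	req->callback = example_callback;
	ccw_request_start(cdev);
}
#endif /* CIO_IO_SCH_EXAMPLES */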

/*
 * sense-id response buffer layout
 */
struct senseid {
	/* common part */
	u8  reserved;	/* always 0x'FF' */
	u16 cu_type;	/* control unit type */
	u8  cu_model;	/* control unit model */
	u16 dev_type;	/* device type */
	u8  dev_model;	/* device model */
	u8  unused;	/* padding byte */
	/* extended part */
	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
}  __attribute__ ((packed, aligned(4)));
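
/*
 * Illustrative sketch, not part of the original header: scanning the
 * extended part of a sense-id response for a CIW of a given command type,
 * in the spirit of ccw_device_get_ciw(). It assumes struct ciw exposes a
 * ct (command type) field as defined in asm/cio.h; the function name and
 * the CIO_IO_SCH_EXAMPLES guard are hypothetical.
 */
#ifdef CIO_IO_SCH_EXAMPLES
static struct ciw *example_find_ciw(struct senseid *senseid, u32 ct)
{
	int i;

	if (senseid->reserved != 0xff)	/* no valid sense-id data */
		return NULL;
	for (i = 0; i < MAX_CIWS; i++)
		if (senseid->ciw[i].ct == ct)
			return &senseid->ciw[i];
	return NULL;
}
#endif /* CIO_IO_SCH_EXAMPLES */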

/* Deferred actions carried out by a ccw device's todo_work. */
enum cdev_todo {
	CDEV_TODO_NOTHING,	/* no action pending */
	CDEV_TODO_ENABLE_CMF,	/* enable channel measurement */
	CDEV_TODO_REBIND,	/* unbind and re-bind the device driver */
	CDEV_TODO_REGISTER,	/* register the device */
	CDEV_TODO_UNREG,	/* unregister the device */
	CDEV_TODO_UNREG_EVAL,	/* unregister, then re-evaluate the subchannel */
};

/* Values for the fake_irb flag in struct ccw_device_private below. */
#define FAKE_CMD_IRB	1	/* deliver a faked command-mode irb */
#define FAKE_TM_IRB	2	/* deliver a faked transport-mode irb */

struct ccw_device_dma_area {
	struct senseid senseid;	/* SenseID info */
	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
	struct irb irb;		/* device status */
	struct pgid pgid[8];	/* path group IDs per chpid */
};
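
/*
 * Illustrative sketch, not part of the original header: carving the
 * per-device DMA area out of a gen_pool of CIO-addressable memory with
 * cio_gp_dma_zalloc() (available since the cio DMA rework in v5.3),
 * mirroring how device recognition sets up the dma_area pointer below.
 * The function name and the CIO_IO_SCH_EXAMPLES guard are hypothetical.
 */
#ifdef CIO_IO_SCH_EXAMPLES
static struct ccw_device_dma_area *
example_alloc_dma_area(struct gen_pool *dma_pool, struct device *dev)
{
	return cio_gp_dma_zalloc(dma_pool, dev,
				 sizeof(struct ccw_device_dma_area));
}
#endif /* CIO_IO_SCH_EXAMPLES */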

struct ccw_device_private {
	struct ccw_device *cdev;
	struct subchannel *sch;
	int state;		/* device state */
	atomic_t onoff;		/* serializes online/offline processing */
	struct ccw_dev_id dev_id;	/* device id */
	struct ccw_request req;		/* internal I/O request */
	int iretry;		/* retry counter for internal operations */
	u8 pgid_valid_mask;	/* mask of valid PGIDs */
	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
	u8 path_noirq_mask;	/* mask of paths for which no irq was
				   received */
	u8 path_notoper_mask;	/* mask of paths which were found
				   not operable */
	u8 path_gone_mask;	/* mask of paths that became unavailable */
	u8 path_new_mask;	/* mask of paths that became available */
	u8 path_broken_mask;	/* mask of paths which were found to be
				   unusable */
	struct {
		unsigned int fast:1;	/* post with "channel end" */
		unsigned int repall:1;	/* report every interrupt status */
		unsigned int pgroup:1;	/* do path grouping */
		unsigned int force:1;	/* allow forced online */
		unsigned int mpath:1;	/* do multipathing */
	} __attribute__ ((packed)) options;
	struct {
		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
		unsigned int dosense:1;	    /* delayed SENSE required */
		unsigned int doverify:1;    /* delayed path verification */
		unsigned int donotify:1;    /* call notify function */
		unsigned int recog_done:1;  /* dev. recog. complete */
		unsigned int fake_irb:2;    /* deliver faked irb */
		unsigned int resuming:1;    /* recognition while resume */
		unsigned int pgroup:1;	    /* pathgroup is set up */
		unsigned int mpath:1;	    /* multipathing is set up */
		unsigned int pgid_unknown:1;/* unknown pgid state */
		unsigned int initialized:1; /* set if initial reference held */
	} __attribute__((packed)) flags;
	unsigned long intparm;	/* user interruption parameter */
	struct qdio_irq *qdio_data;	/* qdio-specific data */
	int async_kill_io_rc;	/* rc to report after async kill I/O */
	struct work_struct todo_work;	/* executes the todo action below */
	enum cdev_todo todo;	/* pending deferred action */
	wait_queue_head_t wait_q;	/* wait queue for state changes */
	struct timer_list timer;	/* timeout timer */
	void *cmb;			/* measurement information */
	struct list_head cmb_list;	/* list of measured devices */
	u64 cmb_start_time;		/* clock value of cmb reset */
	void *cmb_wait;			/* deferred cmb enable/disable */
	struct gen_pool *dma_pool;	/* pool of CIO-addressable DMA memory */
	struct ccw_device_dma_area *dma_area;	/* control blocks in DMA memory */
	enum interruption_class int_class;	/* class for IRQ accounting */
};
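
/*
 * Illustrative sketch, not part of the original header: device drivers do
 * not touch the options bits above directly; they call
 * ccw_device_set_options() with the CCWDEV_* flags from asm/ccwdev.h,
 * which map onto the fast/repall/pgroup/force/mpath bits. The function
 * name and the CIO_IO_SCH_EXAMPLES guard are hypothetical.
 */
#ifdef CIO_IO_SCH_EXAMPLES
static int example_configure(struct ccw_device *cdev)
{
	/* Request path grouping and multipathing for this device. */
	return ccw_device_set_options(cdev,
				      CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
}
#endif /* CIO_IO_SCH_EXAMPLES */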

#endif /* S390_IO_SCH_H */