xref: /OK3568_Linux_fs/kernel/drivers/s390/block/dasd_int.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4*4882a593Smuzhiyun  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5*4882a593Smuzhiyun  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
6*4882a593Smuzhiyun  * Bugreports.to..: <Linux390@de.ibm.com>
7*4882a593Smuzhiyun  * Copyright IBM Corp. 1999, 2009
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #ifndef DASD_INT_H
11*4882a593Smuzhiyun #define DASD_INT_H
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun /* we keep old device allocation scheme; IOW, minors are still in 0..255 */
14*4882a593Smuzhiyun #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
15*4882a593Smuzhiyun #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun /*
18*4882a593Smuzhiyun  * States a dasd device can have:
19*4882a593Smuzhiyun  *   new: the dasd_device structure is allocated.
20*4882a593Smuzhiyun  *   known: the discipline for the device is identified.
21*4882a593Smuzhiyun  *   basic: the device can do basic i/o.
22*4882a593Smuzhiyun  *   unfmt: the device could not be analyzed (format is unknown).
23*4882a593Smuzhiyun  *   ready: partition detection is done and the device can do block i/o.
24*4882a593Smuzhiyun  *   online: the device accepts requests from the block device queue.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * Things to do for startup state transitions:
27*4882a593Smuzhiyun  *   new -> known: find discipline for the device and create devfs entries.
28*4882a593Smuzhiyun  *   known -> basic: request irq line for the device.
29*4882a593Smuzhiyun  *   basic -> ready: do the initial analysis, e.g. format detection,
30*4882a593Smuzhiyun  *                   do block device setup and detect partitions.
31*4882a593Smuzhiyun  *   ready -> online: schedule the device tasklet.
32*4882a593Smuzhiyun  * Things to do for shutdown state transitions:
33*4882a593Smuzhiyun  *   online -> ready: just set the new device state.
34*4882a593Smuzhiyun  *   ready -> basic: flush requests from the block device layer, clear
35*4882a593Smuzhiyun  *                   partition information and reset format information.
36*4882a593Smuzhiyun  *   basic -> known: terminate all requests and free irq.
37*4882a593Smuzhiyun  *   known -> new: remove devfs entries and forget discipline.
38*4882a593Smuzhiyun  */
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define DASD_STATE_NEW	  0
41*4882a593Smuzhiyun #define DASD_STATE_KNOWN  1
42*4882a593Smuzhiyun #define DASD_STATE_BASIC  2
43*4882a593Smuzhiyun #define DASD_STATE_UNFMT  3
44*4882a593Smuzhiyun #define DASD_STATE_READY  4
45*4882a593Smuzhiyun #define DASD_STATE_ONLINE 5
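/*
 * Illustrative sketch, not part of the original header: struct dasd_device
 * (defined further down) carries both a current state and a target state,
 * and the core driver walks the chain above one step at a time.  A
 * hypothetical caller that only wants to touch the block layer once
 * partition detection has finished could check something like:
 *
 *	if (device->state >= DASD_STATE_READY &&
 *	    device->target >= DASD_STATE_READY)
 *		... safe to issue block layer requests ...
 */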
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #include <linux/module.h>
48*4882a593Smuzhiyun #include <linux/wait.h>
49*4882a593Smuzhiyun #include <linux/blkdev.h>
50*4882a593Smuzhiyun #include <linux/genhd.h>
51*4882a593Smuzhiyun #include <linux/hdreg.h>
52*4882a593Smuzhiyun #include <linux/interrupt.h>
53*4882a593Smuzhiyun #include <linux/log2.h>
54*4882a593Smuzhiyun #include <asm/ccwdev.h>
55*4882a593Smuzhiyun #include <linux/workqueue.h>
56*4882a593Smuzhiyun #include <asm/debug.h>
57*4882a593Smuzhiyun #include <asm/dasd.h>
58*4882a593Smuzhiyun #include <asm/idals.h>
59*4882a593Smuzhiyun #include <linux/bitops.h>
60*4882a593Smuzhiyun #include <linux/blk-mq.h>
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun /* DASD discipline magic */
63*4882a593Smuzhiyun #define DASD_ECKD_MAGIC 0xC5C3D2C4
64*4882a593Smuzhiyun #define DASD_DIAG_MAGIC 0xC4C9C1C7
65*4882a593Smuzhiyun #define DASD_FBA_MAGIC 0xC6C2C140
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun /*
68*4882a593Smuzhiyun  * SECTION: Type definitions
69*4882a593Smuzhiyun  */
70*4882a593Smuzhiyun struct dasd_device;
71*4882a593Smuzhiyun struct dasd_block;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /* BIT DEFINITIONS FOR SENSE DATA */
74*4882a593Smuzhiyun #define DASD_SENSE_BIT_0 0x80
75*4882a593Smuzhiyun #define DASD_SENSE_BIT_1 0x40
76*4882a593Smuzhiyun #define DASD_SENSE_BIT_2 0x20
77*4882a593Smuzhiyun #define DASD_SENSE_BIT_3 0x10
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun /* BIT DEFINITIONS FOR SIM SENSE */
80*4882a593Smuzhiyun #define DASD_SIM_SENSE 0x0F
81*4882a593Smuzhiyun #define DASD_SIM_MSG_TO_OP 0x03
82*4882a593Smuzhiyun #define DASD_SIM_LOG 0x0C
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun /* lock class for nested cdev lock */
85*4882a593Smuzhiyun #define CDEV_NESTED_FIRST 1
86*4882a593Smuzhiyun #define CDEV_NESTED_SECOND 2
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun /*
89*4882a593Smuzhiyun  * SECTION: MACROs for klogd and s390 debug feature (dbf)
90*4882a593Smuzhiyun  */
91*4882a593Smuzhiyun #define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
92*4882a593Smuzhiyun do { \
93*4882a593Smuzhiyun 	debug_sprintf_event(d_device->debug_area, \
94*4882a593Smuzhiyun 			    d_level, \
95*4882a593Smuzhiyun 			    d_str "\n", \
96*4882a593Smuzhiyun 			    d_data); \
97*4882a593Smuzhiyun } while(0)
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun #define DBF_EVENT(d_level, d_str, d_data...)\
100*4882a593Smuzhiyun do { \
101*4882a593Smuzhiyun 	debug_sprintf_event(dasd_debug_area, \
102*4882a593Smuzhiyun 			    d_level,\
103*4882a593Smuzhiyun 			    d_str "\n", \
104*4882a593Smuzhiyun 			    d_data); \
105*4882a593Smuzhiyun } while(0)
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun #define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...)	\
108*4882a593Smuzhiyun do { \
109*4882a593Smuzhiyun 	struct ccw_dev_id __dev_id;			\
110*4882a593Smuzhiyun 	ccw_device_get_id(d_cdev, &__dev_id);		\
111*4882a593Smuzhiyun 	debug_sprintf_event(dasd_debug_area,		\
112*4882a593Smuzhiyun 			    d_level,					\
113*4882a593Smuzhiyun 			    "0.%x.%04x " d_str "\n",			\
114*4882a593Smuzhiyun 			    __dev_id.ssid, __dev_id.devno, d_data);	\
115*4882a593Smuzhiyun } while (0)
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun /* limit size for an errorstring */
118*4882a593Smuzhiyun #define ERRORLENGTH 30
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun /* definition of dbf debug levels */
121*4882a593Smuzhiyun #define	DBF_EMERG	0	/* system is unusable			*/
122*4882a593Smuzhiyun #define	DBF_ALERT	1	/* action must be taken immediately	*/
123*4882a593Smuzhiyun #define	DBF_CRIT	2	/* critical conditions			*/
124*4882a593Smuzhiyun #define	DBF_ERR		3	/* error conditions			*/
125*4882a593Smuzhiyun #define	DBF_WARNING	4	/* warning conditions			*/
126*4882a593Smuzhiyun #define	DBF_NOTICE	5	/* normal but significant condition	*/
127*4882a593Smuzhiyun #define	DBF_INFO	6	/* informational			*/
128*4882a593Smuzhiyun #define	DBF_DEBUG	6	/* debug-level messages			*/
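/*
 * Hedged usage sketch for the dbf macros above (the variables are only
 * placeholders, not taken from the original source):
 *
 *	DBF_DEV_EVENT(DBF_WARNING, device, "unexpected return code %d", rc);
 *	DBF_EVENT(DBF_INFO, "registered %d devices", count);
 */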
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /* messages to be written via klogd and dbf */
131*4882a593Smuzhiyun #define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
132*4882a593Smuzhiyun do { \
133*4882a593Smuzhiyun 	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
134*4882a593Smuzhiyun 	       dev_name(&d_device->cdev->dev), d_args); \
135*4882a593Smuzhiyun 	DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
136*4882a593Smuzhiyun } while(0)
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun #define MESSAGE(d_loglevel,d_string,d_args...)\
139*4882a593Smuzhiyun do { \
140*4882a593Smuzhiyun 	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
141*4882a593Smuzhiyun 	DBF_EVENT(DBF_ALERT, d_string, d_args); \
142*4882a593Smuzhiyun } while(0)
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun /* messages to be written via klogd only */
145*4882a593Smuzhiyun #define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
146*4882a593Smuzhiyun do { \
147*4882a593Smuzhiyun 	printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
148*4882a593Smuzhiyun 	       dev_name(&d_device->cdev->dev), d_args); \
149*4882a593Smuzhiyun } while(0)
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun #define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
152*4882a593Smuzhiyun do { \
153*4882a593Smuzhiyun 	printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
154*4882a593Smuzhiyun } while(0)
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun /* Macro to calculate number of blocks per page */
157*4882a593Smuzhiyun #define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
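/*
 * Worked example (assuming the usual 4 KiB PAGE_SIZE on s390):
 * BLOCKS_PER_PAGE(512) == 8 and BLOCKS_PER_PAGE(4096) == 1.
 */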
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun struct dasd_ccw_req {
160*4882a593Smuzhiyun 	unsigned int magic;		/* Eye catcher */
161*4882a593Smuzhiyun 	int intrc;			/* internal error, e.g. from start_IO */
162*4882a593Smuzhiyun 	struct list_head devlist;	/* for dasd_device request queue */
163*4882a593Smuzhiyun 	struct list_head blocklist;	/* for dasd_block request queue */
164*4882a593Smuzhiyun 	struct dasd_block *block;	/* the originating block device */
165*4882a593Smuzhiyun 	struct dasd_device *memdev;	/* the device used to allocate this */
166*4882a593Smuzhiyun 	struct dasd_device *startdev;	/* device the request is started on */
167*4882a593Smuzhiyun 	struct dasd_device *basedev;	/* base device if no block->base */
168*4882a593Smuzhiyun 	void *cpaddr;			/* address of ccw or tcw */
169*4882a593Smuzhiyun 	short retries;			/* A retry counter */
170*4882a593Smuzhiyun 	unsigned char cpmode;		/* 0 = cmd mode, 1 = itcw */
171*4882a593Smuzhiyun 	char status;			/* status of this request */
172*4882a593Smuzhiyun 	char lpm;			/* logical path mask */
173*4882a593Smuzhiyun 	unsigned long flags;        	/* flags of this request */
174*4882a593Smuzhiyun 	struct dasd_queue *dq;
175*4882a593Smuzhiyun 	unsigned long starttime;	/* jiffies time of request start */
176*4882a593Smuzhiyun 	unsigned long expires;		/* expiration period in jiffies */
177*4882a593Smuzhiyun 	void *data;			/* pointer to data area */
178*4882a593Smuzhiyun 	struct irb irb;			/* device status in case of an error */
179*4882a593Smuzhiyun 	struct dasd_ccw_req *refers;	/* ERP-chain queueing. */
180*4882a593Smuzhiyun 	void *function; 		/* originating ERP action */
181*4882a593Smuzhiyun 	void *mem_chunk;
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 	unsigned long buildclk;		/* TOD-clock of request generation */
184*4882a593Smuzhiyun 	unsigned long startclk;		/* TOD-clock of request start */
185*4882a593Smuzhiyun 	unsigned long stopclk;		/* TOD-clock of request interrupt */
186*4882a593Smuzhiyun 	unsigned long endclk;		/* TOD-clock of request termination */
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	void (*callback)(struct dasd_ccw_req *, void *data);
189*4882a593Smuzhiyun 	void *callback_data;
190*4882a593Smuzhiyun 	unsigned int proc_bytes;	/* bytes for partial completion */
191*4882a593Smuzhiyun 	unsigned int trkcount;		/* count formatted tracks */
192*4882a593Smuzhiyun };
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun /*
195*4882a593Smuzhiyun  * dasd_ccw_req -> status can be:
196*4882a593Smuzhiyun  */
197*4882a593Smuzhiyun #define DASD_CQR_FILLED 	0x00	/* request is ready to be processed */
198*4882a593Smuzhiyun #define DASD_CQR_DONE		0x01	/* request is completed successfully */
199*4882a593Smuzhiyun #define DASD_CQR_NEED_ERP	0x02	/* request needs recovery action */
200*4882a593Smuzhiyun #define DASD_CQR_IN_ERP 	0x03	/* request is in recovery */
201*4882a593Smuzhiyun #define DASD_CQR_FAILED 	0x04	/* request is finally failed */
202*4882a593Smuzhiyun #define DASD_CQR_TERMINATED	0x05	/* request was stopped by driver */
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun #define DASD_CQR_QUEUED 	0x80	/* request is queued to be processed */
205*4882a593Smuzhiyun #define DASD_CQR_IN_IO		0x81	/* request is currently in IO */
206*4882a593Smuzhiyun #define DASD_CQR_ERROR		0x82	/* request is completed with error */
207*4882a593Smuzhiyun #define DASD_CQR_CLEAR_PENDING	0x83	/* request is clear pending */
208*4882a593Smuzhiyun #define DASD_CQR_CLEARED	0x84	/* request was cleared */
209*4882a593Smuzhiyun #define DASD_CQR_SUCCESS	0x85	/* request was successful */
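/*
 * Illustrative only, not from the original source: the status codes above
 * are usually consumed by a completion callback that only cares about the
 * final states, e.g.
 *
 *	if (cqr->status == DASD_CQR_DONE)
 *		... request completed successfully ...
 *	else if (cqr->status == DASD_CQR_FAILED)
 *		... give up and report the error ...
 */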
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /* default expiration time*/
212*4882a593Smuzhiyun #define DASD_EXPIRES	  300
213*4882a593Smuzhiyun #define DASD_EXPIRES_MAX  40000000
214*4882a593Smuzhiyun #define DASD_RETRIES	  256
215*4882a593Smuzhiyun #define DASD_RETRIES_MAX  32768
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun /* per dasd_ccw_req flags */
218*4882a593Smuzhiyun #define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
219*4882a593Smuzhiyun #define DASD_CQR_FLAGS_FAILFAST  1	/* FAILFAST */
220*4882a593Smuzhiyun #define DASD_CQR_VERIFY_PATH	 2	/* path verification request */
221*4882a593Smuzhiyun #define DASD_CQR_ALLOW_SLOCK	 3	/* Try this request even when lock was
222*4882a593Smuzhiyun 					 * stolen. Should not be combined with
223*4882a593Smuzhiyun 					 * DASD_CQR_FLAGS_USE_ERP
224*4882a593Smuzhiyun 					 */
225*4882a593Smuzhiyun /*
226*4882a593Smuzhiyun  * The following flags are used to suppress output of certain errors.
227*4882a593Smuzhiyun  */
228*4882a593Smuzhiyun #define DASD_CQR_SUPPRESS_NRF	4	/* Suppress 'No Record Found' error */
229*4882a593Smuzhiyun #define DASD_CQR_SUPPRESS_FP	5	/* Suppress 'File Protected' error*/
230*4882a593Smuzhiyun #define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
231*4882a593Smuzhiyun #define DASD_CQR_SUPPRESS_CR	7	/* Suppress 'Command Reject' error */
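/*
 * The flag values above are bit numbers rather than masks, so they are meant
 * to be used with the bitops interface.  Minimal sketch (cqr stands for any
 * request owned by the caller; not taken from the original source):
 *
 *	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 *	if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
 *		... this is a path verification request ...
 */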
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun #define DASD_REQ_PER_DEV 4
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun /* Signature for error recovery functions. */
236*4882a593Smuzhiyun typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun /*
239*4882a593Smuzhiyun  * A single CQR can only contain a maximum of 255 CCWs. It is limited by
240*4882a593Smuzhiyun  * the Locate Record and Locate Record Extended count value, which can only
241*4882a593Smuzhiyun  * hold 1 byte.
242*4882a593Smuzhiyun  */
243*4882a593Smuzhiyun #define DASD_CQR_MAX_CCW 255
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun /*
246*4882a593Smuzhiyun  * Unique identifier for dasd device.
247*4882a593Smuzhiyun  */
248*4882a593Smuzhiyun #define UA_NOT_CONFIGURED  0x00
249*4882a593Smuzhiyun #define UA_BASE_DEVICE	   0x01
250*4882a593Smuzhiyun #define UA_BASE_PAV_ALIAS  0x02
251*4882a593Smuzhiyun #define UA_HYPER_PAV_ALIAS 0x03
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun struct dasd_uid {
254*4882a593Smuzhiyun 	__u8 type;
255*4882a593Smuzhiyun 	char vendor[4];
256*4882a593Smuzhiyun 	char serial[15];
257*4882a593Smuzhiyun 	__u16 ssid;
258*4882a593Smuzhiyun 	__u8 real_unit_addr;
259*4882a593Smuzhiyun 	__u8 base_unit_addr;
260*4882a593Smuzhiyun 	char vduit[33];
261*4882a593Smuzhiyun };
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun /*
264*4882a593Smuzhiyun  * The struct dasd_discipline is
265*4882a593Smuzhiyun  * something like a table of virtual functions, if you think of dasd_eckd
266*4882a593Smuzhiyun  * inheriting dasd...
267*4882a593Smuzhiyun  * No, currently we are not planning to reimplement the driver in C++.
268*4882a593Smuzhiyun  */
269*4882a593Smuzhiyun struct dasd_discipline {
270*4882a593Smuzhiyun 	struct module *owner;
271*4882a593Smuzhiyun 	char ebcname[8];	/* a name used for tagging and printks */
272*4882a593Smuzhiyun 	char name[8];		/* a name used for tagging and printks */
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	struct list_head list;	/* used for list of disciplines */
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	/*
277*4882a593Smuzhiyun 	 * Device recognition functions. check_device is used to verify
278*4882a593Smuzhiyun 	 * the sense data and the information returned by read device
279*4882a593Smuzhiyun 	 * characteristics. It returns 0 if the discipline can be used
280*4882a593Smuzhiyun 	 * for the device in question. uncheck_device is called during
281*4882a593Smuzhiyun 	 * device shutdown to deregister a device from its discipline.
282*4882a593Smuzhiyun 	 */
283*4882a593Smuzhiyun 	int (*check_device) (struct dasd_device *);
284*4882a593Smuzhiyun 	void (*uncheck_device) (struct dasd_device *);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	/*
287*4882a593Smuzhiyun 	 * do_analysis is used in the step from device state "basic" to
288*4882a593Smuzhiyun 	 * state "ready". It returns 0 if the device can be made ready,
289*4882a593Smuzhiyun 	 * it returns -EMEDIUMTYPE if the device can't be made ready or
290*4882a593Smuzhiyun 	 * -EAGAIN if do_analysis started a ccw that needs to complete
291*4882a593Smuzhiyun 	 * before the analysis may be repeated.
292*4882a593Smuzhiyun 	 */
293*4882a593Smuzhiyun 	int (*do_analysis) (struct dasd_block *);
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	/*
296*4882a593Smuzhiyun 	 * This function is called when new paths become available.
297*4882a593Smuzhiyun 	 * Disciplines may use this callback to do necessary setup work,
298*4882a593Smuzhiyun 	 * e.g. verify that a new path is compatible with the current
299*4882a593Smuzhiyun 	 * configuration.
300*4882a593Smuzhiyun 	 */
301*4882a593Smuzhiyun 	int (*verify_path)(struct dasd_device *, __u8);
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	/*
304*4882a593Smuzhiyun 	 * Last things to do when a device is set online, and first things
305*4882a593Smuzhiyun 	 * when it is set offline.
306*4882a593Smuzhiyun 	 */
307*4882a593Smuzhiyun 	int (*basic_to_ready) (struct dasd_device *);
308*4882a593Smuzhiyun 	int (*online_to_ready) (struct dasd_device *);
309*4882a593Smuzhiyun 	int (*basic_to_known)(struct dasd_device *);
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	/*
312*4882a593Smuzhiyun 	 * Initialize block layer request queue.
313*4882a593Smuzhiyun 	 */
314*4882a593Smuzhiyun 	void (*setup_blk_queue)(struct dasd_block *);
315*4882a593Smuzhiyun 	/*
316*4882a593Smuzhiyun 	 * Device operation functions. build_cp creates a ccw chain for
317*4882a593Smuzhiyun 	 * a block device request, start_io starts the request and
318*4882a593Smuzhiyun 	 * term_IO cancels it (e.g. in case of a timeout). format_device
319*4882a593Smuzhiyun 	 * formats the device and check_device_format compares the format of
320*4882a593Smuzhiyun 	 * a device with the expected format_data.
321*4882a593Smuzhiyun 	 * handle_terminated_request allows the driver to examine a cqr and prepare
322*4882a593Smuzhiyun 	 * it for retry.
323*4882a593Smuzhiyun 	 */
324*4882a593Smuzhiyun 	struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
325*4882a593Smuzhiyun 					  struct dasd_block *,
326*4882a593Smuzhiyun 					  struct request *);
327*4882a593Smuzhiyun 	int (*start_IO) (struct dasd_ccw_req *);
328*4882a593Smuzhiyun 	int (*term_IO) (struct dasd_ccw_req *);
329*4882a593Smuzhiyun 	void (*handle_terminated_request) (struct dasd_ccw_req *);
330*4882a593Smuzhiyun 	int (*format_device) (struct dasd_device *,
331*4882a593Smuzhiyun 			      struct format_data_t *, int);
332*4882a593Smuzhiyun 	int (*check_device_format)(struct dasd_device *,
333*4882a593Smuzhiyun 				   struct format_check_t *, int);
334*4882a593Smuzhiyun 	int (*free_cp) (struct dasd_ccw_req *, struct request *);
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	/*
337*4882a593Smuzhiyun 	 * Error recovery functions. examine_error() returns a value that
338*4882a593Smuzhiyun 	 * indicates what to do for an error condition. If examine_error()
339*4882a593Smuzhiyun 	 * returns 'dasd_era_recover' erp_action() is called to create a
340*4882a593Smuzhiyun 	 * returns 'dasd_era_recover', erp_action() is called to create a
341*4882a593Smuzhiyun 	 * an error recovery ccw has finished its execution. dump_sense
342*4882a593Smuzhiyun 	 * is called for every error condition to print the sense data
343*4882a593Smuzhiyun 	 * to the console.
344*4882a593Smuzhiyun 	 */
345*4882a593Smuzhiyun 	dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
346*4882a593Smuzhiyun 	dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
347*4882a593Smuzhiyun 	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
348*4882a593Smuzhiyun 			    struct irb *);
349*4882a593Smuzhiyun 	void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
350*4882a593Smuzhiyun 	void (*check_for_device_change) (struct dasd_device *,
351*4882a593Smuzhiyun 					 struct dasd_ccw_req *,
352*4882a593Smuzhiyun 					 struct irb *);
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun         /* i/o control functions. */
355*4882a593Smuzhiyun 	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
356*4882a593Smuzhiyun 	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
357*4882a593Smuzhiyun 	int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	/* suspend/resume functions */
360*4882a593Smuzhiyun 	int (*freeze) (struct dasd_device *);
361*4882a593Smuzhiyun 	int (*restore) (struct dasd_device *);
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	/* reload device after state change */
364*4882a593Smuzhiyun 	int (*reload) (struct dasd_device *);
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	int (*get_uid) (struct dasd_device *, struct dasd_uid *);
367*4882a593Smuzhiyun 	void (*kick_validate) (struct dasd_device *);
368*4882a593Smuzhiyun 	int (*check_attention)(struct dasd_device *, __u8);
369*4882a593Smuzhiyun 	int (*host_access_count)(struct dasd_device *);
370*4882a593Smuzhiyun 	int (*hosts_print)(struct dasd_device *, struct seq_file *);
371*4882a593Smuzhiyun 	void (*handle_hpf_error)(struct dasd_device *, struct irb *);
372*4882a593Smuzhiyun 	void (*disable_hpf)(struct dasd_device *);
373*4882a593Smuzhiyun 	int (*hpf_enabled)(struct dasd_device *);
374*4882a593Smuzhiyun 	void (*reset_path)(struct dasd_device *, __u8);
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	/*
377*4882a593Smuzhiyun 	 * Extent Space Efficient (ESE) relevant functions
378*4882a593Smuzhiyun 	 */
379*4882a593Smuzhiyun 	int (*is_ese)(struct dasd_device *);
380*4882a593Smuzhiyun 	/* Capacity */
381*4882a593Smuzhiyun 	int (*space_allocated)(struct dasd_device *);
382*4882a593Smuzhiyun 	int (*space_configured)(struct dasd_device *);
383*4882a593Smuzhiyun 	int (*logical_capacity)(struct dasd_device *);
384*4882a593Smuzhiyun 	int (*release_space)(struct dasd_device *, struct format_data_t *);
385*4882a593Smuzhiyun 	/* Extent Pool */
386*4882a593Smuzhiyun 	int (*ext_pool_id)(struct dasd_device *);
387*4882a593Smuzhiyun 	int (*ext_size)(struct dasd_device *);
388*4882a593Smuzhiyun 	int (*ext_pool_cap_at_warnlevel)(struct dasd_device *);
389*4882a593Smuzhiyun 	int (*ext_pool_warn_thrshld)(struct dasd_device *);
390*4882a593Smuzhiyun 	int (*ext_pool_oos)(struct dasd_device *);
391*4882a593Smuzhiyun 	int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
392*4882a593Smuzhiyun 	struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
393*4882a593Smuzhiyun 					   struct dasd_ccw_req *, struct irb *);
394*4882a593Smuzhiyun 	int (*ese_read)(struct dasd_ccw_req *, struct irb *);
395*4882a593Smuzhiyun };
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun extern struct dasd_discipline *dasd_diag_discipline_pointer;
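/*
 * Minimal sketch of how a discipline module might fill in this vtable before
 * handing it to dasd_generic_set_online() from its ccw driver's set_online
 * callback.  The my_* names are hypothetical; see dasd_eckd.c or dasd_fba.c
 * for the real implementations.
 *
 *	static struct dasd_discipline my_discipline = {
 *		.owner		= THIS_MODULE,
 *		.name		= "MYDD",
 *		.ebcname	= "MYDD",
 *		.check_device	= my_check_device,
 *		.do_analysis	= my_do_analysis,
 *		.build_cp	= my_build_cp,
 *		.start_IO	= dasd_start_IO,
 *		.term_IO	= dasd_term_IO,
 *	};
 */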
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun /*
400*4882a593Smuzhiyun  * Notification numbers for extended error reporting notifications:
401*4882a593Smuzhiyun  * The DASD_EER_DISABLE notification is sent before a dasd_device (and its
402*4882a593Smuzhiyun  * eer pointer) is freed. The error reporting module needs to do all necessary
403*4882a593Smuzhiyun  * cleanup steps.
404*4882a593Smuzhiyun  * The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
405*4882a593Smuzhiyun  */
406*4882a593Smuzhiyun #define DASD_EER_DISABLE 0
407*4882a593Smuzhiyun #define DASD_EER_TRIGGER 1
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun /* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
410*4882a593Smuzhiyun #define DASD_EER_FATALERROR  1
411*4882a593Smuzhiyun #define DASD_EER_NOPATH      2
412*4882a593Smuzhiyun #define DASD_EER_STATECHANGE 3
413*4882a593Smuzhiyun #define DASD_EER_PPRCSUSPEND 4
414*4882a593Smuzhiyun #define DASD_EER_NOSPC	     5
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun /* DASD path handling */
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun #define DASD_PATH_OPERATIONAL  1
419*4882a593Smuzhiyun #define DASD_PATH_TBV	       2
420*4882a593Smuzhiyun #define DASD_PATH_PP	       3
421*4882a593Smuzhiyun #define DASD_PATH_NPP	       4
422*4882a593Smuzhiyun #define DASD_PATH_MISCABLED    5
423*4882a593Smuzhiyun #define DASD_PATH_NOHPF        6
424*4882a593Smuzhiyun #define DASD_PATH_CUIR	       7
425*4882a593Smuzhiyun #define DASD_PATH_IFCC	       8
426*4882a593Smuzhiyun 
427*4882a593Smuzhiyun #define DASD_THRHLD_MAX		4294967295U
428*4882a593Smuzhiyun #define DASD_INTERVAL_MAX	4294967295U
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun struct dasd_path {
431*4882a593Smuzhiyun 	unsigned long flags;
432*4882a593Smuzhiyun 	u8 cssid;
433*4882a593Smuzhiyun 	u8 ssid;
434*4882a593Smuzhiyun 	u8 chpid;
435*4882a593Smuzhiyun 	struct dasd_conf_data *conf_data;
436*4882a593Smuzhiyun 	atomic_t error_count;
437*4882a593Smuzhiyun 	unsigned long errorclk;
438*4882a593Smuzhiyun };
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun struct dasd_profile_info {
442*4882a593Smuzhiyun 	/* legacy part of profile data, as in dasd_profile_info_t */
443*4882a593Smuzhiyun 	unsigned int dasd_io_reqs;	 /* number of requests processed */
444*4882a593Smuzhiyun 	unsigned int dasd_io_sects;	 /* number of sectors processed */
445*4882a593Smuzhiyun 	unsigned int dasd_io_secs[32];	 /* histogram of request sizes */
446*4882a593Smuzhiyun 	unsigned int dasd_io_times[32];	 /* histogram of request times */
447*4882a593Smuzhiyun 	unsigned int dasd_io_timps[32];	 /* hist. of request times per sector */
448*4882a593Smuzhiyun 	unsigned int dasd_io_time1[32];	 /* hist. of time from build to start */
449*4882a593Smuzhiyun 	unsigned int dasd_io_time2[32];	 /* hist. of time from start to irq */
450*4882a593Smuzhiyun 	unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq */
451*4882a593Smuzhiyun 	unsigned int dasd_io_time3[32];	 /* hist. of time from irq to end */
452*4882a593Smuzhiyun 	unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun 	/* new data */
455*4882a593Smuzhiyun 	struct timespec64 starttod;	   /* time of start or last reset */
456*4882a593Smuzhiyun 	unsigned int dasd_io_alias;	   /* requests using an alias */
457*4882a593Smuzhiyun 	unsigned int dasd_io_tpm;	   /* requests using transport mode */
458*4882a593Smuzhiyun 	unsigned int dasd_read_reqs;	   /* total number of read  requests */
459*4882a593Smuzhiyun 	unsigned int dasd_read_sects;	   /* total number of read sectors */
460*4882a593Smuzhiyun 	unsigned int dasd_read_alias;	   /* read requests using an alias */
461*4882a593Smuzhiyun 	unsigned int dasd_read_tpm;	   /* read requests in transport mode */
462*4882a593Smuzhiyun 	unsigned int dasd_read_secs[32];   /* histogram of request sizes */
463*4882a593Smuzhiyun 	unsigned int dasd_read_times[32];  /* histogram of request times */
464*4882a593Smuzhiyun 	unsigned int dasd_read_time1[32];  /* hist. time from build to start */
465*4882a593Smuzhiyun 	unsigned int dasd_read_time2[32];  /* hist. of time from start to irq */
466*4882a593Smuzhiyun 	unsigned int dasd_read_time3[32];  /* hist. of time from irq to end */
467*4882a593Smuzhiyun 	unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
468*4882a593Smuzhiyun 	unsigned long dasd_sum_times;	   /* sum of request times */
469*4882a593Smuzhiyun 	unsigned long dasd_sum_time_str;   /* sum of time from build to start */
470*4882a593Smuzhiyun 	unsigned long dasd_sum_time_irq;   /* sum of time from start to irq */
471*4882a593Smuzhiyun 	unsigned long dasd_sum_time_end;   /* sum of time from irq to end */
472*4882a593Smuzhiyun };
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun struct dasd_profile {
475*4882a593Smuzhiyun 	struct dentry *dentry;
476*4882a593Smuzhiyun 	struct dasd_profile_info *data;
477*4882a593Smuzhiyun 	spinlock_t lock;
478*4882a593Smuzhiyun };
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun struct dasd_format_entry {
481*4882a593Smuzhiyun 	struct list_head list;
482*4882a593Smuzhiyun 	sector_t track;
483*4882a593Smuzhiyun };
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun struct dasd_device {
486*4882a593Smuzhiyun 	/* Block device stuff. */
487*4882a593Smuzhiyun 	struct dasd_block *block;
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun         unsigned int devindex;
490*4882a593Smuzhiyun 	unsigned long flags;	   /* per device flags */
491*4882a593Smuzhiyun 	unsigned short features;   /* copy of devmap-features (read-only!) */
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun 	/* extended error reporting stuff (eer) */
494*4882a593Smuzhiyun 	struct dasd_ccw_req *eer_cqr;
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	/* Device discipline stuff. */
497*4882a593Smuzhiyun 	struct dasd_discipline *discipline;
498*4882a593Smuzhiyun 	struct dasd_discipline *base_discipline;
499*4882a593Smuzhiyun 	void *private;
500*4882a593Smuzhiyun 	struct dasd_path path[8];
501*4882a593Smuzhiyun 	__u8 opm;
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun 	/* Device state and target state. */
504*4882a593Smuzhiyun 	int state, target;
505*4882a593Smuzhiyun 	struct mutex state_mutex;
506*4882a593Smuzhiyun 	int stopped;		/* device (ccw_device_start) was stopped */
507*4882a593Smuzhiyun 
508*4882a593Smuzhiyun 	/* reference count. */
509*4882a593Smuzhiyun         atomic_t ref_count;
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	/* ccw queue and memory for static ccw/erp buffers. */
512*4882a593Smuzhiyun 	struct list_head ccw_queue;
513*4882a593Smuzhiyun 	spinlock_t mem_lock;
514*4882a593Smuzhiyun 	void *ccw_mem;
515*4882a593Smuzhiyun 	void *erp_mem;
516*4882a593Smuzhiyun 	void *ese_mem;
517*4882a593Smuzhiyun 	struct list_head ccw_chunks;
518*4882a593Smuzhiyun 	struct list_head erp_chunks;
519*4882a593Smuzhiyun 	struct list_head ese_chunks;
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	atomic_t tasklet_scheduled;
522*4882a593Smuzhiyun         struct tasklet_struct tasklet;
523*4882a593Smuzhiyun 	struct work_struct kick_work;
524*4882a593Smuzhiyun 	struct work_struct restore_device;
525*4882a593Smuzhiyun 	struct work_struct reload_device;
526*4882a593Smuzhiyun 	struct work_struct kick_validate;
527*4882a593Smuzhiyun 	struct work_struct suc_work;
528*4882a593Smuzhiyun 	struct work_struct requeue_requests;
529*4882a593Smuzhiyun 	struct timer_list timer;
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun 	debug_info_t *debug_area;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	struct ccw_device *cdev;
534*4882a593Smuzhiyun 
535*4882a593Smuzhiyun 	/* hook for alias management */
536*4882a593Smuzhiyun 	struct list_head alias_list;
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	/* default expiration time in s */
539*4882a593Smuzhiyun 	unsigned long default_expires;
540*4882a593Smuzhiyun 	unsigned long default_retries;
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	unsigned long blk_timeout;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	unsigned long path_thrhld;
545*4882a593Smuzhiyun 	unsigned long path_interval;
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun 	struct dentry *debugfs_dentry;
548*4882a593Smuzhiyun 	struct dentry *hosts_dentry;
549*4882a593Smuzhiyun 	struct dasd_profile profile;
550*4882a593Smuzhiyun 	struct dasd_format_entry format_entry;
551*4882a593Smuzhiyun };
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun struct dasd_block {
554*4882a593Smuzhiyun 	/* Block device stuff. */
555*4882a593Smuzhiyun 	struct gendisk *gdp;
556*4882a593Smuzhiyun 	struct request_queue *request_queue;
557*4882a593Smuzhiyun 	spinlock_t request_queue_lock;
558*4882a593Smuzhiyun 	struct blk_mq_tag_set tag_set;
559*4882a593Smuzhiyun 	struct block_device *bdev;
560*4882a593Smuzhiyun 	atomic_t open_count;
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	unsigned long blocks;	   /* size of volume in blocks */
563*4882a593Smuzhiyun 	unsigned int bp_block;	   /* bytes per block */
564*4882a593Smuzhiyun 	unsigned int s2b_shift;	   /* log2 (bp_block/512) */
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun 	struct dasd_device *base;
567*4882a593Smuzhiyun 	struct list_head ccw_queue;
568*4882a593Smuzhiyun 	spinlock_t queue_lock;
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	atomic_t tasklet_scheduled;
571*4882a593Smuzhiyun 	struct tasklet_struct tasklet;
572*4882a593Smuzhiyun 	struct timer_list timer;
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	struct dentry *debugfs_dentry;
575*4882a593Smuzhiyun 	struct dasd_profile profile;
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	struct list_head format_list;
578*4882a593Smuzhiyun 	spinlock_t format_lock;
579*4882a593Smuzhiyun 	atomic_t trkcount;
580*4882a593Smuzhiyun };
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun struct dasd_attention_data {
583*4882a593Smuzhiyun 	struct dasd_device *device;
584*4882a593Smuzhiyun 	__u8 lpum;
585*4882a593Smuzhiyun };
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun struct dasd_queue {
588*4882a593Smuzhiyun 	spinlock_t lock;
589*4882a593Smuzhiyun };
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun /* reasons why device (ccw_device_start) was stopped */
592*4882a593Smuzhiyun #define DASD_STOPPED_NOT_ACC 1         /* not accessible */
593*4882a593Smuzhiyun #define DASD_STOPPED_QUIESCE 2         /* Quiesced */
594*4882a593Smuzhiyun #define DASD_STOPPED_PENDING 4         /* long busy */
595*4882a593Smuzhiyun #define DASD_STOPPED_DC_WAIT 8         /* disconnected, wait */
596*4882a593Smuzhiyun #define DASD_STOPPED_SU      16        /* summary unit check handling */
597*4882a593Smuzhiyun #define DASD_STOPPED_PM      32        /* pm state transition */
598*4882a593Smuzhiyun #define DASD_UNRESUMED_PM    64        /* pm resume failed state */
599*4882a593Smuzhiyun #define DASD_STOPPED_NOSPC   128       /* no space left */
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun /* per device flags */
602*4882a593Smuzhiyun #define DASD_FLAG_OFFLINE	3	/* device is in offline processing */
603*4882a593Smuzhiyun #define DASD_FLAG_EER_SNSS	4	/* A SNSS is required */
604*4882a593Smuzhiyun #define DASD_FLAG_EER_IN_USE	5	/* A SNSS request is running */
605*4882a593Smuzhiyun #define DASD_FLAG_DEVICE_RO	6	/* The device itself is read-only. Don't
606*4882a593Smuzhiyun 					 * confuse this with the user specified
607*4882a593Smuzhiyun 					 * read-only feature.
608*4882a593Smuzhiyun 					 */
609*4882a593Smuzhiyun #define DASD_FLAG_IS_RESERVED	7	/* The device is reserved */
610*4882a593Smuzhiyun #define DASD_FLAG_LOCK_STOLEN	8	/* The device lock was stolen */
611*4882a593Smuzhiyun #define DASD_FLAG_SUSPENDED	9	/* The device was suspended */
612*4882a593Smuzhiyun #define DASD_FLAG_SAFE_OFFLINE	10	/* safe offline processing requested*/
613*4882a593Smuzhiyun #define DASD_FLAG_SAFE_OFFLINE_RUNNING	11	/* safe offline running */
614*4882a593Smuzhiyun #define DASD_FLAG_ABORTALL	12	/* Abort all noretry requests */
615*4882a593Smuzhiyun #define DASD_FLAG_PATH_VERIFY	13	/* Path verification worker running */
616*4882a593Smuzhiyun #define DASD_FLAG_SUC		14	/* unhandled summary unit check */
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun #define DASD_SLEEPON_START_TAG	((void *) 1)
619*4882a593Smuzhiyun #define DASD_SLEEPON_END_TAG	((void *) 2)
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun void dasd_put_device_wake(struct dasd_device *);
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun /*
624*4882a593Smuzhiyun  * Reference count inliners
625*4882a593Smuzhiyun  */
626*4882a593Smuzhiyun static inline void
627*4882a593Smuzhiyun dasd_get_device(struct dasd_device *device)
628*4882a593Smuzhiyun {
629*4882a593Smuzhiyun 	atomic_inc(&device->ref_count);
630*4882a593Smuzhiyun }
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun static inline void
633*4882a593Smuzhiyun dasd_put_device(struct dasd_device *device)
634*4882a593Smuzhiyun {
635*4882a593Smuzhiyun 	if (atomic_dec_return(&device->ref_count) == 0)
636*4882a593Smuzhiyun 		dasd_put_device_wake(device);
637*4882a593Smuzhiyun }
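/*
 * Typical pairing of the reference-count helpers above (illustrative only):
 * lookup functions such as dasd_device_from_cdev() return the device with an
 * elevated reference count, which the caller drops again when done.
 *
 *	device = dasd_device_from_cdev(cdev);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	... use the device ...
 *	dasd_put_device(device);
 */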
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun /*
640*4882a593Smuzhiyun  * The static memory in ccw_mem and erp_mem is managed by a sorted
641*4882a593Smuzhiyun  * list of free memory chunks.
642*4882a593Smuzhiyun  */
643*4882a593Smuzhiyun struct dasd_mchunk
644*4882a593Smuzhiyun {
645*4882a593Smuzhiyun 	struct list_head list;
646*4882a593Smuzhiyun 	unsigned long size;
647*4882a593Smuzhiyun } __attribute__ ((aligned(8)));
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun static inline void
650*4882a593Smuzhiyun dasd_init_chunklist(struct list_head *chunk_list, void *mem,
651*4882a593Smuzhiyun 		    unsigned long size)
652*4882a593Smuzhiyun {
653*4882a593Smuzhiyun 	struct dasd_mchunk *chunk;
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	INIT_LIST_HEAD(chunk_list);
656*4882a593Smuzhiyun 	chunk = (struct dasd_mchunk *) mem;
657*4882a593Smuzhiyun 	chunk->size = size - sizeof(struct dasd_mchunk);
658*4882a593Smuzhiyun 	list_add(&chunk->list, chunk_list);
659*4882a593Smuzhiyun }
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun static inline void *
662*4882a593Smuzhiyun dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
663*4882a593Smuzhiyun {
664*4882a593Smuzhiyun 	struct dasd_mchunk *chunk, *tmp;
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	size = (size + 7L) & -8L;
667*4882a593Smuzhiyun 	list_for_each_entry(chunk, chunk_list, list) {
668*4882a593Smuzhiyun 		if (chunk->size < size)
669*4882a593Smuzhiyun 			continue;
670*4882a593Smuzhiyun 		if (chunk->size > size + sizeof(struct dasd_mchunk)) {
671*4882a593Smuzhiyun 			char *endaddr = (char *) (chunk + 1) + chunk->size;
672*4882a593Smuzhiyun 			tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
673*4882a593Smuzhiyun 			tmp->size = size;
674*4882a593Smuzhiyun 			chunk->size -= size + sizeof(struct dasd_mchunk);
675*4882a593Smuzhiyun 			chunk = tmp;
676*4882a593Smuzhiyun 		} else
677*4882a593Smuzhiyun 			list_del(&chunk->list);
678*4882a593Smuzhiyun 		return (void *) (chunk + 1);
679*4882a593Smuzhiyun 	}
680*4882a593Smuzhiyun 	return NULL;
681*4882a593Smuzhiyun }
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun static inline void
684*4882a593Smuzhiyun dasd_free_chunk(struct list_head *chunk_list, void *mem)
685*4882a593Smuzhiyun {
686*4882a593Smuzhiyun 	struct dasd_mchunk *chunk, *tmp;
687*4882a593Smuzhiyun 	struct list_head *p, *left;
688*4882a593Smuzhiyun 
689*4882a593Smuzhiyun 	chunk = (struct dasd_mchunk *)
690*4882a593Smuzhiyun 		((char *) mem - sizeof(struct dasd_mchunk));
691*4882a593Smuzhiyun 	/* Find out the left neighbour in chunk_list. */
692*4882a593Smuzhiyun 	left = chunk_list;
693*4882a593Smuzhiyun 	list_for_each(p, chunk_list) {
694*4882a593Smuzhiyun 		if (list_entry(p, struct dasd_mchunk, list) > chunk)
695*4882a593Smuzhiyun 			break;
696*4882a593Smuzhiyun 		left = p;
697*4882a593Smuzhiyun 	}
698*4882a593Smuzhiyun 	/* Try to merge with right neighbour = next element from left. */
699*4882a593Smuzhiyun 	if (left->next != chunk_list) {
700*4882a593Smuzhiyun 		tmp = list_entry(left->next, struct dasd_mchunk, list);
701*4882a593Smuzhiyun 		if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
702*4882a593Smuzhiyun 			list_del(&tmp->list);
703*4882a593Smuzhiyun 			chunk->size += tmp->size + sizeof(struct dasd_mchunk);
704*4882a593Smuzhiyun 		}
705*4882a593Smuzhiyun 	}
706*4882a593Smuzhiyun 	/* Try to merge with left neighbour. */
707*4882a593Smuzhiyun 	if (left != chunk_list) {
708*4882a593Smuzhiyun 		tmp = list_entry(left, struct dasd_mchunk, list);
709*4882a593Smuzhiyun 		if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
710*4882a593Smuzhiyun 			tmp->size += chunk->size + sizeof(struct dasd_mchunk);
711*4882a593Smuzhiyun 			return;
712*4882a593Smuzhiyun 		}
713*4882a593Smuzhiyun 	}
714*4882a593Smuzhiyun 	__list_add(&chunk->list, left, left->next);
715*4882a593Smuzhiyun }
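/*
 * Illustrative use of the chunk allocator above (not from the original
 * source): a contiguous buffer is handed to dasd_init_chunklist() once and
 * request-sized pieces are then carved out of it and given back.
 *
 *	LIST_HEAD(chunks);
 *	dasd_init_chunklist(&chunks, mem, mem_size);
 *	data = dasd_alloc_chunk(&chunks, 64);	(returns NULL if nothing fits)
 *	...
 *	dasd_free_chunk(&chunks, data);		(merges with free neighbours)
 */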
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun /*
718*4882a593Smuzhiyun  * Check if bsize is in { 512, 1024, 2048, 4096 }
719*4882a593Smuzhiyun  */
720*4882a593Smuzhiyun static inline int
721*4882a593Smuzhiyun dasd_check_blocksize(int bsize)
722*4882a593Smuzhiyun {
723*4882a593Smuzhiyun 	if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
724*4882a593Smuzhiyun 		return -EMEDIUMTYPE;
725*4882a593Smuzhiyun 	return 0;
726*4882a593Smuzhiyun }
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun /*
729*4882a593Smuzhiyun  * return the callback data of the original request in case there are
730*4882a593Smuzhiyun  * ERP requests built on top of it
731*4882a593Smuzhiyun  */
732*4882a593Smuzhiyun static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
733*4882a593Smuzhiyun {
734*4882a593Smuzhiyun 	while (cqr->refers)
735*4882a593Smuzhiyun 		cqr = cqr->refers;
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun 	return cqr->callback_data;
738*4882a593Smuzhiyun }
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun /* externals in dasd.c */
741*4882a593Smuzhiyun #define DASD_PROFILE_OFF	 0
742*4882a593Smuzhiyun #define DASD_PROFILE_ON 	 1
743*4882a593Smuzhiyun #define DASD_PROFILE_GLOBAL_ONLY 2
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun extern debug_info_t *dasd_debug_area;
746*4882a593Smuzhiyun extern struct dasd_profile dasd_global_profile;
747*4882a593Smuzhiyun extern unsigned int dasd_global_profile_level;
748*4882a593Smuzhiyun extern const struct block_device_operations dasd_device_operations;
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun extern struct kmem_cache *dasd_page_cache;
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun struct dasd_ccw_req *
753*4882a593Smuzhiyun dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
754*4882a593Smuzhiyun struct dasd_ccw_req *dasd_fmalloc_request(int, int, int, struct dasd_device *);
755*4882a593Smuzhiyun void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
756*4882a593Smuzhiyun void dasd_ffree_request(struct dasd_ccw_req *, struct dasd_device *);
757*4882a593Smuzhiyun void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun struct dasd_device *dasd_alloc_device(void);
760*4882a593Smuzhiyun void dasd_free_device(struct dasd_device *);
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun struct dasd_block *dasd_alloc_block(void);
763*4882a593Smuzhiyun void dasd_free_block(struct dasd_block *);
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun void dasd_enable_device(struct dasd_device *);
768*4882a593Smuzhiyun void dasd_set_target_state(struct dasd_device *, int);
769*4882a593Smuzhiyun void dasd_kick_device(struct dasd_device *);
770*4882a593Smuzhiyun void dasd_restore_device(struct dasd_device *);
771*4882a593Smuzhiyun void dasd_reload_device(struct dasd_device *);
772*4882a593Smuzhiyun void dasd_schedule_requeue(struct dasd_device *);
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun void dasd_add_request_head(struct dasd_ccw_req *);
775*4882a593Smuzhiyun void dasd_add_request_tail(struct dasd_ccw_req *);
776*4882a593Smuzhiyun int  dasd_start_IO(struct dasd_ccw_req *);
777*4882a593Smuzhiyun int  dasd_term_IO(struct dasd_ccw_req *);
778*4882a593Smuzhiyun void dasd_schedule_device_bh(struct dasd_device *);
779*4882a593Smuzhiyun void dasd_schedule_block_bh(struct dasd_block *);
780*4882a593Smuzhiyun int  dasd_sleep_on(struct dasd_ccw_req *);
781*4882a593Smuzhiyun int  dasd_sleep_on_queue(struct list_head *);
782*4882a593Smuzhiyun int  dasd_sleep_on_immediatly(struct dasd_ccw_req *);
783*4882a593Smuzhiyun int  dasd_sleep_on_queue_interruptible(struct list_head *);
784*4882a593Smuzhiyun int  dasd_sleep_on_interruptible(struct dasd_ccw_req *);
785*4882a593Smuzhiyun void dasd_device_set_timer(struct dasd_device *, int);
786*4882a593Smuzhiyun void dasd_device_clear_timer(struct dasd_device *);
787*4882a593Smuzhiyun void dasd_block_set_timer(struct dasd_block *, int);
788*4882a593Smuzhiyun void dasd_block_clear_timer(struct dasd_block *);
789*4882a593Smuzhiyun int  dasd_cancel_req(struct dasd_ccw_req *);
790*4882a593Smuzhiyun int dasd_flush_device_queue(struct dasd_device *);
791*4882a593Smuzhiyun int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
792*4882a593Smuzhiyun void dasd_generic_free_discipline(struct dasd_device *);
793*4882a593Smuzhiyun void dasd_generic_remove (struct ccw_device *cdev);
794*4882a593Smuzhiyun int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
795*4882a593Smuzhiyun int dasd_generic_set_offline (struct ccw_device *cdev);
796*4882a593Smuzhiyun int dasd_generic_notify(struct ccw_device *, int);
797*4882a593Smuzhiyun int dasd_generic_last_path_gone(struct dasd_device *);
798*4882a593Smuzhiyun int dasd_generic_path_operational(struct dasd_device *);
799*4882a593Smuzhiyun void dasd_generic_shutdown(struct ccw_device *);
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun void dasd_generic_handle_state_change(struct dasd_device *);
802*4882a593Smuzhiyun int dasd_generic_pm_freeze(struct ccw_device *);
803*4882a593Smuzhiyun int dasd_generic_restore_device(struct ccw_device *);
804*4882a593Smuzhiyun enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
805*4882a593Smuzhiyun void dasd_generic_path_event(struct ccw_device *, int *);
806*4882a593Smuzhiyun int dasd_generic_verify_path(struct dasd_device *, __u8);
807*4882a593Smuzhiyun void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *);
808*4882a593Smuzhiyun void dasd_generic_space_avail(struct dasd_device *);
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
811*4882a593Smuzhiyun char *dasd_get_sense(struct irb *);
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun void dasd_device_set_stop_bits(struct dasd_device *, int);
814*4882a593Smuzhiyun void dasd_device_remove_stop_bits(struct dasd_device *, int);
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun int dasd_device_is_ro(struct dasd_device *);
817*4882a593Smuzhiyun 
818*4882a593Smuzhiyun void dasd_profile_reset(struct dasd_profile *);
819*4882a593Smuzhiyun int dasd_profile_on(struct dasd_profile *);
820*4882a593Smuzhiyun void dasd_profile_off(struct dasd_profile *);
821*4882a593Smuzhiyun char *dasd_get_user_string(const char __user *, size_t);
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun /* externals in dasd_devmap.c */
824*4882a593Smuzhiyun extern int dasd_max_devindex;
825*4882a593Smuzhiyun extern int dasd_probeonly;
826*4882a593Smuzhiyun extern int dasd_autodetect;
827*4882a593Smuzhiyun extern int dasd_nopav;
828*4882a593Smuzhiyun extern int dasd_nofcx;
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun int dasd_devmap_init(void);
831*4882a593Smuzhiyun void dasd_devmap_exit(void);
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun struct dasd_device *dasd_create_device(struct ccw_device *);
834*4882a593Smuzhiyun void dasd_delete_device(struct dasd_device *);
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun int dasd_get_feature(struct ccw_device *, int);
837*4882a593Smuzhiyun int dasd_set_feature(struct ccw_device *, int, int);
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun int dasd_add_sysfs_files(struct ccw_device *);
840*4882a593Smuzhiyun void dasd_remove_sysfs_files(struct ccw_device *);
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
843*4882a593Smuzhiyun struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
844*4882a593Smuzhiyun struct dasd_device *dasd_device_from_devindex(int);
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
847*4882a593Smuzhiyun struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun int dasd_parse(void) __init;
850*4882a593Smuzhiyun int dasd_busid_known(const char *);
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun /* externals in dasd_gendisk.c */
853*4882a593Smuzhiyun int  dasd_gendisk_init(void);
854*4882a593Smuzhiyun void dasd_gendisk_exit(void);
855*4882a593Smuzhiyun int dasd_gendisk_alloc(struct dasd_block *);
856*4882a593Smuzhiyun void dasd_gendisk_free(struct dasd_block *);
857*4882a593Smuzhiyun int dasd_scan_partitions(struct dasd_block *);
858*4882a593Smuzhiyun void dasd_destroy_partitions(struct dasd_block *);
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun /* externals in dasd_ioctl.c */
861*4882a593Smuzhiyun int  dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun /* externals in dasd_proc.c */
864*4882a593Smuzhiyun int dasd_proc_init(void);
865*4882a593Smuzhiyun void dasd_proc_exit(void);
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun /* externals in dasd_erp.c */
868*4882a593Smuzhiyun struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
869*4882a593Smuzhiyun struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
870*4882a593Smuzhiyun struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
871*4882a593Smuzhiyun 					    struct dasd_device *);
872*4882a593Smuzhiyun void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
873*4882a593Smuzhiyun void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
874*4882a593Smuzhiyun void dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb);
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun /* externals in dasd_3990_erp.c */
877*4882a593Smuzhiyun struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
878*4882a593Smuzhiyun void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun /* externals in dasd_eer.c */
881*4882a593Smuzhiyun #ifdef CONFIG_DASD_EER
882*4882a593Smuzhiyun int dasd_eer_init(void);
883*4882a593Smuzhiyun void dasd_eer_exit(void);
884*4882a593Smuzhiyun int dasd_eer_enable(struct dasd_device *);
885*4882a593Smuzhiyun void dasd_eer_disable(struct dasd_device *);
886*4882a593Smuzhiyun void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr,
887*4882a593Smuzhiyun 		    unsigned int id);
888*4882a593Smuzhiyun void dasd_eer_snss(struct dasd_device *);
889*4882a593Smuzhiyun 
890*4882a593Smuzhiyun static inline int dasd_eer_enabled(struct dasd_device *device)
891*4882a593Smuzhiyun {
892*4882a593Smuzhiyun 	return device->eer_cqr != NULL;
893*4882a593Smuzhiyun }
894*4882a593Smuzhiyun #else
895*4882a593Smuzhiyun #define dasd_eer_init()		(0)
896*4882a593Smuzhiyun #define dasd_eer_exit()		do { } while (0)
897*4882a593Smuzhiyun #define dasd_eer_enable(d)	(0)
898*4882a593Smuzhiyun #define dasd_eer_disable(d)	do { } while (0)
899*4882a593Smuzhiyun #define dasd_eer_write(d,c,i)	do { } while (0)
900*4882a593Smuzhiyun #define dasd_eer_snss(d)	do { } while (0)
901*4882a593Smuzhiyun #define dasd_eer_enabled(d)	(0)
902*4882a593Smuzhiyun #endif	/* CONFIG_DASD_EER */
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun /* DASD path handling functions */
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun /*
908*4882a593Smuzhiyun  * helper functions to modify bit masks for a given channel path for a device
909*4882a593Smuzhiyun  */
910*4882a593Smuzhiyun static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
911*4882a593Smuzhiyun {
912*4882a593Smuzhiyun 	return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
913*4882a593Smuzhiyun }
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
916*4882a593Smuzhiyun {
917*4882a593Smuzhiyun 	return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
918*4882a593Smuzhiyun }
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun static inline void dasd_path_verify(struct dasd_device *device, int chp)
921*4882a593Smuzhiyun {
922*4882a593Smuzhiyun 	__set_bit(DASD_PATH_TBV, &device->path[chp].flags);
923*4882a593Smuzhiyun }
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
926*4882a593Smuzhiyun {
927*4882a593Smuzhiyun 	__clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
928*4882a593Smuzhiyun }
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun static inline void dasd_path_clear_all_verify(struct dasd_device *device)
931*4882a593Smuzhiyun {
932*4882a593Smuzhiyun 	int chp;
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
935*4882a593Smuzhiyun 		dasd_path_clear_verify(device, chp);
936*4882a593Smuzhiyun }
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun static inline void dasd_path_operational(struct dasd_device *device, int chp)
939*4882a593Smuzhiyun {
940*4882a593Smuzhiyun 	__set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
941*4882a593Smuzhiyun 	device->opm |= (0x80 >> chp);
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
945*4882a593Smuzhiyun {
946*4882a593Smuzhiyun 	__set_bit(DASD_PATH_NPP, &device->path[chp].flags);
947*4882a593Smuzhiyun }
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
950*4882a593Smuzhiyun {
951*4882a593Smuzhiyun 	return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
952*4882a593Smuzhiyun }
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
955*4882a593Smuzhiyun 						int chp)
956*4882a593Smuzhiyun {
957*4882a593Smuzhiyun 	__clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
958*4882a593Smuzhiyun }
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun static inline void dasd_path_preferred(struct dasd_device *device, int chp)
961*4882a593Smuzhiyun {
962*4882a593Smuzhiyun 	__set_bit(DASD_PATH_PP, &device->path[chp].flags);
963*4882a593Smuzhiyun }
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
966*4882a593Smuzhiyun {
967*4882a593Smuzhiyun 	return test_bit(DASD_PATH_PP, &device->path[chp].flags);
968*4882a593Smuzhiyun }
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun static inline void dasd_path_clear_preferred(struct dasd_device *device,
971*4882a593Smuzhiyun 					     int chp)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun 	__clear_bit(DASD_PATH_PP, &device->path[chp].flags);
974*4882a593Smuzhiyun }
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
977*4882a593Smuzhiyun {
978*4882a593Smuzhiyun 	__clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
979*4882a593Smuzhiyun 	device->opm &= ~(0x80 >> chp);
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
983*4882a593Smuzhiyun {
984*4882a593Smuzhiyun 	__clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
985*4882a593Smuzhiyun }
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun static inline void dasd_path_cuir(struct dasd_device *device, int chp)
988*4882a593Smuzhiyun {
989*4882a593Smuzhiyun 	__set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun 	return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
998*4882a593Smuzhiyun {
999*4882a593Smuzhiyun 	__clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
1000*4882a593Smuzhiyun }
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
1003*4882a593Smuzhiyun {
1004*4882a593Smuzhiyun 	set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
1005*4882a593Smuzhiyun }
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
1008*4882a593Smuzhiyun {
1009*4882a593Smuzhiyun 	return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
1010*4882a593Smuzhiyun }
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
1013*4882a593Smuzhiyun {
1014*4882a593Smuzhiyun 	clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
1015*4882a593Smuzhiyun }
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
1018*4882a593Smuzhiyun {
1019*4882a593Smuzhiyun 	__clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
1020*4882a593Smuzhiyun }
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
1023*4882a593Smuzhiyun {
1024*4882a593Smuzhiyun 	__set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
1028*4882a593Smuzhiyun {
1029*4882a593Smuzhiyun 	return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
1030*4882a593Smuzhiyun }
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun 	__set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
1035*4882a593Smuzhiyun }
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
1038*4882a593Smuzhiyun {
1039*4882a593Smuzhiyun 	return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
1040*4882a593Smuzhiyun }
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun /*
1043*4882a593Smuzhiyun  * get functions for path masks
1044*4882a593Smuzhiyun  * will return a path mask for the given device
1045*4882a593Smuzhiyun  */
1046*4882a593Smuzhiyun 
1047*4882a593Smuzhiyun static inline __u8 dasd_path_get_opm(struct dasd_device *device)
1048*4882a593Smuzhiyun {
1049*4882a593Smuzhiyun 	return device->opm;
1050*4882a593Smuzhiyun }
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
1053*4882a593Smuzhiyun {
1054*4882a593Smuzhiyun 	int chp;
1055*4882a593Smuzhiyun 	__u8 tbvpm = 0x00;
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1058*4882a593Smuzhiyun 		if (dasd_path_need_verify(device, chp))
1059*4882a593Smuzhiyun 			tbvpm |= 0x80 >> chp;
1060*4882a593Smuzhiyun 	return tbvpm;
1061*4882a593Smuzhiyun }
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
1064*4882a593Smuzhiyun {
1065*4882a593Smuzhiyun 	int chp;
1066*4882a593Smuzhiyun 	__u8 npm = 0x00;
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++) {
1069*4882a593Smuzhiyun 		if (dasd_path_is_nonpreferred(device, chp))
1070*4882a593Smuzhiyun 			npm |= 0x80 >> chp;
1071*4882a593Smuzhiyun 	}
1072*4882a593Smuzhiyun 	return npm;
1073*4882a593Smuzhiyun }
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
1076*4882a593Smuzhiyun {
1077*4882a593Smuzhiyun 	int chp;
1078*4882a593Smuzhiyun 	__u8 ppm = 0x00;
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1081*4882a593Smuzhiyun 		if (dasd_path_is_preferred(device, chp))
1082*4882a593Smuzhiyun 			ppm |= 0x80 >> chp;
1083*4882a593Smuzhiyun 	return ppm;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun 	int chp;
1089*4882a593Smuzhiyun 	__u8 cablepm = 0x00;
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1092*4882a593Smuzhiyun 		if (dasd_path_is_miscabled(device, chp))
1093*4882a593Smuzhiyun 			cablepm |= 0x80 >> chp;
1094*4882a593Smuzhiyun 	return cablepm;
1095*4882a593Smuzhiyun }
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
1098*4882a593Smuzhiyun {
1099*4882a593Smuzhiyun 	int chp;
1100*4882a593Smuzhiyun 	__u8 cuirpm = 0x00;
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1103*4882a593Smuzhiyun 		if (dasd_path_is_cuir(device, chp))
1104*4882a593Smuzhiyun 			cuirpm |= 0x80 >> chp;
1105*4882a593Smuzhiyun 	return cuirpm;
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
1109*4882a593Smuzhiyun {
1110*4882a593Smuzhiyun 	int chp;
1111*4882a593Smuzhiyun 	__u8 ifccpm = 0x00;
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1114*4882a593Smuzhiyun 		if (dasd_path_is_ifcc(device, chp))
1115*4882a593Smuzhiyun 			ifccpm |= 0x80 >> chp;
1116*4882a593Smuzhiyun 	return ifccpm;
1117*4882a593Smuzhiyun }
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
1120*4882a593Smuzhiyun {
1121*4882a593Smuzhiyun 	int chp;
1122*4882a593Smuzhiyun 	__u8 hpfpm = 0x00;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1125*4882a593Smuzhiyun 		if (dasd_path_is_nohpf(device, chp))
1126*4882a593Smuzhiyun 			hpfpm |= 0x80 >> chp;
1127*4882a593Smuzhiyun 	return hpfpm;
1128*4882a593Smuzhiyun }
1129*4882a593Smuzhiyun 
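/*
 * Illustrative sketch (editor addition, not part of the original header):
 * all masks returned by the get functions above carry one bit per channel
 * path, with path 0 in the most significant bit (0x80 >> chp).  The helper
 * name dasd_path_example_count_oper() is hypothetical.
 */
static inline int dasd_path_example_count_oper(struct dasd_device *device)
{
	/* hweight8() counts the bits set in the 8-bit operational path mask */
	return hweight8(dasd_path_get_opm(device));
}
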
1130*4882a593Smuzhiyun /*
1131*4882a593Smuzhiyun  * add functions for path masks
1132*4882a593Smuzhiyun  * the existing path mask will be extended by the given path mask
1133*4882a593Smuzhiyun  */
1134*4882a593Smuzhiyun static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
1135*4882a593Smuzhiyun {
1136*4882a593Smuzhiyun 	int chp;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1139*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1140*4882a593Smuzhiyun 			dasd_path_verify(device, chp);
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	int chp;
1146*4882a593Smuzhiyun 	__u8 nopm = 0x00;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1149*4882a593Smuzhiyun 		if (dasd_path_is_nohpf(device, chp) ||
1150*4882a593Smuzhiyun 		    dasd_path_is_ifcc(device, chp) ||
1151*4882a593Smuzhiyun 		    dasd_path_is_cuir(device, chp) ||
1152*4882a593Smuzhiyun 		    dasd_path_is_miscabled(device, chp))
1153*4882a593Smuzhiyun 			nopm |= 0x80 >> chp;
1154*4882a593Smuzhiyun 	return nopm;
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun 	int chp;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1162*4882a593Smuzhiyun 		if (pm & (0x80 >> chp)) {
1163*4882a593Smuzhiyun 			dasd_path_operational(device, chp);
1164*4882a593Smuzhiyun 			/*
1165*4882a593Smuzhiyun 			 * if the path is used, it should not be in one
1166*4882a593Smuzhiyun 			 * of the negative lists
1167*4882a593Smuzhiyun 			 */
1168*4882a593Smuzhiyun 			dasd_path_clear_nohpf(device, chp);
1169*4882a593Smuzhiyun 			dasd_path_clear_cuir(device, chp);
1170*4882a593Smuzhiyun 			dasd_path_clear_cable(device, chp);
1171*4882a593Smuzhiyun 			dasd_path_clear_ifcc(device, chp);
1172*4882a593Smuzhiyun 		}
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun 	int chp;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1180*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1181*4882a593Smuzhiyun 			dasd_path_miscabled(device, chp);
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
1185*4882a593Smuzhiyun {
1186*4882a593Smuzhiyun 	int chp;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1189*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1190*4882a593Smuzhiyun 			dasd_path_cuir(device, chp);
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
1194*4882a593Smuzhiyun {
1195*4882a593Smuzhiyun 	int chp;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1198*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1199*4882a593Smuzhiyun 			dasd_path_ifcc(device, chp);
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
1203*4882a593Smuzhiyun {
1204*4882a593Smuzhiyun 	int chp;
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1207*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1208*4882a593Smuzhiyun 			dasd_path_nonpreferred(device, chp);
1209*4882a593Smuzhiyun }
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
1212*4882a593Smuzhiyun {
1213*4882a593Smuzhiyun 	int chp;
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1216*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1217*4882a593Smuzhiyun 			dasd_path_nohpf(device, chp);
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
1221*4882a593Smuzhiyun {
1222*4882a593Smuzhiyun 	int chp;
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1225*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1226*4882a593Smuzhiyun 			dasd_path_preferred(device, chp);
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun 
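/*
 * Illustrative sketch (editor addition): how the per-reason "add" helpers
 * above feed into dasd_path_get_notoperpm().  Adding a path to the CUIR
 * mask makes its bit show up in the not-operational summary mask.  The
 * helper name dasd_path_example_quiesce_path() is hypothetical.
 */
static inline void dasd_path_example_quiesce_path(struct dasd_device *device,
						  int chp)
{
	dasd_path_add_cuirpm(device, 0x80 >> chp);
	/* dasd_path_get_notoperpm(device) now includes (0x80 >> chp) */
}
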
1229*4882a593Smuzhiyun /*
1230*4882a593Smuzhiyun  * set functions for path masks
1231*4882a593Smuzhiyun  * the existing path mask will be replaced by the given path mask
1232*4882a593Smuzhiyun  */
1233*4882a593Smuzhiyun static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	int chp;
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1238*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1239*4882a593Smuzhiyun 			dasd_path_verify(device, chp);
1240*4882a593Smuzhiyun 		else
1241*4882a593Smuzhiyun 			dasd_path_clear_verify(device, chp);
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
1245*4882a593Smuzhiyun {
1246*4882a593Smuzhiyun 	int chp;
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++) {
1249*4882a593Smuzhiyun 		dasd_path_clear_oper(device, chp);
1250*4882a593Smuzhiyun 		if (pm & (0x80 >> chp)) {
1251*4882a593Smuzhiyun 			dasd_path_operational(device, chp);
1252*4882a593Smuzhiyun 			/*
1253*4882a593Smuzhiyun 			 * if the path is used, it should not be in one
1254*4882a593Smuzhiyun 			 * of the negative lists
1255*4882a593Smuzhiyun 			 */
1256*4882a593Smuzhiyun 			dasd_path_clear_nohpf(device, chp);
1257*4882a593Smuzhiyun 			dasd_path_clear_cuir(device, chp);
1258*4882a593Smuzhiyun 			dasd_path_clear_cable(device, chp);
1259*4882a593Smuzhiyun 			dasd_path_clear_ifcc(device, chp);
1260*4882a593Smuzhiyun 		}
1261*4882a593Smuzhiyun 	}
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun 
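/*
 * Illustrative sketch (editor addition): the difference between the "add"
 * and "set" helpers.  dasd_path_add_tbvpm() only sets the bits given in pm,
 * while dasd_path_set_tbvpm() also clears the verify state of all paths
 * whose bit is not set, so pm replaces the previous mask.  The helper name
 * dasd_path_example_update_tbvpm() is hypothetical.
 */
static inline void dasd_path_example_update_tbvpm(struct dasd_device *device,
						  __u8 pm, int replace)
{
	if (replace)
		dasd_path_set_tbvpm(device, pm);	/* pm becomes the new mask */
	else
		dasd_path_add_tbvpm(device, pm);	/* pm is OR'ed into the mask */
}
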
1264*4882a593Smuzhiyun /*
1265*4882a593Smuzhiyun  * remove functions for path masks
1266*4882a593Smuzhiyun  * the bits set in the given path mask will be cleared in the existing path mask
1267*4882a593Smuzhiyun  */
1268*4882a593Smuzhiyun static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
1269*4882a593Smuzhiyun {
1270*4882a593Smuzhiyun 	int chp;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++) {
1273*4882a593Smuzhiyun 		if (pm & (0x80 >> chp))
1274*4882a593Smuzhiyun 			dasd_path_clear_oper(device, chp);
1275*4882a593Smuzhiyun 	}
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun /*
1279*4882a593Smuzhiyun  * add the newly available path to the to-be-verified path mask and remove
1280*4882a593Smuzhiyun  * it from normal operation until it is verified
1281*4882a593Smuzhiyun  */
1282*4882a593Smuzhiyun static inline void dasd_path_available(struct dasd_device *device, int chp)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	dasd_path_clear_oper(device, chp);
1285*4882a593Smuzhiyun 	dasd_path_verify(device, chp);
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun 
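/*
 * Illustrative sketch (editor addition): applying dasd_path_available()
 * above to every path reported in an 8-bit path mask.  The mask-based
 * helper dasd_path_example_available_pm() is hypothetical.
 */
static inline void dasd_path_example_available_pm(struct dasd_device *device,
						  __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_available(device, chp);
}
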
1288*4882a593Smuzhiyun static inline void dasd_path_notoper(struct dasd_device *device, int chp)
1289*4882a593Smuzhiyun {
1290*4882a593Smuzhiyun 	dasd_path_clear_oper(device, chp);
1291*4882a593Smuzhiyun 	dasd_path_clear_preferred(device, chp);
1292*4882a593Smuzhiyun 	dasd_path_clear_nonpreferred(device, chp);
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun /*
1296*4882a593Smuzhiyun  * remove all paths from normal operation
1297*4882a593Smuzhiyun  */
1298*4882a593Smuzhiyun static inline void dasd_path_no_path(struct dasd_device *device)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	int chp;
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	for (chp = 0; chp < 8; chp++)
1303*4882a593Smuzhiyun 		dasd_path_notoper(device, chp);
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	dasd_path_clear_all_verify(device);
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun 
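/*
 * Illustrative sketch (editor addition): a hypothetical check that drops a
 * device into the "no usable path" state once its operational mask is
 * empty.  Whether and where the real driver performs such a check is not
 * shown here.
 */
static inline void dasd_path_example_check_paths(struct dasd_device *device)
{
	if (!dasd_path_get_opm(device))
		dasd_path_no_path(device);
}
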
1308*4882a593Smuzhiyun /* end - path handling */
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun #endif				/* DASD_INT_H */