xref: /OK3568_Linux_fs/kernel/drivers/s390/char/vmlogrdr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 *	character device driver for reading z/VM system service records
 *
 *
 *	Copyright IBM Corp. 2004, 2009
 *	character device driver for reading z/VM system service records,
 *	Version 1.0
 *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *		   Stefan Weinhuber <wein@de.ibm.com>
 *
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>

MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 "                            Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
		    "system service records.");
MODULE_LICENSE("GPL");


/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))

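/*
 * Layout of priv->buffer as handed to the reader (see vmlogrdr_receive_data):
 * the first sizeof(int) bytes hold the total record length including the
 * FENCE, followed by the record data itself and finally the FENCE string
 * "EOR" that marks the end of the record.
 */
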
/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];
	char internal_name[8];
	char recording_name[8];
	struct iucv_path *path;
	int connection_established;
	int iucv_path_severed;
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;
	int minor_num;
	char * buffer;
	char * current_position;
	int remaining;
	ulong residual_length;
	int buffer_free;
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;
	struct device  *device;
	struct device  *class_device;
	int autorecording;
	int autopurge;
};


/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
	.llseek  = no_llseek,
};


static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);


static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};


static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */

static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

#define MAXMINOR  ARRAY_SIZE(sys_ser)

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev  *vmlogrdr_cdev = NULL;
static int recording_class_AB;


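/*
 * IUCV callback, invoked in bottom-half context once the connection to the
 * system service is established; wakes up the open() path waiting on
 * conn_wait_queue.
 */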
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 1;
	spin_unlock(&logptr->priv_lock);
	wake_up(&conn_wait_queue);
}


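/*
 * IUCV callback, invoked when the peer severs the path. The path is freed
 * and both wait queues are woken up so that open() and a sleeping read()
 * can bail out.
 */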
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t * logptr = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}


static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}


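/*
 * Parse the response to the CP command "QUERY COMMAND RECORDING" to decide
 * whether the RECORDING command has to be issued with the QID option later
 * on: returns 1 if the listed privilege classes include A or B (or ANY),
 * 0 otherwise (or if NONE).
 */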
static int vmlogrdr_get_recording_class_AB(void)
{
	static const char cp_command[] = "QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len,i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response,sizeof(cp_response));
	// now the parsing
	tail=strnchr(cp_response,len,'=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY",tail,3))
		return 1;
	if (!strncmp("NONE",tail,4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i=tail-cp_response; i<len; i++)
		if ( cp_response[i]=='A' || cp_response[i]=='B' )
			return 1;
	return 0;
}


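/*
 * Switch CP recording for the given service on (action == 1) or off
 * (action == 0) via the RECORDING CP command. If purge is set, queued
 * records are purged before switching on respectively after switching off.
 */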
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
			      int action, int purge)
{

	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;
	int rc;

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as a separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge && (action == 1)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		logptr->recording_name,
		onoff,
		qid_string);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		rc = 0;
	else
		rc = -EIO;
	/*
	 * If we turn recording off, we have to purge any remaining records
	 * afterwards, as a large number of queued records may impact z/VM
	 * performance.
	 */
	if (purge && (action == 0)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	return rc;
}


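/*
 * open() allows only a single, blocking reader per device. It optionally
 * switches CP recording on (autorecording) and then establishes the IUCV
 * connection to the system service, waiting for the connection to either
 * complete or be severed.
 */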
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t * logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow the device to be opened for blocking reads
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -EOPNOTSUPP;

	/* Be sure this device hasn't already been opened */
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use)	{
		spin_unlock_bh(&logptr->priv_lock);
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;

	/* start recording for this service */
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
		if (ret)
			pr_warn("vmlogrdr: failed to start recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("vmlogrdr: iucv connection to %s "
		       "failed with rc %i \n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	nonseekable_open(inode, filp);
	return 0;

out_record:
	if (logptr->autorecording)
		vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	return -EIO;
}


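/*
 * release() severs the IUCV path, optionally switches CP recording off
 * again (autorecording) and marks the device as no longer in use.
 */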
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;

	struct vmlogrdr_priv_t * logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
		if (ret)
			pr_warn("vmlogrdr: failed to stop recording automatically\n");
	}
	logptr->dev_in_use = 0;

	return 0;
}


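/*
 * Fetch the next (part of a) record from IUCV into priv->buffer.
 * Returns 0 if data was placed into the buffer, non-zero if no message is
 * pending or the receive failed.
 */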
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}


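/*
 * read() blocks until a record is available and then hands out at most the
 * rest of the current record (length word, record data and FENCE), so a
 * single record may be consumed over several read() calls.
 */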
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}

static ssize_t vmlogrdr_autopurge_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autopurge=0;
		break;
	case '1':
		priv->autopurge=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);


static ssize_t vmlogrdr_purge_store(struct device * dev,
				    struct device_attribute *attr,
				    const char * buf, size_t count)
{

	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */

	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
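/*
 * Usage sketch (the sysfs path is an assumption, it depends on how the iucv
 * bus devices are exposed on the running system):
 *	echo 1 > /sys/bus/iucv/devices/logrec/purge
 * discards all records that CP has queued for the corresponding recording
 * service (EREP for the logrec device).
 */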


static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autorecording=0;
		break;
	case '1':
		priv->autorecording=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);


static ssize_t vmlogrdr_recording_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = vmlogrdr_recording(priv,0,0);
		break;
	case '1':
		ret = vmlogrdr_recording(priv,1,0);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;
	else
		return count;

}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);


static ssize_t recording_status_show(struct device_driver *driver, char *buf)
{
	static const char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}
static DRIVER_ATTR_RO(recording_status);
static struct attribute *vmlogrdr_drv_attrs[] = {
	&driver_attr_recording_status.attr,
	NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
	.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
	&vmlogrdr_drv_attr_group,
	NULL,
};

static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
	&vmlogrdr_attr_group,
	NULL,
};

static int vmlogrdr_pm_prepare(struct device *dev)
{
	int rc;
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	rc = 0;
	if (priv) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->dev_in_use)
			rc = -EBUSY;
		spin_unlock_bh(&priv->priv_lock);
	}
	if (rc)
		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
		       dev_name(dev));
	return rc;
}


static const struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,
};

static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
	.groups = vmlogrdr_drv_attr_groups,
};

static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_driver;
	}
	return 0;

out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}


static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}


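/*
 * Create the struct device on the iucv bus plus the matching class device
 * (which provides the character device node) for one system service; the
 * sysfs attributes are attached via dev->groups.
 */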
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (dev) {
		dev_set_name(dev, "%s", priv->internal_name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->driver = &vmlogrdr_driver;
		dev->groups = vmlogrdr_attr_groups;
		dev_set_drvdata(dev, priv);
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
	} else
		return -ENOMEM;
	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}

	priv->class_device = device_create(vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device=NULL;
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}


static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
	if (priv->device != NULL) {
		device_unregister(priv->device);
		priv->device=NULL;
	}
	return 0;
}


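/*
 * Allocate and register the single character device that serves all
 * MAXMINOR minors.
 */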
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;
	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev) {
		return -ENOMEM;
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
	if (!rc)
		return 0;

	// cleanup: cdev is not fully registered, no cdev_del here!
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev=NULL;
	return rc;
}


static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}


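/*
 * Module initialization: check for z/VM, allocate the minor range, register
 * driver and devices, allocate one page of record buffer per service and
 * finally register the character device.
 */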
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (! MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}


static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
	return;
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);