xref: /OK3568_Linux_fs/kernel/drivers/misc/ibmvmc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * IBM Power Systems Virtual Management Channel Support.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2004, 2018 IBM Corp.
6*4882a593Smuzhiyun  *   Dave Engebretsen engebret@us.ibm.com
7*4882a593Smuzhiyun  *   Steven Royer seroyer@linux.vnet.ibm.com
8*4882a593Smuzhiyun  *   Adam Reznechek adreznec@linux.vnet.ibm.com
9*4882a593Smuzhiyun  *   Bryant G. Ly <bryantly@linux.vnet.ibm.com>
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/kthread.h>
15*4882a593Smuzhiyun #include <linux/major.h>
16*4882a593Smuzhiyun #include <linux/string.h>
17*4882a593Smuzhiyun #include <linux/fcntl.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun #include <linux/poll.h>
20*4882a593Smuzhiyun #include <linux/init.h>
21*4882a593Smuzhiyun #include <linux/fs.h>
22*4882a593Smuzhiyun #include <linux/interrupt.h>
23*4882a593Smuzhiyun #include <linux/spinlock.h>
24*4882a593Smuzhiyun #include <linux/percpu.h>
25*4882a593Smuzhiyun #include <linux/delay.h>
26*4882a593Smuzhiyun #include <linux/uaccess.h>
27*4882a593Smuzhiyun #include <linux/io.h>
28*4882a593Smuzhiyun #include <linux/miscdevice.h>
29*4882a593Smuzhiyun #include <linux/sched/signal.h>
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include <asm/byteorder.h>
32*4882a593Smuzhiyun #include <asm/irq.h>
33*4882a593Smuzhiyun #include <asm/vio.h>
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #include "ibmvmc.h"
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #define IBMVMC_DRIVER_VERSION "1.0"
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /*
40*4882a593Smuzhiyun  * Static global variables
41*4882a593Smuzhiyun  */
42*4882a593Smuzhiyun static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun static const char ibmvmc_driver_name[] = "ibmvmc";
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun static struct ibmvmc_struct ibmvmc;
47*4882a593Smuzhiyun static struct ibmvmc_hmc hmcs[MAX_HMCS];
48*4882a593Smuzhiyun static struct crq_server_adapter ibmvmc_adapter;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
51*4882a593Smuzhiyun static int ibmvmc_max_hmcs = DEFAULT_HMCS;
52*4882a593Smuzhiyun static int ibmvmc_max_mtu = DEFAULT_MTU;
53*4882a593Smuzhiyun 
/**
 * h_copy_rdma - copy data between logical I/O bus addresses via hcall
 * @length:	number of bytes to copy
 * @sliobn:	source logical I/O bus number
 * @slioba:	source logical I/O bus address
 * @dliobn:	destination logical I/O bus number
 * @dlioba:	destination logical I/O bus address
 *
 * Thin wrapper around the H_COPY_RDMA hcall.
 *
 * Return: hcall status (H_SUCCESS on success).
 */
static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
			       u64 dliobn, u64 dlioba)
{
	long rc = 0;

	/* Ensure all writes to source memory are visible before hcall */
	dma_wmb();
	/* Fixed: format string was missing the closing ')' */
	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
		 length, sliobn, slioba, dliobn, dlioba);
	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
				dliobn, dlioba);
	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);

	return rc;
}
69*4882a593Smuzhiyun 
/**
 * h_free_crq - release the CRQ for a unit address, retrying while busy
 * @unit_address:	unit address of the CRQ to free
 *
 * Issues H_FREE_CRQ until the hypervisor stops reporting a busy status,
 * sleeping for the hypervisor-suggested interval on long-busy returns.
 */
static inline void h_free_crq(uint32_t unit_address)
{
	long rc;

	for (;;) {
		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
		if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
			break;
		/* Plain H_BUSY retries immediately; long-busy codes sleep */
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));
	}
}
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun /**
83*4882a593Smuzhiyun  * h_request_vmc: - request a hypervisor virtual management channel device
84*4882a593Smuzhiyun  * @vmc_index: drc index of the vmc device created
85*4882a593Smuzhiyun  *
86*4882a593Smuzhiyun  * Requests the hypervisor create a new virtual management channel device,
87*4882a593Smuzhiyun  * allowing this partition to send hypervisor virtualization control
88*4882a593Smuzhiyun  * commands.
89*4882a593Smuzhiyun  *
90*4882a593Smuzhiyun  * Return:
91*4882a593Smuzhiyun  *	0 - Success
92*4882a593Smuzhiyun  *	Non-zero - Failure
93*4882a593Smuzhiyun  */
static inline long h_request_vmc(u32 *vmc_index)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	for (;;) {
		/* Ask the hypervisor to create the VMC device */
		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
		/* First return word carries the drc index of the new device */
		*vmc_index = retbuf[0];
		if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
			break;
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));
	}

	return rc;
}
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun /* routines for managing a command/response queue */
113*4882a593Smuzhiyun /**
114*4882a593Smuzhiyun  * ibmvmc_handle_event: - Interrupt handler for crq events
115*4882a593Smuzhiyun  * @irq:        number of irq to handle, not used
116*4882a593Smuzhiyun  * @dev_instance: crq_server_adapter that received interrupt
117*4882a593Smuzhiyun  *
118*4882a593Smuzhiyun  * Disables interrupts and schedules ibmvmc_task
119*4882a593Smuzhiyun  *
120*4882a593Smuzhiyun  * Always returns IRQ_HANDLED
121*4882a593Smuzhiyun  */
ibmvmc_handle_event(int irq,void * dev_instance)122*4882a593Smuzhiyun static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
123*4882a593Smuzhiyun {
124*4882a593Smuzhiyun 	struct crq_server_adapter *adapter =
125*4882a593Smuzhiyun 		(struct crq_server_adapter *)dev_instance;
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	vio_disable_interrupts(to_vio_dev(adapter->dev));
128*4882a593Smuzhiyun 	tasklet_schedule(&adapter->work_task);
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	return IRQ_HANDLED;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun /**
134*4882a593Smuzhiyun  * ibmvmc_release_crq_queue - Release CRQ Queue
135*4882a593Smuzhiyun  *
136*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
137*4882a593Smuzhiyun  *
138*4882a593Smuzhiyun  * Return:
139*4882a593Smuzhiyun  *	0 - Success
140*4882a593Smuzhiyun  *	Non-Zero - Failure
141*4882a593Smuzhiyun  */
ibmvmc_release_crq_queue(struct crq_server_adapter * adapter)142*4882a593Smuzhiyun static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
145*4882a593Smuzhiyun 	struct crq_queue *queue = &adapter->queue;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	free_irq(vdev->irq, (void *)adapter);
148*4882a593Smuzhiyun 	tasklet_kill(&adapter->work_task);
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	if (adapter->reset_task)
151*4882a593Smuzhiyun 		kthread_stop(adapter->reset_task);
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	h_free_crq(vdev->unit_address);
154*4882a593Smuzhiyun 	dma_unmap_single(adapter->dev,
155*4882a593Smuzhiyun 			 queue->msg_token,
156*4882a593Smuzhiyun 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
157*4882a593Smuzhiyun 	free_page((unsigned long)queue->msgs);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun /**
161*4882a593Smuzhiyun  * ibmvmc_reset_crq_queue - Reset CRQ Queue
162*4882a593Smuzhiyun  *
163*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
164*4882a593Smuzhiyun  *
165*4882a593Smuzhiyun  * This function calls h_free_crq and then calls H_REG_CRQ and does all the
166*4882a593Smuzhiyun  * bookkeeping to get us back to where we can communicate.
167*4882a593Smuzhiyun  *
168*4882a593Smuzhiyun  * Return:
169*4882a593Smuzhiyun  *	0 - Success
170*4882a593Smuzhiyun  *	Non-Zero - Failure
171*4882a593Smuzhiyun  */
ibmvmc_reset_crq_queue(struct crq_server_adapter * adapter)172*4882a593Smuzhiyun static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
175*4882a593Smuzhiyun 	struct crq_queue *queue = &adapter->queue;
176*4882a593Smuzhiyun 	int rc = 0;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	/* Close the CRQ */
179*4882a593Smuzhiyun 	h_free_crq(vdev->unit_address);
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 	/* Clean out the queue */
182*4882a593Smuzhiyun 	memset(queue->msgs, 0x00, PAGE_SIZE);
183*4882a593Smuzhiyun 	queue->cur = 0;
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	/* And re-open it again */
186*4882a593Smuzhiyun 	rc = plpar_hcall_norets(H_REG_CRQ,
187*4882a593Smuzhiyun 				vdev->unit_address,
188*4882a593Smuzhiyun 				queue->msg_token, PAGE_SIZE);
189*4882a593Smuzhiyun 	if (rc == 2)
190*4882a593Smuzhiyun 		/* Adapter is good, but other end is not ready */
191*4882a593Smuzhiyun 		dev_warn(adapter->dev, "Partner adapter not ready\n");
192*4882a593Smuzhiyun 	else if (rc != 0)
193*4882a593Smuzhiyun 		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	return rc;
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun /**
199*4882a593Smuzhiyun  * crq_queue_next_crq: - Returns the next entry in message queue
200*4882a593Smuzhiyun  * @queue:      crq_queue to use
201*4882a593Smuzhiyun  *
202*4882a593Smuzhiyun  * Returns pointer to next entry in queue, or NULL if there are no new
203*4882a593Smuzhiyun  * entried in the CRQ.
204*4882a593Smuzhiyun  */
static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{
	struct ibmvmc_crq_msg *crq;
	unsigned long flags;

	/* The lock serializes cursor updates against concurrent consumers */
	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	/* Top bit of 'valid' is set by the sender once the entry is ready */
	if (crq->valid & 0x80) {
		/* Consume this slot; wrap the cursor at the end of the ring */
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * other bits of the CRQ entry
		 */
		dma_rmb();
	} else {
		/* No new entry at the cursor */
		crq = NULL;
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun /**
230*4882a593Smuzhiyun  * ibmvmc_send_crq - Send CRQ
231*4882a593Smuzhiyun  *
232*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
233*4882a593Smuzhiyun  * @word1:	Word1 Data field
234*4882a593Smuzhiyun  * @word2:	Word2 Data field
235*4882a593Smuzhiyun  *
236*4882a593Smuzhiyun  * Return:
237*4882a593Smuzhiyun  *	0 - Success
238*4882a593Smuzhiyun  *	Non-Zero - Failure
239*4882a593Smuzhiyun  */
static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
			    u64 word1, u64 word2)
{
	struct vio_dev *vio = to_vio_dev(adapter->dev);
	long rc;

	dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
		vio->unit_address, word1, word2);

	/*
	 * Ensure the command buffer is flushed to memory before handing it
	 * over to the other side to prevent it from fetching any stale data.
	 */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_CRQ, vio->unit_address, word1, word2);
	dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);

	return rc;
}
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun /**
261*4882a593Smuzhiyun  * alloc_dma_buffer - Create DMA Buffer
262*4882a593Smuzhiyun  *
263*4882a593Smuzhiyun  * @vdev:	vio_dev struct
264*4882a593Smuzhiyun  * @size:	Size field
265*4882a593Smuzhiyun  * @dma_handle:	DMA address field
266*4882a593Smuzhiyun  *
267*4882a593Smuzhiyun  * Allocates memory for the command queue and maps remote memory into an
268*4882a593Smuzhiyun  * ioba.
269*4882a593Smuzhiyun  *
270*4882a593Smuzhiyun  * Returns a pointer to the buffer
271*4882a593Smuzhiyun  */
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
			      dma_addr_t *dma_handle)
{
	void *addr;

	/* GFP_ATOMIC: callers may be in atomic context */
	addr = kzalloc(size, GFP_ATOMIC);
	if (!addr)
		goto fail;

	/* Map the buffer for bidirectional DMA with the partner */
	*dma_handle = dma_map_single(&vdev->dev, addr, size,
				     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
		kfree_sensitive(addr);
		goto fail;
	}

	return addr;

fail:
	*dma_handle = 0;
	return NULL;
}
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun /**
297*4882a593Smuzhiyun  * free_dma_buffer - Free DMA Buffer
298*4882a593Smuzhiyun  *
299*4882a593Smuzhiyun  * @vdev:	vio_dev struct
300*4882a593Smuzhiyun  * @size:	Size field
301*4882a593Smuzhiyun  * @vaddr:	Address field
302*4882a593Smuzhiyun  * @dma_handle:	DMA address field
303*4882a593Smuzhiyun  *
304*4882a593Smuzhiyun  * Releases memory for a command queue and unmaps mapped remote memory.
305*4882a593Smuzhiyun  */
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	/* Unmap from the I/O address space, then scrub and free the memory */
	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);
	kfree_sensitive(vaddr);
}
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun /**
317*4882a593Smuzhiyun  * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
318*4882a593Smuzhiyun  *
319*4882a593Smuzhiyun  * @hmc_index:	HMC Index Field
320*4882a593Smuzhiyun  *
321*4882a593Smuzhiyun  * Return:
322*4882a593Smuzhiyun  *	Pointer to ibmvmc_buffer
323*4882a593Smuzhiyun  */
static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
{
	struct ibmvmc_buffer *pool;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index)
		return NULL;

	/* Claim the first valid, free buffer owned by this partition */
	pool = hmcs[hmc_index].buffer;
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		struct ibmvmc_buffer *buf = &pool[i];

		if (buf->valid && buf->free &&
		    buf->owner == VMC_BUF_OWNER_ALPHA) {
			buf->free = 0;
			return buf;
		}
	}

	return NULL;
}
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun /**
348*4882a593Smuzhiyun  * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
349*4882a593Smuzhiyun  *
350*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
351*4882a593Smuzhiyun  * @hmc_index:	Hmc Index field
352*4882a593Smuzhiyun  *
353*4882a593Smuzhiyun  * Return:
354*4882a593Smuzhiyun  *	Pointer to ibmvmc_buffer
355*4882a593Smuzhiyun  */
static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
							u8 hmc_index)
{
	struct ibmvmc_buffer *pool;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
			 hmc_index);
		return NULL;
	}

	/* Claim the first free buffer owned by this partition (valid or not) */
	pool = hmcs[hmc_index].buffer;
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		struct ibmvmc_buffer *buf = &pool[i];

		if (buf->free &&
		    buf->owner == VMC_BUF_OWNER_ALPHA) {
			buf->free = 0;
			return buf;
		}
	}

	return NULL;
}
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun /**
384*4882a593Smuzhiyun  * ibmvmc_free_hmc_buffer - Free an HMC Buffer
385*4882a593Smuzhiyun  *
386*4882a593Smuzhiyun  * @hmc:	ibmvmc_hmc struct
387*4882a593Smuzhiyun  * @buffer:	ibmvmc_buffer struct
388*4882a593Smuzhiyun  *
389*4882a593Smuzhiyun  */
ibmvmc_free_hmc_buffer(struct ibmvmc_hmc * hmc,struct ibmvmc_buffer * buffer)390*4882a593Smuzhiyun static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
391*4882a593Smuzhiyun 				   struct ibmvmc_buffer *buffer)
392*4882a593Smuzhiyun {
393*4882a593Smuzhiyun 	unsigned long flags;
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	spin_lock_irqsave(&hmc->lock, flags);
396*4882a593Smuzhiyun 	buffer->free = 1;
397*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hmc->lock, flags);
398*4882a593Smuzhiyun }
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun /**
401*4882a593Smuzhiyun  * ibmvmc_count_hmc_buffers - Count HMC Buffers
402*4882a593Smuzhiyun  *
403*4882a593Smuzhiyun  * @hmc_index:	HMC Index field
404*4882a593Smuzhiyun  * @valid:	Valid number of buffers field
405*4882a593Smuzhiyun  * @free:	Free number of buffers field
406*4882a593Smuzhiyun  *
407*4882a593Smuzhiyun  */
static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
				     unsigned int *free)
{
	struct ibmvmc_buffer *pool;
	unsigned int nvalid = 0;
	unsigned int nfree = 0;
	unsigned long i;
	unsigned long flags;

	if (hmc_index > ibmvmc.max_hmc_index)
		return;

	if (!valid || !free)
		return;

	pool = hmcs[hmc_index].buffer;

	/* Count under the HMC lock so the snapshot is self-consistent */
	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (pool[i].valid) {
			nvalid++;
			if (pool[i].free)
				nfree++;
		}
	}
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	*valid = nvalid;
	*free = nfree;
}
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun /**
438*4882a593Smuzhiyun  * ibmvmc_get_free_hmc - Get Free HMC
439*4882a593Smuzhiyun  *
440*4882a593Smuzhiyun  * Return:
441*4882a593Smuzhiyun  *	Pointer to an available HMC Connection
442*4882a593Smuzhiyun  *	Null otherwise
443*4882a593Smuzhiyun  */
ibmvmc_get_free_hmc(void)444*4882a593Smuzhiyun static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun 	unsigned long i;
447*4882a593Smuzhiyun 	unsigned long flags;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	/*
450*4882a593Smuzhiyun 	 * Find an available HMC connection.
451*4882a593Smuzhiyun 	 */
452*4882a593Smuzhiyun 	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
453*4882a593Smuzhiyun 		spin_lock_irqsave(&hmcs[i].lock, flags);
454*4882a593Smuzhiyun 		if (hmcs[i].state == ibmhmc_state_free) {
455*4882a593Smuzhiyun 			hmcs[i].index = i;
456*4882a593Smuzhiyun 			hmcs[i].state = ibmhmc_state_initial;
457*4882a593Smuzhiyun 			spin_unlock_irqrestore(&hmcs[i].lock, flags);
458*4882a593Smuzhiyun 			return &hmcs[i];
459*4882a593Smuzhiyun 		}
460*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hmcs[i].lock, flags);
461*4882a593Smuzhiyun 	}
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	return NULL;
464*4882a593Smuzhiyun }
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun /**
467*4882a593Smuzhiyun  * ibmvmc_return_hmc - Return an HMC Connection
468*4882a593Smuzhiyun  *
469*4882a593Smuzhiyun  * @hmc:		ibmvmc_hmc struct
470*4882a593Smuzhiyun  * @release_readers:	Number of readers connected to session
471*4882a593Smuzhiyun  *
472*4882a593Smuzhiyun  * This function releases the HMC connections back into the pool.
473*4882a593Smuzhiyun  *
474*4882a593Smuzhiyun  * Return:
475*4882a593Smuzhiyun  *	0 - Success
476*4882a593Smuzhiyun  *	Non-zero - Failure
477*4882a593Smuzhiyun  */
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
	struct ibmvmc_buffer *buffer;
	struct crq_server_adapter *adapter;
	struct vio_dev *vdev;
	unsigned long i;
	unsigned long flags;

	if (!hmc || !hmc->adapter)
		return -EIO;

	if (release_readers) {
		/* Invalidate the file session and kick any blocked readers
		 * so they observe the teardown and return.
		 */
		if (hmc->file_session) {
			struct ibmvmc_file_session *session = hmc->file_session;

			session->valid = 0;
			wake_up_interruptible(&ibmvmc_read_wait);
		}
	}

	adapter = hmc->adapter;
	vdev = to_vio_dev(adapter->dev);

	/* Reset the connection and reclaim its buffers under the HMC lock */
	spin_lock_irqsave(&hmc->lock, flags);
	hmc->index = 0;
	hmc->state = ibmhmc_state_free;
	hmc->queue_head = 0;
	hmc->queue_tail = 0;
	buffer = hmc->buffer;
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		/* Unmap and free DMA memory only for buffers actually set up */
		if (buffer[i].valid) {
			free_dma_buffer(vdev,
					ibmvmc.max_mtu,
					buffer[i].real_addr_local,
					buffer[i].dma_addr_local);
			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
		}
		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));

		/* Drop any queued outbound references to the freed buffers */
		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
	}

	spin_unlock_irqrestore(&hmc->lock, flags);

	return 0;
}
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun /**
526*4882a593Smuzhiyun  * ibmvmc_send_open - Interface Open
527*4882a593Smuzhiyun  * @buffer: Pointer to ibmvmc_buffer struct
528*4882a593Smuzhiyun  * @hmc: Pointer to ibmvmc_hmc struct
529*4882a593Smuzhiyun  *
530*4882a593Smuzhiyun  * This command is sent by the management partition as the result of a
531*4882a593Smuzhiyun  * management partition device request. It causes the hypervisor to
532*4882a593Smuzhiyun  * prepare a set of data buffers for the management application connection
533*4882a593Smuzhiyun  * indicated HMC idx. A unique HMC Idx would be used if multiple management
534*4882a593Smuzhiyun  * applications running concurrently were desired. Before responding to this
535*4882a593Smuzhiyun  * command, the hypervisor must provide the management partition with at
536*4882a593Smuzhiyun  * least one of these new buffers via the Add Buffer. This indicates whether
537*4882a593Smuzhiyun  * the messages are inbound or outbound from the hypervisor.
538*4882a593Smuzhiyun  *
539*4882a593Smuzhiyun  * Return:
540*4882a593Smuzhiyun  *	0 - Success
541*4882a593Smuzhiyun  *	Non-zero - Failure
542*4882a593Smuzhiyun  */
ibmvmc_send_open(struct ibmvmc_buffer * buffer,struct ibmvmc_hmc * hmc)543*4882a593Smuzhiyun static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
544*4882a593Smuzhiyun 			    struct ibmvmc_hmc *hmc)
545*4882a593Smuzhiyun {
546*4882a593Smuzhiyun 	struct ibmvmc_crq_msg crq_msg;
547*4882a593Smuzhiyun 	struct crq_server_adapter *adapter;
548*4882a593Smuzhiyun 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
549*4882a593Smuzhiyun 	int rc = 0;
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	if (!hmc || !hmc->adapter)
552*4882a593Smuzhiyun 		return -EIO;
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	adapter = hmc->adapter;
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
557*4882a593Smuzhiyun 		(unsigned long)buffer->size, (unsigned long)adapter->liobn,
558*4882a593Smuzhiyun 		(unsigned long)buffer->dma_addr_local,
559*4882a593Smuzhiyun 		(unsigned long)adapter->riobn,
560*4882a593Smuzhiyun 		(unsigned long)buffer->dma_addr_remote);
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	rc = h_copy_rdma(buffer->size,
563*4882a593Smuzhiyun 			 adapter->liobn,
564*4882a593Smuzhiyun 			 buffer->dma_addr_local,
565*4882a593Smuzhiyun 			 adapter->riobn,
566*4882a593Smuzhiyun 			 buffer->dma_addr_remote);
567*4882a593Smuzhiyun 	if (rc) {
568*4882a593Smuzhiyun 		dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
569*4882a593Smuzhiyun 			rc);
570*4882a593Smuzhiyun 		return -EIO;
571*4882a593Smuzhiyun 	}
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	hmc->state = ibmhmc_state_opening;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	crq_msg.valid = 0x80;
576*4882a593Smuzhiyun 	crq_msg.type = VMC_MSG_OPEN;
577*4882a593Smuzhiyun 	crq_msg.status = 0;
578*4882a593Smuzhiyun 	crq_msg.var1.rsvd = 0;
579*4882a593Smuzhiyun 	crq_msg.hmc_session = hmc->session;
580*4882a593Smuzhiyun 	crq_msg.hmc_index = hmc->index;
581*4882a593Smuzhiyun 	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
582*4882a593Smuzhiyun 	crq_msg.rsvd = 0;
583*4882a593Smuzhiyun 	crq_msg.var3.rsvd = 0;
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
586*4882a593Smuzhiyun 			be64_to_cpu(crq_as_u64[1]));
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun 	return rc;
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun /**
592*4882a593Smuzhiyun  * ibmvmc_send_close - Interface Close
593*4882a593Smuzhiyun  * @hmc: Pointer to ibmvmc_hmc struct
594*4882a593Smuzhiyun  *
595*4882a593Smuzhiyun  * This command is sent by the management partition to terminate a
596*4882a593Smuzhiyun  * management application to hypervisor connection. When this command is
597*4882a593Smuzhiyun  * sent, the management partition has quiesced all I/O operations to all
598*4882a593Smuzhiyun  * buffers associated with this management application connection, and
599*4882a593Smuzhiyun  * has freed any storage for these buffers.
600*4882a593Smuzhiyun  *
601*4882a593Smuzhiyun  * Return:
602*4882a593Smuzhiyun  *	0 - Success
603*4882a593Smuzhiyun  *	Non-zero - Failure
604*4882a593Smuzhiyun  */
ibmvmc_send_close(struct ibmvmc_hmc * hmc)605*4882a593Smuzhiyun static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
606*4882a593Smuzhiyun {
607*4882a593Smuzhiyun 	struct ibmvmc_crq_msg crq_msg;
608*4882a593Smuzhiyun 	struct crq_server_adapter *adapter;
609*4882a593Smuzhiyun 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
610*4882a593Smuzhiyun 	int rc = 0;
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 	if (!hmc || !hmc->adapter)
613*4882a593Smuzhiyun 		return -EIO;
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 	adapter = hmc->adapter;
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 	dev_info(adapter->dev, "CRQ send: close\n");
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 	crq_msg.valid = 0x80;
620*4882a593Smuzhiyun 	crq_msg.type = VMC_MSG_CLOSE;
621*4882a593Smuzhiyun 	crq_msg.status = 0;
622*4882a593Smuzhiyun 	crq_msg.var1.rsvd = 0;
623*4882a593Smuzhiyun 	crq_msg.hmc_session = hmc->session;
624*4882a593Smuzhiyun 	crq_msg.hmc_index = hmc->index;
625*4882a593Smuzhiyun 	crq_msg.var2.rsvd = 0;
626*4882a593Smuzhiyun 	crq_msg.rsvd = 0;
627*4882a593Smuzhiyun 	crq_msg.var3.rsvd = 0;
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
630*4882a593Smuzhiyun 			be64_to_cpu(crq_as_u64[1]));
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	return rc;
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun /**
636*4882a593Smuzhiyun  * ibmvmc_send_capabilities - Send VMC Capabilities
637*4882a593Smuzhiyun  *
638*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
639*4882a593Smuzhiyun  *
640*4882a593Smuzhiyun  * The capabilities message is an administrative message sent after the CRQ
641*4882a593Smuzhiyun  * initialization sequence of messages and is used to exchange VMC capabilities
642*4882a593Smuzhiyun  * between the management partition and the hypervisor. The management
643*4882a593Smuzhiyun  * partition must send this message and the hypervisor must respond with VMC
644*4882a593Smuzhiyun  * capabilities Response message before HMC interface message can begin. Any
645*4882a593Smuzhiyun  * HMC interface messages received before the exchange of capabilities has
646*4882a593Smuzhiyun  * complete are dropped.
647*4882a593Smuzhiyun  *
648*4882a593Smuzhiyun  * Return:
649*4882a593Smuzhiyun  *	0 - Success
650*4882a593Smuzhiyun  */
ibmvmc_send_capabilities(struct crq_server_adapter * adapter)651*4882a593Smuzhiyun static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
652*4882a593Smuzhiyun {
653*4882a593Smuzhiyun 	struct ibmvmc_admin_crq_msg crq_msg;
654*4882a593Smuzhiyun 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
657*4882a593Smuzhiyun 	crq_msg.valid = 0x80;
658*4882a593Smuzhiyun 	crq_msg.type = VMC_MSG_CAP;
659*4882a593Smuzhiyun 	crq_msg.status = 0;
660*4882a593Smuzhiyun 	crq_msg.rsvd[0] = 0;
661*4882a593Smuzhiyun 	crq_msg.rsvd[1] = 0;
662*4882a593Smuzhiyun 	crq_msg.max_hmc = ibmvmc_max_hmcs;
663*4882a593Smuzhiyun 	crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
664*4882a593Smuzhiyun 	crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
665*4882a593Smuzhiyun 	crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
666*4882a593Smuzhiyun 	crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
669*4882a593Smuzhiyun 			be64_to_cpu(crq_as_u64[1]));
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 	ibmvmc.state = ibmvmc_state_capabilities;
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	return 0;
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun /**
677*4882a593Smuzhiyun  * ibmvmc_send_add_buffer_resp - Add Buffer Response
678*4882a593Smuzhiyun  *
679*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
680*4882a593Smuzhiyun  * @status:	Status field
681*4882a593Smuzhiyun  * @hmc_session: HMC Session field
682*4882a593Smuzhiyun  * @hmc_index:	HMC Index field
683*4882a593Smuzhiyun  * @buffer_id:	Buffer Id field
684*4882a593Smuzhiyun  *
685*4882a593Smuzhiyun  * This command is sent by the management partition to the hypervisor in
686*4882a593Smuzhiyun  * response to the Add Buffer message. The Status field indicates the result of
687*4882a593Smuzhiyun  * the command.
688*4882a593Smuzhiyun  *
689*4882a593Smuzhiyun  * Return:
690*4882a593Smuzhiyun  *	0 - Success
691*4882a593Smuzhiyun  */
ibmvmc_send_add_buffer_resp(struct crq_server_adapter * adapter,u8 status,u8 hmc_session,u8 hmc_index,u16 buffer_id)692*4882a593Smuzhiyun static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
693*4882a593Smuzhiyun 				       u8 status, u8 hmc_session,
694*4882a593Smuzhiyun 				       u8 hmc_index, u16 buffer_id)
695*4882a593Smuzhiyun {
696*4882a593Smuzhiyun 	struct ibmvmc_crq_msg crq_msg;
697*4882a593Smuzhiyun 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
700*4882a593Smuzhiyun 	crq_msg.valid = 0x80;
701*4882a593Smuzhiyun 	crq_msg.type = VMC_MSG_ADD_BUF_RESP;
702*4882a593Smuzhiyun 	crq_msg.status = status;
703*4882a593Smuzhiyun 	crq_msg.var1.rsvd = 0;
704*4882a593Smuzhiyun 	crq_msg.hmc_session = hmc_session;
705*4882a593Smuzhiyun 	crq_msg.hmc_index = hmc_index;
706*4882a593Smuzhiyun 	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
707*4882a593Smuzhiyun 	crq_msg.rsvd = 0;
708*4882a593Smuzhiyun 	crq_msg.var3.rsvd = 0;
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
711*4882a593Smuzhiyun 			be64_to_cpu(crq_as_u64[1]));
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 	return 0;
714*4882a593Smuzhiyun }
715*4882a593Smuzhiyun 
716*4882a593Smuzhiyun /**
717*4882a593Smuzhiyun  * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
718*4882a593Smuzhiyun  *
719*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
720*4882a593Smuzhiyun  * @status:	Status field
721*4882a593Smuzhiyun  * @hmc_session: HMC Session field
722*4882a593Smuzhiyun  * @hmc_index:	HMC Index field
723*4882a593Smuzhiyun  * @buffer_id:	Buffer Id field
724*4882a593Smuzhiyun  *
725*4882a593Smuzhiyun  * This command is sent by the management partition to the hypervisor in
726*4882a593Smuzhiyun  * response to the Remove Buffer message. The Buffer ID field indicates
727*4882a593Smuzhiyun  * which buffer the management partition selected to remove. The Status
728*4882a593Smuzhiyun  * field indicates the result of the command.
729*4882a593Smuzhiyun  *
730*4882a593Smuzhiyun  * Return:
731*4882a593Smuzhiyun  *	0 - Success
732*4882a593Smuzhiyun  */
ibmvmc_send_rem_buffer_resp(struct crq_server_adapter * adapter,u8 status,u8 hmc_session,u8 hmc_index,u16 buffer_id)733*4882a593Smuzhiyun static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
734*4882a593Smuzhiyun 				       u8 status, u8 hmc_session,
735*4882a593Smuzhiyun 				       u8 hmc_index, u16 buffer_id)
736*4882a593Smuzhiyun {
737*4882a593Smuzhiyun 	struct ibmvmc_crq_msg crq_msg;
738*4882a593Smuzhiyun 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
741*4882a593Smuzhiyun 	crq_msg.valid = 0x80;
742*4882a593Smuzhiyun 	crq_msg.type = VMC_MSG_REM_BUF_RESP;
743*4882a593Smuzhiyun 	crq_msg.status = status;
744*4882a593Smuzhiyun 	crq_msg.var1.rsvd = 0;
745*4882a593Smuzhiyun 	crq_msg.hmc_session = hmc_session;
746*4882a593Smuzhiyun 	crq_msg.hmc_index = hmc_index;
747*4882a593Smuzhiyun 	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
748*4882a593Smuzhiyun 	crq_msg.rsvd = 0;
749*4882a593Smuzhiyun 	crq_msg.var3.rsvd = 0;
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
752*4882a593Smuzhiyun 			be64_to_cpu(crq_as_u64[1]));
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	return 0;
755*4882a593Smuzhiyun }
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun /**
758*4882a593Smuzhiyun  * ibmvmc_send_msg - Signal Message
759*4882a593Smuzhiyun  *
760*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
761*4882a593Smuzhiyun  * @buffer:	ibmvmc_buffer struct
762*4882a593Smuzhiyun  * @hmc:	ibmvmc_hmc struct
763*4882a593Smuzhiyun  * @msg_len:	message length field
764*4882a593Smuzhiyun  *
765*4882a593Smuzhiyun  * This command is sent between the management partition and the hypervisor
766*4882a593Smuzhiyun  * in order to signal the arrival of an HMC protocol message. The command
767*4882a593Smuzhiyun  * can be sent by both the management partition and the hypervisor. It is
768*4882a593Smuzhiyun  * used for all traffic between the management application and the hypervisor,
769*4882a593Smuzhiyun  * regardless of who initiated the communication.
770*4882a593Smuzhiyun  *
771*4882a593Smuzhiyun  * There is no response to this message.
772*4882a593Smuzhiyun  *
773*4882a593Smuzhiyun  * Return:
774*4882a593Smuzhiyun  *	0 - Success
775*4882a593Smuzhiyun  *	Non-zero - Failure
776*4882a593Smuzhiyun  */
ibmvmc_send_msg(struct crq_server_adapter * adapter,struct ibmvmc_buffer * buffer,struct ibmvmc_hmc * hmc,int msg_len)777*4882a593Smuzhiyun static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
778*4882a593Smuzhiyun 			   struct ibmvmc_buffer *buffer,
779*4882a593Smuzhiyun 			   struct ibmvmc_hmc *hmc, int msg_len)
780*4882a593Smuzhiyun {
781*4882a593Smuzhiyun 	struct ibmvmc_crq_msg crq_msg;
782*4882a593Smuzhiyun 	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
783*4882a593Smuzhiyun 	int rc = 0;
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
786*4882a593Smuzhiyun 	rc = h_copy_rdma(msg_len,
787*4882a593Smuzhiyun 			 adapter->liobn,
788*4882a593Smuzhiyun 			 buffer->dma_addr_local,
789*4882a593Smuzhiyun 			 adapter->riobn,
790*4882a593Smuzhiyun 			 buffer->dma_addr_remote);
791*4882a593Smuzhiyun 	if (rc) {
792*4882a593Smuzhiyun 		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
793*4882a593Smuzhiyun 			rc);
794*4882a593Smuzhiyun 		return rc;
795*4882a593Smuzhiyun 	}
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 	crq_msg.valid = 0x80;
798*4882a593Smuzhiyun 	crq_msg.type = VMC_MSG_SIGNAL;
799*4882a593Smuzhiyun 	crq_msg.status = 0;
800*4882a593Smuzhiyun 	crq_msg.var1.rsvd = 0;
801*4882a593Smuzhiyun 	crq_msg.hmc_session = hmc->session;
802*4882a593Smuzhiyun 	crq_msg.hmc_index = hmc->index;
803*4882a593Smuzhiyun 	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
804*4882a593Smuzhiyun 	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
805*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
806*4882a593Smuzhiyun 		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	buffer->owner = VMC_BUF_OWNER_HV;
809*4882a593Smuzhiyun 	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
810*4882a593Smuzhiyun 			be64_to_cpu(crq_as_u64[1]));
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	return rc;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun /**
816*4882a593Smuzhiyun  * ibmvmc_open - Open Session
817*4882a593Smuzhiyun  *
818*4882a593Smuzhiyun  * @inode:	inode struct
819*4882a593Smuzhiyun  * @file:	file struct
820*4882a593Smuzhiyun  *
821*4882a593Smuzhiyun  * Return:
822*4882a593Smuzhiyun  *	0 - Success
823*4882a593Smuzhiyun  *	Non-zero - Failure
824*4882a593Smuzhiyun  */
ibmvmc_open(struct inode * inode,struct file * file)825*4882a593Smuzhiyun static int ibmvmc_open(struct inode *inode, struct file *file)
826*4882a593Smuzhiyun {
827*4882a593Smuzhiyun 	struct ibmvmc_file_session *session;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
830*4882a593Smuzhiyun 		 (unsigned long)inode, (unsigned long)file,
831*4882a593Smuzhiyun 		 ibmvmc.state);
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	session = kzalloc(sizeof(*session), GFP_KERNEL);
834*4882a593Smuzhiyun 	if (!session)
835*4882a593Smuzhiyun 		return -ENOMEM;
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	session->file = file;
838*4882a593Smuzhiyun 	file->private_data = session;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	return 0;
841*4882a593Smuzhiyun }
842*4882a593Smuzhiyun 
/**
 * ibmvmc_close - Close Session
 *
 * @inode:	inode struct
 * @file:	file struct
 *
 * Tears down the file session created by ibmvmc_open(). If the session had
 * an HMC connection reserved and that connection reached at least the
 * opening state, a close message is sent to the hypervisor first.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_close(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	int rc = 0;
	unsigned long flags;

	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)file, ibmvmc.state);

	session = file->private_data;
	if (!session)
		return -EIO;

	/* The session only has an hmc if the SETHMCID ioctl succeeded. */
	hmc = session->hmc;
	if (hmc) {
		if (!hmc->adapter)
			return -EIO;

		if (ibmvmc.state == ibmvmc_state_failed) {
			dev_warn(hmc->adapter->dev, "close: state_failed\n");
			return -EIO;
		}

		/* Hold hmc->lock while queueing the close so it serializes
		 * with concurrent read/write/ioctl on the same connection.
		 */
		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->state >= ibmhmc_state_opening) {
			rc = ibmvmc_send_close(hmc);
			if (rc)
				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
		}
		spin_unlock_irqrestore(&hmc->lock, flags);
	}

	/* The session may have carried an HMC id; zeroize on free. */
	kfree_sensitive(session);

	return rc;
}
890*4882a593Smuzhiyun 
/**
 * ibmvmc_read - Read
 *
 * @file:	file struct
 * @buf:	Character buffer
 * @nbytes:	Size in bytes
 * @ppos:	Offset
 *
 * Blocks (unless O_NONBLOCK) until an outbound message is queued on the
 * session's HMC connection, then copies one message to userspace and
 * releases its buffer back to the pool.
 *
 * Return:
 *	Number of bytes copied - Success
 *	Negative errno - Failure
 */
static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	struct crq_server_adapter *adapter;
	struct ibmvmc_buffer *buffer;
	ssize_t n;
	ssize_t retval = 0;
	unsigned long flags;
	DEFINE_WAIT(wait);

	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
		 (unsigned long)file, (unsigned long)buf,
		 (unsigned long)nbytes);

	if (nbytes == 0)
		return 0;

	/* Reads are capped at the negotiated maximum message size. */
	if (nbytes > ibmvmc.max_mtu) {
		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
			(unsigned int)nbytes);
		return -EINVAL;
	}

	session = file->private_data;
	if (!session) {
		pr_warn("ibmvmc: read: no session\n");
		return -EIO;
	}

	hmc = session->hmc;
	if (!hmc) {
		pr_warn("ibmvmc: read: no hmc\n");
		return -EIO;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		pr_warn("ibmvmc: read: no adapter\n");
		return -EIO;
	}

	/* Sleep until the outbound message queue is non-empty. NOTE: the
	 * break below deliberately exits the loop WITH hmc->lock held so
	 * the dequeue that follows is atomic with the emptiness check.
	 */
	do {
		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->queue_tail != hmc->queue_head)
			/* Data is available */
			break;

		spin_unlock_irqrestore(&hmc->lock, flags);

		if (!session->valid) {
			retval = -EBADFD;
			goto out;
		}
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		}

		schedule();

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
	} while (1);

	/* Dequeue one message (queue_tail wraps circularly), still locked. */
	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
	hmc->queue_tail++;
	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
		hmc->queue_tail = 0;
	spin_unlock_irqrestore(&hmc->lock, flags);

	/* Truncate to the caller's buffer; the rest of the message is lost
	 * once the buffer is freed below.
	 */
	nbytes = min_t(size_t, nbytes, buffer->msg_len);
	n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
	ibmvmc_free_hmc_buffer(hmc, buffer);
	retval = nbytes;

	if (n) {
		dev_warn(adapter->dev, "read: copy to user failed.\n");
		retval = -EFAULT;
	}

 out:
	finish_wait(&ibmvmc_read_wait, &wait);
	dev_dbg(adapter->dev, "read: out %ld\n", retval);
	return retval;
}
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun /**
997*4882a593Smuzhiyun  * ibmvmc_poll - Poll
998*4882a593Smuzhiyun  *
999*4882a593Smuzhiyun  * @file:	file struct
1000*4882a593Smuzhiyun  * @wait:	Poll Table
1001*4882a593Smuzhiyun  *
1002*4882a593Smuzhiyun  * Return:
1003*4882a593Smuzhiyun  *	poll.h return values
1004*4882a593Smuzhiyun  */
ibmvmc_poll(struct file * file,poll_table * wait)1005*4882a593Smuzhiyun static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
1006*4882a593Smuzhiyun {
1007*4882a593Smuzhiyun 	struct ibmvmc_file_session *session;
1008*4882a593Smuzhiyun 	struct ibmvmc_hmc *hmc;
1009*4882a593Smuzhiyun 	unsigned int mask = 0;
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	session = file->private_data;
1012*4882a593Smuzhiyun 	if (!session)
1013*4882a593Smuzhiyun 		return 0;
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	hmc = session->hmc;
1016*4882a593Smuzhiyun 	if (!hmc)
1017*4882a593Smuzhiyun 		return 0;
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	poll_wait(file, &ibmvmc_read_wait, wait);
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	if (hmc->queue_head != hmc->queue_tail)
1022*4882a593Smuzhiyun 		mask |= POLLIN | POLLRDNORM;
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 	return mask;
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun /**
1028*4882a593Smuzhiyun  * ibmvmc_write - Write
1029*4882a593Smuzhiyun  *
1030*4882a593Smuzhiyun  * @file:	file struct
1031*4882a593Smuzhiyun  * @buffer:	Character buffer
1032*4882a593Smuzhiyun  * @count:	Count field
1033*4882a593Smuzhiyun  * @ppos:	Offset
1034*4882a593Smuzhiyun  *
1035*4882a593Smuzhiyun  * Return:
1036*4882a593Smuzhiyun  *	0 - Success
1037*4882a593Smuzhiyun  *	Non-zero - Failure
1038*4882a593Smuzhiyun  */
ibmvmc_write(struct file * file,const char * buffer,size_t count,loff_t * ppos)1039*4882a593Smuzhiyun static ssize_t ibmvmc_write(struct file *file, const char *buffer,
1040*4882a593Smuzhiyun 			    size_t count, loff_t *ppos)
1041*4882a593Smuzhiyun {
1042*4882a593Smuzhiyun 	struct ibmvmc_buffer *vmc_buffer;
1043*4882a593Smuzhiyun 	struct ibmvmc_file_session *session;
1044*4882a593Smuzhiyun 	struct crq_server_adapter *adapter;
1045*4882a593Smuzhiyun 	struct ibmvmc_hmc *hmc;
1046*4882a593Smuzhiyun 	unsigned char *buf;
1047*4882a593Smuzhiyun 	unsigned long flags;
1048*4882a593Smuzhiyun 	size_t bytes;
1049*4882a593Smuzhiyun 	const char *p = buffer;
1050*4882a593Smuzhiyun 	size_t c = count;
1051*4882a593Smuzhiyun 	int ret = 0;
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	session = file->private_data;
1054*4882a593Smuzhiyun 	if (!session)
1055*4882a593Smuzhiyun 		return -EIO;
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	hmc = session->hmc;
1058*4882a593Smuzhiyun 	if (!hmc)
1059*4882a593Smuzhiyun 		return -EIO;
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	spin_lock_irqsave(&hmc->lock, flags);
1062*4882a593Smuzhiyun 	if (hmc->state == ibmhmc_state_free) {
1063*4882a593Smuzhiyun 		/* HMC connection is not valid (possibly was reset under us). */
1064*4882a593Smuzhiyun 		ret = -EIO;
1065*4882a593Smuzhiyun 		goto out;
1066*4882a593Smuzhiyun 	}
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	adapter = hmc->adapter;
1069*4882a593Smuzhiyun 	if (!adapter) {
1070*4882a593Smuzhiyun 		ret = -EIO;
1071*4882a593Smuzhiyun 		goto out;
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	if (count > ibmvmc.max_mtu) {
1075*4882a593Smuzhiyun 		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
1076*4882a593Smuzhiyun 			 (unsigned long)count);
1077*4882a593Smuzhiyun 		ret = -EIO;
1078*4882a593Smuzhiyun 		goto out;
1079*4882a593Smuzhiyun 	}
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	/* Waiting for the open resp message to the ioctl(1) - retry */
1082*4882a593Smuzhiyun 	if (hmc->state == ibmhmc_state_opening) {
1083*4882a593Smuzhiyun 		ret = -EBUSY;
1084*4882a593Smuzhiyun 		goto out;
1085*4882a593Smuzhiyun 	}
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	/* Make sure the ioctl() was called & the open msg sent, and that
1088*4882a593Smuzhiyun 	 * the HMC connection has not failed.
1089*4882a593Smuzhiyun 	 */
1090*4882a593Smuzhiyun 	if (hmc->state != ibmhmc_state_ready) {
1091*4882a593Smuzhiyun 		ret = -EIO;
1092*4882a593Smuzhiyun 		goto out;
1093*4882a593Smuzhiyun 	}
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
1096*4882a593Smuzhiyun 	if (!vmc_buffer) {
1097*4882a593Smuzhiyun 		/* No buffer available for the msg send, or we have not yet
1098*4882a593Smuzhiyun 		 * completed the open/open_resp sequence.  Retry until this is
1099*4882a593Smuzhiyun 		 * complete.
1100*4882a593Smuzhiyun 		 */
1101*4882a593Smuzhiyun 		ret = -EBUSY;
1102*4882a593Smuzhiyun 		goto out;
1103*4882a593Smuzhiyun 	}
1104*4882a593Smuzhiyun 	if (!vmc_buffer->real_addr_local) {
1105*4882a593Smuzhiyun 		dev_err(adapter->dev, "no buffer storage assigned\n");
1106*4882a593Smuzhiyun 		ret = -EIO;
1107*4882a593Smuzhiyun 		goto out;
1108*4882a593Smuzhiyun 	}
1109*4882a593Smuzhiyun 	buf = vmc_buffer->real_addr_local;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	while (c > 0) {
1112*4882a593Smuzhiyun 		bytes = min_t(size_t, c, vmc_buffer->size);
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 		bytes -= copy_from_user(buf, p, bytes);
1115*4882a593Smuzhiyun 		if (!bytes) {
1116*4882a593Smuzhiyun 			ret = -EFAULT;
1117*4882a593Smuzhiyun 			goto out;
1118*4882a593Smuzhiyun 		}
1119*4882a593Smuzhiyun 		c -= bytes;
1120*4882a593Smuzhiyun 		p += bytes;
1121*4882a593Smuzhiyun 	}
1122*4882a593Smuzhiyun 	if (p == buffer)
1123*4882a593Smuzhiyun 		goto out;
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
1126*4882a593Smuzhiyun 	mark_inode_dirty(file->f_path.dentry->d_inode);
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
1129*4882a593Smuzhiyun 		(unsigned long)file, (unsigned long)count);
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
1132*4882a593Smuzhiyun 	ret = p - buffer;
1133*4882a593Smuzhiyun  out:
1134*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hmc->lock, flags);
1135*4882a593Smuzhiyun 	return (ssize_t)(ret);
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun /**
1139*4882a593Smuzhiyun  * ibmvmc_setup_hmc - Setup the HMC
1140*4882a593Smuzhiyun  *
1141*4882a593Smuzhiyun  * @session:	ibmvmc_file_session struct
1142*4882a593Smuzhiyun  *
1143*4882a593Smuzhiyun  * Return:
1144*4882a593Smuzhiyun  *	0 - Success
1145*4882a593Smuzhiyun  *	Non-zero - Failure
1146*4882a593Smuzhiyun  */
ibmvmc_setup_hmc(struct ibmvmc_file_session * session)1147*4882a593Smuzhiyun static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
1148*4882a593Smuzhiyun {
1149*4882a593Smuzhiyun 	struct ibmvmc_hmc *hmc;
1150*4882a593Smuzhiyun 	unsigned int valid, free, index;
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	if (ibmvmc.state == ibmvmc_state_failed) {
1153*4882a593Smuzhiyun 		pr_warn("ibmvmc: Reserve HMC: state_failed\n");
1154*4882a593Smuzhiyun 		return -EIO;
1155*4882a593Smuzhiyun 	}
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	if (ibmvmc.state < ibmvmc_state_ready) {
1158*4882a593Smuzhiyun 		pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
1159*4882a593Smuzhiyun 		return -EAGAIN;
1160*4882a593Smuzhiyun 	}
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	/* Device is busy until capabilities have been exchanged and we
1163*4882a593Smuzhiyun 	 * have a generic buffer for each possible HMC connection.
1164*4882a593Smuzhiyun 	 */
1165*4882a593Smuzhiyun 	for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
1166*4882a593Smuzhiyun 		valid = 0;
1167*4882a593Smuzhiyun 		ibmvmc_count_hmc_buffers(index, &valid, &free);
1168*4882a593Smuzhiyun 		if (valid == 0) {
1169*4882a593Smuzhiyun 			pr_warn("ibmvmc: buffers not ready for index %d\n",
1170*4882a593Smuzhiyun 				index);
1171*4882a593Smuzhiyun 			return -ENOBUFS;
1172*4882a593Smuzhiyun 		}
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	/* Get an hmc object, and transition to ibmhmc_state_initial */
1176*4882a593Smuzhiyun 	hmc = ibmvmc_get_free_hmc();
1177*4882a593Smuzhiyun 	if (!hmc) {
1178*4882a593Smuzhiyun 		pr_warn("%s: free hmc not found\n", __func__);
1179*4882a593Smuzhiyun 		return -EBUSY;
1180*4882a593Smuzhiyun 	}
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	hmc->session = hmc->session + 1;
1183*4882a593Smuzhiyun 	if (hmc->session == 0xff)
1184*4882a593Smuzhiyun 		hmc->session = 1;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	session->hmc = hmc;
1187*4882a593Smuzhiyun 	hmc->adapter = &ibmvmc_adapter;
1188*4882a593Smuzhiyun 	hmc->file_session = session;
1189*4882a593Smuzhiyun 	session->valid = 1;
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	return 0;
1192*4882a593Smuzhiyun }
1193*4882a593Smuzhiyun 
/**
 * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
 *
 * @session:	ibmvmc_file_session struct
 * @new_hmc_id:	HMC id field
 *
 * IOCTL command to setup the hmc id. Reserves an HMC connection if the
 * session does not have one yet, copies the HMC id from userspace, and
 * kicks off the open handshake with the hypervisor.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
				  unsigned char __user *new_hmc_id)
{
	struct ibmvmc_hmc *hmc;
	struct ibmvmc_buffer *buffer;
	size_t bytes;
	char print_buffer[HMC_ID_LEN + 1];
	unsigned long flags;
	long rc = 0;

	/* Reserve HMC session */
	hmc = session->hmc;
	if (!hmc) {
		rc = ibmvmc_setup_hmc(session);
		if (rc)
			return rc;

		hmc = session->hmc;
		if (!hmc) {
			pr_err("ibmvmc: setup_hmc success but no hmc\n");
			return -EIO;
		}
	}

	/* The open message may only be sent once per connection. */
	if (hmc->state != ibmhmc_state_initial) {
		pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
			hmc->state);
		return -EIO;
	}

	bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
	if (bytes)
		return -EFAULT;

	/* Send Open Session command */
	spin_lock_irqsave(&hmc->lock, flags);
	buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	spin_unlock_irqrestore(&hmc->lock, flags);

	/* NOTE(review): the buffer claimed above is used after hmc->lock is
	 * dropped; presumably nothing else can touch it while the
	 * connection is still in the initial state — confirm.
	 */
	if (!buffer || !buffer->real_addr_local) {
		pr_warn("ibmvmc: sethmcid: no buffer available\n");
		return -EIO;
	}

	/* Make sure buffer is NULL terminated before trying to print it */
	memset(print_buffer, 0, HMC_ID_LEN + 1);
	strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
	pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);

	memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
	/* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
	rc = ibmvmc_send_open(buffer, hmc);

	return rc;
}
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun /**
1263*4882a593Smuzhiyun  * ibmvmc_ioctl_query - IOCTL Query
1264*4882a593Smuzhiyun  *
1265*4882a593Smuzhiyun  * @session:	ibmvmc_file_session struct
1266*4882a593Smuzhiyun  * @ret_struct:	ibmvmc_query_struct
1267*4882a593Smuzhiyun  *
1268*4882a593Smuzhiyun  * Return:
1269*4882a593Smuzhiyun  *	0 - Success
1270*4882a593Smuzhiyun  *	Non-zero - Failure
1271*4882a593Smuzhiyun  */
ibmvmc_ioctl_query(struct ibmvmc_file_session * session,struct ibmvmc_query_struct __user * ret_struct)1272*4882a593Smuzhiyun static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
1273*4882a593Smuzhiyun 			       struct ibmvmc_query_struct __user *ret_struct)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun 	struct ibmvmc_query_struct query_struct;
1276*4882a593Smuzhiyun 	size_t bytes;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	memset(&query_struct, 0, sizeof(query_struct));
1279*4882a593Smuzhiyun 	query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
1280*4882a593Smuzhiyun 	query_struct.state = ibmvmc.state;
1281*4882a593Smuzhiyun 	query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	bytes = copy_to_user(ret_struct, &query_struct,
1284*4882a593Smuzhiyun 			     sizeof(query_struct));
1285*4882a593Smuzhiyun 	if (bytes)
1286*4882a593Smuzhiyun 		return -EFAULT;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	return 0;
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun /**
1292*4882a593Smuzhiyun  * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
1293*4882a593Smuzhiyun  *
1294*4882a593Smuzhiyun  * @session:	ibmvmc_file_session struct
1295*4882a593Smuzhiyun  * @ret_vmc_index:	VMC Index
1296*4882a593Smuzhiyun  *
1297*4882a593Smuzhiyun  * Return:
1298*4882a593Smuzhiyun  *	0 - Success
1299*4882a593Smuzhiyun  *	Non-zero - Failure
1300*4882a593Smuzhiyun  */
ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session * session,u32 __user * ret_vmc_index)1301*4882a593Smuzhiyun static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
1302*4882a593Smuzhiyun 				    u32 __user *ret_vmc_index)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	/* TODO: (adreznec) Add locking to control multiple process access */
1305*4882a593Smuzhiyun 	size_t bytes;
1306*4882a593Smuzhiyun 	long rc;
1307*4882a593Smuzhiyun 	u32 vmc_drc_index;
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	/* Call to request the VMC device from phyp*/
1310*4882a593Smuzhiyun 	rc = h_request_vmc(&vmc_drc_index);
1311*4882a593Smuzhiyun 	pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	if (rc == H_SUCCESS) {
1314*4882a593Smuzhiyun 		rc = 0;
1315*4882a593Smuzhiyun 	} else if (rc == H_FUNCTION) {
1316*4882a593Smuzhiyun 		pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
1317*4882a593Smuzhiyun 		return -EPERM;
1318*4882a593Smuzhiyun 	} else if (rc == H_AUTHORITY) {
1319*4882a593Smuzhiyun 		pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
1320*4882a593Smuzhiyun 		return -EPERM;
1321*4882a593Smuzhiyun 	} else if (rc == H_HARDWARE) {
1322*4882a593Smuzhiyun 		pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
1323*4882a593Smuzhiyun 		return -EIO;
1324*4882a593Smuzhiyun 	} else if (rc == H_RESOURCE) {
1325*4882a593Smuzhiyun 		pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
1326*4882a593Smuzhiyun 		return -ENODEV;
1327*4882a593Smuzhiyun 	} else if (rc == H_NOT_AVAILABLE) {
1328*4882a593Smuzhiyun 		pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
1329*4882a593Smuzhiyun 		return -EPERM;
1330*4882a593Smuzhiyun 	} else if (rc == H_PARAMETER) {
1331*4882a593Smuzhiyun 		pr_err("ibmvmc: requestvmc: invalid parameter\n");
1332*4882a593Smuzhiyun 		return -EINVAL;
1333*4882a593Smuzhiyun 	}
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	/* Success, set the vmc index in global struct */
1336*4882a593Smuzhiyun 	ibmvmc.vmc_drc_index = vmc_drc_index;
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
1339*4882a593Smuzhiyun 			     sizeof(*ret_vmc_index));
1340*4882a593Smuzhiyun 	if (bytes) {
1341*4882a593Smuzhiyun 		pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
1342*4882a593Smuzhiyun 		return -EFAULT;
1343*4882a593Smuzhiyun 	}
1344*4882a593Smuzhiyun 	return rc;
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun /**
1348*4882a593Smuzhiyun  * ibmvmc_ioctl - IOCTL
1349*4882a593Smuzhiyun  *
1350*4882a593Smuzhiyun  * @file:	file information
1351*4882a593Smuzhiyun  * @cmd:	cmd field
1352*4882a593Smuzhiyun  * @arg:	Argument field
1353*4882a593Smuzhiyun  *
1354*4882a593Smuzhiyun  * Return:
1355*4882a593Smuzhiyun  *	0 - Success
1356*4882a593Smuzhiyun  *	Non-zero - Failure
1357*4882a593Smuzhiyun  */
ibmvmc_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1358*4882a593Smuzhiyun static long ibmvmc_ioctl(struct file *file,
1359*4882a593Smuzhiyun 			 unsigned int cmd, unsigned long arg)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun 	struct ibmvmc_file_session *session = file->private_data;
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
1364*4882a593Smuzhiyun 		 (unsigned long)file, cmd, arg,
1365*4882a593Smuzhiyun 		 (unsigned long)session);
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	if (!session) {
1368*4882a593Smuzhiyun 		pr_warn("ibmvmc: ioctl: no session\n");
1369*4882a593Smuzhiyun 		return -EIO;
1370*4882a593Smuzhiyun 	}
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	switch (cmd) {
1373*4882a593Smuzhiyun 	case VMC_IOCTL_SETHMCID:
1374*4882a593Smuzhiyun 		return ibmvmc_ioctl_sethmcid(session,
1375*4882a593Smuzhiyun 			(unsigned char __user *)arg);
1376*4882a593Smuzhiyun 	case VMC_IOCTL_QUERY:
1377*4882a593Smuzhiyun 		return ibmvmc_ioctl_query(session,
1378*4882a593Smuzhiyun 			(struct ibmvmc_query_struct __user *)arg);
1379*4882a593Smuzhiyun 	case VMC_IOCTL_REQUESTVMC:
1380*4882a593Smuzhiyun 		return ibmvmc_ioctl_requestvmc(session,
1381*4882a593Smuzhiyun 			(unsigned int __user *)arg);
1382*4882a593Smuzhiyun 	default:
1383*4882a593Smuzhiyun 		pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
1384*4882a593Smuzhiyun 		return -EINVAL;
1385*4882a593Smuzhiyun 	}
1386*4882a593Smuzhiyun }
1387*4882a593Smuzhiyun 
/* File operations for the VMC character device (misc device). */
static const struct file_operations ibmvmc_fops = {
	.owner		= THIS_MODULE,
	.read		= ibmvmc_read,
	.write		= ibmvmc_write,
	.poll		= ibmvmc_poll,
	.unlocked_ioctl	= ibmvmc_ioctl,
	.open           = ibmvmc_open,
	.release        = ibmvmc_close,
};
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun /**
1399*4882a593Smuzhiyun  * ibmvmc_add_buffer - Add Buffer
1400*4882a593Smuzhiyun  *
1401*4882a593Smuzhiyun  * @adapter: crq_server_adapter struct
1402*4882a593Smuzhiyun  * @crq:	ibmvmc_crq_msg struct
1403*4882a593Smuzhiyun  *
1404*4882a593Smuzhiyun  * This message transfers a buffer from hypervisor ownership to management
1405*4882a593Smuzhiyun  * partition ownership. The LIOBA is obtained from the virtual TCE table
1406*4882a593Smuzhiyun  * associated with the hypervisor side of the VMC device, and points to a
1407*4882a593Smuzhiyun  * buffer of size MTU (as established in the capabilities exchange).
1408*4882a593Smuzhiyun  *
1409*4882a593Smuzhiyun  * Typical flow for ading buffers:
1410*4882a593Smuzhiyun  * 1. A new management application connection is opened by the management
1411*4882a593Smuzhiyun  *	partition.
1412*4882a593Smuzhiyun  * 2. The hypervisor assigns new buffers for the traffic associated with
1413*4882a593Smuzhiyun  *	that connection.
1414*4882a593Smuzhiyun  * 3. The hypervisor sends VMC Add Buffer messages to the management
1415*4882a593Smuzhiyun  *	partition, informing it of the new buffers.
1416*4882a593Smuzhiyun  * 4. The hypervisor sends an HMC protocol message (to the management
1417*4882a593Smuzhiyun  *	application) notifying it of the new buffers. This informs the
1418*4882a593Smuzhiyun  *	application that it has buffers available for sending HMC
1419*4882a593Smuzhiyun  *	commands.
1420*4882a593Smuzhiyun  *
1421*4882a593Smuzhiyun  * Return:
1422*4882a593Smuzhiyun  *	0 - Success
1423*4882a593Smuzhiyun  *	Non-zero - Failure
1424*4882a593Smuzhiyun  */
ibmvmc_add_buffer(struct crq_server_adapter * adapter,struct ibmvmc_crq_msg * crq)1425*4882a593Smuzhiyun static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
1426*4882a593Smuzhiyun 			     struct ibmvmc_crq_msg *crq)
1427*4882a593Smuzhiyun {
1428*4882a593Smuzhiyun 	struct ibmvmc_buffer *buffer;
1429*4882a593Smuzhiyun 	u8 hmc_index;
1430*4882a593Smuzhiyun 	u8 hmc_session;
1431*4882a593Smuzhiyun 	u16 buffer_id;
1432*4882a593Smuzhiyun 	unsigned long flags;
1433*4882a593Smuzhiyun 	int rc = 0;
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	if (!crq)
1436*4882a593Smuzhiyun 		return -1;
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun 	hmc_session = crq->hmc_session;
1439*4882a593Smuzhiyun 	hmc_index = crq->hmc_index;
1440*4882a593Smuzhiyun 	buffer_id = be16_to_cpu(crq->var2.buffer_id);
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	if (hmc_index > ibmvmc.max_hmc_index) {
1443*4882a593Smuzhiyun 		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
1444*4882a593Smuzhiyun 			hmc_index);
1445*4882a593Smuzhiyun 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1446*4882a593Smuzhiyun 					    hmc_session, hmc_index, buffer_id);
1447*4882a593Smuzhiyun 		return -1;
1448*4882a593Smuzhiyun 	}
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1451*4882a593Smuzhiyun 		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
1452*4882a593Smuzhiyun 			buffer_id);
1453*4882a593Smuzhiyun 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1454*4882a593Smuzhiyun 					    hmc_session, hmc_index, buffer_id);
1455*4882a593Smuzhiyun 		return -1;
1456*4882a593Smuzhiyun 	}
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
1459*4882a593Smuzhiyun 	buffer = &hmcs[hmc_index].buffer[buffer_id];
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	if (buffer->real_addr_local || buffer->dma_addr_local) {
1462*4882a593Smuzhiyun 		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
1463*4882a593Smuzhiyun 			 (unsigned long)buffer_id);
1464*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1465*4882a593Smuzhiyun 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1466*4882a593Smuzhiyun 					    hmc_session, hmc_index, buffer_id);
1467*4882a593Smuzhiyun 		return -1;
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
1471*4882a593Smuzhiyun 						   ibmvmc.max_mtu,
1472*4882a593Smuzhiyun 						   &buffer->dma_addr_local);
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	if (!buffer->real_addr_local) {
1475*4882a593Smuzhiyun 		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
1476*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1477*4882a593Smuzhiyun 		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
1478*4882a593Smuzhiyun 					    hmc_session, hmc_index, buffer_id);
1479*4882a593Smuzhiyun 		return -1;
1480*4882a593Smuzhiyun 	}
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
1483*4882a593Smuzhiyun 	buffer->size = ibmvmc.max_mtu;
1484*4882a593Smuzhiyun 	buffer->owner = crq->var1.owner;
1485*4882a593Smuzhiyun 	buffer->free = 1;
1486*4882a593Smuzhiyun 	/* Must ensure valid==1 is observable only after all other fields are */
1487*4882a593Smuzhiyun 	dma_wmb();
1488*4882a593Smuzhiyun 	buffer->valid = 1;
1489*4882a593Smuzhiyun 	buffer->id = buffer_id;
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
1492*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "   index: %d, session: %d, buffer: 0x%x, owner: %d\n",
1493*4882a593Smuzhiyun 		hmc_index, hmc_session, buffer_id, buffer->owner);
1494*4882a593Smuzhiyun 	dev_dbg(adapter->dev, "   local: 0x%x, remote: 0x%x\n",
1495*4882a593Smuzhiyun 		(u32)buffer->dma_addr_local,
1496*4882a593Smuzhiyun 		(u32)buffer->dma_addr_remote);
1497*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
1500*4882a593Smuzhiyun 				    hmc_index, buffer_id);
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	return rc;
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun 
/**
 * ibmvmc_rem_buffer - Remove Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * This message requests an HMC buffer to be transferred from management
 * partition ownership to hypervisor ownership. The management partition may
 * not be able to satisfy the request at a particular point in time if all its
 * buffers are in use. The management partition requires a depth of at least
 * one inbound buffer to allow management application commands to flow to the
 * hypervisor. It is, therefore, an interface error for the hypervisor to
 * attempt to remove the management partition's last buffer.
 *
 * The hypervisor is expected to manage buffer usage with the management
 * application directly and inform the management partition when buffers may be
 * removed. The typical flow for removing buffers:
 *
 * 1. The management application no longer needs a communication path to a
 *	particular hypervisor function. That function is closed.
 * 2. The hypervisor and the management application quiesce all traffic to that
 *	function. The hypervisor requests a reduction in buffer pool size.
 * 3. The management application acknowledges the reduction in buffer pool size.
 * 4. The hypervisor sends a Remove Buffer message to the management partition,
 *	informing it of the reduction in buffers.
 * 5. The management partition verifies it can remove the buffer. This is
 *	possible if buffers have been quiesced.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
/*
 * The hypervisor requested that we pick an unused buffer, and return it.
 * Before sending the buffer back, we free any storage associated with the
 * buffer.
 */
static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id = 0;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;

	/* Index must be within the range negotiated during capabilities. */
	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
			 hmc_index);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	/*
	 * Hold the per-HMC lock across buffer selection, free, and clear so
	 * readers/writers cannot race with the removal.
	 */
	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
	if (!buffer) {
		/* All buffers in use: tell the hypervisor we have none to give. */
		dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
					    hmc_session, hmc_index,
					    VMC_INVALID_BUFFER_ID);
		return -1;
	}

	/* Capture the id before the struct is wiped below. */
	buffer_id = buffer->id;

	if (buffer->valid)
		free_dma_buffer(to_vio_dev(adapter->dev),
				ibmvmc.max_mtu,
				buffer->real_addr_local,
				buffer->dma_addr_local);

	/* Reset the slot (valid=0, addresses NULL) so it can be re-added later. */
	memset(buffer, 0, sizeof(struct ibmvmc_buffer));
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
	ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}
1594*4882a593Smuzhiyun 
/**
 * ibmvmc_recv_msg - Receive a signal message from the hypervisor
 *
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct describing the inbound buffer
 *
 * Validates the HMC index, buffer id and connection state, RDMAs the
 * message payload from the hypervisor-side buffer into the local DMA
 * buffer, queues the buffer id on the HMC's outbound-to-userspace queue,
 * and wakes any reader blocked on ibmvmc_read_wait.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	/* Bounds-check untrusted fields before indexing any arrays. */
	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us). */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	/* The buffer must exist and currently be owned by the hypervisor. */
	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV.  0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition. */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	/* Buffer now holds an inbound message and belongs to this partition. */
	buffer->msg_len = msg_len;
	buffer->free = 0;
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Must be locked because read operates on the same data */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	/* Head catching tail means the ring overflowed: messages were lost. */
	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	/* A message is ready: unblock any sleeping read()/poll() callers. */
	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun /**
1694*4882a593Smuzhiyun  * ibmvmc_process_capabilities - Process Capabilities
1695*4882a593Smuzhiyun  *
1696*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
1697*4882a593Smuzhiyun  * @crqp:	ibmvmc_crq_msg struct
1698*4882a593Smuzhiyun  *
1699*4882a593Smuzhiyun  */
ibmvmc_process_capabilities(struct crq_server_adapter * adapter,struct ibmvmc_crq_msg * crqp)1700*4882a593Smuzhiyun static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
1701*4882a593Smuzhiyun 					struct ibmvmc_crq_msg *crqp)
1702*4882a593Smuzhiyun {
1703*4882a593Smuzhiyun 	struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun 	if ((be16_to_cpu(crq->version) >> 8) !=
1706*4882a593Smuzhiyun 			(IBMVMC_PROTOCOL_VERSION >> 8)) {
1707*4882a593Smuzhiyun 		dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
1708*4882a593Smuzhiyun 			be16_to_cpu(crq->version),
1709*4882a593Smuzhiyun 			IBMVMC_PROTOCOL_VERSION);
1710*4882a593Smuzhiyun 		ibmvmc.state = ibmvmc_state_failed;
1711*4882a593Smuzhiyun 		return;
1712*4882a593Smuzhiyun 	}
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun 	ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
1715*4882a593Smuzhiyun 	ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
1716*4882a593Smuzhiyun 					    be16_to_cpu(crq->pool_size));
1717*4882a593Smuzhiyun 	ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
1718*4882a593Smuzhiyun 	ibmvmc.state = ibmvmc_state_ready;
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun 	dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
1721*4882a593Smuzhiyun 		 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
1722*4882a593Smuzhiyun 		 ibmvmc.max_hmc_index);
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun /**
1726*4882a593Smuzhiyun  * ibmvmc_validate_hmc_session - Validate HMC Session
1727*4882a593Smuzhiyun  *
1728*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
1729*4882a593Smuzhiyun  * @crq:	ibmvmc_crq_msg struct
1730*4882a593Smuzhiyun  *
1731*4882a593Smuzhiyun  * Return:
1732*4882a593Smuzhiyun  *	0 - Success
1733*4882a593Smuzhiyun  *	Non-zero - Failure
1734*4882a593Smuzhiyun  */
ibmvmc_validate_hmc_session(struct crq_server_adapter * adapter,struct ibmvmc_crq_msg * crq)1735*4882a593Smuzhiyun static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
1736*4882a593Smuzhiyun 				       struct ibmvmc_crq_msg *crq)
1737*4882a593Smuzhiyun {
1738*4882a593Smuzhiyun 	unsigned char hmc_index;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	hmc_index = crq->hmc_index;
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	if (crq->hmc_session == 0)
1743*4882a593Smuzhiyun 		return 0;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	if (hmc_index > ibmvmc.max_hmc_index)
1746*4882a593Smuzhiyun 		return -1;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	if (hmcs[hmc_index].session != crq->hmc_session) {
1749*4882a593Smuzhiyun 		dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
1750*4882a593Smuzhiyun 			 hmcs[hmc_index].session, crq->hmc_session);
1751*4882a593Smuzhiyun 		return -1;
1752*4882a593Smuzhiyun 	}
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	return 0;
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun /**
1758*4882a593Smuzhiyun  * ibmvmc_reset - Reset
1759*4882a593Smuzhiyun  *
1760*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
1761*4882a593Smuzhiyun  * @xport_event:	export_event field
1762*4882a593Smuzhiyun  *
1763*4882a593Smuzhiyun  * Closes all HMC sessions and conditionally schedules a CRQ reset.
1764*4882a593Smuzhiyun  * @xport_event: If true, the partner closed their CRQ; we don't need to reset.
1765*4882a593Smuzhiyun  *               If false, we need to schedule a CRQ reset.
1766*4882a593Smuzhiyun  */
ibmvmc_reset(struct crq_server_adapter * adapter,bool xport_event)1767*4882a593Smuzhiyun static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
1768*4882a593Smuzhiyun {
1769*4882a593Smuzhiyun 	int i;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	if (ibmvmc.state != ibmvmc_state_sched_reset) {
1772*4882a593Smuzhiyun 		dev_info(adapter->dev, "*** Reset to initial state.\n");
1773*4882a593Smuzhiyun 		for (i = 0; i < ibmvmc_max_hmcs; i++)
1774*4882a593Smuzhiyun 			ibmvmc_return_hmc(&hmcs[i], xport_event);
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 		if (xport_event) {
1777*4882a593Smuzhiyun 			/* CRQ was closed by the partner.  We don't need to do
1778*4882a593Smuzhiyun 			 * anything except set ourself to the correct state to
1779*4882a593Smuzhiyun 			 * handle init msgs.
1780*4882a593Smuzhiyun 			 */
1781*4882a593Smuzhiyun 			ibmvmc.state = ibmvmc_state_crqinit;
1782*4882a593Smuzhiyun 		} else {
1783*4882a593Smuzhiyun 			/* The partner did not close their CRQ - instead, we're
1784*4882a593Smuzhiyun 			 * closing the CRQ on our end. Need to schedule this
1785*4882a593Smuzhiyun 			 * for process context, because CRQ reset may require a
1786*4882a593Smuzhiyun 			 * sleep.
1787*4882a593Smuzhiyun 			 *
1788*4882a593Smuzhiyun 			 * Setting ibmvmc.state here immediately prevents
1789*4882a593Smuzhiyun 			 * ibmvmc_open from completing until the reset
1790*4882a593Smuzhiyun 			 * completes in process context.
1791*4882a593Smuzhiyun 			 */
1792*4882a593Smuzhiyun 			ibmvmc.state = ibmvmc_state_sched_reset;
1793*4882a593Smuzhiyun 			dev_dbg(adapter->dev, "Device reset scheduled");
1794*4882a593Smuzhiyun 			wake_up_interruptible(&adapter->reset_wait_queue);
1795*4882a593Smuzhiyun 		}
1796*4882a593Smuzhiyun 	}
1797*4882a593Smuzhiyun }
1798*4882a593Smuzhiyun 
/**
 * ibmvmc_reset_task - Reset Task
 *
 * @data:	Data field
 *
 * Performs a CRQ reset of the VMC device in process context.
 * NOTE: This function should not be called directly, use ibmvmc_reset.
 */
static int ibmvmc_reset_task(void *data)
{
	struct crq_server_adapter *adapter = data;
	int rc;

	/* Run at high priority so a pending reset is serviced promptly. */
	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		/* Sleep until ibmvmc_reset schedules a reset (or we're stopping). */
		wait_event_interruptible(adapter->reset_wait_queue,
			(ibmvmc.state == ibmvmc_state_sched_reset) ||
			kthread_should_stop());

		if (kthread_should_stop())
			break;

		dev_dbg(adapter->dev, "CRQ resetting in process context");
		/* Quiesce the interrupt tasklet while the CRQ is torn down. */
		tasklet_disable(&adapter->work_task);

		rc = ibmvmc_reset_crq_queue(adapter);

		/* H_RESOURCE means the partner still holds its end; tolerated. */
		if (rc != H_SUCCESS && rc != H_RESOURCE) {
			dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
				rc);
			ibmvmc.state = ibmvmc_state_failed;
		} else {
			ibmvmc.state = ibmvmc_state_crqinit;

			/* Kick off the CRQ handshake with an init message. */
			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
			    != 0 && rc != H_RESOURCE)
				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
		}

		/* Re-arm interrupts and the tasklet now that the CRQ is back. */
		vio_enable_interrupts(to_vio_dev(adapter->dev));
		tasklet_enable(&adapter->work_task);
	}

	return 0;
}
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun /**
1847*4882a593Smuzhiyun  * ibmvmc_process_open_resp - Process Open Response
1848*4882a593Smuzhiyun  *
1849*4882a593Smuzhiyun  * @crq: ibmvmc_crq_msg struct
1850*4882a593Smuzhiyun  * @adapter:    crq_server_adapter struct
1851*4882a593Smuzhiyun  *
1852*4882a593Smuzhiyun  * This command is sent by the hypervisor in response to the Interface
1853*4882a593Smuzhiyun  * Open message. When this message is received, the indicated buffer is
1854*4882a593Smuzhiyun  * again available for management partition use.
1855*4882a593Smuzhiyun  */
ibmvmc_process_open_resp(struct ibmvmc_crq_msg * crq,struct crq_server_adapter * adapter)1856*4882a593Smuzhiyun static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
1857*4882a593Smuzhiyun 				     struct crq_server_adapter *adapter)
1858*4882a593Smuzhiyun {
1859*4882a593Smuzhiyun 	unsigned char hmc_index;
1860*4882a593Smuzhiyun 	unsigned short buffer_id;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	hmc_index = crq->hmc_index;
1863*4882a593Smuzhiyun 	if (hmc_index > ibmvmc.max_hmc_index) {
1864*4882a593Smuzhiyun 		/* Why would PHYP give an index > max negotiated? */
1865*4882a593Smuzhiyun 		ibmvmc_reset(adapter, false);
1866*4882a593Smuzhiyun 		return;
1867*4882a593Smuzhiyun 	}
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 	if (crq->status) {
1870*4882a593Smuzhiyun 		dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
1871*4882a593Smuzhiyun 			 crq->status);
1872*4882a593Smuzhiyun 		ibmvmc_return_hmc(&hmcs[hmc_index], false);
1873*4882a593Smuzhiyun 		return;
1874*4882a593Smuzhiyun 	}
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	if (hmcs[hmc_index].state == ibmhmc_state_opening) {
1877*4882a593Smuzhiyun 		buffer_id = be16_to_cpu(crq->var2.buffer_id);
1878*4882a593Smuzhiyun 		if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1879*4882a593Smuzhiyun 			dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
1880*4882a593Smuzhiyun 				buffer_id);
1881*4882a593Smuzhiyun 			hmcs[hmc_index].state = ibmhmc_state_failed;
1882*4882a593Smuzhiyun 		} else {
1883*4882a593Smuzhiyun 			ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
1884*4882a593Smuzhiyun 					       &hmcs[hmc_index].buffer[buffer_id]);
1885*4882a593Smuzhiyun 			hmcs[hmc_index].state = ibmhmc_state_ready;
1886*4882a593Smuzhiyun 			dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
1887*4882a593Smuzhiyun 		}
1888*4882a593Smuzhiyun 	} else {
1889*4882a593Smuzhiyun 		dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
1890*4882a593Smuzhiyun 			 hmcs[hmc_index].state);
1891*4882a593Smuzhiyun 	}
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun /**
1895*4882a593Smuzhiyun  * ibmvmc_process_close_resp - Process Close Response
1896*4882a593Smuzhiyun  *
1897*4882a593Smuzhiyun  * @crq: ibmvmc_crq_msg struct
1898*4882a593Smuzhiyun  * @adapter:    crq_server_adapter struct
1899*4882a593Smuzhiyun  *
1900*4882a593Smuzhiyun  * This command is sent by the hypervisor in response to the managemant
1901*4882a593Smuzhiyun  * application Interface Close message.
1902*4882a593Smuzhiyun  *
1903*4882a593Smuzhiyun  * If the close fails, simply reset the entire driver as the state of the VMC
1904*4882a593Smuzhiyun  * must be in tough shape.
1905*4882a593Smuzhiyun  */
ibmvmc_process_close_resp(struct ibmvmc_crq_msg * crq,struct crq_server_adapter * adapter)1906*4882a593Smuzhiyun static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
1907*4882a593Smuzhiyun 				      struct crq_server_adapter *adapter)
1908*4882a593Smuzhiyun {
1909*4882a593Smuzhiyun 	unsigned char hmc_index;
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	hmc_index = crq->hmc_index;
1912*4882a593Smuzhiyun 	if (hmc_index > ibmvmc.max_hmc_index) {
1913*4882a593Smuzhiyun 		ibmvmc_reset(adapter, false);
1914*4882a593Smuzhiyun 		return;
1915*4882a593Smuzhiyun 	}
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	if (crq->status) {
1918*4882a593Smuzhiyun 		dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
1919*4882a593Smuzhiyun 			 crq->status);
1920*4882a593Smuzhiyun 		ibmvmc_reset(adapter, false);
1921*4882a593Smuzhiyun 		return;
1922*4882a593Smuzhiyun 	}
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	ibmvmc_return_hmc(&hmcs[hmc_index], false);
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun /**
1928*4882a593Smuzhiyun  * ibmvmc_crq_process - Process CRQ
1929*4882a593Smuzhiyun  *
1930*4882a593Smuzhiyun  * @adapter:    crq_server_adapter struct
1931*4882a593Smuzhiyun  * @crq:	ibmvmc_crq_msg struct
1932*4882a593Smuzhiyun  *
1933*4882a593Smuzhiyun  * Process the CRQ message based upon the type of message received.
1934*4882a593Smuzhiyun  *
1935*4882a593Smuzhiyun  */
ibmvmc_crq_process(struct crq_server_adapter * adapter,struct ibmvmc_crq_msg * crq)1936*4882a593Smuzhiyun static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
1937*4882a593Smuzhiyun 			       struct ibmvmc_crq_msg *crq)
1938*4882a593Smuzhiyun {
1939*4882a593Smuzhiyun 	switch (crq->type) {
1940*4882a593Smuzhiyun 	case VMC_MSG_CAP_RESP:
1941*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
1942*4882a593Smuzhiyun 			crq->type);
1943*4882a593Smuzhiyun 		if (ibmvmc.state == ibmvmc_state_capabilities)
1944*4882a593Smuzhiyun 			ibmvmc_process_capabilities(adapter, crq);
1945*4882a593Smuzhiyun 		else
1946*4882a593Smuzhiyun 			dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
1947*4882a593Smuzhiyun 				 ibmvmc.state);
1948*4882a593Smuzhiyun 		break;
1949*4882a593Smuzhiyun 	case VMC_MSG_OPEN_RESP:
1950*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
1951*4882a593Smuzhiyun 			crq->type);
1952*4882a593Smuzhiyun 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1953*4882a593Smuzhiyun 			ibmvmc_process_open_resp(crq, adapter);
1954*4882a593Smuzhiyun 		break;
1955*4882a593Smuzhiyun 	case VMC_MSG_ADD_BUF:
1956*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
1957*4882a593Smuzhiyun 			crq->type);
1958*4882a593Smuzhiyun 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1959*4882a593Smuzhiyun 			ibmvmc_add_buffer(adapter, crq);
1960*4882a593Smuzhiyun 		break;
1961*4882a593Smuzhiyun 	case VMC_MSG_REM_BUF:
1962*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
1963*4882a593Smuzhiyun 			crq->type);
1964*4882a593Smuzhiyun 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1965*4882a593Smuzhiyun 			ibmvmc_rem_buffer(adapter, crq);
1966*4882a593Smuzhiyun 		break;
1967*4882a593Smuzhiyun 	case VMC_MSG_SIGNAL:
1968*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
1969*4882a593Smuzhiyun 			crq->type);
1970*4882a593Smuzhiyun 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1971*4882a593Smuzhiyun 			ibmvmc_recv_msg(adapter, crq);
1972*4882a593Smuzhiyun 		break;
1973*4882a593Smuzhiyun 	case VMC_MSG_CLOSE_RESP:
1974*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
1975*4882a593Smuzhiyun 			crq->type);
1976*4882a593Smuzhiyun 		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1977*4882a593Smuzhiyun 			ibmvmc_process_close_resp(crq, adapter);
1978*4882a593Smuzhiyun 		break;
1979*4882a593Smuzhiyun 	case VMC_MSG_CAP:
1980*4882a593Smuzhiyun 	case VMC_MSG_OPEN:
1981*4882a593Smuzhiyun 	case VMC_MSG_CLOSE:
1982*4882a593Smuzhiyun 	case VMC_MSG_ADD_BUF_RESP:
1983*4882a593Smuzhiyun 	case VMC_MSG_REM_BUF_RESP:
1984*4882a593Smuzhiyun 		dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
1985*4882a593Smuzhiyun 			 crq->type);
1986*4882a593Smuzhiyun 		break;
1987*4882a593Smuzhiyun 	default:
1988*4882a593Smuzhiyun 		dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
1989*4882a593Smuzhiyun 			 crq->type);
1990*4882a593Smuzhiyun 		break;
1991*4882a593Smuzhiyun 	}
1992*4882a593Smuzhiyun }
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun /**
1995*4882a593Smuzhiyun  * ibmvmc_handle_crq_init - Handle CRQ Init
1996*4882a593Smuzhiyun  *
1997*4882a593Smuzhiyun  * @crq:	ibmvmc_crq_msg struct
1998*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
1999*4882a593Smuzhiyun  *
2000*4882a593Smuzhiyun  * Handle the type of crq initialization based on whether
2001*4882a593Smuzhiyun  * it is a message or a response.
2002*4882a593Smuzhiyun  *
2003*4882a593Smuzhiyun  */
ibmvmc_handle_crq_init(struct ibmvmc_crq_msg * crq,struct crq_server_adapter * adapter)2004*4882a593Smuzhiyun static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
2005*4882a593Smuzhiyun 				   struct crq_server_adapter *adapter)
2006*4882a593Smuzhiyun {
2007*4882a593Smuzhiyun 	switch (crq->type) {
2008*4882a593Smuzhiyun 	case 0x01:	/* Initialization message */
2009*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
2010*4882a593Smuzhiyun 			ibmvmc.state);
2011*4882a593Smuzhiyun 		if (ibmvmc.state == ibmvmc_state_crqinit) {
2012*4882a593Smuzhiyun 			/* Send back a response */
2013*4882a593Smuzhiyun 			if (ibmvmc_send_crq(adapter, 0xC002000000000000,
2014*4882a593Smuzhiyun 					    0) == 0)
2015*4882a593Smuzhiyun 				ibmvmc_send_capabilities(adapter);
2016*4882a593Smuzhiyun 			else
2017*4882a593Smuzhiyun 				dev_err(adapter->dev, " Unable to send init rsp\n");
2018*4882a593Smuzhiyun 		} else {
2019*4882a593Smuzhiyun 			dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
2020*4882a593Smuzhiyun 				ibmvmc.state, ibmvmc.max_mtu);
2021*4882a593Smuzhiyun 		}
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 		break;
2024*4882a593Smuzhiyun 	case 0x02:	/* Initialization response */
2025*4882a593Smuzhiyun 		dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
2026*4882a593Smuzhiyun 			ibmvmc.state);
2027*4882a593Smuzhiyun 		if (ibmvmc.state == ibmvmc_state_crqinit)
2028*4882a593Smuzhiyun 			ibmvmc_send_capabilities(adapter);
2029*4882a593Smuzhiyun 		break;
2030*4882a593Smuzhiyun 	default:
2031*4882a593Smuzhiyun 		dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
2032*4882a593Smuzhiyun 			 (unsigned long)crq->type);
2033*4882a593Smuzhiyun 	}
2034*4882a593Smuzhiyun }
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun /**
2037*4882a593Smuzhiyun  * ibmvmc_handle_crq - Handle CRQ
2038*4882a593Smuzhiyun  *
2039*4882a593Smuzhiyun  * @crq:	ibmvmc_crq_msg struct
2040*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
2041*4882a593Smuzhiyun  *
2042*4882a593Smuzhiyun  * Read the command elements from the command queue and execute the
2043*4882a593Smuzhiyun  * requests based upon the type of crq message.
2044*4882a593Smuzhiyun  *
2045*4882a593Smuzhiyun  */
ibmvmc_handle_crq(struct ibmvmc_crq_msg * crq,struct crq_server_adapter * adapter)2046*4882a593Smuzhiyun static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
2047*4882a593Smuzhiyun 			      struct crq_server_adapter *adapter)
2048*4882a593Smuzhiyun {
2049*4882a593Smuzhiyun 	switch (crq->valid) {
2050*4882a593Smuzhiyun 	case 0xC0:		/* initialization */
2051*4882a593Smuzhiyun 		ibmvmc_handle_crq_init(crq, adapter);
2052*4882a593Smuzhiyun 		break;
2053*4882a593Smuzhiyun 	case 0xFF:	/* Hypervisor telling us the connection is closed */
2054*4882a593Smuzhiyun 		dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
2055*4882a593Smuzhiyun 		ibmvmc_reset(adapter, true);
2056*4882a593Smuzhiyun 		break;
2057*4882a593Smuzhiyun 	case 0x80:	/* real payload */
2058*4882a593Smuzhiyun 		ibmvmc_crq_process(adapter, crq);
2059*4882a593Smuzhiyun 		break;
2060*4882a593Smuzhiyun 	default:
2061*4882a593Smuzhiyun 		dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
2062*4882a593Smuzhiyun 			 crq->valid);
2063*4882a593Smuzhiyun 		break;
2064*4882a593Smuzhiyun 	}
2065*4882a593Smuzhiyun }
2066*4882a593Smuzhiyun 
/*
 * Tasklet body: drain the CRQ and dispatch each message.
 *
 * @data: the crq_server_adapter, cast through unsigned long (tasklet ABI).
 *
 * The structure of this loop is deliberate and order-sensitive: after the
 * queue appears empty, interrupts are re-enabled and the queue is checked
 * ONE more time.  This closes the race where a message arrives between the
 * last crq_queue_next_crq() and vio_enable_interrupts() - without the
 * re-check such a message could sit unprocessed until the next interrupt.
 */
static void ibmvmc_task(unsigned long data)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)data;
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct ibmvmc_crq_msg *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
			ibmvmc_handle_crq(crq, adapter);
			/* Mark the slot consumed so the hypervisor can reuse it */
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		}

		/* Queue looked empty: re-arm the interrupt, then re-check to
		 * catch a message that raced in just before re-enable.
		 */
		vio_enable_interrupts(vdev);
		crq = crq_queue_next_crq(&adapter->queue);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		} else {
			done = 1;
		}
	}
}
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun /**
2105*4882a593Smuzhiyun  * ibmvmc_init_crq_queue - Init CRQ Queue
2106*4882a593Smuzhiyun  *
2107*4882a593Smuzhiyun  * @adapter:	crq_server_adapter struct
2108*4882a593Smuzhiyun  *
2109*4882a593Smuzhiyun  * Return:
2110*4882a593Smuzhiyun  *	0 - Success
2111*4882a593Smuzhiyun  *	Non-zero - Failure
2112*4882a593Smuzhiyun  */
ibmvmc_init_crq_queue(struct crq_server_adapter * adapter)2113*4882a593Smuzhiyun static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
2114*4882a593Smuzhiyun {
2115*4882a593Smuzhiyun 	struct vio_dev *vdev = to_vio_dev(adapter->dev);
2116*4882a593Smuzhiyun 	struct crq_queue *queue = &adapter->queue;
2117*4882a593Smuzhiyun 	int rc = 0;
2118*4882a593Smuzhiyun 	int retrc = 0;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	if (!queue->msgs)
2123*4882a593Smuzhiyun 		goto malloc_failed;
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun 	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
2128*4882a593Smuzhiyun 					  queue->size * sizeof(*queue->msgs),
2129*4882a593Smuzhiyun 					  DMA_BIDIRECTIONAL);
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	if (dma_mapping_error(adapter->dev, queue->msg_token))
2132*4882a593Smuzhiyun 		goto map_failed;
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 	retrc = plpar_hcall_norets(H_REG_CRQ,
2135*4882a593Smuzhiyun 				   vdev->unit_address,
2136*4882a593Smuzhiyun 				   queue->msg_token, PAGE_SIZE);
2137*4882a593Smuzhiyun 	rc = retrc;
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	if (rc == H_RESOURCE)
2140*4882a593Smuzhiyun 		rc = ibmvmc_reset_crq_queue(adapter);
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun 	if (rc == 2) {
2143*4882a593Smuzhiyun 		dev_warn(adapter->dev, "Partner adapter not ready\n");
2144*4882a593Smuzhiyun 		retrc = 0;
2145*4882a593Smuzhiyun 	} else if (rc != 0) {
2146*4882a593Smuzhiyun 		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
2147*4882a593Smuzhiyun 		goto reg_crq_failed;
2148*4882a593Smuzhiyun 	}
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	queue->cur = 0;
2151*4882a593Smuzhiyun 	spin_lock_init(&queue->lock);
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	if (request_irq(vdev->irq,
2156*4882a593Smuzhiyun 			ibmvmc_handle_event,
2157*4882a593Smuzhiyun 			0, "ibmvmc", (void *)adapter) != 0) {
2158*4882a593Smuzhiyun 		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
2159*4882a593Smuzhiyun 			vdev->irq);
2160*4882a593Smuzhiyun 		goto req_irq_failed;
2161*4882a593Smuzhiyun 	}
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	rc = vio_enable_interrupts(vdev);
2164*4882a593Smuzhiyun 	if (rc != 0) {
2165*4882a593Smuzhiyun 		dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
2166*4882a593Smuzhiyun 		goto req_irq_failed;
2167*4882a593Smuzhiyun 	}
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	return retrc;
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun req_irq_failed:
2172*4882a593Smuzhiyun 	/* Cannot have any work since we either never got our IRQ registered,
2173*4882a593Smuzhiyun 	 * or never got interrupts enabled
2174*4882a593Smuzhiyun 	 */
2175*4882a593Smuzhiyun 	tasklet_kill(&adapter->work_task);
2176*4882a593Smuzhiyun 	h_free_crq(vdev->unit_address);
2177*4882a593Smuzhiyun reg_crq_failed:
2178*4882a593Smuzhiyun 	dma_unmap_single(adapter->dev,
2179*4882a593Smuzhiyun 			 queue->msg_token,
2180*4882a593Smuzhiyun 			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
2181*4882a593Smuzhiyun map_failed:
2182*4882a593Smuzhiyun 	free_page((unsigned long)queue->msgs);
2183*4882a593Smuzhiyun malloc_failed:
2184*4882a593Smuzhiyun 	return -ENOMEM;
2185*4882a593Smuzhiyun }
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun /* Fill in the liobn and riobn fields on the adapter */
read_dma_window(struct vio_dev * vdev,struct crq_server_adapter * adapter)2188*4882a593Smuzhiyun static int read_dma_window(struct vio_dev *vdev,
2189*4882a593Smuzhiyun 			   struct crq_server_adapter *adapter)
2190*4882a593Smuzhiyun {
2191*4882a593Smuzhiyun 	const __be32 *dma_window;
2192*4882a593Smuzhiyun 	const __be32 *prop;
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	/* TODO Using of_parse_dma_window would be better, but it doesn't give
2195*4882a593Smuzhiyun 	 * a way to read multiple windows without already knowing the size of
2196*4882a593Smuzhiyun 	 * a window or the number of windows
2197*4882a593Smuzhiyun 	 */
2198*4882a593Smuzhiyun 	dma_window =
2199*4882a593Smuzhiyun 		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
2200*4882a593Smuzhiyun 						NULL);
2201*4882a593Smuzhiyun 	if (!dma_window) {
2202*4882a593Smuzhiyun 		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
2203*4882a593Smuzhiyun 		return -1;
2204*4882a593Smuzhiyun 	}
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 	adapter->liobn = be32_to_cpu(*dma_window);
2207*4882a593Smuzhiyun 	dma_window++;
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2210*4882a593Smuzhiyun 						NULL);
2211*4882a593Smuzhiyun 	if (!prop) {
2212*4882a593Smuzhiyun 		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
2213*4882a593Smuzhiyun 		dma_window++;
2214*4882a593Smuzhiyun 	} else {
2215*4882a593Smuzhiyun 		dma_window += be32_to_cpu(*prop);
2216*4882a593Smuzhiyun 	}
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2219*4882a593Smuzhiyun 						NULL);
2220*4882a593Smuzhiyun 	if (!prop) {
2221*4882a593Smuzhiyun 		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
2222*4882a593Smuzhiyun 		dma_window++;
2223*4882a593Smuzhiyun 	} else {
2224*4882a593Smuzhiyun 		dma_window += be32_to_cpu(*prop);
2225*4882a593Smuzhiyun 	}
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 	/* dma_window should point to the second window now */
2228*4882a593Smuzhiyun 	adapter->riobn = be32_to_cpu(*dma_window);
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun 	return 0;
2231*4882a593Smuzhiyun }
2232*4882a593Smuzhiyun 
/*
 * VIO bus probe: set up the single ibmvmc adapter instance.
 *
 * Reads the DMA windows from the device tree, starts the reset kthread,
 * registers the CRQ with the hypervisor and attempts the initial CRQ
 * handshake.  On any fatal error ibmvmc.state is set to
 * ibmvmc_state_failed so the char-device entry points refuse work.
 */
static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return -1;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	/* The reset task must exist before the CRQ is live, since CRQ
	 * processing may schedule a reset.
	 */
	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	/* H_RESOURCE is tolerated: the queue was re-registered after a
	 * pre-existing registration (see ibmvmc_init_crq_queue).
	 */
	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}
2290*4882a593Smuzhiyun 
/*
 * VIO bus remove: tear down the CRQ (IRQ, tasklet, hypervisor
 * registration and DMA mapping are all released by
 * ibmvmc_release_crq_queue).
 */
static int ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);

	return 0;
}
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun static struct vio_device_id ibmvmc_device_table[] = {
2303*4882a593Smuzhiyun 	{ "ibm,vmc", "IBM,vmc" },
2304*4882a593Smuzhiyun 	{ "", "" }
2305*4882a593Smuzhiyun };
2306*4882a593Smuzhiyun MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun static struct vio_driver ibmvmc_driver = {
2309*4882a593Smuzhiyun 	.name        = ibmvmc_driver_name,
2310*4882a593Smuzhiyun 	.id_table    = ibmvmc_device_table,
2311*4882a593Smuzhiyun 	.probe       = ibmvmc_probe,
2312*4882a593Smuzhiyun 	.remove      = ibmvmc_remove,
2313*4882a593Smuzhiyun };
2314*4882a593Smuzhiyun 
ibmvmc_scrub_module_parms(void)2315*4882a593Smuzhiyun static void __init ibmvmc_scrub_module_parms(void)
2316*4882a593Smuzhiyun {
2317*4882a593Smuzhiyun 	if (ibmvmc_max_mtu > MAX_MTU) {
2318*4882a593Smuzhiyun 		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
2319*4882a593Smuzhiyun 		ibmvmc_max_mtu = MAX_MTU;
2320*4882a593Smuzhiyun 	} else if (ibmvmc_max_mtu < MIN_MTU) {
2321*4882a593Smuzhiyun 		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
2322*4882a593Smuzhiyun 		ibmvmc_max_mtu = MIN_MTU;
2323*4882a593Smuzhiyun 	}
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
2326*4882a593Smuzhiyun 		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
2327*4882a593Smuzhiyun 			MAX_BUF_POOL_SIZE);
2328*4882a593Smuzhiyun 		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
2329*4882a593Smuzhiyun 	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
2330*4882a593Smuzhiyun 		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
2331*4882a593Smuzhiyun 			MIN_BUF_POOL_SIZE);
2332*4882a593Smuzhiyun 		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
2333*4882a593Smuzhiyun 	}
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 	if (ibmvmc_max_hmcs > MAX_HMCS) {
2336*4882a593Smuzhiyun 		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
2337*4882a593Smuzhiyun 		ibmvmc_max_hmcs = MAX_HMCS;
2338*4882a593Smuzhiyun 	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
2339*4882a593Smuzhiyun 		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
2340*4882a593Smuzhiyun 		ibmvmc_max_hmcs = MIN_HMCS;
2341*4882a593Smuzhiyun 	}
2342*4882a593Smuzhiyun }
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun static struct miscdevice ibmvmc_miscdev = {
2345*4882a593Smuzhiyun 	.name = ibmvmc_driver_name,
2346*4882a593Smuzhiyun 	.minor = MISC_DYNAMIC_MINOR,
2347*4882a593Smuzhiyun 	.fops = &ibmvmc_fops,
2348*4882a593Smuzhiyun };
2349*4882a593Smuzhiyun 
ibmvmc_module_init(void)2350*4882a593Smuzhiyun static int __init ibmvmc_module_init(void)
2351*4882a593Smuzhiyun {
2352*4882a593Smuzhiyun 	int rc, i, j;
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun 	ibmvmc.state = ibmvmc_state_initial;
2355*4882a593Smuzhiyun 	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 	rc = misc_register(&ibmvmc_miscdev);
2358*4882a593Smuzhiyun 	if (rc) {
2359*4882a593Smuzhiyun 		pr_err("ibmvmc: misc registration failed\n");
2360*4882a593Smuzhiyun 		goto misc_register_failed;
2361*4882a593Smuzhiyun 	}
2362*4882a593Smuzhiyun 	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
2363*4882a593Smuzhiyun 		ibmvmc_miscdev.minor);
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 	/* Initialize data structures */
2366*4882a593Smuzhiyun 	memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
2367*4882a593Smuzhiyun 	for (i = 0; i < MAX_HMCS; i++) {
2368*4882a593Smuzhiyun 		spin_lock_init(&hmcs[i].lock);
2369*4882a593Smuzhiyun 		hmcs[i].state = ibmhmc_state_free;
2370*4882a593Smuzhiyun 		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
2371*4882a593Smuzhiyun 			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
2372*4882a593Smuzhiyun 	}
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 	/* Sanity check module parms */
2375*4882a593Smuzhiyun 	ibmvmc_scrub_module_parms();
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	/*
2378*4882a593Smuzhiyun 	 * Initialize some reasonable values.  Might be negotiated smaller
2379*4882a593Smuzhiyun 	 * values during the capabilities exchange.
2380*4882a593Smuzhiyun 	 */
2381*4882a593Smuzhiyun 	ibmvmc.max_mtu = ibmvmc_max_mtu;
2382*4882a593Smuzhiyun 	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
2383*4882a593Smuzhiyun 	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 	rc = vio_register_driver(&ibmvmc_driver);
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 	if (rc) {
2388*4882a593Smuzhiyun 		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
2389*4882a593Smuzhiyun 		goto vio_reg_failed;
2390*4882a593Smuzhiyun 	}
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	return 0;
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun vio_reg_failed:
2395*4882a593Smuzhiyun 	misc_deregister(&ibmvmc_miscdev);
2396*4882a593Smuzhiyun misc_register_failed:
2397*4882a593Smuzhiyun 	return rc;
2398*4882a593Smuzhiyun }
2399*4882a593Smuzhiyun 
/* Module exit: unregister in reverse order of ibmvmc_module_init. */
static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun module_init(ibmvmc_module_init);
2408*4882a593Smuzhiyun module_exit(ibmvmc_module_exit);
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
2411*4882a593Smuzhiyun 		   int, 0644);
2412*4882a593Smuzhiyun MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
2413*4882a593Smuzhiyun module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
2414*4882a593Smuzhiyun MODULE_PARM_DESC(max_hmcs, "Max HMCs");
2415*4882a593Smuzhiyun module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
2416*4882a593Smuzhiyun MODULE_PARM_DESC(max_mtu, "Max MTU");
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
2419*4882a593Smuzhiyun MODULE_DESCRIPTION("IBM VMC");
2420*4882a593Smuzhiyun MODULE_VERSION(IBMVMC_DRIVER_VERSION);
2421*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
2422