// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *			Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004  LSI Logic Corporation.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "megaraid_mm.h"


// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);


// routines to convert to and from the old (MIMD) ioctl format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);


// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(struct timer_list *);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static uint32_t drvr_ver	= 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open	= mraid_mm_open,
	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.owner	= THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name   = "megadev0",
	.fops   = &lsi_fops,
};
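
/*
 * Usage sketch (illustrative only, not part of the driver): a privileged
 * management application reaches this module through the misc device node
 * registered above. The packet contents below are placeholders; only the
 * device name and the ioctl entry path come from this file.
 *
 *	int fd = open("/dev/megadev0", O_RDONLY);	// needs CAP_SYS_ADMIN
 *	mimd_t mimd;
 *
 *	memset(&mimd, 0, sizeof(mimd));
 *	// ... fill in mimd.ui.fcs.opcode/subopcode/adapno ...
 *	if (ioctl(fd, USCSICMD, &mimd) < 0)
 *		perror("megaraid ioctl");
 */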

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user *argp = (void __user *)arg;

	/*
	 * Make sure only USCSICMD is issued through this interface; MIMD
	 * applications may still fire a different command.
	 */

	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for the signature to see if this is the new or old ioctl
	 * format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if the adapter can accept ioctls. We may have marked it
	 * offline if any previous kioc had timed out on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 * or return NULL if the list head is empty for the pointer
	 * of type mraid_mmadp_t passed to mraid_mm_alloc_kioc
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it did _not_ time out. If it did,
	 * the resources are still with the low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
		        unsigned long arg)
{
	int err;

	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Returns the corresponding adapter for the mimd packet
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located @adapter.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t	*adapter;
	mimd_t		mimd;
	uint32_t	adapno;
	int		iterator;
	bool		is_found;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;
	is_found = false;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) {
			is_found = true;
			break;
		}
	}

	if (!is_found) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}
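
/*
 * Adapter numbering note: applications identify an adapter by the handle
 * that mraid_mm_adapter_app_handle() builds with MKADAP() from the
 * adapter's position in adapters_list_g; GETADAP() above recovers that
 * positional index, which the loop then walks to the matching list entry.
 * Illustrative round trip, assuming GETADAP() inverts MKADAP() (both are
 * defined in megaraid_mm.h):
 *
 *	uint32_t handle = MKADAP(2);		// third registered adapter
 *	uint32_t adapno = GETADAP(handle);	// recovers index 2
 */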

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t		__user *umimd;
	mimd_t		kmimd;
	uint8_t		opcode;
	uint8_t		subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either MEGAIOC_QDRVRVER
	 * or MEGAIOC_QNADAP, then we can handle it here. Otherwise we return
	 * 1 to indicate that we cannot.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}
}
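
/*
 * Illustrative userspace sketch (not part of the driver) for the two
 * driver-handled subopcodes above; the cast of the result buffer into
 * mimd.data is an assumption about how a MIMD application fills the packet:
 *
 *	mimd_t mimd;
 *	uint32_t ver;
 *
 *	memset(&mimd, 0, sizeof(mimd));
 *	mimd.ui.fcs.opcode	= 0x82;
 *	mimd.ui.fcs.subopcode	= MEGAIOC_QDRVRVER;
 *	mimd.data		= (caddr_t)&ver;
 *	if (ioctl(fd, USCSICMD, &mimd) == 0)
 *		printf("driver version %#x\n", ver);	// e.g. 0x02200207
 *
 * For MEGAIOC_QNADAP the adapter count is both copied out through
 * mimd.data and returned as the ioctl() result via *rval.
 */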


/**
 * mimd_to_kioc	- Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */

static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; mbox points to the pthru
	 * struct. Just like in the above case, the beginning of the memblk
	 * is treated as a mailbox. The passthru begins at the next 1K
	 * boundary, and the data starts 1K after that.
	 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
						pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}
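
/*
 * Direction-flag semantics as set above (summary derived from the code):
 * mimd.outlen describes data the application wants back, so it maps to
 * UIOC_RD (adapter -> user copy in kioc_to_mimd()); mimd.inlen describes
 * data supplied by the application, so it maps to UIOC_WR (user -> kernel
 * copy before issue). A bidirectional command simply sets both bits:
 *
 *	if (mimd.outlen) kioc->data_dir  = UIOC_RD;
 *	if (mimd.inlen)  kioc->data_dir |= UIOC_WR;
 */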

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with the smallest buffer that is >= @xferlen.
 * If that pool has no free buffer, we will try the next bigger size. If none
 * is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the pool.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get the exact size, we will try a bigger
	 * buffer
	 */

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Allocate a
	 * new buffer from the best-fit pool instead. NOTE: GFP_ATOMIC is
	 * used because we hold the pool spinlock; this call does not block.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;
	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}
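
/*
 * Worked example (assuming the pool ladder described at
 * mraid_mm_setup_dma_pools(): 4k, 8k, 16k, 32k, 64k): for xferlen = 6000,
 * the 4k pool is skipped, right_pool settles on the 8k pool, and the scan
 * continues upward looking for any idle preallocated buffer. If the 8k,
 * 16k, 32k and 64k buffers are all busy, a fresh 8k buffer is carved out
 * of the 8k dma_pool and flagged with free_buf, so mraid_mm_dealloc_kioc()
 * knows to free it back to the pool rather than just clearing in_use.
 */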

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with the number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head	*head;
	unsigned long		flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}
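
/*
 * Concurrency sketch (summary of the scheme above): the counting semaphore
 * and the free list move together. Every successful down() is balanced by
 * taking one node off kioc_pool, and mraid_mm_dealloc_kioc() re-adds the
 * node before calling up(). With max_kioc concurrent callers, the semaphore
 * blocks the (max_kioc + 1)th caller instead of letting it find an empty
 * list, which is why the list-empty branch above is a "should not happen"
 * diagnostic rather than a normal wait path.
 */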

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t	*pool;
	unsigned long	flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at run time and set the free_buf flag. We must
		 * free that buffer. Otherwise, just mark that the buffer is
		 * not in use
		 */
		if (kioc->free_buf == 1)
			dma_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct uioc_timeout	timeout = { };

	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		timeout.uioc = kioc;
		timer_setup_on_stack(&timeout.timer, lld_timedout, 0);

		timeout.timer.expires	= jiffies + adp->timeout * HZ;

		add_timer(&timeout.timer);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl has either completed successfully or timed out.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (timeout.timer.function) {
		del_timer_sync(&timeout.timer);
		destroy_timer_on_stack(&timeout.timer);
	}

	/*
	 * If the command timed out, mark the controller offline before
	 * returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}
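
/*
 * Note on the on-stack timer above: struct uioc_timeout lives in this
 * function's stack frame, so the timer must be fully quiesced before the
 * frame is reused. del_timer_sync() waits out a concurrently running
 * lld_timedout(), and destroy_timer_on_stack() pairs with
 * timer_setup_on_stack() for the debug-objects machinery. The
 * timeout.timer.function check simply distinguishes the adp->timeout > 0
 * case, where the timer was actually armed, from the zero-initialized one.
 */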


/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t	adapno;
	int		iterator;
	mraid_mmadp_t	*adapter;
	bool		is_found;

	/*
	 * When the kioc returns from the driver, make sure it no longer has
	 * ENODATA in its status. Otherwise, the driver will hang on
	 * wait_event forever
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc had timed out before. If so, nobody is waiting
	 * on it, so there is nobody to wake up. Instead, we just have to
	 * free the kioc
	 */
	if (kioc->timedout) {
		iterator	= 0;
		adapter		= NULL;
		adapno		= kioc->adapno;
		is_found	= false;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that had timed out before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) {
				is_found = true;
				break;
			}
		}

		kioc->timedout = 0;

		if (is_found)
			mraid_mm_dealloc_kioc(adapter, kioc);

	}
	else {
		wake_up(&wait_q);
	}
}


/**
 * lld_timedout	- callback from the expired timer
 * @t		: timer that timed out
 */
static void
lld_timedout(struct timer_list *t)
{
	struct uioc_timeout *timeout = from_timer(timeout, t, timer);
	uioc_t *kioc	= timeout->uioc;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}
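
/*
 * Timeout handoff (summary of lld_timedout()/ioctl_done() above): on expiry
 * the timer marks the kioc timedout, sets -ETIME and wakes the sleeper in
 * lld_ioctl(), which returns to the application without freeing the kioc --
 * the low level driver still owns the buffers. When the hardware finally
 * completes, ioctl_done() sees timedout set, knows nobody is waiting, and
 * releases the kioc itself. The quiescent flag cleared in lld_ioctl() keeps
 * new ioctls off the controller in the meantime.
 */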


/**
 * kioc_to_mimd	- Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;


	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}


/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base		= hinfo->baseport;
	cinfo->irq		= hinfo->irq;
	cinfo->numldrv		= hinfo->num_ldrv;
	cinfo->pcibus		= hinfo->pci_bus;
	cinfo->pcidev		= hinfo->pci_slot;
	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid		= hinfo->pci_device_id;
	cinfo->pcivendor	= hinfo->pci_vendor_id;
	cinfo->pcislot		= hinfo->pci_slot;
	cinfo->uid		= hinfo->unique_id;
}


/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t	*adapter;
	mbox64_t	*mbox_list;
	uioc_t		*kioc;
	uint32_t	rval;
	int		i;


	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;


	adapter->unique_id	= lld_adp->unique_id;
	adapter->drvr_type	= lld_adp->drvr_type;
	adapter->drvr_data	= lld_adp->drvr_data;
	adapter->pdev		= lld_adp->pdev;
	adapter->issue_uioc	= lld_adp->issue_uioc;
	adapter->timeout	= lld_adp->timeout;
	adapter->max_kioc	= lld_adp->max_kioc;
	adapter->quiescent	= 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures.
	 */
	adapter->kioc_list	= kmalloc_array(lld_adp->max_kioc,
						  sizeof(uioc_t),
						  GFP_KERNEL);
	adapter->mbox_list	= kmalloc_array(lld_adp->max_kioc,
						  sizeof(mbox64_t),
						  GFP_KERNEL);
	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
						&adapter->pdev->dev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list	= (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

		kioc		= adapter->kioc_list + i;
		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32	= dma_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__func__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	dma_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}
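
/*
 * Registration sketch for a low level driver (illustrative; the values are
 * placeholders, only the field names and the register/unregister calls come
 * from this module's API). The mraid_mmadp_t may live on the caller's
 * stack, since the fields are copied into a freshly allocated adapter:
 *
 *	mraid_mmadp_t adp;
 *
 *	memset(&adp, 0, sizeof(adp));
 *	adp.unique_id	= unique_id;		// later used to unregister
 *	adp.drvr_type	= DRVRTYPE_MBOX;	// only mailbox LLDs accepted
 *	adp.drvr_data	= (unsigned long)my_softstate;
 *	adp.pdev	= pdev;
 *	adp.issue_uioc	= my_issue_uioc;
 *	adp.timeout	= 300;			// seconds before lld_timedout()
 *	adp.max_kioc	= 32;			// concurrent management cmds
 *
 *	if (mraid_mm_register_adp(&adp))
 *		goto fail;
 *	// ... and mraid_mm_unregister_adp(unique_id) on teardown.
 */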


/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given driver data, locate the adapter in our global list and
 * return the corresponding handle, which is also used by applications to
 * uniquely identify an adapter.
 *
 * Return the adapter handle if found in the list.
 * Return 0 if the adapter could not be located, which should never happen.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;
	int		index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}


/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp	: Adapter softstate
 *
 * We maintain a pool of dma buffers per each adapter. Each pool has one
 * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
 * buffers. We have just one 4k buffer in the 4k pool, one 8k buffer in the
 * 8k pool etc. We don't want to waste too much memory by allocating more
 * buffers per pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t	*pool;
	int		bufsize;
	int		i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = dma_pool_create("megaraid mm data buffer",
						&adp->pdev->dev, bufsize,
						16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}
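
/*
 * Resulting ladder, assuming MRAID_MM_INIT_BUFF_SIZE is 4k and
 * MAX_DMA_POOLS is 5 (consistent with the "4k, 8k ... 64k" comment above):
 *
 *	i = 0 -> 4k pool	i = 3 -> 32k pool
 *	i = 1 -> 8k pool	i = 4 -> 64k pool
 *	i = 2 -> 16k pool
 *
 * mraid_mm_attach_buf() walks this array in order, so "smallest sufficient
 * buffer first" falls out of the doubling done here.
 */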


/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t	*kioc;
	int	i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	dma_pool_destroy(adp->pthru_dma_pool);
}


/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int		i;
	mm_dmapool_t	*pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				dma_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			dma_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}
}

/**
 * mraid_mm_init	- Module entry point
 */
static int __init
mraid_mm_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	err = misc_register(&megaraid_mm_dev);
	if (err < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
		return err;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}


/**
 * mraid_mm_exit	- Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1, ("exiting common mod\n"));

	misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */