1 /*
2  *
3  *			Linux MegaRAID device driver
4  *
5  * Copyright (c) 2003-2004  LSI Logic Corporation.
6  *
7  *	   This program is free software; you can redistribute it and/or
8  *	   modify it under the terms of the GNU General Public License
9  *	   as published by the Free Software Foundation; either version
10  *	   2 of the License, or (at your option) any later version.
11  *
12  * FILE		: megaraid_mm.c
13  * Version	: v2.20.2.7 (Jul 16 2006)
14  *
15  * Common management module
16  */
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/mutex.h>
20 #include "megaraid_mm.h"
21 
22 
23 // Entry points for char node driver
24 static DEFINE_MUTEX(mraid_mm_mutex);
25 static int mraid_mm_open(struct inode *, struct file *);
26 static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
27 
28 
29 // routines to convert to and from the old format
30 static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
31 static int kioc_to_mimd(uioc_t *, mimd_t __user *);
32 
33 
34 // Helper functions
35 static int handle_drvrcmd(void __user *, uint8_t, int *);
36 static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
37 static void ioctl_done(uioc_t *);
38 static void lld_timedout(unsigned long);
39 static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
40 static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
41 static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
42 static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
43 static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
44 static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
45 static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
46 static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
47 
48 #ifdef CONFIG_COMPAT
49 static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
50 #endif
51 
52 MODULE_AUTHOR("LSI Logic Corporation");
53 MODULE_DESCRIPTION("LSI Logic Management Module");
54 MODULE_LICENSE("GPL");
55 MODULE_VERSION(LSI_COMMON_MOD_VERSION);
56 
57 static int dbglevel = CL_ANN;
58 module_param_named(dlevel, dbglevel, int, 0);
59 MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
60 
61 EXPORT_SYMBOL(mraid_mm_register_adp);
62 EXPORT_SYMBOL(mraid_mm_unregister_adp);
63 EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
64 
65 static uint32_t drvr_ver	= 0x02200207;
66 
67 static int adapters_count_g;
68 static struct list_head adapters_list_g;
69 
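// Global wait queue: lld_ioctl() sleeps here until ioctl_done() or
// lld_timedout() updates the kioc status and issues a wake_up()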
70 static wait_queue_head_t wait_q;
71 
72 static const struct file_operations lsi_fops = {
73 	.open	= mraid_mm_open,
74 	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
75 #ifdef CONFIG_COMPAT
76 	.compat_ioctl = mraid_mm_compat_ioctl,
77 #endif
78 	.owner	= THIS_MODULE,
79 	.llseek = noop_llseek,
80 };
81 
82 static struct miscdevice megaraid_mm_dev = {
83 	.minor	= MISC_DYNAMIC_MINOR,
84 	.name   = "megadev0",
85 	.fops   = &lsi_fops,
86 };
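/*
 * Management applications talk to this module through the misc character
 * device "megadev0" (dynamic minor), using the ioctl entry points above.
 */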
87 
88 /**
89  * mraid_mm_open - open routine for char node interface
90  * @inode	: unused
91  * @filep	: unused
92  *
93  * Allow ioctl operations by apps only if they have superuser privilege.
94  */
95 static int
96 mraid_mm_open(struct inode *inode, struct file *filep)
97 {
98 	/*
99 	 * Only allow superuser to access private ioctl interface
100 	 */
101 	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
102 
103 	return 0;
104 }
105 
106 /**
107  * mraid_mm_ioctl - module entry-point for ioctls
108  * @filep	: file operations pointer (ignored)
109  * @cmd		: ioctl command
110  * @arg		: user ioctl packet
111  *
112  */
113 static int
114 mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
115 {
116 	uioc_t		*kioc;
117 	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
118 	int		rval;
119 	mraid_mmadp_t	*adp;
120 	uint8_t		old_ioctl;
121 	int		drvrcmd_rval;
122 	void __user *argp = (void __user *)arg;
123 
124 	/*
125 	 * Make sure only USCSICMD is issued through this interface.
126 	 * MIMD applications may still fire a different command.
127 	 */
128 
129 	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
130 		return (-EINVAL);
131 	}
132 
133 	/*
134 	 * Look for signature to see if this is the new or old ioctl format.
135 	 */
136 	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
137 		con_log(CL_ANN, (KERN_WARNING
138 			"megaraid cmm: copy from usr addr failed\n"));
139 		return (-EFAULT);
140 	}
141 
142 	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
143 		old_ioctl = 0;
144 	else
145 		old_ioctl = 1;
146 
147 	/*
148 	 * At present, we don't support the new ioctl packet
149 	 */
150 	if (!old_ioctl )
151 		return (-EINVAL);
152 
153 	/*
154 	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
155 	 * handle the command locally. rval > 0 means it is not a drvr cmd
156 	 */
157 	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
158 
159 	if (rval < 0)
160 		return rval;
161 	else if (rval == 0)
162 		return drvrcmd_rval;
163 
164 	rval = 0;
165 	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
166 		return rval;
167 	}
168 
169 	/*
170 	 * Check if adapter can accept ioctl. We may have marked it offline
171 	 * if any previous kioc timed out on this controller.
172 	 */
173 	if (!adp->quiescent) {
174 		con_log(CL_ANN, (KERN_WARNING
175 			"megaraid cmm: controller cannot accept cmds due to "
176 			"earlier errors\n" ));
177 		return -EFAULT;
178 	}
179 
180 	/*
181 	 * The following call will block till a kioc is available
182 	 */
183 	kioc = mraid_mm_alloc_kioc(adp);
184 
185 	/*
186 	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
187 	 */
188 	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
189 		mraid_mm_dealloc_kioc(adp, kioc);
190 		return rval;
191 	}
192 
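	/*
	 * ioctl_done() is invoked by the low level driver when this kioc
	 * completes; it wakes the wait_event() in lld_ioctl().
	 */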
193 	kioc->done = ioctl_done;
194 
195 	/*
196 	 * Issue the IOCTL to the low level driver. After the IOCTL completes,
197 	 * release the kioc if and only if it did _not_ time out. If it timed
198 	 * out, the resources are still with the low level driver.
199 	 */
200 	if ((rval = lld_ioctl(adp, kioc))) {
201 
202 		if (!kioc->timedout)
203 			mraid_mm_dealloc_kioc(adp, kioc);
204 
205 		return rval;
206 	}
207 
208 	/*
209 	 * Convert the kioc back to user space
210 	 */
211 	rval = kioc_to_mimd(kioc, argp);
212 
213 	/*
214 	 * Return the kioc to free pool
215 	 */
216 	mraid_mm_dealloc_kioc(adp, kioc);
217 
218 	return rval;
219 }
220 
221 static long
222 mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
223 		        unsigned long arg)
224 {
225 	int err;
226 
227 	/* inconsistent: mraid_mm_compat_ioctl doesn't take mraid_mm_mutex */
228 	mutex_lock(&mraid_mm_mutex);
229 	err = mraid_mm_ioctl(filep, cmd, arg);
230 	mutex_unlock(&mraid_mm_mutex);
231 
232 	return err;
233 }
234 
235 /**
236  * mraid_mm_get_adapter - Returns the adapter for the given mimd packet
237  * @umimd	: User space mimd_t ioctl packet
238  * @rval	: returned success/error status
239  *
240  * The function return value is a pointer to the located @adapter.
241  */
242 static mraid_mmadp_t *
243 mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
244 {
245 	mraid_mmadp_t	*adapter;
246 	mimd_t		mimd;
247 	uint32_t	adapno;
248 	int		iterator;
249 	bool		is_found;
250 
251 	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
252 		*rval = -EFAULT;
253 		return NULL;
254 	}
255 
256 	adapno = GETADAP(mimd.ui.fcs.adapno);
257 
258 	if (adapno >= adapters_count_g) {
259 		*rval = -ENODEV;
260 		return NULL;
261 	}
262 
263 	adapter = NULL;
264 	iterator = 0;
265 	is_found = false;
266 
267 	list_for_each_entry(adapter, &adapters_list_g, list) {
268 		if (iterator++ == adapno) {
269 			is_found = true;
270 			break;
271 		}
272 	}
273 
274 	if (!is_found) {
275 		*rval = -ENODEV;
276 		return NULL;
277 	}
278 
279 	return adapter;
280 }
281 
282 /**
283  * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
284  * @arg		: packet sent by the user app
285  * @old_ioctl	: mimd if 1; uioc otherwise
286  * @rval	: pointer for command's returned value (not function status)
287  */
288 static int
289 handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
290 {
291 	mimd_t		__user *umimd;
292 	mimd_t		kmimd;
293 	uint8_t		opcode;
294 	uint8_t		subopcode;
295 
296 	if (old_ioctl)
297 		goto old_packet;
298 	else
299 		goto new_packet;
300 
301 new_packet:
302 	return (-ENOTSUPP);
303 
304 old_packet:
305 	*rval = 0;
306 	umimd = arg;
307 
308 	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
309 		return (-EFAULT);
310 
311 	opcode		= kmimd.ui.fcs.opcode;
312 	subopcode	= kmimd.ui.fcs.subopcode;
313 
314 	/*
315 	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
316 	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
317 	 * indicate that we cannot handle this.
318 	 */
319 	if (opcode != 0x82)
320 		return 1;
321 
322 	switch (subopcode) {
323 
324 	case MEGAIOC_QDRVRVER:
325 
326 		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
327 			return (-EFAULT);
328 
329 		return 0;
330 
331 	case MEGAIOC_QNADAP:
332 
333 		*rval = adapters_count_g;
334 
335 		if (copy_to_user(kmimd.data, &adapters_count_g,
336 				sizeof(uint32_t)))
337 			return (-EFAULT);
338 
339 		return 0;
340 
341 	default:
342 		/* cannot handle */
343 		return 1;
344 	}
345 
346 	return 0;
347 }
348 
349 
350 /**
351  * mimd_to_kioc	- Converter from old to new ioctl format
352  * @umimd	: user space old MIMD IOCTL
353  * @adp		: adapter softstate
354  * @kioc	: kernel space new format IOCTL
355  *
356  * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
357  * new packet is in kernel space so that driver can perform operations on it
358  * freely.
359  */
360 
361 static int
362 mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
363 {
364 	mbox64_t		*mbox64;
365 	mbox_t			*mbox;
366 	mraid_passthru_t	*pthru32;
367 	uint32_t		adapno;
368 	uint8_t			opcode;
369 	uint8_t			subopcode;
370 	mimd_t			mimd;
371 
372 	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
373 		return (-EFAULT);
374 
375 	/*
376 	 * Applications are not allowed to send extd pthru
377 	 */
378 	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
379 			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
380 		return (-EINVAL);
381 
382 	opcode		= mimd.ui.fcs.opcode;
383 	subopcode	= mimd.ui.fcs.subopcode;
384 	adapno		= GETADAP(mimd.ui.fcs.adapno);
385 
386 	if (adapno >= adapters_count_g)
387 		return (-ENODEV);
388 
389 	kioc->adapno	= adapno;
390 	kioc->mb_type	= MBOX_LEGACY;
391 	kioc->app_type	= APPTYPE_MIMD;
392 
393 	switch (opcode) {
394 
395 	case 0x82:
396 
397 		if (subopcode == MEGAIOC_QADAPINFO) {
398 
399 			kioc->opcode	= GET_ADAP_INFO;
400 			kioc->data_dir	= UIOC_RD;
401 			kioc->xferlen	= sizeof(mraid_hba_info_t);
402 
403 			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
404 				return (-ENOMEM);
405 		}
406 		else {
407 			con_log(CL_ANN, (KERN_WARNING
408 					"megaraid cmm: Invalid subop\n"));
409 			return (-EINVAL);
410 		}
411 
412 		break;
413 
414 	case 0x81:
415 
416 		kioc->opcode		= MBOX_CMD;
417 		kioc->xferlen		= mimd.ui.fcs.length;
418 		kioc->user_data_len	= kioc->xferlen;
419 		kioc->user_data		= mimd.ui.fcs.buffer;
420 
421 		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
422 			return (-ENOMEM);
423 
424 		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
425 		if (mimd.inlen) kioc->data_dir |= UIOC_WR;
426 
427 		break;
428 
429 	case 0x80:
430 
431 		kioc->opcode		= MBOX_CMD;
432 		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
433 						mimd.outlen : mimd.inlen;
434 		kioc->user_data_len	= kioc->xferlen;
435 		kioc->user_data		= mimd.data;
436 
437 		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
438 			return (-ENOMEM);
439 
440 		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
441 		if (mimd.inlen) kioc->data_dir |= UIOC_WR;
442 
443 		break;
444 
445 	default:
446 		return (-EINVAL);
447 	}
448 
449 	/*
450 	 * If driver command, nothing else to do
451 	 */
452 	if (opcode == 0x82)
453 		return 0;
454 
455 	/*
456 	 * This is a mailbox cmd; copy the mailbox from mimd
457 	 */
458 	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
459 	mbox	= &mbox64->mbox32;
460 	memcpy(mbox, mimd.mbox, 14);
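	/*
	 * Only the leading 14 bytes of the mimd mailbox carry the command
	 * proper; the trailing bytes (presumably busy/numstatus/status) are
	 * owned by the firmware, and the status byte is copied back to
	 * mbox[17] in kioc_to_mimd().
	 */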
461 
462 	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD
463 
464 		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;
465 
466 		if (kioc->data_dir & UIOC_WR) {
467 			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
468 							kioc->xferlen)) {
469 				return (-EFAULT);
470 			}
471 		}
472 
473 		return 0;
474 	}
475 
476 	/*
477 	 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
478 	 * Just like in above case, the beginning for memblk is treated as
479 	 * a mailbox. The passthru will begin at next 1K boundary. And the
480 	 * data will start 1K after that.
481 	 */
482 	pthru32			= kioc->pthru32;
483 	kioc->user_pthru	= &umimd->pthru;
484 	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;
485 
486 	if (copy_from_user(pthru32, kioc->user_pthru,
487 			sizeof(mraid_passthru_t))) {
488 		return (-EFAULT);
489 	}
490 
491 	pthru32->dataxferaddr	= kioc->buf_paddr;
492 	if (kioc->data_dir & UIOC_WR) {
493 		if (pthru32->dataxferlen > kioc->xferlen)
494 			return -EINVAL;
495 		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
496 						pthru32->dataxferlen)) {
497 			return (-EFAULT);
498 		}
499 	}
500 
501 	return 0;
502 }
503 
504 /**
505  * mraid_mm_attach_buf - Attach a free dma buffer of the required size
506  * @adp		: Adapter softstate
507  * @kioc	: kioc that the buffer needs to be attached to
508  * @xferlen	: required length for buffer
509  *
510  * First we search for the pool with the smallest buffer that is >= @xferlen.
511  * If that pool has no free buffer, we try the next bigger size. If none is
512  * available, we allocate the smallest buffer that is >= @xferlen and attach
513  * it to the pool.
514  */
515 static int
516 mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
517 {
518 	mm_dmapool_t	*pool;
519 	int		right_pool = -1;
520 	unsigned long	flags;
521 	int		i;
522 
523 	kioc->pool_index	= -1;
524 	kioc->buf_vaddr		= NULL;
525 	kioc->buf_paddr		= 0;
526 	kioc->free_buf		= 0;
527 
528 	/*
529 	 * We need xferlen amount of memory. See if we can get it from our
530 	 * dma pools. If we don't get exact size, we will try bigger buffer
531 	 */
532 
533 	for (i = 0; i < MAX_DMA_POOLS; i++) {
534 
535 		pool = &adp->dma_pool_list[i];
536 
537 		if (xferlen > pool->buf_size)
538 			continue;
539 
540 		if (right_pool == -1)
541 			right_pool = i;
542 
543 		spin_lock_irqsave(&pool->lock, flags);
544 
545 		if (!pool->in_use) {
546 
547 			pool->in_use		= 1;
548 			kioc->pool_index	= i;
549 			kioc->buf_vaddr		= pool->vaddr;
550 			kioc->buf_paddr		= pool->paddr;
551 
552 			spin_unlock_irqrestore(&pool->lock, flags);
553 			return 0;
554 		}
555 		else {
556 			spin_unlock_irqrestore(&pool->lock, flags);
557 			continue;
558 		}
559 	}
560 
561 	/*
562 	 * If xferlen doesn't match any of our pools, return error
563 	 */
564 	if (right_pool == -1)
565 		return -EINVAL;
566 
567 	/*
568 	 * We did not get any buffer from the preallocated pool. Let us try
569 	 * to allocate one new buffer. NOTE: This is a blocking call.
570 	 */
571 	pool = &adp->dma_pool_list[right_pool];
572 
573 	spin_lock_irqsave(&pool->lock, flags);
574 
575 	kioc->pool_index	= right_pool;
576 	kioc->free_buf		= 1;
577 	kioc->buf_vaddr 	= pci_pool_alloc(pool->handle, GFP_KERNEL,
578 							&kioc->buf_paddr);
579 	spin_unlock_irqrestore(&pool->lock, flags);
580 
581 	if (!kioc->buf_vaddr)
582 		return -ENOMEM;
583 
584 	return 0;
585 }
586 
587 /**
588  * mraid_mm_alloc_kioc - Returns a uioc_t from free list
589  * @adp	: Adapter softstate for this module
590  *
591  * The kioc_semaphore is initialized with the number of kioc nodes in the
592  * free kioc pool. If the kioc pool is empty, this function blocks till
593  * a kioc becomes free.
594  */
595 static uioc_t *
596 mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
597 {
598 	uioc_t			*kioc;
599 	struct list_head*	head;
600 	unsigned long		flags;
601 
602 	down(&adp->kioc_semaphore);
603 
604 	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
605 
606 	head = &adp->kioc_pool;
607 
608 	if (list_empty(head)) {
609 		up(&adp->kioc_semaphore);
610 		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
611 
612 		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
613 		return NULL;
614 	}
615 
616 	kioc = list_entry(head->next, uioc_t, list);
617 	list_del_init(&kioc->list);
618 
619 	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
620 
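	/* wipe the per-kioc mailbox and passthru scratch areas before reuse */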
621 	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
622 	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
623 
624 	kioc->buf_vaddr		= NULL;
625 	kioc->buf_paddr		= 0;
626 	kioc->pool_index	=-1;
627 	kioc->free_buf		= 0;
628 	kioc->user_data		= NULL;
629 	kioc->user_data_len	= 0;
630 	kioc->user_pthru	= NULL;
631 	kioc->timedout		= 0;
632 
633 	return kioc;
634 }
635 
636 /**
637  * mraid_mm_dealloc_kioc - Return kioc to free pool
638  * @adp		: Adapter softstate
639  * @kioc	: uioc_t node to be returned to free pool
640  */
641 static void
642 mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
643 {
644 	mm_dmapool_t	*pool;
645 	unsigned long	flags;
646 
647 	if (kioc->pool_index != -1) {
648 		pool = &adp->dma_pool_list[kioc->pool_index];
649 
650 		/* This routine may be called in non-isr context also */
651 		spin_lock_irqsave(&pool->lock, flags);
652 
653 		/*
654 		 * While attaching the dma buffer, if we didn't get the
655 		 * required buffer from the pool, we would have allocated
656 		 * it at the run time and set the free_buf flag. We must
657 		 * free that buffer. Otherwise, just mark that the buffer is
658 		 * not in use
659 		 */
660 		if (kioc->free_buf == 1)
661 			pci_pool_free(pool->handle, kioc->buf_vaddr,
662 							kioc->buf_paddr);
663 		else
664 			pool->in_use = 0;
665 
666 		spin_unlock_irqrestore(&pool->lock, flags);
667 	}
668 
669 	/* Return the kioc to the free pool */
670 	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
671 	list_add(&kioc->list, &adp->kioc_pool);
672 	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
673 
674 	/* increment the free kioc count */
675 	up(&adp->kioc_semaphore);
676 
677 	return;
678 }
679 
680 /**
681  * lld_ioctl - Routine to issue ioctl to low level drvr
682  * @adp		: The adapter handle
683  * @kioc	: The ioctl packet with kernel addresses
684  */
685 static int
686 lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
687 {
688 	int			rval;
689 	struct timer_list	timer;
690 	struct timer_list	*tp = NULL;
691 
692 	kioc->status	= -ENODATA;
693 	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
694 
695 	if (rval) return rval;
696 
697 	/*
698 	 * Start the timer
699 	 */
700 	if (adp->timeout > 0) {
701 		tp		= &timer;
702 		init_timer(tp);
703 
704 		tp->function	= lld_timedout;
705 		tp->data	= (unsigned long)kioc;
706 		tp->expires	= jiffies + adp->timeout * HZ;
707 
708 		add_timer(tp);
709 	}
710 
711 	/*
712 	 * Wait till the low level driver completes the ioctl. After this
713 	 * call, the ioctl either completed successfully or timedout.
714 	 */
715 	wait_event(wait_q, (kioc->status != -ENODATA));
716 	if (tp) {
717 		del_timer_sync(tp);
718 	}
719 
720 	/*
721 	 * If the command had timedout, we mark the controller offline
722 	 * before returning
723 	 */
724 	if (kioc->timedout) {
725 		adp->quiescent = 0;
726 	}
727 
728 	return kioc->status;
729 }
730 
731 
732 /**
733  * ioctl_done - callback from the low level driver
734  * @kioc	: completed ioctl packet
735  */
736 static void
737 ioctl_done(uioc_t *kioc)
738 {
739 	uint32_t	adapno;
740 	int		iterator;
741 	mraid_mmadp_t*	adapter;
742 	bool		is_found;
743 
744 	/*
745 	 * When the kioc returns from driver, make sure it still doesn't
746 	 * have ENODATA in status. Otherwise, driver will hang on wait_event
747 	 * forever
748 	 */
749 	if (kioc->status == -ENODATA) {
750 		con_log(CL_ANN, (KERN_WARNING
751 			"megaraid cmm: lld didn't change status!\n"));
752 
753 		kioc->status = -EINVAL;
754 	}
755 
756 	/*
757 	 * Check if this kioc timed out earlier. If so, nobody is waiting
758 	 * on this kioc. We don't have to wake up anybody. Instead, we just
759 	 * have to free the kioc.
760 	 */
761 	if (kioc->timedout) {
762 		iterator	= 0;
763 		adapter		= NULL;
764 		adapno		= kioc->adapno;
765 		is_found	= false;
766 
767 		con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
768 					"ioctl that was timedout before\n"));
769 
770 		list_for_each_entry(adapter, &adapters_list_g, list) {
771 			if (iterator++ == adapno) {
772 				is_found = true;
773 				break;
774 			}
775 		}
776 
777 		kioc->timedout = 0;
778 
779 		if (is_found)
780 			mraid_mm_dealloc_kioc( adapter, kioc );
781 
782 	}
783 	else {
784 		wake_up(&wait_q);
785 	}
786 }
787 
788 
789 /**
790  * lld_timedout	- callback from the expired timer
791  * @ptr		: ioctl packet that timed out
792  */
793 static void
794 lld_timedout(unsigned long ptr)
795 {
796 	uioc_t *kioc	= (uioc_t *)ptr;
797 
798 	kioc->status 	= -ETIME;
799 	kioc->timedout	= 1;
800 
801 	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
802 
803 	wake_up(&wait_q);
804 }
805 
806 
807 /**
808  * kioc_to_mimd	- Converter from new back to old format
809  * @kioc	: Kernel space IOCTL packet (successfully issued)
810  * @mimd	: User space MIMD packet
811  */
812 static int
813 kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
814 {
815 	mimd_t			kmimd;
816 	uint8_t			opcode;
817 	uint8_t			subopcode;
818 
819 	mbox64_t		*mbox64;
820 	mraid_passthru_t	__user *upthru32;
821 	mraid_passthru_t	*kpthru32;
822 	mcontroller_t		cinfo;
823 	mraid_hba_info_t	*hinfo;
824 
825 
826 	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
827 		return (-EFAULT);
828 
829 	opcode		= kmimd.ui.fcs.opcode;
830 	subopcode	= kmimd.ui.fcs.subopcode;
831 
832 	if (opcode == 0x82) {
833 		switch (subopcode) {
834 
835 		case MEGAIOC_QADAPINFO:
836 
837 			hinfo = (mraid_hba_info_t *)(unsigned long)
838 					kioc->buf_vaddr;
839 
840 			hinfo_to_cinfo(hinfo, &cinfo);
841 
842 			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
843 				return (-EFAULT);
844 
845 			return 0;
846 
847 		default:
848 			return (-EINVAL);
849 		}
850 
851 		return 0;
852 	}
853 
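	/*
	 * Mailbox command: copy back the passthru scsistatus (if any), the
	 * data buffer and the mailbox status byte to user space.
	 */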
854 	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
855 
856 	if (kioc->user_pthru) {
857 
858 		upthru32 = kioc->user_pthru;
859 		kpthru32 = kioc->pthru32;
860 
861 		if (copy_to_user(&upthru32->scsistatus,
862 					&kpthru32->scsistatus,
863 					sizeof(uint8_t))) {
864 			return (-EFAULT);
865 		}
866 	}
867 
868 	if (kioc->user_data) {
869 		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
870 					kioc->user_data_len)) {
871 			return (-EFAULT);
872 		}
873 	}
874 
875 	if (copy_to_user(&mimd->mbox[17],
876 			&mbox64->mbox32.status, sizeof(uint8_t))) {
877 		return (-EFAULT);
878 	}
879 
880 	return 0;
881 }
882 
883 
884 /**
885  * hinfo_to_cinfo - Convert new format hba info into old format
886  * @hinfo	: New format, more comprehensive adapter info
887  * @cinfo	: Old format adapter info to support mimd_t apps
888  */
889 static void
890 hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
891 {
892 	if (!hinfo || !cinfo)
893 		return;
894 
895 	cinfo->base		= hinfo->baseport;
896 	cinfo->irq		= hinfo->irq;
897 	cinfo->numldrv		= hinfo->num_ldrv;
898 	cinfo->pcibus		= hinfo->pci_bus;
899 	cinfo->pcidev		= hinfo->pci_slot;
900 	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
901 	cinfo->pciid		= hinfo->pci_device_id;
902 	cinfo->pcivendor	= hinfo->pci_vendor_id;
903 	cinfo->pcislot		= hinfo->pci_slot;
904 	cinfo->uid		= hinfo->unique_id;
905 }
906 
907 
908 /**
909  * mraid_mm_register_adp - Registration routine for low level drivers
910  * @lld_adp	: Adapter object
911  */
912 int
913 mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
914 {
915 	mraid_mmadp_t	*adapter;
916 	mbox64_t	*mbox_list;
917 	uioc_t		*kioc;
918 	uint32_t	rval;
919 	int		i;
920 
921 
922 	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
923 		return (-EINVAL);
924 
925 	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
926 
927 	if (!adapter)
928 		return -ENOMEM;
929 
930 
931 	adapter->unique_id	= lld_adp->unique_id;
932 	adapter->drvr_type	= lld_adp->drvr_type;
933 	adapter->drvr_data	= lld_adp->drvr_data;
934 	adapter->pdev		= lld_adp->pdev;
935 	adapter->issue_uioc	= lld_adp->issue_uioc;
936 	adapter->timeout	= lld_adp->timeout;
937 	adapter->max_kioc	= lld_adp->max_kioc;
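	/* quiescent == 1: adapter accepts ioctls; cleared by lld_ioctl() on timeout */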
938 	adapter->quiescent	= 1;
939 
940 	/*
941 	 * Allocate single blocks of memory for all required kiocs,
942 	 * mailboxes and passthru structures.
943 	 */
944 	adapter->kioc_list	= kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
945 						GFP_KERNEL);
946 	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
947 						GFP_KERNEL);
948 	adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
949 						adapter->pdev,
950 						sizeof(mraid_passthru_t),
951 						16, 0);
952 
953 	if (!adapter->kioc_list || !adapter->mbox_list ||
954 			!adapter->pthru_dma_pool) {
955 
956 		con_log(CL_ANN, (KERN_WARNING
957 			"megaraid cmm: out of memory, %s %d\n", __func__,
958 			__LINE__));
959 
960 		rval = (-ENOMEM);
961 
962 		goto memalloc_error;
963 	}
964 
965 	/*
966 	 * Slice kioc_list and make a kioc_pool with the individual kiocs
967 	 */
968 	INIT_LIST_HEAD(&adapter->kioc_pool);
969 	spin_lock_init(&adapter->kioc_pool_lock);
970 	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
971 
972 	mbox_list	= (mbox64_t *)adapter->mbox_list;
973 
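	/*
	 * Each kioc gets one mbox64_t slot from mbox_list as its command
	 * buffer and one DMA-able passthru structure from the pthru pool.
	 */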
974 	for (i = 0; i < lld_adp->max_kioc; i++) {
975 
976 		kioc		= adapter->kioc_list + i;
977 		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
978 		kioc->pthru32	= pci_pool_alloc(adapter->pthru_dma_pool,
979 						GFP_KERNEL, &kioc->pthru32_h);
980 
981 		if (!kioc->pthru32) {
982 
983 			con_log(CL_ANN, (KERN_WARNING
984 				"megaraid cmm: out of memory, %s %d\n",
985 					__func__, __LINE__));
986 
987 			rval = (-ENOMEM);
988 
989 			goto pthru_dma_pool_error;
990 		}
991 
992 		list_add_tail(&kioc->list, &adapter->kioc_pool);
993 	}
994 
995 	// Setup the dma pools for data buffers
996 	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
997 		goto dma_pool_error;
998 	}
999 
1000 	list_add_tail(&adapter->list, &adapters_list_g);
1001 
1002 	adapters_count_g++;
1003 
1004 	return 0;
1005 
1006 dma_pool_error:
1007 	/* Do nothing */
1008 
1009 pthru_dma_pool_error:
1010 
1011 	for (i = 0; i < lld_adp->max_kioc; i++) {
1012 		kioc = adapter->kioc_list + i;
1013 		if (kioc->pthru32) {
1014 			pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
1015 				kioc->pthru32_h);
1016 		}
1017 	}
1018 
1019 memalloc_error:
1020 
1021 	kfree(adapter->kioc_list);
1022 	kfree(adapter->mbox_list);
1023 
1024 	if (adapter->pthru_dma_pool)
1025 		pci_pool_destroy(adapter->pthru_dma_pool);
1026 
1027 	kfree(adapter);
1028 
1029 	return rval;
1030 }
1031 
1032 
1033 /**
1034  * mraid_mm_adapter_app_handle - return the application handle for this adapter
1035  * @unique_id	: adapter unique identifier
1036  *
1037  * For the given unique adapter id, locate the adapter in our global list
1038  * and return the corresponding handle, which applications also use to
1039  * uniquely identify an adapter.
1040  *
1041  * Return the adapter handle if the adapter is found in the list.
1042  * Return 0 if it could not be located, which should never happen.
1043  */
1044 uint32_t
1045 mraid_mm_adapter_app_handle(uint32_t unique_id)
1046 {
1047 	mraid_mmadp_t	*adapter;
1048 	mraid_mmadp_t	*tmp;
1049 	int		index = 0;
1050 
1051 	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1052 
1053 		if (adapter->unique_id == unique_id) {
1054 
1055 			return MKADAP(index);
1056 		}
1057 
1058 		index++;
1059 	}
1060 
1061 	return 0;
1062 }
1063 
1064 
1065 /**
1066  * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
1067  * @adp	: Adapter softstate
1068  *
1069  * We maintain a set of dma buffer pools for each adapter. Each pool holds
1070  * one buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
1071  * buffers: just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool,
1072  * etc. We don't want to waste too much memory by allocating more buffers
1073  * per pool.
1074  */
1075 static int
1076 mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
1077 {
1078 	mm_dmapool_t	*pool;
1079 	int		bufsize;
1080 	int		i;
1081 
1082 	/*
1083 	 * Create MAX_DMA_POOLS number of pools
1084 	 */
1085 	bufsize = MRAID_MM_INIT_BUFF_SIZE;
1086 
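	/*
	 * Pool buffer sizes double at each step, starting from
	 * MRAID_MM_INIT_BUFF_SIZE, for MAX_DMA_POOLS pools.
	 */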
1087 	for (i = 0; i < MAX_DMA_POOLS; i++){
1088 
1089 		pool = &adp->dma_pool_list[i];
1090 
1091 		pool->buf_size = bufsize;
1092 		spin_lock_init(&pool->lock);
1093 
1094 		pool->handle = pci_pool_create("megaraid mm data buffer",
1095 						adp->pdev, bufsize, 16, 0);
1096 
1097 		if (!pool->handle) {
1098 			goto dma_pool_setup_error;
1099 		}
1100 
1101 		pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
1102 							&pool->paddr);
1103 
1104 		if (!pool->vaddr)
1105 			goto dma_pool_setup_error;
1106 
1107 		bufsize = bufsize * 2;
1108 	}
1109 
1110 	return 0;
1111 
1112 dma_pool_setup_error:
1113 
1114 	mraid_mm_teardown_dma_pools(adp);
1115 	return (-ENOMEM);
1116 }
1117 
1118 
1119 /**
1120  * mraid_mm_unregister_adp - Unregister routine for low level drivers
1121  * @unique_id	: UID of the adapter
1122  *
1123  * Assumes no outstanding ioctls to llds.
1124  */
1125 int
1126 mraid_mm_unregister_adp(uint32_t unique_id)
1127 {
1128 	mraid_mmadp_t	*adapter;
1129 	mraid_mmadp_t	*tmp;
1130 
1131 	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1132 
1133 
1134 		if (adapter->unique_id == unique_id) {
1135 
1136 			adapters_count_g--;
1137 
1138 			list_del_init(&adapter->list);
1139 
1140 			mraid_mm_free_adp_resources(adapter);
1141 
1142 			kfree(adapter);
1143 
1144 			con_log(CL_ANN, (
1145 				"megaraid cmm: Unregistered one adapter:%#x\n",
1146 				unique_id));
1147 
1148 			return 0;
1149 		}
1150 	}
1151 
1152 	return (-ENODEV);
1153 }
1154 
1155 /**
1156  * mraid_mm_free_adp_resources - Free adapter softstate
1157  * @adp	: Adapter softstate
1158  */
1159 static void
1160 mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1161 {
1162 	uioc_t	*kioc;
1163 	int	i;
1164 
1165 	mraid_mm_teardown_dma_pools(adp);
1166 
1167 	for (i = 0; i < adp->max_kioc; i++) {
1168 
1169 		kioc = adp->kioc_list + i;
1170 
1171 		pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
1172 				kioc->pthru32_h);
1173 	}
1174 
1175 	kfree(adp->kioc_list);
1176 	kfree(adp->mbox_list);
1177 
1178 	pci_pool_destroy(adp->pthru_dma_pool);
1179 
1180 
1181 	return;
1182 }
1183 
1184 
1185 /**
1186  * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
1187  * @adp	: Adapter softstate
1188  */
1189 static void
1190 mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1191 {
1192 	int		i;
1193 	mm_dmapool_t	*pool;
1194 
1195 	for (i = 0; i < MAX_DMA_POOLS; i++) {
1196 
1197 		pool = &adp->dma_pool_list[i];
1198 
1199 		if (pool->handle) {
1200 
1201 			if (pool->vaddr)
1202 				pci_pool_free(pool->handle, pool->vaddr,
1203 							pool->paddr);
1204 
1205 			pci_pool_destroy(pool->handle);
1206 			pool->handle = NULL;
1207 		}
1208 	}
1209 
1210 	return;
1211 }
1212 
1213 /**
1214  * mraid_mm_init	- Module entry point
1215  */
1216 static int __init
1217 mraid_mm_init(void)
1218 {
1219 	int err;
1220 
1221 	// Announce the driver version
1222 	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1223 		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1224 
1225 	err = misc_register(&megaraid_mm_dev);
1226 	if (err < 0) {
1227 		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
1228 		return err;
1229 	}
1230 
1231 	init_waitqueue_head(&wait_q);
1232 
1233 	INIT_LIST_HEAD(&adapters_list_g);
1234 
1235 	return 0;
1236 }
1237 
1238 
1239 #ifdef CONFIG_COMPAT
1240 /**
1241  * mraid_mm_compat_ioctl	- 32bit to 64bit ioctl conversion routine
1242  * @filep	: file operations pointer (ignored)
1243  * @cmd		: ioctl command
1244  * @arg		: user ioctl packet
1245  */
1246 static long
1247 mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
1248 		      unsigned long arg)
1249 {
1250 	int err;
1251 
1252 	err = mraid_mm_ioctl(filep, cmd, arg);
1253 
1254 	return err;
1255 }
1256 #endif
1257 
1258 /**
1259  * mraid_mm_exit	- Module exit point
1260  */
1261 static void __exit
1262 mraid_mm_exit(void)
1263 {
1264 	con_log(CL_DLEVEL1 , ("exiting common mod\n"));
1265 
1266 	misc_deregister(&megaraid_mm_dev);
1267 }
1268 
1269 module_init(mraid_mm_init);
1270 module_exit(mraid_mm_exit);
1271 
1272 /* vi: set ts=8 sw=8 tw=78: */
1273