1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2003-2004 LSI Logic Corporation.
7 *
8 * FILE : megaraid_mm.c
9 * Version : v2.20.2.7 (Jul 16 2006)
10 *
11 * Common management module
12 */
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/mutex.h>
16 #include "megaraid_mm.h"
17
18
19 // Entry points for char node driver
20 static DEFINE_MUTEX(mraid_mm_mutex);
21 static int mraid_mm_open(struct inode *, struct file *);
22 static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
23
24
25 // routines to convert to and from the old format
26 static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
27 static int kioc_to_mimd(uioc_t *, mimd_t __user *);
28
29
30 // Helper functions
31 static int handle_drvrcmd(void __user *, uint8_t, int *);
32 static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
33 static void ioctl_done(uioc_t *);
34 static void lld_timedout(struct timer_list *);
35 static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
36 static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
37 static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
38 static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
39 static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
40 static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
41 static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
42 static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
43
44 MODULE_AUTHOR("LSI Logic Corporation");
45 MODULE_DESCRIPTION("LSI Logic Management Module");
46 MODULE_LICENSE("GPL");
47 MODULE_VERSION(LSI_COMMON_MOD_VERSION);
48
49 static int dbglevel = CL_ANN;
50 module_param_named(dlevel, dbglevel, int, 0);
51 MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
52
53 EXPORT_SYMBOL(mraid_mm_register_adp);
54 EXPORT_SYMBOL(mraid_mm_unregister_adp);
55 EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
56
57 static uint32_t drvr_ver = 0x02200207;
58
59 static int adapters_count_g;
60 static struct list_head adapters_list_g;
61
62 static wait_queue_head_t wait_q;
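/*
 * Note: all ioctls share this single wait queue. lld_ioctl() sleeps on it
 * until ioctl_done() or lld_timedout() changes kioc->status away from
 * -ENODATA and wakes it up.
 */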
63
64 static const struct file_operations lsi_fops = {
65 .open = mraid_mm_open,
66 .unlocked_ioctl = mraid_mm_unlocked_ioctl,
67 .compat_ioctl = compat_ptr_ioctl,
68 .owner = THIS_MODULE,
69 .llseek = noop_llseek,
70 };
71
72 static struct miscdevice megaraid_mm_dev = {
73 .minor = MISC_DYNAMIC_MINOR,
74 .name = "megadev0",
75 .fops = &lsi_fops,
76 };
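/*
 * The management module is exposed as a misc char device, typically visible
 * as /dev/megadev0. Management applications open this node and drive
 * everything through the ioctl entry points in lsi_fops above.
 */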
77
78 /**
79 * mraid_mm_open - open routine for char node interface
80 * @inode : unused
81 * @filep : unused
82 *
83 * Allow ioctl operations by apps only if they have superuser privilege.
84 */
85 static int
86 mraid_mm_open(struct inode *inode, struct file *filep)
87 {
88 /*
89 * Only allow superuser to access private ioctl interface
90 */
91 if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
92
93 return 0;
94 }
95
96 /**
97 * mraid_mm_ioctl - module entry-point for ioctls
98  * @filep : file pointer (ignored)
99 * @cmd : ioctl command
100 * @arg : user ioctl packet
101 */
102 static int
103 mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
104 {
105 uioc_t *kioc;
106 char signature[EXT_IOCTL_SIGN_SZ] = {0};
107 int rval;
108 mraid_mmadp_t *adp;
109 uint8_t old_ioctl;
110 int drvrcmd_rval;
111 void __user *argp = (void __user *)arg;
112
113 /*
114 	 * Make sure only MEGAIOC_MAGIC or USCSICMD ioctls are issued through
115 	 * this interface; MIMD applications may still fire other commands.
116 */
117
118 if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
119 return (-EINVAL);
120 }
121
122 /*
123 * Look for signature to see if this is the new or old ioctl format.
124 */
125 if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
126 con_log(CL_ANN, (KERN_WARNING
127 "megaraid cmm: copy from usr addr failed\n"));
128 return (-EFAULT);
129 }
130
131 if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
132 old_ioctl = 0;
133 else
134 old_ioctl = 1;
135
136 /*
137 * At present, we don't support the new ioctl packet
138 */
139 if (!old_ioctl )
140 return (-EINVAL);
141
142 /*
143 	 * If it is a driver ioctl (as opposed to a fw ioctl), we can handle the
144 	 * command locally. rval < 0: error; rval == 0: handled, result is in
	 * drvrcmd_rval; rval > 0: not a drvr cmd.
145 */
146 rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
147
148 if (rval < 0)
149 return rval;
150 else if (rval == 0)
151 return drvrcmd_rval;
152
153 rval = 0;
154 if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
155 return rval;
156 }
157
158 /*
159 	 * Check if the adapter can accept ioctls. We may have marked it offline
160 	 * if a previous kioc timed out on this controller.
161 */
162 if (!adp->quiescent) {
163 con_log(CL_ANN, (KERN_WARNING
164 "megaraid cmm: controller cannot accept cmds due to "
165 "earlier errors\n" ));
166 return -EFAULT;
167 }
168
169 /*
170 	 * The following call blocks till a kioc is available or returns NULL
171 	 * if the free list of the mraid_mmadp_t passed to mraid_mm_alloc_kioc
172 	 * is empty
173 */
174 kioc = mraid_mm_alloc_kioc(adp);
175 if (!kioc)
176 return -ENXIO;
177
178 /*
179 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
180 */
181 if ((rval = mimd_to_kioc(argp, adp, kioc))) {
182 mraid_mm_dealloc_kioc(adp, kioc);
183 return rval;
184 }
185
186 kioc->done = ioctl_done;
187
188 /*
189 * Issue the IOCTL to the low level driver. After the IOCTL completes
190 	 * release the kioc if and only if it was _not_ timed out. If it timed
191 	 * out, the resources are still with the low level driver.
192 */
193 if ((rval = lld_ioctl(adp, kioc))) {
194
195 if (!kioc->timedout)
196 mraid_mm_dealloc_kioc(adp, kioc);
197
198 return rval;
199 }
200
201 /*
202 * Convert the kioc back to user space
203 */
204 rval = kioc_to_mimd(kioc, argp);
205
206 /*
207 * Return the kioc to free pool
208 */
209 mraid_mm_dealloc_kioc(adp, kioc);
210
211 return rval;
212 }
213
214 static long
215 mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
216 unsigned long arg)
217 {
218 int err;
219
220 mutex_lock(&mraid_mm_mutex);
221 err = mraid_mm_ioctl(filep, cmd, arg);
222 mutex_unlock(&mraid_mm_mutex);
223
224 return err;
225 }
226
227 /**
228  * mraid_mm_get_adapter - Returns the adapter corresponding to the mimd packet
229 * @umimd : User space mimd_t ioctl packet
230 * @rval : returned success/error status
231 *
232  * Returns a pointer to the located adapter, or NULL with @rval set on error.
233 */
234 static mraid_mmadp_t *
235 mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
236 {
237 mraid_mmadp_t *adapter;
238 mimd_t mimd;
239 uint32_t adapno;
240 int iterator;
241 bool is_found;
242
243 if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
244 *rval = -EFAULT;
245 return NULL;
246 }
247
248 adapno = GETADAP(mimd.ui.fcs.adapno);
249
250 if (adapno >= adapters_count_g) {
251 *rval = -ENODEV;
252 return NULL;
253 }
254
255 adapter = NULL;
256 iterator = 0;
257 is_found = false;
258
259 list_for_each_entry(adapter, &adapters_list_g, list) {
260 if (iterator++ == adapno) {
261 is_found = true;
262 break;
263 }
264 }
265
266 if (!is_found) {
267 *rval = -ENODEV;
268 return NULL;
269 }
270
271 return adapter;
272 }
273
274 /**
275 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
276 * @arg : packet sent by the user app
277 * @old_ioctl : mimd if 1; uioc otherwise
278 * @rval : pointer for command's returned value (not function status)
279 */
280 static int
281 handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
282 {
283 mimd_t __user *umimd;
284 mimd_t kmimd;
285 uint8_t opcode;
286 uint8_t subopcode;
287
288 if (old_ioctl)
289 goto old_packet;
290 else
291 goto new_packet;
292
293 new_packet:
294 return (-ENOTSUPP);
295
296 old_packet:
297 *rval = 0;
298 umimd = arg;
299
300 if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
301 return (-EFAULT);
302
303 opcode = kmimd.ui.fcs.opcode;
304 subopcode = kmimd.ui.fcs.subopcode;
305
306 /*
307 	 * If the opcode is 0x82 and the subopcode is either MEGAIOC_QDRVRVER
308 	 * or MEGAIOC_QNADAP, then we can handle it here. Otherwise return 1 to
309 	 * indicate that we cannot handle this command.
310 */
311 if (opcode != 0x82)
312 return 1;
313
314 switch (subopcode) {
315
316 case MEGAIOC_QDRVRVER:
317
318 if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
319 return (-EFAULT);
320
321 return 0;
322
323 case MEGAIOC_QNADAP:
324
325 *rval = adapters_count_g;
326
327 if (copy_to_user(kmimd.data, &adapters_count_g,
328 sizeof(uint32_t)))
329 return (-EFAULT);
330
331 return 0;
332
333 default:
334 /* cannot handle */
335 return 1;
336 }
337
338 return 0;
339 }
340
341
342 /**
343 * mimd_to_kioc - Converter from old to new ioctl format
344 * @umimd : user space old MIMD IOCTL
345 * @adp : adapter softstate
346 * @kioc : kernel space new format IOCTL
347 *
348 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
349 * new packet is in kernel space so that driver can perform operations on it
350 * freely.
351 */
352
353 static int
354 mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
355 {
356 mbox64_t *mbox64;
357 mbox_t *mbox;
358 mraid_passthru_t *pthru32;
359 uint32_t adapno;
360 uint8_t opcode;
361 uint8_t subopcode;
362 mimd_t mimd;
363
364 if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
365 return (-EFAULT);
366
367 /*
368 * Applications are not allowed to send extd pthru
369 */
370 if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
371 (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
372 return (-EINVAL);
373
374 opcode = mimd.ui.fcs.opcode;
375 subopcode = mimd.ui.fcs.subopcode;
376 adapno = GETADAP(mimd.ui.fcs.adapno);
377
378 if (adapno >= adapters_count_g)
379 return (-ENODEV);
380
381 kioc->adapno = adapno;
382 kioc->mb_type = MBOX_LEGACY;
383 kioc->app_type = APPTYPE_MIMD;
384
385 switch (opcode) {
386
387 case 0x82:
388
389 if (subopcode == MEGAIOC_QADAPINFO) {
390
391 kioc->opcode = GET_ADAP_INFO;
392 kioc->data_dir = UIOC_RD;
393 kioc->xferlen = sizeof(mraid_hba_info_t);
394
395 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
396 return (-ENOMEM);
397 }
398 else {
399 con_log(CL_ANN, (KERN_WARNING
400 "megaraid cmm: Invalid subop\n"));
401 return (-EINVAL);
402 }
403
404 break;
405
406 case 0x81:
407
408 kioc->opcode = MBOX_CMD;
409 kioc->xferlen = mimd.ui.fcs.length;
410 kioc->user_data_len = kioc->xferlen;
411 kioc->user_data = mimd.ui.fcs.buffer;
412
413 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
414 return (-ENOMEM);
415
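		/*
		 * outlen/inlen describe the application's view of the transfer:
		 * outlen bytes are read back from the adapter (UIOC_RD) and
		 * inlen bytes are written to it (UIOC_WR); both may be set.
		 */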
416 if (mimd.outlen) kioc->data_dir = UIOC_RD;
417 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
418
419 break;
420
421 case 0x80:
422
423 kioc->opcode = MBOX_CMD;
424 kioc->xferlen = (mimd.outlen > mimd.inlen) ?
425 mimd.outlen : mimd.inlen;
426 kioc->user_data_len = kioc->xferlen;
427 kioc->user_data = mimd.data;
428
429 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
430 return (-ENOMEM);
431
432 if (mimd.outlen) kioc->data_dir = UIOC_RD;
433 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
434
435 break;
436
437 default:
438 return (-EINVAL);
439 }
440
441 /*
442 * If driver command, nothing else to do
443 */
444 if (opcode == 0x82)
445 return 0;
446
447 /*
448 * This is a mailbox cmd; copy the mailbox from mimd
449 */
450 mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
451 mbox = &mbox64->mbox32;
452 memcpy(mbox, mimd.mbox, 14);
453
454 if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD
455
456 mbox->xferaddr = (uint32_t)kioc->buf_paddr;
457
458 if (kioc->data_dir & UIOC_WR) {
459 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
460 kioc->xferlen)) {
461 return (-EFAULT);
462 }
463 }
464
465 return 0;
466 }
467
468 /*
469 * This is a regular 32-bit pthru cmd; mbox points to pthru struct.
470 	 * Just like in the above case, the beginning of the memblk is treated
471 	 * as a mailbox. The passthru begins at the next 1K boundary, and the
472 	 * data starts 1K after that.
473 */
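	/*
	 * Rough picture of what is wired up below (right-hand side names are
	 * the dma handles of the corresponding objects):
	 *
	 *   mbox->xferaddr ----------> kioc->pthru32    (kioc->pthru32_h)
	 *   pthru32->dataxferaddr ---> kioc->buf_vaddr  (kioc->buf_paddr)
	 */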
474 pthru32 = kioc->pthru32;
475 kioc->user_pthru = &umimd->pthru;
476 mbox->xferaddr = (uint32_t)kioc->pthru32_h;
477
478 if (copy_from_user(pthru32, kioc->user_pthru,
479 sizeof(mraid_passthru_t))) {
480 return (-EFAULT);
481 }
482
483 pthru32->dataxferaddr = kioc->buf_paddr;
484 if (kioc->data_dir & UIOC_WR) {
485 if (pthru32->dataxferlen > kioc->xferlen)
486 return -EINVAL;
487 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
488 pthru32->dataxferlen)) {
489 return (-EFAULT);
490 }
491 }
492
493 return 0;
494 }
495
496 /**
497  * mraid_mm_attach_buf - Attach a free dma buffer of the required size
498 * @adp : Adapter softstate
499 * @kioc : kioc that the buffer needs to be attached to
500 * @xferlen : required length for buffer
501 *
502  * First we search for the pool with the smallest buffer that is >= @xferlen.
503  * If that pool has no free buffer, we try the next bigger size. If none is
504  * available, we allocate the smallest buffer that is >= @xferlen from the
505  * matching dma pool and attach it to the kioc.
506 */
507 static int
508 mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
509 {
510 mm_dmapool_t *pool;
511 int right_pool = -1;
512 unsigned long flags;
513 int i;
514
515 kioc->pool_index = -1;
516 kioc->buf_vaddr = NULL;
517 kioc->buf_paddr = 0;
518 kioc->free_buf = 0;
519
520 /*
521 	 * We need xferlen amount of memory. See if we can get it from our
522 	 * dma pools. If we don't get the exact size, we try a bigger buffer
523 */
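	/*
	 * For example, assuming the smallest pool is 4k and pool sizes double
	 * (see mraid_mm_setup_dma_pools), a 6k request skips the 4k pool,
	 * remembers the 8k pool as right_pool, and takes that pool's buffer if
	 * it is free; otherwise it tries the larger pools before falling back
	 * to dma_pool_alloc() from the 8k pool further below.
	 */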
524
525 for (i = 0; i < MAX_DMA_POOLS; i++) {
526
527 pool = &adp->dma_pool_list[i];
528
529 if (xferlen > pool->buf_size)
530 continue;
531
532 if (right_pool == -1)
533 right_pool = i;
534
535 spin_lock_irqsave(&pool->lock, flags);
536
537 if (!pool->in_use) {
538
539 pool->in_use = 1;
540 kioc->pool_index = i;
541 kioc->buf_vaddr = pool->vaddr;
542 kioc->buf_paddr = pool->paddr;
543
544 spin_unlock_irqrestore(&pool->lock, flags);
545 return 0;
546 }
547 else {
548 spin_unlock_irqrestore(&pool->lock, flags);
549 continue;
550 }
551 }
552
553 /*
554 * If xferlen doesn't match any of our pools, return error
555 */
556 if (right_pool == -1)
557 return -EINVAL;
558
559 /*
560 	 * We did not get any buffer from the preallocated pools. Try to allocate
561 	 * a new one. NOTE: this is a non-blocking (GFP_ATOMIC) allocation.
562 */
563 pool = &adp->dma_pool_list[right_pool];
564
565 spin_lock_irqsave(&pool->lock, flags);
566
567 kioc->pool_index = right_pool;
568 kioc->free_buf = 1;
569 kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC,
570 &kioc->buf_paddr);
571 spin_unlock_irqrestore(&pool->lock, flags);
572
573 if (!kioc->buf_vaddr)
574 return -ENOMEM;
575
576 return 0;
577 }
578
579 /**
580 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
581 * @adp : Adapter softstate for this module
582 *
583 * The kioc_semaphore is initialized with number of kioc nodes in the
584 * free kioc pool. If the kioc pool is empty, this function blocks till
585 * a kioc becomes free.
586 */
587 static uioc_t *
588 mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
589 {
590 uioc_t *kioc;
591 struct list_head* head;
592 unsigned long flags;
593
594 down(&adp->kioc_semaphore);
595
596 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
597
598 head = &adp->kioc_pool;
599
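	/*
	 * The semaphore count mirrors the length of the free list, so in
	 * principle this list_empty() check should never trigger; if it does,
	 * give the token back and return NULL to the caller.
	 */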
600 if (list_empty(head)) {
601 up(&adp->kioc_semaphore);
602 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
603
604 con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
605 return NULL;
606 }
607
608 kioc = list_entry(head->next, uioc_t, list);
609 list_del_init(&kioc->list);
610
611 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
612
613 memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
614 memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
615
616 kioc->buf_vaddr = NULL;
617 kioc->buf_paddr = 0;
618 kioc->pool_index =-1;
619 kioc->free_buf = 0;
620 kioc->user_data = NULL;
621 kioc->user_data_len = 0;
622 kioc->user_pthru = NULL;
623 kioc->timedout = 0;
624
625 return kioc;
626 }
627
628 /**
629 * mraid_mm_dealloc_kioc - Return kioc to free pool
630 * @adp : Adapter softstate
631 * @kioc : uioc_t node to be returned to free pool
632 */
633 static void
634 mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
635 {
636 mm_dmapool_t *pool;
637 unsigned long flags;
638
639 if (kioc->pool_index != -1) {
640 pool = &adp->dma_pool_list[kioc->pool_index];
641
642 /* This routine may be called in non-isr context also */
643 spin_lock_irqsave(&pool->lock, flags);
644
645 /*
646 * While attaching the dma buffer, if we didn't get the
647 * required buffer from the pool, we would have allocated
648 		 * it at run time and set the free_buf flag. We must
649 * free that buffer. Otherwise, just mark that the buffer is
650 * not in use
651 */
652 if (kioc->free_buf == 1)
653 dma_pool_free(pool->handle, kioc->buf_vaddr,
654 kioc->buf_paddr);
655 else
656 pool->in_use = 0;
657
658 spin_unlock_irqrestore(&pool->lock, flags);
659 }
660
661 /* Return the kioc to the free pool */
662 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
663 list_add(&kioc->list, &adp->kioc_pool);
664 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
665
666 /* increment the free kioc count */
667 up(&adp->kioc_semaphore);
668
669 return;
670 }
671
672 /**
673 * lld_ioctl - Routine to issue ioctl to low level drvr
674 * @adp : The adapter handle
675 * @kioc : The ioctl packet with kernel addresses
676 */
677 static int
678 lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
679 {
680 int rval;
681 struct uioc_timeout timeout = { };
682
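	/*
	 * Issue the uioc to the low level driver, arm an on-stack timer if the
	 * adapter registered a timeout, then sleep on wait_q until ioctl_done()
	 * or lld_timedout() changes kioc->status away from -ENODATA. The timer,
	 * if armed, is cancelled and destroyed before returning.
	 */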
683 kioc->status = -ENODATA;
684 rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
685
686 if (rval) return rval;
687
688 /*
689 * Start the timer
690 */
691 if (adp->timeout > 0) {
692 timeout.uioc = kioc;
693 timer_setup_on_stack(&timeout.timer, lld_timedout, 0);
694
695 timeout.timer.expires = jiffies + adp->timeout * HZ;
696
697 add_timer(&timeout.timer);
698 }
699
700 /*
701 * Wait till the low level driver completes the ioctl. After this
702 	 * call, the ioctl has either completed successfully or timed out.
703 */
704 wait_event(wait_q, (kioc->status != -ENODATA));
705 if (timeout.timer.function) {
706 del_timer_sync(&timeout.timer);
707 destroy_timer_on_stack(&timeout.timer);
708 }
709
710 /*
711 	 * If the command timed out, we mark the controller offline
712 * before returning
713 */
714 if (kioc->timedout) {
715 adp->quiescent = 0;
716 }
717
718 return kioc->status;
719 }
720
721
722 /**
723 * ioctl_done - callback from the low level driver
724 * @kioc : completed ioctl packet
725 */
726 static void
727 ioctl_done(uioc_t *kioc)
728 {
729 uint32_t adapno;
730 int iterator;
731 mraid_mmadp_t* adapter;
732 bool is_found;
733
734 /*
735 	 * When the kioc returns from the driver, make sure its status is no
736 	 * longer -ENODATA. Otherwise, the ioctl path would hang on wait_event
737 	 * forever
738 */
739 if (kioc->status == -ENODATA) {
740 con_log(CL_ANN, (KERN_WARNING
741 "megaraid cmm: lld didn't change status!\n"));
742
743 kioc->status = -EINVAL;
744 }
745
746 /*
747 	 * Check if this kioc timed out earlier. If so, nobody is waiting
748 * on this kioc. We don't have to wake up anybody. Instead, we just
749 * have to free the kioc
750 */
751 if (kioc->timedout) {
752 iterator = 0;
753 adapter = NULL;
754 adapno = kioc->adapno;
755 is_found = false;
756
757 con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
758 "ioctl that was timedout before\n"));
759
760 list_for_each_entry(adapter, &adapters_list_g, list) {
761 if (iterator++ == adapno) {
762 is_found = true;
763 break;
764 }
765 }
766
767 kioc->timedout = 0;
768
769 if (is_found)
770 mraid_mm_dealloc_kioc( adapter, kioc );
771
772 }
773 else {
774 wake_up(&wait_q);
775 }
776 }
777
778
779 /**
780 * lld_timedout - callback from the expired timer
781 * @t : timer that timed out
782 */
783 static void
784 lld_timedout(struct timer_list *t)
785 {
786 struct uioc_timeout *timeout = from_timer(timeout, t, timer);
787 uioc_t *kioc = timeout->uioc;
788
789 kioc->status = -ETIME;
790 kioc->timedout = 1;
791
792 con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
793
794 wake_up(&wait_q);
795 }
796
797
798 /**
799 * kioc_to_mimd - Converter from new back to old format
800 * @kioc : Kernel space IOCTL packet (successfully issued)
801 * @mimd : User space MIMD packet
802 */
803 static int
804 kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
805 {
806 mimd_t kmimd;
807 uint8_t opcode;
808 uint8_t subopcode;
809
810 mbox64_t *mbox64;
811 mraid_passthru_t __user *upthru32;
812 mraid_passthru_t *kpthru32;
813 mcontroller_t cinfo;
814 mraid_hba_info_t *hinfo;
815
816
817 if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
818 return (-EFAULT);
819
820 opcode = kmimd.ui.fcs.opcode;
821 subopcode = kmimd.ui.fcs.subopcode;
822
823 if (opcode == 0x82) {
824 switch (subopcode) {
825
826 case MEGAIOC_QADAPINFO:
827
828 hinfo = (mraid_hba_info_t *)(unsigned long)
829 kioc->buf_vaddr;
830
831 hinfo_to_cinfo(hinfo, &cinfo);
832
833 if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
834 return (-EFAULT);
835
836 return 0;
837
838 default:
839 return (-EINVAL);
840 }
841
842 return 0;
843 }
844
845 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
846
847 if (kioc->user_pthru) {
848
849 upthru32 = kioc->user_pthru;
850 kpthru32 = kioc->pthru32;
851
852 if (copy_to_user(&upthru32->scsistatus,
853 &kpthru32->scsistatus,
854 sizeof(uint8_t))) {
855 return (-EFAULT);
856 }
857 }
858
859 if (kioc->user_data) {
860 if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
861 kioc->user_data_len)) {
862 return (-EFAULT);
863 }
864 }
865
866 if (copy_to_user(&mimd->mbox[17],
867 &mbox64->mbox32.status, sizeof(uint8_t))) {
868 return (-EFAULT);
869 }
870
871 return 0;
872 }
873
874
875 /**
876 * hinfo_to_cinfo - Convert new format hba info into old format
877 * @hinfo : New format, more comprehensive adapter info
878 * @cinfo : Old format adapter info to support mimd_t apps
879 */
880 static void
881 hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
882 {
883 if (!hinfo || !cinfo)
884 return;
885
886 cinfo->base = hinfo->baseport;
887 cinfo->irq = hinfo->irq;
888 cinfo->numldrv = hinfo->num_ldrv;
889 cinfo->pcibus = hinfo->pci_bus;
890 cinfo->pcidev = hinfo->pci_slot;
891 cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
892 cinfo->pciid = hinfo->pci_device_id;
893 cinfo->pcivendor = hinfo->pci_vendor_id;
894 cinfo->pcislot = hinfo->pci_slot;
895 cinfo->uid = hinfo->unique_id;
896 }
897
898
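/*
 * A minimal registration sketch (hypothetical LLD code; the names and values
 * below are examples only):
 *
 *	mraid_mmadp_t adp;
 *
 *	memset(&adp, 0, sizeof(adp));
 *	adp.unique_id  = my_unique_id;
 *	adp.drvr_type  = DRVRTYPE_MBOX;	// only type accepted by register_adp
 *	adp.drvr_data  = my_drvr_data;	// opaque, passed back to issue_uioc
 *	adp.pdev       = pdev;
 *	adp.issue_uioc = my_issue_uioc;	// invoked as (drvr_data, kioc, IOCTL_ISSUE)
 *	adp.timeout    = 300;		// seconds; 0 disables the kioc timer
 *	adp.max_kioc   = 32;		// number of concurrent kiocs
 *
 *	if (mraid_mm_register_adp(&adp) != 0)
 *		// registration failed
 */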
899 /**
900 * mraid_mm_register_adp - Registration routine for low level drivers
901 * @lld_adp : Adapter object
902 */
903 int
904 mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
905 {
906 mraid_mmadp_t *adapter;
907 mbox64_t *mbox_list;
908 uioc_t *kioc;
909 uint32_t rval;
910 int i;
911
912
913 if (lld_adp->drvr_type != DRVRTYPE_MBOX)
914 return (-EINVAL);
915
916 adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
917
918 if (!adapter)
919 return -ENOMEM;
920
921
922 adapter->unique_id = lld_adp->unique_id;
923 adapter->drvr_type = lld_adp->drvr_type;
924 adapter->drvr_data = lld_adp->drvr_data;
925 adapter->pdev = lld_adp->pdev;
926 adapter->issue_uioc = lld_adp->issue_uioc;
927 adapter->timeout = lld_adp->timeout;
928 adapter->max_kioc = lld_adp->max_kioc;
929 adapter->quiescent = 1;
930
931 /*
932 * Allocate single blocks of memory for all required kiocs,
933 * mailboxes and passthru structures.
934 */
935 adapter->kioc_list = kmalloc_array(lld_adp->max_kioc,
936 sizeof(uioc_t),
937 GFP_KERNEL);
938 adapter->mbox_list = kmalloc_array(lld_adp->max_kioc,
939 sizeof(mbox64_t),
940 GFP_KERNEL);
941 adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
942 &adapter->pdev->dev,
943 sizeof(mraid_passthru_t),
944 16, 0);
945
946 if (!adapter->kioc_list || !adapter->mbox_list ||
947 !adapter->pthru_dma_pool) {
948
949 con_log(CL_ANN, (KERN_WARNING
950 "megaraid cmm: out of memory, %s %d\n", __func__,
951 __LINE__));
952
953 rval = (-ENOMEM);
954
955 goto memalloc_error;
956 }
957
958 /*
959 	 * Slice kioc_list and make a kioc_pool with the individual kiocs
960 */
961 INIT_LIST_HEAD(&adapter->kioc_pool);
962 spin_lock_init(&adapter->kioc_pool_lock);
963 sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
964
965 mbox_list = (mbox64_t *)adapter->mbox_list;
966
967 for (i = 0; i < lld_adp->max_kioc; i++) {
968
969 kioc = adapter->kioc_list + i;
970 kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
971 kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool,
972 GFP_KERNEL, &kioc->pthru32_h);
973
974 if (!kioc->pthru32) {
975
976 con_log(CL_ANN, (KERN_WARNING
977 "megaraid cmm: out of memory, %s %d\n",
978 __func__, __LINE__));
979
980 rval = (-ENOMEM);
981
982 goto pthru_dma_pool_error;
983 }
984
985 list_add_tail(&kioc->list, &adapter->kioc_pool);
986 }
987
988 // Setup the dma pools for data buffers
989 if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
990 goto dma_pool_error;
991 }
992
993 list_add_tail(&adapter->list, &adapters_list_g);
994
995 adapters_count_g++;
996
997 return 0;
998
999 dma_pool_error:
1000 /* Do nothing */
1001
1002 pthru_dma_pool_error:
1003
1004 for (i = 0; i < lld_adp->max_kioc; i++) {
1005 kioc = adapter->kioc_list + i;
1006 if (kioc->pthru32) {
1007 dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
1008 kioc->pthru32_h);
1009 }
1010 }
1011
1012 memalloc_error:
1013
1014 kfree(adapter->kioc_list);
1015 kfree(adapter->mbox_list);
1016
1017 dma_pool_destroy(adapter->pthru_dma_pool);
1018
1019 kfree(adapter);
1020
1021 return rval;
1022 }
1023
1024
1025 /**
1026 * mraid_mm_adapter_app_handle - return the application handle for this adapter
1027 * @unique_id : adapter unique identifier
1028 *
1029  * For the given unique adapter id, locate the adapter in our global list and
1030 * return the corresponding handle, which is also used by applications to
1031 * uniquely identify an adapter.
1032 *
1033 * Return adapter handle if found in the list.
1034  * Return 0 if the adapter could not be located (this should never happen).
1035 */
1036 uint32_t
1037 mraid_mm_adapter_app_handle(uint32_t unique_id)
1038 {
1039 mraid_mmadp_t *adapter;
1040 mraid_mmadp_t *tmp;
1041 int index = 0;
1042
1043 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1044
1045 if (adapter->unique_id == unique_id) {
1046
1047 return MKADAP(index);
1048 }
1049
1050 index++;
1051 }
1052
1053 return 0;
1054 }
1055
1056
1057 /**
1058 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
1059 * @adp : Adapter softstate
1060 *
1061  * We maintain a set of dma buffer pools per adapter. Each pool has one
1062  * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
1063  * There is just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool,
1064  * etc. We don't want to waste too much memory by allocating more buffers
1065  * per pool.
1066 */
1067 static int
1068 mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
1069 {
1070 mm_dmapool_t *pool;
1071 int bufsize;
1072 int i;
1073
1074 /*
1075 * Create MAX_DMA_POOLS number of pools
1076 */
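	/*
	 * For instance, with MRAID_MM_INIT_BUFF_SIZE of 4KB and MAX_DMA_POOLS
	 * of 5 (example values only), the loop below creates pools of 4k, 8k,
	 * 16k, 32k and 64k, each pre-populated with a single buffer.
	 */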
1077 bufsize = MRAID_MM_INIT_BUFF_SIZE;
1078
1079 for (i = 0; i < MAX_DMA_POOLS; i++){
1080
1081 pool = &adp->dma_pool_list[i];
1082
1083 pool->buf_size = bufsize;
1084 spin_lock_init(&pool->lock);
1085
1086 pool->handle = dma_pool_create("megaraid mm data buffer",
1087 &adp->pdev->dev, bufsize,
1088 16, 0);
1089
1090 if (!pool->handle) {
1091 goto dma_pool_setup_error;
1092 }
1093
1094 pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
1095 &pool->paddr);
1096
1097 if (!pool->vaddr)
1098 goto dma_pool_setup_error;
1099
1100 bufsize = bufsize * 2;
1101 }
1102
1103 return 0;
1104
1105 dma_pool_setup_error:
1106
1107 mraid_mm_teardown_dma_pools(adp);
1108 return (-ENOMEM);
1109 }
1110
1111
1112 /**
1113 * mraid_mm_unregister_adp - Unregister routine for low level drivers
1114  * @unique_id : UID of the adapter
1115 *
1116 * Assumes no outstanding ioctls to llds.
1117 */
1118 int
1119 mraid_mm_unregister_adp(uint32_t unique_id)
1120 {
1121 mraid_mmadp_t *adapter;
1122 mraid_mmadp_t *tmp;
1123
1124 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1125
1126
1127 if (adapter->unique_id == unique_id) {
1128
1129 adapters_count_g--;
1130
1131 list_del_init(&adapter->list);
1132
1133 mraid_mm_free_adp_resources(adapter);
1134
1135 kfree(adapter);
1136
1137 con_log(CL_ANN, (
1138 "megaraid cmm: Unregistered one adapter:%#x\n",
1139 unique_id));
1140
1141 return 0;
1142 }
1143 }
1144
1145 return (-ENODEV);
1146 }
1147
1148 /**
1149 * mraid_mm_free_adp_resources - Free adapter softstate
1150 * @adp : Adapter softstate
1151 */
1152 static void
1153 mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1154 {
1155 uioc_t *kioc;
1156 int i;
1157
1158 mraid_mm_teardown_dma_pools(adp);
1159
1160 for (i = 0; i < adp->max_kioc; i++) {
1161
1162 kioc = adp->kioc_list + i;
1163
1164 dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
1165 kioc->pthru32_h);
1166 }
1167
1168 kfree(adp->kioc_list);
1169 kfree(adp->mbox_list);
1170
1171 dma_pool_destroy(adp->pthru_dma_pool);
1172
1173
1174 return;
1175 }
1176
1177
1178 /**
1179 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
1180 * @adp : Adapter softstate
1181 */
1182 static void
1183 mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1184 {
1185 int i;
1186 mm_dmapool_t *pool;
1187
1188 for (i = 0; i < MAX_DMA_POOLS; i++) {
1189
1190 pool = &adp->dma_pool_list[i];
1191
1192 if (pool->handle) {
1193
1194 if (pool->vaddr)
1195 dma_pool_free(pool->handle, pool->vaddr,
1196 pool->paddr);
1197
1198 dma_pool_destroy(pool->handle);
1199 pool->handle = NULL;
1200 }
1201 }
1202
1203 return;
1204 }
1205
1206 /**
1207 * mraid_mm_init - Module entry point
1208 */
1209 static int __init
1210 mraid_mm_init(void)
1211 {
1212 int err;
1213
1214 // Announce the driver version
1215 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1216 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1217
1218 err = misc_register(&megaraid_mm_dev);
1219 if (err < 0) {
1220 con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
1221 return err;
1222 }
1223
1224 init_waitqueue_head(&wait_q);
1225
1226 INIT_LIST_HEAD(&adapters_list_g);
1227
1228 return 0;
1229 }
1230
1231
1232 /**
1233 * mraid_mm_exit - Module exit point
1234 */
1235 static void __exit
1236 mraid_mm_exit(void)
1237 {
1238 con_log(CL_DLEVEL1 , ("exiting common mod\n"));
1239
1240 misc_deregister(&megaraid_mm_dev);
1241 }
1242
1243 module_init(mraid_mm_init);
1244 module_exit(mraid_mm_exit);
1245
1246 /* vi: set ts=8 sw=8 tw=78: */
1247