/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is:
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa))==NULL)
		return -ENOMEM;
	return 0;
}

/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev,
	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i<0)
		return -ENOMEM;

	hw_fib = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}

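/*
 *	Illustrative sketch (not driver code): after aac_fib_setup() the
 *	single DMA region obtained by fib_map_alloc() is carved into
 *	(can_queue + AAC_NUM_MGT_FIB) hardware fib slots of max_fib_size
 *	bytes each, with the driver-side fib structs forward chained into
 *	the free list:
 *
 *	hw_fib_va: [slot 0][slot 1][slot 2] ... [slot N-1]  (max_fib_size each)
 *	dev->fibs: fibs[0].next -> fibs[1].next -> ... -> fibs[N-1].next = NULL
 *	dev->free_fib = &dev->fibs[0];
 *
 *	aac_fib_alloc()/aac_fib_free() below simply pop and push this
 *	singly linked list under fib_lock.
 */
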
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if(!fibptr){
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->flags = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			 (void*)fibptr,
			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

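/*
 *	Illustrative sketch (not driver code): the typical synchronous FIB
 *	transaction built from the helpers above, modelled on the
 *	ContainerCommand sequence used later in this file; the aac_pause
 *	payload is just one example of a command structure.
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	if (fibptr) {
 *		struct aac_pause *cmd;
 *
 *		aac_fib_init(fibptr);
 *		cmd = (struct aac_pause *) fib_data(fibptr);
 *		cmd->command = cpu_to_le32(VM_ContainerConfig);
 *		cmd->type = cpu_to_le32(CT_PAUSE_IO);
 *		(wait == 1, reply == 1: block until the response arrives)
 *		if (aac_fib_send(ContainerCommand, fibptr,
 *		  sizeof(struct aac_pause), FsaNormal,
 *		  1, 1, NULL, NULL) >= 0)
 *			aac_fib_complete(fibptr);
 *		aac_fib_free(fibptr);
 *	}
 */
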
/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and they are the only routines which have a
 *	knowledge of how these queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has
 *	free entries. If the queue is full (no free entries) then no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}

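/*
 *	Illustrative note (not driver code): the ring is declared full when
 *	advancing the producer would land on the consumer. For example, on
 *	the adapter command queue with consumer == 8, a producer index of 7
 *	makes (*index + 1) == consumer, so aac_get_entry() warns and returns
 *	0; for any other consumer position the entry at *index is handed out
 *	and 1 is returned.
 */
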
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/*  if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
			/* Restore adapter's pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

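/*
 *	Illustrative note (not driver code): the two branches above differ
 *	only in how the queue entry is addressed. For the host-owned command
 *	queue (AdapNormCmdQueue) the entry's addr is filled with the DMA
 *	address of our fib (map == 1); for the response queue the adapter's
 *	own SenderFibAddress is echoed back so the firmware can locate its
 *	FIB again.
 */
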
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	fibptr->flags = 0;
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and then notify the
	 *	adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));

	if (!dev->queues)
		return -EBUSY;

	if(wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	aac_adapter_deliver(fibptr);

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (--count == 0) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update motherboard BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				udelay(5);
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			fibptr->done = 2;
			up(&fibptr->event_wait);
		}
		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -EINTR;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}

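/*
 *	Illustrative sketch (not driver code): an asynchronous send. With
 *	wait == 0 and reply == 1, aac_fib_send() records the callback and
 *	returns -EINPROGRESS once the FIB is delivered; the DPC later calls
 *	the callback with the response. The callback and context names here
 *	are hypothetical.
 *
 *	static void my_fib_done(void *context, struct fib *fibptr)
 *	{
 *		aac_fib_complete(fibptr);
 *		aac_fib_free(fibptr);
 *	}
 *
 *	status = aac_fib_send(ContainerCommand, fibptr, size, FsaNormal,
 *	  0, 1, my_fib_done, my_context);
 */
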
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Returns, via @entry, the address of the entry on the top of the
 *	requested queue that we are a consumer of. It does not change the
 *	state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}

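/*
 *	Illustrative sketch (not driver code): a consumer-side drain loop
 *	pairing the two helpers above, roughly what the interrupt DPC does
 *	for the host-owned queues:
 *
 *	while (aac_consumer_get(dev, q, &entry)) {
 *		... process *entry ...
 *		aac_consumer_free(dev, q, qid);
 *	}
 */
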
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree (hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree (hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree (hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
			"Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}

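/*
 *	Illustrative note (not driver code): the low 16 bits of val carry
 *	the message length and the high 16 bits the log level, i.e. the
 *	firmware hands the host something like
 *
 *	val = (LOG_AAC_HIGH_ERROR << 16) | length;
 *
 *	for a message that should be printed at KERN_WARNING severity.
 */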

/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 channel, id, lun, container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed = NOTHING;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = channel = id = lun = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifEnBatteryEvent:
			dev->cache_protected =
				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
			break;
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 * waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed =
			  (((__le32 *)aifcmd->data)[0] ==
			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
			break;

		case AifEnEnclosureManagement:
			/*
			 * In JBOD mode, automatic exposure of a new
			 * physical target is suppressed until it is
			 * configured.
			 */
			if (dev->jbod)
				break;
			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
			case EM_DRIVE_INSERTION:
			case EM_DRIVE_REMOVAL:
				container = le32_to_cpu(
					((__le32 *)aifcmd->data)[2]);
				if ((container >> 28)) {
					container = (u32)-1;
					break;
				}
				channel = (container >> 24) & 0xF;
				if (channel >= dev->maximum_num_channels) {
					container = (u32)-1;
					break;
				}
				id = container & 0xFFFF;
				lun = (container >> 16) & 0xFF;
				container = (u32)-1;
				if (id >= dev->maximum_num_physicals) {
					/* legacy dev_t ? */
					if ((0x2000 <= id) || lun || channel ||
					  ((channel = (id >> 7) & 0x3F) >=
					  dev->maximum_num_channels))
						break;
					lun = (id >> 4) & 7;
					id &= 0xF;
				}
				channel = aac_phys_to_logical(channel);
				device_config_needed =
				  (((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_DRIVE_INSERTION)) ?
				  ADD : DELETE;
				break;
			}
			break;
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    ((__le32 *)aifcmd->data)[6] == 0 &&
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	container = 0;
retry_next:
	if (device_config_needed == NOTHING)
	for (; container < dev->maximum_num_containers; ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			channel = CONTAINER_TO_CHANNEL(container);
			id = CONTAINER_TO_ID(container);
			lun = CONTAINER_TO_LUN(container);
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	  (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array deleted" :
						"enclosure services event");
			}
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					"Device online - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			/* FALLTHRU */
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			 && (!dev->fsa_dev[container].valid)) {
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					"array failed");
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}

static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	if (aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		jafo = 1;
	}

	/*
	 *	A positive health status means the adapter is in a known
	 * DEAD PANIC state and could be reset to `try again'.
	 */
	retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
		struct fib *fib = &aac->fibs[index];
		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	free_irq(aac->pdev->irq, aac);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
			goto out;
	}
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;
	if (quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
			goto out;
	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
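	/*
	 * Illustrative note (not driver code): the quirk formulas above cap
	 * the transfer size to what the reduced scatter-gather table can
	 * map, e.g. a 34-entry table yields max_sectors = 34 * 8 + 112 =
	 * 384 sectors (192 KiB per request).
	 */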
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);
	if (jafo) {
		spin_lock_irq(host->host_lock);
	}
	return retval;
}

int aac_reset_adapter(struct aac_dev * aac, int forced)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host * host;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	if (forced < 2) for (retval = 60; retval; --retval) {
		struct scsi_device * dev;
		struct scsi_cmnd * command;
		int active = 0;

		__shost_for_each_device(dev, host) {
			spin_lock_irqsave(&dev->list_lock, flagv);
			list_for_each_entry(command, &dev->cmd_list, list) {
				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
					active++;
					break;
				}
			}
			spin_unlock_irqrestore(&dev->list_lock, flagv);
			if (active)
				break;

		}
		/*
		 * We can exit if all the commands are complete
		 */
		if (active == 0)
			break;
		ssleep(1);
	}

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
	spin_unlock_irqrestore(host->host_lock, flagv);

	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib * fibctx = aac_fib_alloc(aac);
		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *) fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
			  fibctx,
			  sizeof(struct aac_pause),
			  FsaNormal,
			  -2 /* Timeout silently */, 1,
			  NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			aac_fib_free(fibctx);
		}
	}

	return retval;
}

int aac_check_health(struct aac_dev * aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head * entry;
	struct Scsi_Host * host;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac.aifcmd.data[2] = AifHighPriority = 3
	 *	aac.aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib * hw_fib;
		struct fib * fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd * aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof (struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

	if (!aac_check_reset || ((aac_check_reset == 1) &&
		(aac->supplement_adapter_info.SupportedOptions2 &
			AAC_OPTION_IGNORE_RESET)))
		goto out;
	host = aac->scsi_host_ptr;
	if (aac->thread->pid != current->pid)
		spin_lock_irqsave(host->host_lock, flagv);
	BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
	if (aac->thread->pid != current->pid)
		spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}


/**
 *	aac_command_thread	-	command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib_va;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib_va = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib_va = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);

		/*
		 *	Background activity
		 */
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timeval now;
			int ret;

			/* Don't even try to talk to adapter if it's sick */
			ret = aac_check_health(dev);
			if (!ret && !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			do_gettimeofday(&now);

			/* Synchronize our watches */
			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
			 && (now.tv_usec > (1000000 / HZ)))
				difference = (((1000000 - now.tv_usec) * HZ)
				  + 500000) / 1000000;
			else if (ret == 0) {
				struct fib *fibptr;

				if ((fibptr = aac_fib_alloc(dev))) {
					__le32 *info;

					aac_fib_init(fibptr);

					info = (__le32 *) fib_data(fibptr);
					if (now.tv_usec > 500000)
						++now.tv_sec;

					*info = cpu_to_le32(now.tv_sec);

					(void)aac_fib_send(SendHostTime,
						fibptr,
						sizeof(*info),
						FsaNormal,
						1, 1,
						NULL,
						NULL);
					aac_fib_complete(fibptr);
					aac_fib_free(fibptr);
				}
				difference = (long)(unsigned)update_interval*HZ;
			} else {
				/* retry shortly */
				difference = 10 * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}