/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contains all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <asm/semaphore.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
}

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */
93
94int aac_fib_setup(struct aac_dev * dev)
95{
96	struct fib *fibptr;
97	struct hw_fib *hw_fib;
98	dma_addr_t hw_fib_pa;
99	int i;
100
101	while (((i = fib_map_alloc(dev)) == -ENOMEM)
102	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
103		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
104		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
105	}
106	if (i<0)
107		return -ENOMEM;
108
109	hw_fib = dev->hw_fib_va;
110	hw_fib_pa = dev->hw_fib_pa;
111	memset(hw_fib, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
112	/*
113	 *	Initialise the fibs
114	 */
115	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
116	{
117		fibptr->dev = dev;
118		fibptr->hw_fib_va = hw_fib;
119		fibptr->data = (void *) fibptr->hw_fib_va->data;
120		fibptr->next = fibptr+1;	/* Forward chain the fibs */
121		init_MUTEX_LOCKED(&fibptr->event_wait);
122		spin_lock_init(&fibptr->event_lock);
123		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
124		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
125		fibptr->hw_fib_pa = hw_fib_pa;
126		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib + dev->max_fib_size);
127		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
128	}
129	/*
130	 *	Add the fib chain to the free list
131	 */
132	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
133	/*
134	 *	Enable this to debug out of queue space
135	 */
136	dev->free_fib = &dev->fibs[0];
137	return 0;
138}
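
/*
 *	Worked example of the back-off loop above (illustrative numbers,
 *	assuming AAC_NUM_MGT_FIB is 8): with max_fib_size = 512 and
 *	can_queue = 512, fib_map_alloc() first asks for
 *	512 * (512 + 8) = 266240 bytes. If that fails with -ENOMEM,
 *	MaxIoCommands becomes (512 + 8) >> 1 = 260, can_queue becomes
 *	260 - 8 = 252, and the retry asks for 512 * (252 + 8) = 133120
 *	bytes. The loop gives up once can_queue would fall to
 *	64 - AAC_NUM_MGT_FIB or below.
 */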

/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;
	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
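
/*
 *	Example: the typical lifecycle of a fib, as seen from a caller of
 *	this file (a minimal sketch, not a real caller; the command type,
 *	payload handling and datasize are illustrative):
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	int status;
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	(fill in fib_data(fibptr) with the request)
 *	status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *			      FsaNormal, 1, 1, NULL, NULL);
 *	if (status >= 0)
 *		(read the reply back out of fib_data(fibptr))
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */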

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			 (void*)fibptr,
			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	for host to adapter communication. All queue accesses happen through
 *	these routines, which are the only routines with knowledge of how
 *	these queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	Returns a queue entry if the queue has free entries. If the queue
 *	is full (no free entries) then no entry is returned and the function
 *	returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
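
/*
 *	Worked example of the wrap/full checks above (illustrative indices):
 *	if the producer index has reached the queue size it is first wrapped
 *	back to slot 0. Then, if advancing it by one would land on the
 *	consumer index (for example, producer = 6 and consumer = 7), the
 *	queue is full and 0 is returned; otherwise *entry points at
 *	q->base + *index and 1 is returned.
 */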

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify))
		{
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapter's pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib_va->header.Flags = 0;	/* 0 the flags field - internal only */
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and then notify
	 *	the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	aac_adapter_deliver(fibptr);

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (--count == 0) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				udelay(5);
			}
		} else
			(void)down_interruptible(&fibptr->event_wait);
		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -EINTR;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
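
/*
 *	Example: an asynchronous send, where the reply is consumed in a
 *	callback instead of by sleeping (a minimal sketch; the callback
 *	name and its body are illustrative):
 *
 *	static void my_callback(void *context, struct fib *fibptr)
 *	{
 *		(inspect fib_data(fibptr))
 *		aac_fib_complete(fibptr);
 *		aac_fib_free(fibptr);
 *	}
 *
 *	status = aac_fib_send(ContainerCommand, fibptr, datasize,
 *			      FsaNormal, 0, 1, my_callback, context);
 *
 *	With wait == 0 and reply == 1 the routine returns -EINPROGRESS on
 *	success and the DPC layer invokes the callback once the response
 *	FIB comes back from the adapter.
 */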

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Returns a pointer to the entry at the top of the requested queue
 *	that we are a consumer of. It does not change the state of
 *	the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;
	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return status;
}

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
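
/*
 *	Example: how a consumer side drains its queue with the two
 *	primitives above (a minimal sketch; locking and the per-entry
 *	processing are elided):
 *
 *	while (aac_consumer_get(dev, q, &entry)) {
 *		(handle the fib referenced by entry->addr)
 *		aac_consumer_free(dev, q, HostNormRespQueue);
 *	}
 *
 *	aac_consumer_get() only peeks at the head of the queue; the entry
 *	is not retired until aac_consumer_free() advances the consumer
 *	index and, if the queue had been full, notifies the producer.
 */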

/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
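
/*
 *	The val argument packs the message level and length into a single
 *	32 bit word, for example (illustrative value only):
 *
 *	u32 val = 0x00010040;
 *	int length = val & 0xffff;		(0x40 = 64 byte message)
 *	int level = (val >> 16) & 0xffff;	(0x1 = message level)
 *
 *	The message text itself does not travel in val; the firmware has
 *	already deposited it in dev->printfbuf.
 */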


/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 * waiting on something else, set up to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
		case CHANGE:
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
		  CONTAINER_TO_CHANNEL(container),
		  CONTAINER_TO_ID(container),
		  CONTAINER_TO_LUN(container));
	}

}
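
/*
 *	Example of the AIF sequencing implemented above (an illustrative
 *	trace, not captured from a real adapter): a volume grow arrives as
 *	AifDenVolumeExtendComplete, which only records
 *	config_needed = CHANGE and config_waiting_on = AifEnConfigChange
 *	for that container; nothing is rescanned yet. When the matching
 *	AifEnConfigChange arrives within AIF_SNIFF_TIMEOUT,
 *	config_waiting_on is cleared, device_config_needed becomes CHANGE
 *	and the device is rescanned via scsi_rescan_device(). If the
 *	follow-up AIF never arrives, the pending request simply ages out.
 */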

static int _aac_reset_adapter(struct aac_dev *aac)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;

	/*
	 * Assumptions:
	 *	- host is locked.
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	spin_unlock_irq(host->host_lock);
	kthread_stop(aac->thread);

	/*
	 *	A positive health status means the adapter is in a known
	 * DEAD PANIC state and could be reset to `try again'.
	 */
	retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
		struct fib *fib = &aac->fibs[index];
		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		  (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	aac->hw_fib_va = NULL;
	aac->hw_fib_pa = 0;
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	free_irq(aac->pdev->irq, aac);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
			goto out;
	}
	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;
	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
			goto out;
	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
	if (IS_ERR(aac->thread)) {
		retval = PTR_ERR(aac->thread);
		goto out;
	}
	(void)aac_get_adapter_info(aac);
	quirks = aac_get_driver_ident(index)->quirks;
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);
	spin_lock_irq(host->host_lock);
	return retval;
}

int aac_check_health(struct aac_dev * aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head * entry;
	struct Scsi_Host * host;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac_aifcmd.data[2] = AifHighPriority = 3
	 *	aac_aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib * hw_fib;
		struct fib * fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd * aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			aif->data[0] = cpu_to_le32(AifEnExpEvent);
			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
			aif->data[2] = cpu_to_le32(AifHighPriority);
			aif->data[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

	host = aac->scsi_host_ptr;
	spin_lock_irqsave(host->host_lock, flagv);
	BlinkLED = _aac_reset_adapter(aac);
	spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}


/**
 *	aac_command_thread	-	command processing thread
 *	@dev: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk((KERN_INFO "aac_command_thread start\n"));
	while (1)
	{
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib_va;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib_va = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > aif_timeout) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib_va = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					kfree(*hw_fib_p);
					kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				kfree(hw_fib_pool);
				kfree(fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}