1/***************************************************************************
2                          dpti.c  -  description
3                             -------------------
4    begin                : Thu Sep 7 2000
5    copyright            : (C) 2000 by Adaptec
6    email                : deanna_bonds@adaptec.com
7
8    			   July 30, 2001 First version being submitted
9			   for inclusion in the kernel.  V2.4
10
11    See README.dpti for history, notes, license info, and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 *                                                                         *
16 *   This program is free software; you can redistribute it and/or modify  *
17 *   it under the terms of the GNU General Public License as published by  *
18 *   the Free Software Foundation; either version 2 of the License, or     *
19 *   (at your option) any later version.                                   *
20 *                                                                         *
21 ***************************************************************************/
22
23//#define DEBUG 1
24//#define UARTDELAY 1
25
26// On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
27// high pages. Keep the macro around because of the broken unmerged ia64 tree
28
29#define ADDR32 (0)
30
31#include <linux/version.h>
32#include <linux/module.h>
33
34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36
37////////////////////////////////////////////////////////////////
38
39#include <linux/ioctl.h>	/* For SCSI-Passthrough */
40#include <asm/uaccess.h>
41
42#include <linux/stat.h>
43#include <linux/slab.h>		/* for kmalloc() */
44#include <linux/config.h>	/* for CONFIG_PCI */
45#include <linux/pci.h>		/* for PCI support */
46#include <linux/proc_fs.h>
47#include <linux/blk.h>
48#include <linux/delay.h>	/* for udelay */
49#include <linux/tqueue.h>
50#include <linux/interrupt.h>
51#include <linux/kernel.h>	/* for printk */
52#include <linux/sched.h>
53#include <linux/reboot.h>
54#include <linux/smp_lock.h>
55
56#include <linux/timer.h>
57#include <linux/string.h>
58#include <linux/ioport.h>
59#include <linux/stat.h>
60
61#include <asm/processor.h>	/* for boot_cpu_data */
62#include <asm/pgtable.h>
63#include <asm/io.h>		/* for virt_to_bus, etc. */
64
65#include "scsi.h"
66#include "hosts.h"
67#include "sd.h"
68
69#include "dpt/dptsig.h"
70#include "dpti.h"
71
72/*============================================================================
73 * Create a binary signature - this is read by dptsig
74 * Needed for our management apps
75 *============================================================================
76 */
static dpt_sig_S DPTI_sig = {
	/* "dPtSiG" marker lets DPT/Adaptec management tools locate this
	 * signature blob by scanning the driver binary. */
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	/* NOTE(review): sparc/alpha branches supply a single initializer
	 * where i386/ia64 supply two - confirm against dptsig.h layout. */
	PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA ,
#else
	(-1),(-1)	/* unknown processor family/type */
#endif
	/* driver type, OEM, OS, capabilities, device class, then version. */
	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
94
95
96
97
98/*============================================================================
99 * Globals
100 *============================================================================
101 */
102
/* Serializes changes to the global HBA bookkeeping below
 * (hbas[], hba_chain, hba_count). */
DECLARE_MUTEX(adpt_configuration_lock);

/* Shared I2O system table describing all IOPs; rebuilt as HBAs are
 * added or deleted (see adpt_i2o_build_sys_table). */
static struct i2o_sys_tbl *sys_tbl = NULL;
static int sys_tbl_ind = 0;
static int sys_tbl_len = 0;

/* Controller bookkeeping: fixed-slot table, singly-linked chain, count. */
static adpt_hba* hbas[DPTI_MAX_HBA];
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

/* Character-device entry points for the management/ioctl node
 * (GNU labelled-field initializer syntax, usual for 2.4 drivers). */
static struct file_operations adpt_fops = {
	ioctl: adpt_ioctl,
	open: adpt_open,
	release: adpt_close
};

#ifdef REBOOT_NOTIFIER
/* Quiesce all controllers on restart/halt/power-off (see
 * adpt_reboot_event). */
static struct notifier_block adpt_reboot_notifier =
{
	 adpt_reboot_event,
	 NULL,
	 0
};
#endif

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;	/* completion status; -ETIMEDOUT until the reply lands */
	u32 id;		/* matches the transaction context stored in msg[2] */
	adpt_wait_queue_head_t *wq;	/* waiter to wake when the reply arrives */
	struct adpt_i2o_post_wait_data *next;	/* singly-linked pending list */
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;	/* last id handed out (wraps at 0x7fff) */
static spinlock_t adpt_post_wait_lock = SPIN_LOCK_UNLOCKED;
142
143
144/*============================================================================
145 * 				Functions
146 *============================================================================
147 */
148
149static u8 adpt_read_blink_led(adpt_hba* host)
150{
151	if(host->FwDebugBLEDflag_P != 0) {
152		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
153			return readb(host->FwDebugBLEDvalue_P);
154		}
155	}
156	return 0;
157}
158
159/*============================================================================
160 * Scsi host template interface functions
161 *============================================================================
162 */
163
/* PCI IDs this driver claims: the original DPT I2O part and the
 * Raptor variant; any subsystem IDs are accepted. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }	/* terminator */
};
MODULE_DEVICE_TABLE(pci,dptids);
170
/* Scsi_Host_Template detect() entry point.  Finds all Adaptec/DPT I2O
 * boards on the PCI bus, walks every IOP through the I2O bring-up
 * states (INIT -> HOLD -> OPERATIONAL), reads each IOP's hardware
 * resource and logical configuration tables, registers the resulting
 * hosts with the SCSI mid-layer, and finally registers the management
 * character device.  Returns the number of controllers brought up. */
static int adpt_detect(Scsi_Host_Template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba* pHba;

	adpt_init();
	sht->use_new_eh_code = 1;	/* opt in to the new error-handler callbacks */

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

        /* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				/* Count excludes the HBA that failed install. */
				return hba_count-1;
			}
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If IOP don't get online, we need to rebuild the System table
	 * from scratch, so restart from rebuild_sys_tab after dropping
	 * the failed HBA. */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk(KERN_INFO"dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		/* Fetch and parse the logical configuration table; an HBA
		 * that fails either step is torn down but the loop goes on. */
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	/* Hand each surviving HBA to the SCSI mid-layer. */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if( adpt_scsi_register(pHba,sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
263
264
265/*
266 * scsi_unregister will be called AFTER we return.
267 */
268static int adpt_release(struct Scsi_Host *host)
269{
270	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
271//	adpt_i2o_quiesce_hba(pHba);
272	adpt_i2o_delete_hba(pHba);
273	return 0;
274}
275
276
/* Send a SCSI INQUIRY to the adapter itself, using the DPT private
 * I2O_CMD_SCSI_EXEC with the "interpret" flag, and build the human
 * readable pHba->detail string from the vendor/model/firmware bytes
 * of the response.  Best-effort: on any failure a generic string is
 * used instead. */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[14];	/* 14 words: 12-word request + single SGE */
	u32 *mptr;	/* running write pointer into msg[] */
	u32 *lenptr;	/* back-patched with the data length */
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;	/* DMA-able bounce buffer for the INQUIRY data */
	u8  scb[16];	/* CDB block - always 16 bytes in the message */
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	/* 80 bytes allocated though only 36 are requested; bytes 16..47
	 * of the response are consumed below. */
	buf = (u8*)kmalloc(80,GFP_KERNEL|ADDR32);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;		/* standard INQUIRY allocation length */
	direction = 0x00000000;
	scsidir  =0x40000000;	// DATA IN  (iop<--dev)

	reqlen = 14;		// SINGLE SGE
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;		/* transaction context filled by post_wait */
	msg[3]  = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length, matches len above */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;	/* 16-byte CDB occupies msg[7..10] */
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	/* Single simple SGE: last-element | end-of-buffer flags, then
	 * the bus address of the bounce buffer. */
	*mptr++ = 0xD0000000|direction|len;
	*mptr++ = virt_to_bus(buf);

	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		/* Fall back to a generic description string. */
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
	} else {
		/* Assemble "Vendor: Adaptec  Model: <16> FW:<4>" at fixed
		 * offsets (0,16,24,40,44), terminated at byte 48. */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		/* NOTE(review): copies only 4 of the 5 bytes of " FW: ",
		 * dropping the trailing space - confirm this is intended
		 * to keep the string within 48 bytes. */
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
	}
	kfree(buf);
	adpt_i2o_status_get(pHba);
	return ;
}
356
357
358static void adpt_select_queue_depths(struct Scsi_Host *host, Scsi_Device * devicelist)
359{
360	Scsi_Device *device;	/* scsi layer per device information */
361	adpt_hba* pHba;
362
363	pHba = (adpt_hba *) host->hostdata[0];
364
365	for (device = devicelist; device != NULL; device = device->next) {
366		if (device->host != host) {
367			continue;
368		}
369		if (host->can_queue) {
370			device->queue_depth =  host->can_queue - 1;
371		} else {
372			device->queue_depth = 1;
373		}
374	}
375}
376
377static int adpt_queue(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
378{
379	adpt_hba* pHba = NULL;
380	struct adpt_device* pDev = NULL;	/* dpt per device information */
381	ulong timeout = jiffies + (TMOUT_SCSI*HZ);
382
383	cmd->scsi_done = done;
384	/*
385	 * SCSI REQUEST_SENSE commands will be executed automatically by the
386	 * Host Adapter for any errors, so they should not be executed
387	 * explicitly unless the Sense Data is zero indicating that no error
388	 * occurred.
389	 */
390
391	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
392		cmd->result = (DID_OK << 16);
393		cmd->scsi_done(cmd);
394		return 0;
395	}
396
397	pHba = (adpt_hba*)cmd->host->hostdata[0];
398	if (!pHba) {
399		return FAILED;
400	}
401
402	rmb();
403	/*
404	 * TODO: I need to block here if I am processing ioctl cmds
405	 * but if the outstanding cmds all finish before the ioctl,
406	 * the scsi-core will not know to start sending cmds to me again.
407	 * I need to a way to restart the scsi-cores queues or should I block
408	 * calling scsi_done on the outstanding cmds instead
409	 * for now we don't set the IOCTL state
410	 */
411	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
412		pHba->host->last_reset = jiffies;
413		pHba->host->resetting = 1;
414		return 1;
415	}
416
417	if(cmd->eh_state != SCSI_STATE_QUEUED){
418		// If we are not doing error recovery
419		mod_timer(&cmd->eh_timeout, timeout);
420	}
421
422	// TODO if the cmd->device if offline then I may need to issue a bus rescan
423	// followed by a get_lct to see if the device is there anymore
424	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
425		/*
426		 * First command request for this device.  Set up a pointer
427		 * to the device structure.  This should be a TEST_UNIT_READY
428		 * command from scan_scsis_single.
429		 */
430		if ((pDev = adpt_find_device(pHba, (u32)cmd->channel, (u32)cmd->target, (u32)cmd-> lun)) == NULL) {
431			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
432			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
433			cmd->result = (DID_NO_CONNECT << 16);
434			cmd->scsi_done(cmd);
435			return 0;
436		}
437		(struct adpt_device*)(cmd->device->hostdata) = pDev;
438	}
439	pDev->pScsi_dev = cmd->device;
440
441	/*
442	 * If we are being called from when the device is being reset,
443	 * delay processing of the command until later.
444	 */
445	if (pDev->state & DPTI_DEV_RESET ) {
446		return FAILED;
447	}
448	return adpt_scsi_to_i2o(pHba, cmd, pDev);
449}
450
451static int adpt_bios_param(Disk* disk, kdev_t dev, int geom[])
452{
453	int heads=-1;
454	int sectors=-1;
455	int cylinders=-1;
456
457	// *** First lets set the default geometry ****
458
459	// If the capacity is less than ox2000
460	if (disk->capacity < 0x2000 ) {	// floppy
461		heads = 18;
462		sectors = 2;
463	}
464	// else if between 0x2000 and 0x20000
465	else if (disk->capacity < 0x20000) {
466		heads = 64;
467		sectors = 32;
468	}
469	// else if between 0x20000 and 0x40000
470	else if (disk->capacity < 0x40000) {
471		heads = 65;
472		sectors = 63;
473	}
474	// else if between 0x4000 and 0x80000
475	else if (disk->capacity < 0x80000) {
476		heads = 128;
477		sectors = 63;
478	}
479	// else if greater than 0x80000
480	else {
481		heads = 255;
482		sectors = 63;
483	}
484	cylinders = disk->capacity / (heads * sectors);
485
486	// Special case if CDROM
487	if(disk->device->type == 5) {  // CDROM
488		heads = 252;
489		sectors = 63;
490		cylinders = 1111;
491	}
492
493	geom[0] = heads;
494	geom[1] = sectors;
495	geom[2] = cylinders;
496
497	PDEBUG("adpt_bios_param: exit\n");
498	return 0;
499}
500
501
502static const char *adpt_info(struct Scsi_Host *host)
503{
504	adpt_hba* pHba;
505
506	pHba = (adpt_hba *) host->hostdata[0];
507	return (char *) (pHba->detail);
508}
509
/* /proc/scsi read handler for this driver.  Writes a summary of the
 * adapter and one entry per known device into `buffer`, honouring the
 * 2.4 procfs paging protocol: `offset`/`length` select a window of the
 * full output, `*start` and the return value describe what part of the
 * buffer holds it.  Writing (inout == TRUE) is not supported. */
static int adpt_proc_info(char *buffer, char **start, off_t offset,
		  int length, int hostno, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;	/* bytes written past `begin` */
	int begin = 0;	/* absolute offset where the current buffer content starts */
	int pos = 0;	/* absolute offset of the write position */
	adpt_hba* pHba;
	struct Scsi_Host *host;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	down(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host->host_no == hostno) {
			break;	/* found adapter */
		}
	}
	up(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;	/* window filled - stop generating */
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len +=  sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			/* walk the chain of luns at this channel/id */
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       d->pScsi_dev->online? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}
635
636
637/*===========================================================================
638 * Error Handling routines
639 *===========================================================================
640 */
641
642static int adpt_abort(Scsi_Cmnd * cmd)
643{
644	adpt_hba* pHba = NULL;	/* host bus adapter structure */
645	struct adpt_device* dptdevice;	/* dpt per device information */
646	u32 msg[5];
647	int rcode;
648
649	if(cmd->serial_number == 0){
650		return FAILED;
651	}
652	pHba = (adpt_hba*) cmd->host->hostdata[0];
653	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
654	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
655		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
656		return FAILED;
657	}
658
659	memset(msg, 0, sizeof(msg));
660	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
661	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
662	msg[2] = 0;
663	msg[3]= 0;
664	msg[4] = (u32)cmd;
665	if( (rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER)) != 0){
666		if(rcode == -EOPNOTSUPP ){
667			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
668			return FAILED;
669		}
670		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
671		return FAILED;
672	}
673	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
674	return SUCCESS;
675}
676
677
678#define I2O_DEVICE_RESET 0x27
679// This is the same for BLK and SCSI devices
680// NOTE this is wrong in the i2o.h definitions
681// This is not currently supported by our adapter but we issue it anyway
682static int adpt_device_reset(Scsi_Cmnd* cmd)
683{
684	adpt_hba* pHba;
685	u32 msg[4];
686	u32 rcode;
687	int old_state;
688	struct adpt_device* d = (void*) cmd->device->hostdata;
689
690	pHba = (void*) cmd->host->hostdata[0];
691	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
692	if (!d) {
693		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
694		return FAILED;
695	}
696	memset(msg, 0, sizeof(msg));
697	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
698	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
699	msg[2] = 0;
700	msg[3] = 0;
701
702	old_state = d->state;
703	d->state |= DPTI_DEV_RESET;
704	if( (rcode = adpt_i2o_post_wait(pHba, (void*)msg,sizeof(msg), FOREVER)) ){
705		d->state = old_state;
706		if(rcode == -EOPNOTSUPP ){
707			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
708			return FAILED;
709		}
710		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
711		return FAILED;
712	} else {
713		d->state = old_state;
714		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
715		return SUCCESS;
716	}
717}
718
719
720#define I2O_HBA_BUS_RESET 0x87
721// This version of bus reset is called by the eh_error handler
722static int adpt_bus_reset(Scsi_Cmnd* cmd)
723{
724	adpt_hba* pHba;
725	u32 msg[4];
726
727	pHba = (adpt_hba*)cmd->host->hostdata[0];
728	memset(msg, 0, sizeof(msg));
729	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->channel,pHba->channel[cmd->channel].tid );
730	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
731	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->channel].tid);
732	msg[2] = 0;
733	msg[3] = 0;
734	if(adpt_i2o_post_wait(pHba, (void*)msg,sizeof(msg), FOREVER) ){
735		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
736		return FAILED;
737	} else {
738		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
739		return SUCCESS;
740	}
741}
742
743// This version of reset is called by the eh_error_handler
744static int adpt_reset(Scsi_Cmnd* cmd)
745{
746	adpt_hba* pHba;
747	int rcode;
748	pHba = (adpt_hba*)cmd->host->hostdata[0];
749	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->channel,pHba->channel[cmd->channel].tid );
750	rcode =  adpt_hba_reset(pHba);
751	if(rcode == 0){
752		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
753		return SUCCESS;
754	} else {
755		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
756		return FAILED;
757	}
758}
759
760// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
761static int adpt_hba_reset(adpt_hba* pHba)
762{
763	int rcode;
764
765	pHba->state |= DPTI_STATE_RESET;
766
767	// Activate does get status , init outbound, and get hrt
768	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
769		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
770		adpt_i2o_delete_hba(pHba);
771		return rcode;
772	}
773
774	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
775		adpt_i2o_delete_hba(pHba);
776		return rcode;
777	}
778	PDEBUG("%s: in HOLD state\n",pHba->name);
779
780	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
781		adpt_i2o_delete_hba(pHba);
782		return rcode;
783	}
784	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
785
786	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
787		adpt_i2o_delete_hba(pHba);
788		return rcode;
789	}
790
791	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
792		adpt_i2o_delete_hba(pHba);
793		return rcode;
794	}
795	pHba->state &= ~DPTI_STATE_RESET;
796
797	adpt_fail_posted_scbs(pHba);
798	return 0;	/* return success */
799}
800
801/*===========================================================================
802 *
803 *===========================================================================
804 */
805
806
807static void adpt_i2o_sys_shutdown(void)
808{
809	adpt_hba *pHba, *pNext;
810	struct adpt_i2o_post_wait_data *p1, *p2;
811
812	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
813	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
814	/* Delete all IOPs from the controller chain */
815	/* They should have already been released by the
816	 * scsi-core
817	 */
818	for (pHba = hba_chain; pHba; pHba = pNext) {
819		pNext = pHba->next;
820		adpt_i2o_delete_hba(pHba);
821	}
822
823	/* Remove any timedout entries from the wait queue.  */
824	p2 = NULL;
825//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
826	/* Nothing should be outstanding at this point so just
827	 * free them
828	 */
829	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p2->next) {
830		kfree(p1);
831	}
832//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
833	adpt_post_wait_queue = 0;
834
835	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
836}
837
838/*
839 * reboot/shutdown notification.
840 *
841 * - Quiesce each IOP in the system
842 *
843 */
844
845#ifdef REBOOT_NOTIFIER
846static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
847{
848
849	 if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
850		  return NOTIFY_DONE;
851
852	 adpt_i2o_sys_shutdown();
853
854	 return NOTIFY_DONE;
855}
856#endif
857
858
/* Enable and map one PCI controller, allocate its adpt_hba structure,
 * link it into the global slot table and chain, and hook its interrupt.
 * Returns 0 on success or a negative errno; on failure all mappings
 * made so far are released. */
static int adpt_install_hba(Scsi_Host_Template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	ulong base_addr_virt = 0;	/* mapped BAR0: registers */
	ulong msg_addr_virt = 0;	/* mapped message frame area (BAR0 or BAR1) */

	int raptorFlag = FALSE;		/* TRUE for split-BAR Raptor boards */
	int i;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}
	pci_set_master(pDev);

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;	/* cap at 1M */
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}


	base_addr_virt = (ulong)ioremap(base_addr0_phys,hba_map0_area_size);
	if(base_addr_virt == 0) {
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

        if(raptorFlag == TRUE) {
		msg_addr_virt = (ulong)ioremap(base_addr1_phys, hba_map1_area_size );
		if(msg_addr_virt == 0) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap((void*)base_addr_virt);
			return -EINVAL;
		}
	} else {
		/* Single-BAR boards: message frames live in the BAR0 mapping. */
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL);
	if( pHba == NULL) {
		if(msg_addr_virt != base_addr_virt){
			iounmap((void*)msg_addr_virt);
		}
		iounmap((void*)base_addr_virt);
		return -ENOMEM;
	}
	memset(pHba, 0, sizeof(adpt_hba));

	/* Claim a slot and append to the chain under the config lock. */
	down(&adpt_configuration_lock);
	for(i=0;i<DPTI_MAX_HBA;i++) {
		if(hbas[i]==NULL) {
			hbas[i]=pHba;
			break;
		}
	}

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	/* NOTE(review): the name uses slot index i, not hba_count; if all
	 * DPTI_MAX_HBA slots were occupied, i == DPTI_MAX_HBA here and no
	 * slot was assigned above - confirm that case cannot be reached. */
	sprintf(pHba->name, "dpti%d", i);
	hba_count++;

	up(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	/* Register offsets within the BAR0 mapping. */
	pHba->irq_mask = (ulong)(base_addr_virt+0x30);
	pHba->post_port = (ulong)(base_addr_virt+0x40);
	pHba->reply_port = (ulong)(base_addr_virt+0x44);

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;	/* cleared once fully initialized */
	pHba->pDev = pDev;
	pHba->devices = NULL;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO"Adaptec I2O RAID controller %d at %lx size=%x irq=%d\n",
			hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq);
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq);
		printk(KERN_INFO"     BAR0 %lx - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %lx - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, SA_SHIRQ, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		/* delete_hba unmaps, unlinks and frees everything set up above. */
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
988
989
/* Fully tear down one controller: release its IRQ, unlink it from the
 * global slot table and chain, unmap its BARs, free every table and
 * per-device structure, and finally free the adpt_hba itself.  When
 * the last HBA goes away the management chrdev is unregistered too. */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	down(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* Clear our slot in the fixed table. */
	for(i=0;i<DPTI_MAX_HBA;i++) {
		if(hbas[i]==pHba) {
			hbas[i] = NULL;
		}
	}
	/* Unlink from the singly-linked chain (p2 trails p1). */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	up(&adpt_configuration_lock);

	iounmap((void*)pHba->base_addr_virt);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		/* Split-BAR board: the message area was a separate mapping. */
		iounmap((void*)pHba->msg_addr_virt);
	}
	if(pHba->hrt) {
		kfree(pHba->hrt);
	}
	if(pHba->lct){
		kfree(pHba->lct);
	}
	if(pHba->status_block) {
		kfree(pHba->status_block);
	}
	if(pHba->reply_pool){
		kfree(pHba->reply_pool);
	}

	/* Free the I2O device list, then the per-channel/id lun chains. */
	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	/* NOTE(review): loop bound is "< top_scsi_channel" - confirm the
	 * top channel itself needs no cleanup (possible off-by-one). */
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
	}
}
1065
1066
1067static int adpt_init(void)
1068{
1069	int i;
1070
1071	printk(KERN_INFO"Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
1072	for (i = 0; i < DPTI_MAX_HBA; i++) {
1073		hbas[i] = NULL;
1074	}
1075#ifdef REBOOT_NOTIFIER
1076	register_reboot_notifier(&adpt_reboot_notifier);
1077#endif
1078
1079	return 0;
1080}
1081
1082
1083static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1084{
1085	struct adpt_device* d;
1086
1087	if(chan < 0 || chan >= MAX_CHANNEL)
1088		return NULL;
1089
1090	if( pHba->channel[chan].device == NULL){
1091		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1092		return NULL;
1093	}
1094
1095	d = pHba->channel[chan].device[id];
1096	if(!d || d->tid == 0) {
1097		return NULL;
1098	}
1099
1100	/* If it is the only lun at that address then this should match*/
1101	if(d->scsi_lun == lun){
1102		return d;
1103	}
1104
1105	/* else we need to look through all the luns */
1106	for(d=d->next_lun ; d ; d = d->next_lun){
1107		if(d->scsi_lun == lun){
1108			return d;
1109		}
1110	}
1111	return NULL;
1112}
1113
1114
1115static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1116{
1117	// I used my own version of the WAIT_QUEUE_HEAD
1118	// to handle some version differences
1119	// When embedded in the kernel this could go back to the vanilla one
1120	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1121	int status = 0;
1122	ulong flags = 0;
1123	struct adpt_i2o_post_wait_data *p1, *p2;
1124	struct adpt_i2o_post_wait_data *wait_data =
1125		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
1126	adpt_wait_queue_t wait;
1127
1128	if(!wait_data){
1129		return -ENOMEM;
1130	}
1131	/*
1132	 * The spin locking is needed to keep anyone from playing
1133	 * with the queue pointers and id while we do the same
1134	 */
1135	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1136       // TODO we need a MORE unique way of getting ids
1137       // to support async LCT get
1138	wait_data->next = adpt_post_wait_queue;
1139	adpt_post_wait_queue = wait_data;
1140	adpt_post_wait_id++;
1141	adpt_post_wait_id = (adpt_post_wait_id & 0x7fff);
1142	wait_data->id =  adpt_post_wait_id;
1143	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1144
1145	wait_data->wq = &adpt_wq_i2o_post;
1146	wait_data->status = -ETIMEDOUT;
1147
1148	// this code is taken from kernel/sched.c:interruptible_sleep_on_timeout
1149	wait.task = current;
1150	init_waitqueue_entry(&wait, current);
1151	wq_write_lock_irqsave(&adpt_wq_i2o_post.lock,flags);
1152	__add_wait_queue(&adpt_wq_i2o_post, &wait);
1153	wq_write_unlock(&adpt_wq_i2o_post.lock);
1154
1155	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1156	timeout *= HZ;
1157	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1158		if(!timeout){
1159			set_current_state(TASK_INTERRUPTIBLE);
1160			spin_unlock_irq(&io_request_lock);
1161			schedule();
1162			spin_lock_irq(&io_request_lock);
1163		} else {
1164			set_current_state(TASK_INTERRUPTIBLE);
1165			spin_unlock_irq(&io_request_lock);
1166			schedule_timeout(timeout*HZ);
1167			spin_lock_irq(&io_request_lock);
1168		}
1169	}
1170	wq_write_lock_irq(&adpt_wq_i2o_post.lock);
1171	__remove_wait_queue(&adpt_wq_i2o_post, &wait);
1172	wq_write_unlock_irqrestore(&adpt_wq_i2o_post.lock,flags);
1173
1174	if(status == -ETIMEDOUT){
1175		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1176		// We will have to free the wait_data memory during shutdown
1177		return status;
1178	}
1179
1180	/* Remove the entry from the queue.  */
1181	p2 = NULL;
1182	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1183	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1184		if(p1 == wait_data) {
1185			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1186				status = -EOPNOTSUPP;
1187			}
1188			if(p2) {
1189				p2->next = p1->next;
1190			} else {
1191				adpt_post_wait_queue = p1->next;
1192			}
1193			break;
1194		}
1195	}
1196	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1197
1198	kfree(wait_data);
1199
1200	return status;
1201}
1202
1203
/*
 * Copy a raw message into a free inbound message frame on the IOP and
 * post it.  Polls the post port for up to 30 seconds waiting for a
 * free frame offset.
 *
 * @pHba: adapter to post to
 * @data: message body in host memory
 * @len:  message length in bytes
 *
 * Returns 0 on success or -ETIMEDOUT if no frame became available.
 * Fire-and-forget: the completion arrives later through the ISR.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		/* Reading the post port yields the offset of a free inbound
		 * frame, or EMPTY_QUEUE when the IOP has none available. */
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
	} while(m == EMPTY_QUEUE);

	/* The frame lives in the adapter's message window at offset m */
	msg = (u32*) (pHba->msg_addr_virt + m);
	memcpy_toio(msg, data, len);
	wmb();

	//post message - writing the offset back hands the frame to the IOP
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
1232
1233
/*
 * ISR-side completion for adpt_i2o_post_wait(): find the post-wait
 * entry matching the low 15 bits of the transaction context, record
 * the status and wake the sleeping poster.
 */
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	/* ids are 15 bits; strip the post-wait flag and any upper bits */
	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			/* Drop the lock before waking - the woken task takes
			 * the same lock to unlink its entry. */
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
        // If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	/* NOTE(review): this dump walks the queue without the lock held */
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}
1270
1271static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1272{
1273	u32 msg[8];
1274	u8* status;
1275	u32 m = EMPTY_QUEUE ;
1276	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1277
1278	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1279		timeout = jiffies + (25*HZ);
1280	} else {
1281		adpt_i2o_quiesce_hba(pHba);
1282	}
1283
1284	do {
1285		rmb();
1286		m = readl(pHba->post_port);
1287		if (m != EMPTY_QUEUE) {
1288			break;
1289		}
1290		if(time_after(jiffies,timeout)){
1291			printk(KERN_WARNING"Timeout waiting for message!\n");
1292			return -ETIMEDOUT;
1293		}
1294	} while (m == EMPTY_QUEUE);
1295
1296	status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
1297	if(status == NULL) {
1298		adpt_send_nop(pHba, m);
1299		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1300		return -ENOMEM;
1301	}
1302	memset(status,0,4);
1303
1304	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1305	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1306	msg[2]=0;
1307	msg[3]=0;
1308	msg[4]=0;
1309	msg[5]=0;
1310	msg[6]=virt_to_bus(status);
1311	msg[7]=0;
1312
1313	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1314	wmb();
1315	writel(m, pHba->post_port);
1316	wmb();
1317
1318	while(*status == 0){
1319		if(time_after(jiffies,timeout)){
1320			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1321			kfree(status);
1322			return -ETIMEDOUT;
1323		}
1324		rmb();
1325	}
1326
1327	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1328		PDEBUG("%s: Reset in progress...\n", pHba->name);
1329		// Here we wait for message frame to become available
1330		// indicated that reset has finished
1331		do {
1332			rmb();
1333			m = readl(pHba->post_port);
1334			if (m != EMPTY_QUEUE) {
1335				break;
1336			}
1337			if(time_after(jiffies,timeout)){
1338				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1339				return -ETIMEDOUT;
1340			}
1341		} while (m == EMPTY_QUEUE);
1342		// Flush the offset
1343		adpt_send_nop(pHba, m);
1344	}
1345	adpt_i2o_status_get(pHba);
1346	if(*status == 0x02 ||
1347			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1348		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1349				pHba->name);
1350	} else {
1351		PDEBUG("%s: Reset completed.\n", pHba->name);
1352	}
1353
1354	kfree(status);
1355#ifdef UARTDELAY
1356	// This delay is to allow someone attached to the card through the debug UART to
1357	// set up the dump levels that they want before the rest of the initialization sequence
1358	adpt_delay(20000);
1359#endif
1360	return 0;
1361}
1362
1363
1364static int adpt_i2o_parse_lct(adpt_hba* pHba)
1365{
1366	int i;
1367	int max;
1368	int tid;
1369	struct i2o_device *d;
1370	i2o_lct *lct = pHba->lct;
1371	u8 bus_no = 0;
1372	s16 scsi_id;
1373	s16 scsi_lun;
1374	u32 buf[10]; // larger than 7, or 8 ...
1375	struct adpt_device* pDev;
1376
1377	if (lct == NULL) {
1378		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1379		return -1;
1380	}
1381
1382	max = lct->table_size;
1383	max -= 3;
1384	max /= 9;
1385
1386	for(i=0;i<max;i++) {
1387		if( lct->lct_entry[i].user_tid != 0xfff){
1388			/*
1389			 * If we have hidden devices, we need to inform the upper layers about
1390			 * the possible maximum id reference to handle device access when
1391			 * an array is disassembled. This code has no other purpose but to
1392			 * allow us future access to devices that are currently hidden
1393			 * behind arrays, hotspares or have not been configured (JBOD mode).
1394			 */
1395			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1396			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1397			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1398			    	continue;
1399			}
1400			tid = lct->lct_entry[i].tid;
1401			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1402			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1403				continue;
1404			}
1405			bus_no = buf[0]>>16;
1406			scsi_id = buf[1];
1407			scsi_lun = (buf[2]>>8 )&0xff;
1408			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1409				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1410				continue;
1411			}
1412			if(scsi_id > MAX_ID){
1413				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1414				continue;
1415			}
1416			if(bus_no > pHba->top_scsi_channel){
1417				pHba->top_scsi_channel = bus_no;
1418			}
1419			if(scsi_id > pHba->top_scsi_id){
1420				pHba->top_scsi_id = scsi_id;
1421			}
1422			if(scsi_lun > pHba->top_scsi_lun){
1423				pHba->top_scsi_lun = scsi_lun;
1424			}
1425			continue;
1426		}
1427		d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1428		if(d==NULL)
1429		{
1430			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1431			return -ENOMEM;
1432		}
1433
1434		d->controller = (void*)pHba;
1435		d->next = NULL;
1436
1437		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1438
1439		d->flags = 0;
1440		tid = d->lct_data.tid;
1441		adpt_i2o_report_hba_unit(pHba, d);
1442		adpt_i2o_install_device(pHba, d);
1443	}
1444	bus_no = 0;
1445	for(d = pHba->devices; d ; d = d->next) {
1446		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1447		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1448			tid = d->lct_data.tid;
1449			// TODO get the bus_no from hrt-but for now they are in order
1450			//bus_no =
1451			if(bus_no > pHba->top_scsi_channel){
1452				pHba->top_scsi_channel = bus_no;
1453			}
1454			pHba->channel[bus_no].type = d->lct_data.class_id;
1455			pHba->channel[bus_no].tid = tid;
1456			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1457			{
1458				pHba->channel[bus_no].scsi_id = buf[1];
1459				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1460			}
1461			// TODO remove - this is just until we get from hrt
1462			bus_no++;
1463			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1464				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1465				break;
1466			}
1467		}
1468	}
1469
1470	// Setup adpt_device table
1471	for(d = pHba->devices; d ; d = d->next) {
1472		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1473		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1474		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1475
1476			tid = d->lct_data.tid;
1477			scsi_id = -1;
1478			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1479			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1480				bus_no = buf[0]>>16;
1481				scsi_id = buf[1];
1482				scsi_lun = (buf[2]>>8 )&0xff;
1483				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1484					continue;
1485				}
1486				if(scsi_id > MAX_ID){
1487					continue;
1488				}
1489				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1490					pDev =  kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1491					if(pDev == NULL) {
1492						return -ENOMEM;
1493					}
1494					pHba->channel[bus_no].device[scsi_id] = pDev;
1495					memset(pDev,0,sizeof(struct adpt_device));
1496				} else {
1497					for( pDev = pHba->channel[bus_no].device[scsi_id];
1498							pDev->next_lun; pDev = pDev->next_lun){
1499					}
1500					pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
1501					if(pDev == NULL) {
1502						return -ENOMEM;
1503					}
1504					memset(pDev->next_lun,0,sizeof(struct adpt_device));
1505					pDev = pDev->next_lun;
1506				}
1507				pDev->tid = tid;
1508				pDev->scsi_channel = bus_no;
1509				pDev->scsi_id = scsi_id;
1510				pDev->scsi_lun = scsi_lun;
1511				pDev->pI2o_dev = d;
1512				d->owner = pDev;
1513				pDev->type = (buf[0])&0xff;
1514				pDev->flags = (buf[0]>>8)&0xff;
1515				if(scsi_id > pHba->top_scsi_id){
1516					pHba->top_scsi_id = scsi_id;
1517				}
1518				if(scsi_lun > pHba->top_scsi_lun){
1519					pHba->top_scsi_lun = scsi_lun;
1520				}
1521			}
1522			if(scsi_id == -1){
1523				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1524						d->lct_data.identity_tag);
1525			}
1526		}
1527	}
1528	return 0;
1529}
1530
1531
1532/*
1533 *	Each I2O controller has a chain of devices on it - these match
1534 *	the useful parts of the LCT of the board.
1535 */
1536
1537static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1538{
1539	down(&adpt_configuration_lock);
1540	d->controller=pHba;
1541	d->owner=NULL;
1542	d->next=pHba->devices;
1543	d->prev=NULL;
1544	if (pHba->devices != NULL){
1545		pHba->devices->prev=d;
1546	}
1547	pHba->devices=d;
1548	*d->dev_name = 0;
1549
1550	up(&adpt_configuration_lock);
1551	return 0;
1552}
1553
1554static int adpt_open(struct inode *inode, struct file *file)
1555{
1556	int minor;
1557	adpt_hba* pHba;
1558
1559	//TODO check for root access
1560	//
1561	minor = MINOR(inode->i_rdev);
1562	if (minor >= hba_count) {
1563		return -ENXIO;
1564	}
1565	down(&adpt_configuration_lock);
1566	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1567		if (pHba->unit == minor) {
1568			break;	/* found adapter */
1569		}
1570	}
1571	if (pHba == NULL) {
1572		up(&adpt_configuration_lock);
1573		return -ENXIO;
1574	}
1575
1576//	if(pHba->in_use){
1577	//	up(&adpt_configuration_lock);
1578//		return -EBUSY;
1579//	}
1580
1581	pHba->in_use = 1;
1582	up(&adpt_configuration_lock);
1583
1584	return 0;
1585}
1586
1587static int adpt_close(struct inode *inode, struct file *file)
1588{
1589	int minor;
1590	adpt_hba* pHba;
1591
1592	minor = MINOR(inode->i_rdev);
1593	if (minor >= hba_count) {
1594		return -ENXIO;
1595	}
1596	down(&adpt_configuration_lock);
1597	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1598		if (pHba->unit == minor) {
1599			break;	/* found adapter */
1600		}
1601	}
1602	up(&adpt_configuration_lock);
1603	if (pHba == NULL) {
1604		return -ENXIO;
1605	}
1606
1607	pHba->in_use = 0;
1608
1609	return 0;
1610}
1611
1612
/*
 * I2OUSRCMD ioctl backend: pass a user-supplied I2O message straight
 * through to the IOP.
 *
 * The user buffer holds the raw message followed by a reply template.
 * Any simple SG elements in the message are rewritten to point at
 * kernel bounce buffers (copied in/out as the direction flags say),
 * the message is posted via adpt_i2o_post_wait(), and the reply frame
 * plus output SG buffers are copied back to user space.
 *
 * Returns 0 on success or a negative errno.
 * NOTE: several "TODO 64bit fix" casts assume 32-bit pointers.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32* arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32* user_msg = (u32*)arg;
	u32* user_reply = NULL;
	/* variable-length array sized by the adapter's SG limit */
	ulong sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	ulong p = 0;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;	/* message size lives in the high 16 bits */

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user((void*)msg, (void*)user_msg, size)) {
		return -EFAULT;
	}
	/* NOTE(review): this get_user result is unchecked; a fault leaves
	 * reply_size at 0 and simply skips the reply copy-out */
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	memset(reply,0,REPLY_FRAME_SIZE*4);
	sg_offset = (msg[0]>>4)&0xf;	/* SG list offset in u32 words */
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = (u32)reply;	/* ISR copies the reply frame here */
	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO 64bit fix
		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		/* Replace each user SG address with a kernel bounce buffer */
		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = (ulong)kmalloc(sg_size, GFP_KERNEL|ADDR32);
			if(p == 0) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// TODO 64bit fix
				if (copy_from_user((void*)p,(void*)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			//TODO 64bit fix
			sg[i].addr_bus = (u32)virt_to_bus((void*)p);
		}
	}

	/* Retry indefinitely on timeout - the command has been handed to
	 * the hardware and cannot simply be abandoned */
	do {
		spin_lock_irqsave(&io_request_lock, flags);
		// This state stops any new commands from enterring the
		// controller while processing the ioctl
//		pHba->state |= DPTI_STATE_IOCTL;
//		We can't set this now - The scsi subsystem sets host_blocked and
//		the queue empties and stops.  We need a way to restart the queue
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
//		pHba->state &= ~DPTI_STATE_IOCTL;
		spin_unlock_irqrestore(&io_request_lock, flags);
	} while(rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
	/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user ((void*)msg, (void*)user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user((void*)sg[j].addr_bus,(void*)sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %lx TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	/* Free the reply frame and every bounce buffer we allocated */
	kfree (reply);
	while(sg_index) {
		if(sg_list[--sg_index]) {
			kfree((void*)(sg_list[sg_index]));
		}
	}
	return rcode;
}
1783
1784
1785/*
 * This routine returns information about the system.  This does not affect
1787 * any logic and if the info is wrong - it doesn't matter.
1788 */
1789
1790/* Get all the info we can not get from kernel services */
1791static int adpt_system_info(void *buffer)
1792{
1793	sysInfo_S si;
1794
1795	memset(&si, 0, sizeof(si));
1796
1797	si.osType = OS_LINUX;
1798	si.osMajorVersion = (u8) (LINUX_VERSION_CODE >> 16);
1799	si.osMinorVersion = (u8) (LINUX_VERSION_CODE >> 8 & 0x0ff);
1800	si.osRevision =     (u8) (LINUX_VERSION_CODE & 0x0ff);
1801	si.busType = SI_PCI_BUS;
1802	si.processorFamily = DPTI_sig.dsProcessorFamily;
1803
1804#if defined __i386__
1805	adpt_i386_info(&si);
1806#elif defined(__ia64__)
1807	adpt_ia64_info(&si);
1808#elif defined(__sparc__)
1809	adpt_sparc_info(&si);
1810#elif defined(__alpha__)
1811	adpt_alpha_info(&si);
1812#else
1813	si.processorType = 0xff ;
1814#endif
1815	if(copy_to_user(buffer, &si, sizeof(si))){
1816		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1817		return -EFAULT;
1818	}
1819
1820	return 0;
1821}
1822
#if defined __ia64__
/* Report the IA-64 processor type for adpt_system_info(). */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif
1832
1833
#if defined __sparc__
/* Report the SPARC processor type for adpt_system_info(). */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
1843
#if defined __alpha__
/* Report the Alpha processor type for adpt_system_info(). */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif
1853
#if defined __i386__

/* Map the detected x86 family onto the DPT processor-type codes
 * for adpt_system_info(). */
static void adpt_i386_info(sysInfo_S* si)
{
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
	default:
		/* Pentium, and anything newer or unknown, reports as Pentium */
		si->processorType = PROC_PENTIUM;
		break;
	}
}

#endif
1878
1879
/*
 * Character-device ioctl dispatcher.  The minor number selects the
 * adapter.  Supported commands: DPT_SIGNATURE, I2OUSRCMD (passthru),
 * DPT_CTRLINFO, DPT_SYSINFO, DPT_BLINKLED, I2ORESETCMD, I2ORESCANCMD
 * and DPT_TARGET_BUSY.  Returns 0 or a negative errno.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
	      ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags;

	minor = MINOR(inode->i_rdev);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	down(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	up(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Busy-wait (sleeping 2 jiffies at a time) until any in-flight
	 * adapter reset has finished */
	while((volatile u32) pHba->state & DPTI_STATE_RESET ) {
		set_task_state(current,TASK_UNINTERRUPTIBLE);
		schedule_timeout(2);

	}

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user((char*)arg, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		/* returns directly; the break below is unreachable */
		return	adpt_i2o_passthru(pHba,(u32*)arg);
		break;

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user((void *) arg, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		/* returns directly; the break below is unreachable */
		return adpt_system_info((void*)arg);
		break;
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user((char*)arg, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		/* reset must run under the global io_request_lock */
		spin_lock_irqsave(&io_request_lock, flags);
		adpt_hba_reset(pHba);
		spin_unlock_irqrestore(&io_request_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	case DPT_TARGET_BUSY & 0xFFFF:	/* accept the command with or without type bits */
	case DPT_TARGET_BUSY:
	{
		TARGET_BUSY_T busy;
		struct adpt_device* d;

		if (copy_from_user((void*)&busy, (void*)arg, sizeof(TARGET_BUSY_T))) {
			return -EFAULT;
		}

		d = adpt_find_device(pHba, busy.channel, busy.id, busy.lun);
		if(d == NULL){
			return -ENODEV;
		}
		/* busy when the mid-layer still has commands outstanding */
		busy.isBusy = ((d->pScsi_dev) && (0 != d->pScsi_dev->access_count)) ? 1 : 0;
		if (copy_to_user ((char*)arg, &busy, sizeof(busy))) {
			return -EFAULT;
		}
		break;
	}
	default:
		return -EINVAL;
	}

	return error;
}
1985
1986
/*
 * Interrupt handler: drain the adapter's outbound reply FIFO while the
 * interrupt-pending bit stays set.  Each reply frame is dispatched by
 * its transaction context: bit 30 = IOCTL (copy frame to the waiter's
 * buffer), bit 31 = post-wait completion, otherwise a SCSI command.
 * Runs under io_request_lock.
 */
static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	Scsi_Cmnd* cmd;
	adpt_hba* pHba=NULL;
	u32 m;
	ulong reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;

	pHba = dev_id;
	if (pHba == NULL ){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return;
	}
	spin_lock_irqsave(&io_request_lock, flags);
	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		/* reply port yields the bus address of a reply frame */
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				spin_unlock_irqrestore(&io_request_lock,flags);
				return;
			}
		}
		reply = (ulong)bus_to_virt(m);

		if (readl(reply) & MSG_FAIL) {
			/* Failed message: recover the original context from
			 * the preserved message frame, then discard it */
			u32 old_m = readl(reply+28);
			ulong msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = (ulong)(pHba->msg_addr_virt + old_m);
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			/* reply+12 holds the waiter's kernel reply buffer */
			ulong p = (ulong)(readl(reply+12));
			if( p != 0) {
				memcpy((void*)p, (void*)reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status  >> 24){
				status &=  0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				cmd = (Scsi_Cmnd*) readl(reply+12);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = (Scsi_Cmnd*) readl(reply+12);
			if(cmd != NULL){
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		/* Return the frame to the adapter's free list */
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	spin_unlock_irqrestore(&io_request_lock, flags);
	return;

}
2072
/*
 * Build an I2O_CMD_SCSI_EXEC (DPT private) message from a mid-layer
 * Scsi_Cmnd and post it to the adapter.
 *
 * Message layout: header + 16-byte CDB block + transfer-length word +
 * simple SG list (either the command's scatterlist or the single
 * request buffer).  The Scsi_Cmnd pointer rides in the transaction
 * context so the ISR can complete it.
 *
 * Returns 0 on success or the error from adpt_i2o_post_this().
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, Scsi_Cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	s32 rcode;

	memset(msg, 0 , sizeof(msg));
	len = cmd->request_bufflen;
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case SCSI_DATA_READ:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case SCSI_DATA_WRITE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case SCSI_DATA_NONE:
			break;
		case SCSI_DATA_UNKNOWN:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = (u32)cmd;	/* We want the SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	reqlen = 14;		// SINGLE SGE
	/* Now fill in the SGList and command */
	if(cmd->use_sg) {
		struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
		len = 0;
		/* one simple SG element (flags|length, bus address) per entry */
		for(i = 0 ; i < cmd->use_sg; i++) {
			*mptr++ = direction|0x10000000|sg->length;
			len+=sg->length;
			*mptr++ = virt_to_bus(sg->address);
			sg++;
		}
		/* Make this an end of list */
		mptr[-2] = direction|0xD0000000|(sg-1)->length;
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = cmd->request_bufflen;
		if(len == 0) {
			reqlen = 12;	/* no SG element at all */
		} else {
			/* single buffer: one end-of-list SG element */
			*mptr++ = 0xD0000000|direction|cmd->request_bufflen;
			*mptr++ = virt_to_bus(cmd->request_buffer);
		}
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2181
2182
2183static s32 adpt_scsi_register(adpt_hba* pHba,Scsi_Host_Template * sht)
2184{
2185	struct Scsi_Host *host = NULL;
2186
2187	host = scsi_register(sht, sizeof(adpt_hba*));
2188	if (host == NULL) {
2189		printk ("%s: scsi_register returned NULL\n",pHba->name);
2190		return -1;
2191	}
2192	(adpt_hba*)(host->hostdata[0]) = pHba;
2193	pHba->host = host;
2194
2195	host->irq = pHba->pDev->irq;;
2196	/* no IO ports, so don't have to set host->io_port and
2197	 * host->n_io_port
2198	 */
2199	host->io_port = 0;
2200	host->n_io_port = 0;
2201				/* see comments in hosts.h */
2202	host->max_id = 16;
2203	host->max_lun = 256;
2204	host->max_channel = pHba->top_scsi_channel + 1;
2205	host->cmd_per_lun = 256;
2206	host->unique_id = (uint) pHba;
2207	host->sg_tablesize = pHba->sg_tablesize;
2208	host->can_queue = pHba->post_fifo_size;
2209	host->select_queue_depths = adpt_select_queue_depths;
2210
2211	return 0;
2212}
2213
2214
/*
 * Translate an I2O reply frame into a SCSI midlayer completion for @cmd.
 * @reply is the (mapped) address of the reply frame, read with readl()
 * in 4-byte words.  Sets cmd->result, cmd->resid and (on CHECK CONDITION)
 * cmd->sense_buffer, then calls cmd->scsi_done().  Returns cmd->result.
 */
static s32 adpt_i2o_to_scsi(ulong reply, Scsi_Cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);	// SCSI status byte
	hba_status = detailed_status >> 8;	// adapter (HBA) status byte

	// calculate resid for sg
	// NOTE(review): reply+5 is a *byte* offset while the other reads use
	// word-aligned offsets (reply+16, reply+28); presumably this should be
	// the TransferCount field - confirm against the reply frame layout.
	cmd->resid = cmd->request_bufflen - readl(reply+5);

	pHba = (adpt_hba*) cmd->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		// Map the I2O detailed status code onto a midlayer DID_* code
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if(readl(reply+5) < cmd->underflow ) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->channel, (u32)cmd->target, (u32)cmd->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		// Everything else is reported as a generic DID_ERROR
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->channel, (u32)cmd->target, (u32)cmd-> lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if(dev_status == 0x02 /*CHECK_CONDITION*/) {
			u32 len = sizeof(cmd->sense_buffer);
			len = (len > 40) ?  40 : len;	// reply frame carries at most 40 sense bytes
			// Copy over the sense data
			memcpy(cmd->sense_buffer, (void*)(reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->channel, (u32)cmd->target, (u32)cmd->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condition we could not talk to the tid
		 * the card rejected it.  We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->channel, (u32)cmd->target, (u32)cmd-> lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	// Fold the SCSI status byte into the low bits of the result
	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}
2336
2337
2338static s32 adpt_rescan(adpt_hba* pHba)
2339{
2340	s32 rcode;
2341	ulong flags;
2342
2343	spin_lock_irqsave(&io_request_lock, flags);
2344	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
2345		spin_unlock_irqrestore(&io_request_lock, flags);
2346		return rcode;
2347	}
2348
2349	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
2350		spin_unlock_irqrestore(&io_request_lock, flags);
2351		return rcode;
2352	}
2353	spin_unlock_irqrestore(&io_request_lock, flags);
2354	return 0;
2355}
2356
2357
2358static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2359{
2360	int i;
2361	int max;
2362	int tid;
2363	struct i2o_device *d;
2364	i2o_lct *lct = pHba->lct;
2365	u8 bus_no = 0;
2366	s16 scsi_id;
2367	s16 scsi_lun;
2368	u32 buf[10]; // at least 8 u32's
2369	struct adpt_device* pDev = NULL;
2370	struct i2o_device* pI2o_dev = NULL;
2371
2372	if (lct == NULL) {
2373		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2374		return -1;
2375	}
2376
2377	max = lct->table_size;
2378	max -= 3;
2379	max /= 9;
2380
2381	// Mark each drive as unscanned
2382	for (d = pHba->devices; d; d = d->next) {
2383		pDev =(struct adpt_device*) d->owner;
2384		if(!pDev){
2385			continue;
2386		}
2387		pDev->state |= DPTI_DEV_UNSCANNED;
2388	}
2389
2390	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2391
2392	for(i=0;i<max;i++) {
2393		if( lct->lct_entry[i].user_tid != 0xfff){
2394			continue;
2395		}
2396
2397		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2398		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2399		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2400			tid = lct->lct_entry[i].tid;
2401			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2402				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2403				continue;
2404			}
2405			bus_no = buf[0]>>16;
2406			scsi_id = buf[1];
2407			scsi_lun = (buf[2]>>8 )&0xff;
2408			pDev = pHba->channel[bus_no].device[scsi_id];
2409			/* da lun */
2410			while(pDev) {
2411				if(pDev->scsi_lun == scsi_lun) {
2412					break;
2413				}
2414				pDev = pDev->next_lun;
2415			}
2416			if(!pDev ) { // Something new add it
2417				d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2418				if(d==NULL)
2419				{
2420					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2421					return -ENOMEM;
2422				}
2423
2424				d->controller = (void*)pHba;
2425				d->next = NULL;
2426
2427				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2428
2429				d->flags = 0;
2430				adpt_i2o_report_hba_unit(pHba, d);
2431				adpt_i2o_install_device(pHba, d);
2432
2433				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
2434					printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2435					continue;
2436				}
2437				pDev = pHba->channel[bus_no].device[scsi_id];
2438				if( pDev == NULL){
2439					pDev =  kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
2440					if(pDev == NULL) {
2441						return -ENOMEM;
2442					}
2443					pHba->channel[bus_no].device[scsi_id] = pDev;
2444				} else {
2445					while (pDev->next_lun) {
2446						pDev = pDev->next_lun;
2447					}
2448					pDev = pDev->next_lun = kmalloc(sizeof(struct adpt_device),GFP_KERNEL);
2449					if(pDev == NULL) {
2450						return -ENOMEM;
2451					}
2452				}
2453				memset(pDev,0,sizeof(struct adpt_device));
2454				pDev->tid = d->lct_data.tid;
2455				pDev->scsi_channel = bus_no;
2456				pDev->scsi_id = scsi_id;
2457				pDev->scsi_lun = scsi_lun;
2458				pDev->pI2o_dev = d;
2459				d->owner = pDev;
2460				pDev->type = (buf[0])&0xff;
2461				pDev->flags = (buf[0]>>8)&0xff;
2462				// Too late, SCSI system has made up it's mind, but what the hey ...
2463				if(scsi_id > pHba->top_scsi_id){
2464					pHba->top_scsi_id = scsi_id;
2465				}
2466				if(scsi_lun > pHba->top_scsi_lun){
2467					pHba->top_scsi_lun = scsi_lun;
2468				}
2469				continue;
2470			} // end of new i2o device
2471
2472			// We found an old device - check it
2473			while(pDev) {
2474				if(pDev->scsi_lun == scsi_lun) {
2475					if(pDev->pScsi_dev->online == FALSE) {
2476						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2477								pHba->name,bus_no,scsi_id,scsi_lun);
2478						if (pDev->pScsi_dev) {
2479							pDev->pScsi_dev->online = TRUE;
2480						}
2481					}
2482					d = pDev->pI2o_dev;
2483					if(d->lct_data.tid != tid) { // something changed
2484						pDev->tid = tid;
2485						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2486						if (pDev->pScsi_dev) {
2487							pDev->pScsi_dev->changed = TRUE;
2488							pDev->pScsi_dev->removable = TRUE;
2489						}
2490					}
2491					// Found it - mark it scanned
2492					pDev->state = DPTI_DEV_ONLINE;
2493					break;
2494				}
2495				pDev = pDev->next_lun;
2496			}
2497		}
2498	}
2499	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2500		pDev =(struct adpt_device*) pI2o_dev->owner;
2501		if(!pDev){
2502			continue;
2503		}
2504		// Drive offline drives that previously existed but could not be found
2505		// in the LCT table
2506		if (pDev->state & DPTI_DEV_UNSCANNED){
2507			pDev->state = DPTI_DEV_OFFLINE;
2508			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2509			if (pDev->pScsi_dev) {
2510				pDev->pScsi_dev->online = FALSE;
2511				if (pDev->pScsi_dev->access_count) {
2512					// A drive that was mounted is no longer there... bad!
2513					SCSI_LOG_ERROR_RECOVERY(1, printk ("%s:Rescan: Previously "
2514								 "mounted drive not found!\n",pHba->name));
2515					printk(KERN_WARNING"%s:Mounted drive taken offline\n",pHba->name);
2516				}
2517			}
2518		}
2519	}
2520	return 0;
2521}
2522
2523static void adpt_fail_posted_scbs(adpt_hba* pHba)
2524{
2525	Scsi_Cmnd* 	cmd = NULL;
2526	Scsi_Device* 	d = NULL;
2527
2528	if( pHba->host->host_queue != NULL ) {
2529		d = pHba->host->host_queue;
2530		if(!d){
2531			return;
2532		}
2533		while( d->next != NULL ){
2534			for(cmd = d->device_queue; cmd ; cmd = cmd->next){
2535				if(cmd->serial_number == 0){
2536					continue;
2537				}
2538				cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2539				cmd->scsi_done(cmd);
2540			}
2541			d = d->next;
2542		}
2543	}
2544}
2545
2546
2547/*============================================================================
2548 *  Routines from i2o subsystem
2549 *============================================================================
2550 */
2551
2552
2553
2554/*
2555 *	Bring an I2O controller into HOLD state. See the spec.
2556 */
2557static int adpt_i2o_activate_hba(adpt_hba* pHba)
2558{
2559	int rcode;
2560
2561	if(pHba->initialized ) {
2562		if (adpt_i2o_status_get(pHba) < 0) {
2563			if((rcode = adpt_i2o_reset_hba(pHba) != 0)){
2564				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2565				return rcode;
2566			}
2567			if (adpt_i2o_status_get(pHba) < 0) {
2568				printk(KERN_INFO "HBA not responding.\n");
2569				return -1;
2570			}
2571		}
2572
2573		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2574			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2575			return -1;
2576		}
2577
2578		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2579		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2580		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2581		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2582			adpt_i2o_reset_hba(pHba);
2583			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2584				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2585				return -1;
2586			}
2587		}
2588	} else {
2589		if((rcode = adpt_i2o_reset_hba(pHba) != 0)){
2590			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2591			return rcode;
2592		}
2593
2594	}
2595
2596	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2597		return -1;
2598	}
2599
2600	/* In HOLD state */
2601
2602	if (adpt_i2o_hrt_get(pHba) < 0) {
2603		return -1;
2604	}
2605
2606	return 0;
2607}
2608
2609/*
2610 *	Bring a controller online into OPERATIONAL state.
2611 */
2612
2613static int adpt_i2o_online_hba(adpt_hba* pHba)
2614{
2615	if (adpt_i2o_systab_send(pHba) < 0) {
2616		adpt_i2o_delete_hba(pHba);
2617		return -1;
2618	}
2619	/* In READY state */
2620
2621	if (adpt_i2o_enable_hba(pHba) < 0) {
2622		adpt_i2o_delete_hba(pHba);
2623		return -1;
2624	}
2625
2626	/* In OPERATIONAL state  */
2627	return 0;
2628}
2629
/*
 * Post a UTIL_NOP message in frame @m - or, if @m == EMPTY_QUEUE, in the
 * next inbound frame the IOP hands out.  Used to give back a message
 * frame we claimed but cannot use.  Returns 0 on success, 2 on timeout.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 *msg;
	ulong timeout = jiffies + 5*HZ;

	/* Busy-wait (up to 5s) for the IOP to hand out an inbound frame */
	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
	}
	/* m is the frame's offset within the message window */
	msg = (u32*)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();

	/* post the frame back to the IOP */
	writel(m, pHba->post_port);
	wmb();
	return 0;
}
2656
/*
 * I2O_CMD_OUTBOUND_INIT: tell the IOP the size and count of our reply
 * frames, wait for the IOP to report completion via a DMA'd status
 * byte, then (re)allocate pHba->reply_pool and prime the outbound
 * (reply) FIFO with each frame's bus address.
 * Returns 0 on success, a negative errno, or -2 on init failure.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	u32 *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32* ptr;
	u32 outbound_frame;  // This had to be a 32 bit address
	u32 m;

	/* Busy-wait for an inbound message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
	} while(m == EMPTY_QUEUE);

	msg=(u32 *)(pHba->msg_addr_virt+m);

	/* 4-byte cell the IOP DMAs its progress/result code into */
	status = kmalloc(4,GFP_KERNEL|ADDR32);
	if (status==NULL) {
		adpt_send_nop(pHba, m);	/* give the claimed frame back */
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel(virt_to_bus(status), &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	// NOTE(review): *status is written by the IOP via DMA but is not
	// declared volatile; the rmb() in the loop forces the re-read.
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			kfree((void*)status);
			return -ETIMEDOUT;
		}
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		kfree((void*)status);
		return -2;
	}
	kfree((void*)status);

	if(pHba->reply_pool != NULL){
		kfree(pHba->reply_pool);
	}

	/* reply_fifo_size frames of REPLY_FRAME_SIZE 32-bit words each */
	pHba->reply_pool = (u32*)kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32);
	if(!pHba->reply_pool){
		printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name);
		return -1;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	/* hand every reply frame's bus address to the outbound FIFO */
	ptr = pHba->reply_pool;
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		outbound_frame = (u32)virt_to_bus(ptr);
		writel(outbound_frame, pHba->reply_port);
		wmb();
		ptr +=  REPLY_FRAME_SIZE;
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2747
2748
2749/*
2750 * I2O System Table.  Contains information about
2751 * all the IOPs in the system.  Used to inform IOPs
2752 * about each other's existence.
2753 *
2754 * sys_tbl_ver is the CurrentChangeIndicator that is
2755 * used by IOPs to track changes.
2756 */
2757
2758
2759
/*
 * I2O_CMD_STATUS_GET: have the IOP DMA its 88-byte status block into
 * pHba->status_block, then derive the inbound/outbound FIFO sizes and
 * the scatter/gather limit from it.  Returns 0 or a negative errno.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 *msg;
	u8 *status_block=NULL;
	ulong status_block_bus;

	/* Allocated once (32-bit addressable for DMA), reused on later calls */
	if(pHba->status_block == NULL) {
		pHba->status_block = (i2o_status_block*)
			kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	status_block_bus = virt_to_bus(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Busy-wait for an inbound message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
	} while(m==EMPTY_QUEUE);


	msg=(u32*)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(((u32)status_block_bus)&0xffffffff, &msg[6]);
	writel(0, &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* Poll until the last byte of the block is written (presumably the
	 * IOP fills it tail-last, so byte 87 == 0xff marks completion).
	 * NOTE(review): relies on rmb() to force the re-read since
	 * status_block is not volatile. */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	// (frame bytes minus 40-byte message header, in SG elements)
	pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element);
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
2869
2870/*
2871 * Get the IOP's Logical Configuration Table
2872 */
2873static int adpt_i2o_lct_get(adpt_hba* pHba)
2874{
2875	u32 msg[8];
2876	int ret;
2877	u32 buf[16];
2878
2879	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2880		pHba->lct_size = pHba->status_block->expected_lct_size;
2881	}
2882	do {
2883		if (pHba->lct == NULL) {
2884			pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32);
2885			if(pHba->lct == NULL) {
2886				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2887					pHba->name);
2888				return -ENOMEM;
2889			}
2890		}
2891		memset(pHba->lct, 0, pHba->lct_size);
2892
2893		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
2894		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
2895		msg[2] = 0;
2896		msg[3] = 0;
2897		msg[4] = 0xFFFFFFFF;	/* All devices */
2898		msg[5] = 0x00000000;	/* Report now */
2899		msg[6] = 0xD0000000|pHba->lct_size;
2900		msg[7] = virt_to_bus(pHba->lct);
2901
2902		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
2903			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
2904				pHba->name, ret);
2905			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
2906			return ret;
2907		}
2908
2909		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
2910			pHba->lct_size = pHba->lct->table_size << 2;
2911			kfree(pHba->lct);
2912			pHba->lct = NULL;
2913		}
2914	} while (pHba->lct == NULL);
2915
2916	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
2917
2918
2919	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
2920	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
2921		pHba->FwDebugBufferSize = buf[1];
2922		pHba->FwDebugBuffer_P    = pHba->base_addr_virt + buf[0];
2923		pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET;
2924		pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET;
2925		pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
2926		pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET;
2927		pHba->FwDebugBuffer_P += buf[2];
2928		pHba->FwDebugFlags = 0;
2929	}
2930
2931	return 0;
2932}
2933
2934static int adpt_i2o_build_sys_table(void)
2935{
2936	adpt_hba* pHba = NULL;
2937	int count = 0;
2938
2939	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
2940				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
2941
2942	if(sys_tbl)
2943		kfree(sys_tbl);
2944
2945	sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32);
2946	if(!sys_tbl) {
2947		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
2948		return -ENOMEM;
2949	}
2950	memset(sys_tbl, 0, sys_tbl_len);
2951
2952	sys_tbl->num_entries = hba_count;
2953	sys_tbl->version = I2OVERSION;
2954	sys_tbl->change_ind = sys_tbl_ind++;
2955
2956	for(pHba = hba_chain; pHba; pHba = pHba->next) {
2957		// Get updated Status Block so we have the latest information
2958		if (adpt_i2o_status_get(pHba)) {
2959			sys_tbl->num_entries--;
2960			continue; // try next one
2961		}
2962
2963		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
2964		sys_tbl->iops[count].iop_id = pHba->unit + 2;
2965		sys_tbl->iops[count].seg_num = 0;
2966		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
2967		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
2968		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
2969		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
2970		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
2971		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
2972		sys_tbl->iops[count].inbound_low = (u32)virt_to_bus((void*)pHba->post_port);
2973		sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus((void*)pHba->post_port)>>32);
2974
2975		count++;
2976	}
2977
2978#ifdef DEBUG
2979{
2980	u32 *table = (u32*)sys_tbl;
2981	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
2982	for(count = 0; count < (sys_tbl_len >>2); count++) {
2983		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
2984			count, table[count]);
2985	}
2986}
2987#endif
2988
2989	return 0;
2990}
2991
2992
2993/*
2994 *	 Dump the information block associated with a given unit (TID)
2995 */
2996
2997static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
2998{
2999	char buf[64];
3000	int unit = d->lct_data.tid;
3001
3002	printk(KERN_INFO "TID %3.3d ", unit);
3003
3004	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3005	{
3006		buf[16]=0;
3007		printk(" Vendor: %-12.12s", buf);
3008	}
3009	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3010	{
3011		buf[16]=0;
3012		printk(" Device: %-12.12s", buf);
3013	}
3014	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3015	{
3016		buf[8]=0;
3017		printk(" Rev: %-12.12s\n", buf);
3018	}
3019#ifdef DEBUG
3020	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3021	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3022	 printk(KERN_INFO "\tFlags: ");
3023
3024	 if(d->lct_data.device_flags&(1<<0))
3025		  printk("C");	     // ConfigDialog requested
3026	 if(d->lct_data.device_flags&(1<<1))
3027		  printk("U");	     // Multi-user capable
3028	 if(!(d->lct_data.device_flags&(1<<4)))
3029		  printk("P");	     // Peer service enabled!
3030	 if(!(d->lct_data.device_flags&(1<<5)))
3031		  printk("M");	     // Mgmt service enabled!
3032	 printk("\n");
3033#endif
3034}
3035
3036#ifdef DEBUG
3037/*
3038 *	Do i2o class name lookup
3039 */
3040static const char *adpt_i2o_get_class_name(int class)
3041{
3042	int idx = 16;
3043	static char *i2o_class_name[] = {
3044		"Executive",
3045		"Device Driver Module",
3046		"Block Device",
3047		"Tape Device",
3048		"LAN Interface",
3049		"WAN Interface",
3050		"Fibre Channel Port",
3051		"Fibre Channel Device",
3052		"SCSI Device",
3053		"ATE Port",
3054		"ATE Device",
3055		"Floppy Controller",
3056		"Floppy Device",
3057		"Secondary Bus Port",
3058		"Peer Transport Agent",
3059		"Peer Transport",
3060		"Unknown"
3061	};
3062
3063	switch(class&0xFFF) {
3064	case I2O_CLASS_EXECUTIVE:
3065		idx = 0; break;
3066	case I2O_CLASS_DDM:
3067		idx = 1; break;
3068	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3069		idx = 2; break;
3070	case I2O_CLASS_SEQUENTIAL_STORAGE:
3071		idx = 3; break;
3072	case I2O_CLASS_LAN:
3073		idx = 4; break;
3074	case I2O_CLASS_WAN:
3075		idx = 5; break;
3076	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3077		idx = 6; break;
3078	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3079		idx = 7; break;
3080	case I2O_CLASS_SCSI_PERIPHERAL:
3081		idx = 8; break;
3082	case I2O_CLASS_ATE_PORT:
3083		idx = 9; break;
3084	case I2O_CLASS_ATE_PERIPHERAL:
3085		idx = 10; break;
3086	case I2O_CLASS_FLOPPY_CONTROLLER:
3087		idx = 11; break;
3088	case I2O_CLASS_FLOPPY_DEVICE:
3089		idx = 12; break;
3090	case I2O_CLASS_BUS_ADAPTER_PORT:
3091		idx = 13; break;
3092	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3093		idx = 14; break;
3094	case I2O_CLASS_PEER_TRANSPORT:
3095		idx = 15; break;
3096	}
3097	return i2o_class_name[idx];
3098}
3099#endif
3100
3101
3102static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3103{
3104	u32 msg[6];
3105	int ret, size = sizeof(i2o_hrt);
3106
3107	do {
3108		if (pHba->hrt == NULL) {
3109			pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32);
3110			if (pHba->hrt == NULL) {
3111				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3112				return -ENOMEM;
3113			}
3114		}
3115
3116		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3117		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3118		msg[2]= 0;
3119		msg[3]= 0;
3120		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3121		msg[5]= virt_to_bus(pHba->hrt);   /* Dump it here */
3122
3123		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3124			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3125			return ret;
3126		}
3127
3128		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3129			size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3130			kfree(pHba->hrt);
3131			pHba->hrt = NULL;
3132		}
3133	} while(pHba->hrt == NULL);
3134	return 0;
3135}
3136
3137/*
3138 *	 Query one scalar group value or a whole scalar group.
3139 */
3140static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3141			int group, int field, void *buf, int buflen)
3142{
3143	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3144	u8  resblk[8+buflen]; /* 8 bytes for header */
3145	int size;
3146
3147	if (field == -1)  		/* whole group */
3148			opblk[4] = -1;
3149
3150	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3151		opblk, sizeof(opblk), resblk, sizeof(resblk));
3152
3153	memcpy(buf, resblk+8, buflen);  /* cut off header */
3154
3155	if (size < 0)
3156		return size;
3157
3158	return buflen;
3159}
3160
3161
3162/*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3163 *
3164 *	This function can be used for all UtilParamsGet/Set operations.
3165 *	The OperationBlock is given in opblk-buffer,
3166 *	and results are returned in resblk-buffer.
3167 *	Note that the minimum sized resblk is 8 bytes and contains
3168 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3169 */
3170static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3171		  void *opblk, int oplen, void *resblk, int reslen)
3172{
3173	u32 msg[9];
3174	u32 *res = (u32 *)resblk;
3175	int wait_status;
3176
3177	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3178	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3179	msg[2] = 0;
3180	msg[3] = 0;
3181	msg[4] = 0;
3182	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3183	msg[6] = virt_to_bus(opblk);
3184	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3185	msg[8] = virt_to_bus(resblk);
3186
3187	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3188   		return wait_status; 	/* -DetailedStatus */
3189	}
3190
3191	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3192		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3193			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3194			pHba->name,
3195			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3196							 : "PARAMS_GET",
3197			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3198		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3199	}
3200
3201	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3202}
3203
3204
3205static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3206{
3207	u32 msg[4];
3208	int ret;
3209
3210	adpt_i2o_status_get(pHba);
3211
3212	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3213
3214	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3215   	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3216		return 0;
3217	}
3218
3219	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3220	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3221	msg[2] = 0;
3222	msg[3] = 0;
3223
3224	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3225		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3226				pHba->unit, -ret);
3227	} else {
3228		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3229	}
3230
3231	adpt_i2o_status_get(pHba);
3232	return ret;
3233}
3234
3235
3236/*
3237 * Enable IOP. Allows the IOP to resume external operations.
3238 */
3239static int adpt_i2o_enable_hba(adpt_hba* pHba)
3240{
3241	u32 msg[4];
3242	int ret;
3243
3244	adpt_i2o_status_get(pHba);
3245	if(!pHba->status_block){
3246		return -ENOMEM;
3247	}
3248	/* Enable only allowed on READY state */
3249	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3250		return 0;
3251
3252	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3253		return -EINVAL;
3254
3255	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3256	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3257	msg[2]= 0;
3258	msg[3]= 0;
3259
3260	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3261		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3262			pHba->name, ret);
3263	} else {
3264		PDEBUG("%s: Enabled.\n", pHba->name);
3265	}
3266
3267	adpt_i2o_status_get(pHba);
3268	return ret;
3269}
3270
3271
3272static int adpt_i2o_systab_send(adpt_hba* pHba)
3273{
3274	 u32 msg[12];
3275	 int ret;
3276
3277	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3278	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3279	msg[2] = 0;
3280	msg[3] = 0;
3281	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3282	msg[5] = 0;				   /* Segment 0 */
3283
3284	/*
3285	 * Provide three SGL-elements:
3286	 * System table (SysTab), Private memory space declaration and
3287	 * Private i/o space declaration
3288	 */
3289	msg[6] = 0x54000000 | sys_tbl_len;
3290	msg[7] = virt_to_phys(sys_tbl);
3291	msg[8] = 0x54000000 | 0;
3292	msg[9] = 0;
3293	msg[10] = 0xD4000000 | 0;
3294	msg[11] = 0;
3295
3296	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3297		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3298			pHba->name, ret);
3299	}
3300#ifdef DEBUG
3301	else {
3302		PINFO("%s: SysTab set.\n", pHba->name);
3303	}
3304#endif
3305
3306	return ret;
3307 }
3308
3309
3310/*============================================================================
3311 *
3312 *============================================================================
3313 */
3314
3315
3316#ifdef UARTDELAY
3317
/*
 * Busy-wait for the given number of milliseconds, built on udelay().
 * Only compiled in when UARTDELAY debugging is enabled.
 * (Fixed: the original had a duplicated "static static" storage-class
 * specifier, which is a compile error.)
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3325
3326#endif
3327
/* Register with the 2.4 SCSI mid-layer: scsi_module.c supplies the
 * module init/exit glue built around driver_template (DPT_I2O expands
 * to the host template initializer). */
static Scsi_Host_Template driver_template = DPT_I2O;
#include "scsi_module.c"
EXPORT_NO_SYMBOLS;
MODULE_LICENSE("GPL");
3332