1/*
2   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4   Written By: Adam Radford <aradford@gmail.com>
5   Modifications By: Tom Couch
6
7   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8   Copyright (C) 2010 LSI Corporation.
9
10   This program is free software; you can redistribute it and/or modify
11   it under the terms of the GNU General Public License as published by
12   the Free Software Foundation; version 2 of the License.
13
14   This program is distributed in the hope that it will be useful,
15   but WITHOUT ANY WARRANTY; without even the implied warranty of
16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17   GNU General Public License for more details.
18
19   NO WARRANTY
20   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24   solely responsible for determining the appropriateness of using and
25   distributing the Program and assumes all risks associated with its
26   exercise of rights under this Agreement, including but not limited to
27   the risks and costs of program errors, damage to or loss of data,
28   programs or equipment, and unavailability or interruption of operations.
29
30   DISCLAIMER OF LIABILITY
31   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39   You should have received a copy of the GNU General Public License
40   along with this program; if not, write to the Free Software
41   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
42
43   Bugs/Comments/Suggestions should be mailed to:
44   aradford@gmail.com
45
46   Note: This version of the driver does not contain a bundled firmware
47         image.
48
49   History
50   -------
51   2.26.02.000 - Driver cleanup for kernel submission.
52   2.26.02.001 - Replace schedule_timeout() calls with msleep().
53   2.26.02.002 - Add support for PAE mode.
54                 Add lun support.
55                 Fix twa_remove() to free irq handler/unregister_chrdev()
56                 before shutting down card.
57                 Change to new 'change_queue_depth' api.
58                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
59                 Remove un-needed eh_abort handler.
60                 Add support for embedded firmware error strings.
61   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
62   2.26.02.004 - Add support for 9550SX controllers.
63   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64   2.26.02.006 - Fix 9550SX pchip reset timeout.
65                 Add big endian support.
66   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
67   2.26.02.008 - Free irq handler in __twa_shutdown().
68                 Serialize reset code.
69                 Add support for 9650SE controllers.
70   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
71   2.26.02.010 - Add support for 9690SA controllers.
72   2.26.02.011 - Increase max AENs drained to 256.
73                 Add MSI support and "use_msi" module parameter.
74                 Fix bug in twa_get_param() on 4GB+.
75                 Use pci_resource_len() for ioremap().
76   2.26.02.012 - Add power management support.
77   2.26.02.013 - Fix bug in twa_load_sgl().
78   2.26.02.014 - Force 60 second timeout default.
79*/
80
81#include <linux/module.h>
82#include <linux/reboot.h>
83#include <linux/spinlock.h>
84#include <linux/interrupt.h>
85#include <linux/moduleparam.h>
86#include <linux/errno.h>
87#include <linux/types.h>
88#include <linux/delay.h>
89#include <linux/pci.h>
90#include <linux/time.h>
91#include <linux/mutex.h>
92#include <linux/slab.h>
93#include <asm/io.h>
94#include <asm/irq.h>
95#include <linux/uaccess.h>
96#include <scsi/scsi.h>
97#include <scsi/scsi_host.h>
98#include <scsi/scsi_tcq.h>
99#include <scsi/scsi_cmnd.h>
100#include "3w-9xxx.h"
101
102/* Globals */
103#define TW_DRIVER_VERSION "2.26.02.014"
104static DEFINE_MUTEX(twa_chrdev_mutex);
105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
106static unsigned int twa_device_extension_count;
107static int twa_major = -1;
108extern struct timezone sys_tz;
109
110/* Module parameters */
111MODULE_AUTHOR ("LSI");
112MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
113MODULE_LICENSE("GPL");
114MODULE_VERSION(TW_DRIVER_VERSION);
115
116static int use_msi = 0;
117module_param(use_msi, int, S_IRUGO);
118MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
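/* Example: enable MSI at module load time with "modprobe 3w-9xxx use_msi=1" */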
119
120/* Function prototypes */
121static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
122static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
123static char *twa_aen_severity_lookup(unsigned char severity_code);
124static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
125static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
126static int twa_chrdev_open(struct inode *inode, struct file *file);
127static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
128static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
129static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
130static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
131			      u32 set_features, unsigned short current_fw_srl,
132			      unsigned short current_fw_arch_id,
133			      unsigned short current_fw_branch,
134			      unsigned short current_fw_build,
135			      unsigned short *fw_on_ctlr_srl,
136			      unsigned short *fw_on_ctlr_arch_id,
137			      unsigned short *fw_on_ctlr_branch,
138			      unsigned short *fw_on_ctlr_build,
139			      u32 *init_connect_result);
140static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
141static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
142static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
143static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
144static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
145static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
146static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
147				   unsigned char *cdb, int use_sg,
148				   TW_SG_Entry *sglistarg);
149static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
150static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
151
152/* Functions */
153
154/* Show some statistics about the card */
155static ssize_t twa_show_stats(struct device *dev,
156			      struct device_attribute *attr, char *buf)
157{
158	struct Scsi_Host *host = class_to_shost(dev);
159	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
160	unsigned long flags = 0;
161	ssize_t len;
162
163	spin_lock_irqsave(tw_dev->host->host_lock, flags);
164	len = sysfs_emit(buf, "3w-9xxx Driver version: %s\n"
165			 "Current commands posted:   %4d\n"
166			 "Max commands posted:       %4d\n"
167			 "Current pending commands:  %4d\n"
168			 "Max pending commands:      %4d\n"
169			 "Last sgl length:           %4d\n"
170			 "Max sgl length:            %4d\n"
171			 "Last sector count:         %4d\n"
172			 "Max sector count:          %4d\n"
173			 "SCSI Host Resets:          %4d\n"
174			 "AEN's:                     %4d\n",
175			 TW_DRIVER_VERSION,
176			 tw_dev->posted_request_count,
177			 tw_dev->max_posted_request_count,
178			 tw_dev->pending_request_count,
179			 tw_dev->max_pending_request_count,
180			 tw_dev->sgl_entries,
181			 tw_dev->max_sgl_entries,
182			 tw_dev->sector_count,
183			 tw_dev->max_sector_count,
184			 tw_dev->num_resets,
185			 tw_dev->aen_count);
186	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
187	return len;
188} /* End twa_show_stats() */
189
190/* Create sysfs 'stats' entry */
191static struct device_attribute twa_host_stats_attr = {
192	.attr = {
193		.name =		"stats",
194		.mode =		S_IRUGO,
195	},
196	.show = twa_show_stats
197};
198
199/* Host attributes initializer */
200static struct attribute *twa_host_attrs[] = {
201	&twa_host_stats_attr.attr,
202	NULL,
203};
204
205ATTRIBUTE_GROUPS(twa_host);
206
207/* File operations struct for character device */
208static const struct file_operations twa_fops = {
209	.owner		= THIS_MODULE,
210	.unlocked_ioctl	= twa_chrdev_ioctl,
211	.open		= twa_chrdev_open,
212	.release	= NULL,
213	.llseek		= noop_llseek,
214};
215
216/*
217 * The controllers use an inline buffer instead of a mapped SGL for small,
218 * single entry buffers.  Note that we treat a zero-length transfer like
219 * a mapped SGL.
220 */
221static bool twa_command_mapped(struct scsi_cmnd *cmd)
222{
223	return scsi_sg_count(cmd) != 1 ||
224		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
225}
226
227/* This function will complete an aen request from the isr */
228static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
229{
230	TW_Command_Full *full_command_packet;
231	TW_Command *command_packet;
232	TW_Command_Apache_Header *header;
233	unsigned short aen;
234	int retval = 1;
235
236	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
237	tw_dev->posted_request_count--;
238	aen = le16_to_cpu(header->status_block.error);
239	full_command_packet = tw_dev->command_packet_virt[request_id];
240	command_packet = &full_command_packet->command.oldcommand;
241
242	/* First check for internal completion of set param for time sync */
243	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
244		/* Keep reading the queue in case there are more aen's */
245		if (twa_aen_read_queue(tw_dev, request_id))
246			goto out2;
247		else {
248			retval = 0;
249			goto out;
250		}
251	}
252
253	switch (aen) {
254	case TW_AEN_QUEUE_EMPTY:
255		/* Quit reading the queue if this is the last one */
256		break;
257	case TW_AEN_SYNC_TIME_WITH_HOST:
258		twa_aen_sync_time(tw_dev, request_id);
259		retval = 0;
260		goto out;
261	default:
262		twa_aen_queue_event(tw_dev, header);
263
264		/* If there are more aen's, keep reading the queue */
265		if (twa_aen_read_queue(tw_dev, request_id))
266			goto out2;
267		else {
268			retval = 0;
269			goto out;
270		}
271	}
272	retval = 0;
273out2:
274	tw_dev->state[request_id] = TW_S_COMPLETED;
275	twa_free_request_id(tw_dev, request_id);
276	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
277out:
278	return retval;
279} /* End twa_aen_complete() */
280
281/* This function will drain aen queue */
282static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
283{
284	int request_id = 0;
285	unsigned char cdb[TW_MAX_CDB_LEN];
286	TW_SG_Entry sglist[1];
287	int finished = 0, count = 0;
288	TW_Command_Full *full_command_packet;
289	TW_Command_Apache_Header *header;
290	unsigned short aen;
291	int first_reset = 0, queue = 0, retval = 1;
292
293	if (no_check_reset)
294		first_reset = 0;
295	else
296		first_reset = 1;
297
298	full_command_packet = tw_dev->command_packet_virt[request_id];
299	memset(full_command_packet, 0, sizeof(TW_Command_Full));
300
301	/* Initialize cdb */
302	memset(&cdb, 0, TW_MAX_CDB_LEN);
303	cdb[0] = REQUEST_SENSE; /* opcode */
304	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
305
306	/* Initialize sglist */
307	memset(&sglist, 0, sizeof(TW_SG_Entry));
308	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
309	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
310
311	if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
312		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
313		goto out;
314	}
315
316	/* Mark internal command */
317	tw_dev->srb[request_id] = NULL;
318
319	do {
320		/* Send command to the board */
321		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
322			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
323			goto out;
324		}
325
326		/* Now poll for completion */
327		if (twa_poll_response(tw_dev, request_id, 30)) {
328			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
329			tw_dev->posted_request_count--;
330			goto out;
331		}
332
333		tw_dev->posted_request_count--;
334		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
335		aen = le16_to_cpu(header->status_block.error);
336		queue = 0;
337		count++;
338
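		/*
		 * An empty-queue AEN completes the drain successfully only if
		 * first_reset has been set, either at entry or by the first
		 * TW_AEN_SOFT_RESET seen; further soft resets and other AENs
		 * (except time-sync requests) are queued as real events below.
		 */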
339		switch (aen) {
340		case TW_AEN_QUEUE_EMPTY:
341			if (first_reset != 1)
342				goto out;
343			else
344				finished = 1;
345			break;
346		case TW_AEN_SOFT_RESET:
347			if (first_reset == 0)
348				first_reset = 1;
349			else
350				queue = 1;
351			break;
352		case TW_AEN_SYNC_TIME_WITH_HOST:
353			break;
354		default:
355			queue = 1;
356		}
357
		/* Now queue the event info */
359		if (queue)
360			twa_aen_queue_event(tw_dev, header);
361	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
362
363	if (count == TW_MAX_AEN_DRAIN)
364		goto out;
365
366	retval = 0;
367out:
368	tw_dev->state[request_id] = TW_S_INITIAL;
369	return retval;
370} /* End twa_aen_drain_queue() */
371
372/* This function will queue an event */
373static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
374{
375	u32 local_time;
376	TW_Event *event;
377	unsigned short aen;
378	char host[16];
379	char *error_str;
380
381	tw_dev->aen_count++;
382
383	/* Fill out event info */
384	event = tw_dev->event_queue[tw_dev->error_index];
385
	/* Check for clobber (overwriting an event that has not been retrieved) */
387	host[0] = '\0';
388	if (tw_dev->host) {
389		sprintf(host, " scsi%d:", tw_dev->host->host_no);
390		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
391			tw_dev->aen_clobber = 1;
392	}
393
394	aen = le16_to_cpu(header->status_block.error);
395	memset(event, 0, sizeof(TW_Event));
396
397	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
398	/* event->time_stamp_sec overflows in y2106 */
399	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
400	event->time_stamp_sec = local_time;
401	event->aen_code = aen;
402	event->retrieved = TW_AEN_NOT_RETRIEVED;
403	event->sequence_id = tw_dev->error_sequence_id;
404	tw_dev->error_sequence_id++;
405
406	/* Check for embedded error string */
407	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
408
409	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
410	event->parameter_len = strlen(header->err_specific_desc);
411	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
412	if (event->severity != TW_AEN_SEVERITY_DEBUG)
413		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
414		       host,
415		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
416		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
417		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
418		       header->err_specific_desc);
419	else
420		tw_dev->aen_count--;
421
422	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
423		tw_dev->event_queue_wrapped = 1;
424	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
425} /* End twa_aen_queue_event() */
426
427/* This function will read the aen queue from the isr */
428static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
429{
430	unsigned char cdb[TW_MAX_CDB_LEN];
431	TW_SG_Entry sglist[1];
432	TW_Command_Full *full_command_packet;
433	int retval = 1;
434
435	full_command_packet = tw_dev->command_packet_virt[request_id];
436	memset(full_command_packet, 0, sizeof(TW_Command_Full));
437
438	/* Initialize cdb */
439	memset(&cdb, 0, TW_MAX_CDB_LEN);
440	cdb[0] = REQUEST_SENSE; /* opcode */
441	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
442
443	/* Initialize sglist */
444	memset(&sglist, 0, sizeof(TW_SG_Entry));
445	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
446	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
447
448	/* Mark internal command */
449	tw_dev->srb[request_id] = NULL;
450
451	/* Now post the command packet */
452	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
453		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
454		goto out;
455	}
456	retval = 0;
457out:
458	return retval;
459} /* End twa_aen_read_queue() */
460
461/* This function will look up an AEN severity string */
462static char *twa_aen_severity_lookup(unsigned char severity_code)
463{
464	char *retval = NULL;
465
466	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
467	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
468		goto out;
469
470	retval = twa_aen_severity_table[severity_code];
471out:
472	return retval;
473} /* End twa_aen_severity_lookup() */
474
475/* This function will sync firmware time with the host time */
476static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
477{
478	u32 schedulertime;
479	TW_Command_Full *full_command_packet;
480	TW_Command *command_packet;
481	TW_Param_Apache *param;
482	time64_t local_time;
483
484	/* Fill out the command packet */
485	full_command_packet = tw_dev->command_packet_virt[request_id];
486	memset(full_command_packet, 0, sizeof(TW_Command_Full));
487	command_packet = &full_command_packet->command.oldcommand;
488	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
489	command_packet->request_id = request_id;
490	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
491	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
492	command_packet->size = TW_COMMAND_SIZE;
493	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
494
495	/* Setup the param */
496	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
497	memset(param, 0, TW_SECTOR_SIZE);
498	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
499	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
500	param->parameter_size_bytes = cpu_to_le16(4);
501
502	/* Convert system time in UTC to local time seconds since last
503           Sunday 12:00AM */
504	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
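	/*
	 * 604800 is the number of seconds in a week; the Unix epoch fell on a
	 * Thursday, so offsetting by three days makes the remainder count from
	 * Sunday 12:00AM.
	 */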
505	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
506
507	memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));
508
509	/* Mark internal command */
510	tw_dev->srb[request_id] = NULL;
511
512	/* Now post the command */
513	twa_post_command_packet(tw_dev, request_id, 1);
514} /* End twa_aen_sync_time() */
515
516/* This function will allocate memory and check if it is correctly aligned */
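/* 'which' selects the pool: 0 = command packet buffers, 1 = generic (TW_SECTOR_SIZE) buffers */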
517static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
518{
519	int i;
520	dma_addr_t dma_handle;
521	unsigned long *cpu_addr;
522	int retval = 1;
523
524	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
525			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
526	if (!cpu_addr) {
527		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
528		goto out;
529	}
530
531	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
532		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
533		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
534				cpu_addr, dma_handle);
535		goto out;
536	}
537
538	memset(cpu_addr, 0, size*TW_Q_LENGTH);
539
540	for (i = 0; i < TW_Q_LENGTH; i++) {
541		switch(which) {
542		case 0:
543			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
544			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
545			break;
546		case 1:
547			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
548			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
549			break;
550		}
551	}
552	retval = 0;
553out:
554	return retval;
555} /* End twa_allocate_memory() */
556
557/* This function will check the status register for unexpected bits */
558static int twa_check_bits(u32 status_reg_value)
559{
560	int retval = 1;
561
562	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
563		goto out;
564	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
565		goto out;
566
567	retval = 0;
568out:
569	return retval;
570} /* End twa_check_bits() */
571
572/* This function will check the srl and decide if we are compatible  */
573static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
574{
575	int retval = 1;
576	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
577	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
578	u32 init_connect_result = 0;
579
580	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
581			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
582			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
583			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
584			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
585			       &fw_on_ctlr_build, &init_connect_result)) {
586		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
587		goto out;
588	}
589
590	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
591	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
592	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
593
594	/* Try base mode compatibility */
595	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
596		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
597				       TW_EXTENDED_INIT_CONNECT,
598				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
599				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
600				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
601				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
602				       &init_connect_result)) {
603			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
604			goto out;
605		}
606		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
607			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
608				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
609			} else {
610				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
611			}
612			goto out;
613		}
614		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
615		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
616		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
617	}
618
619	/* Load rest of compatibility struct */
620	strscpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
621		sizeof(tw_dev->tw_compat_info.driver_version));
622	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
623	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
624	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
625	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
626	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
627	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
628	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
629	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
630	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
631
632	retval = 0;
633out:
634	return retval;
635} /* End twa_check_srl() */
636
637/* This function handles ioctl for the character device */
638static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
639{
640	struct inode *inode = file_inode(file);
641	long timeout;
642	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
643	dma_addr_t dma_handle;
644	int request_id = 0;
645	unsigned int sequence_id = 0;
646	unsigned char event_index, start_index;
647	TW_Ioctl_Driver_Command driver_command;
648	TW_Ioctl_Buf_Apache *tw_ioctl;
649	TW_Lock *tw_lock;
650	TW_Command_Full *full_command_packet;
651	TW_Compatibility_Info *tw_compat_info;
652	TW_Event *event;
653	ktime_t current_time;
654	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
655	int retval = TW_IOCTL_ERROR_OS_EFAULT;
656	void __user *argp = (void __user *)arg;
657
658	mutex_lock(&twa_chrdev_mutex);
659
660	/* Only let one of these through at a time */
661	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
662		retval = TW_IOCTL_ERROR_OS_EINTR;
663		goto out;
664	}
665
666	/* First copy down the driver command */
667	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
668		goto out2;
669
670	/* Check data buffer size */
671	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
672		retval = TW_IOCTL_ERROR_OS_EINVAL;
673		goto out2;
674	}
675
676	/* Hardware can only do multiple of 512 byte transfers */
677	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
678
679	/* Now allocate ioctl buf memory */
680	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
681				      sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
682				      &dma_handle, GFP_KERNEL);
683	if (!cpu_addr) {
684		retval = TW_IOCTL_ERROR_OS_ENOMEM;
685		goto out2;
686	}
687
688	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
689
690	/* Now copy down the entire ioctl */
691	if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
692		goto out3;
693
694	/* See which ioctl we are doing */
695	switch (cmd) {
696	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
697		spin_lock_irqsave(tw_dev->host->host_lock, flags);
698		twa_get_request_id(tw_dev, &request_id);
699
700		/* Flag internal command */
701		tw_dev->srb[request_id] = NULL;
702
703		/* Flag chrdev ioctl */
704		tw_dev->chrdev_request_id = request_id;
705
706		full_command_packet = &tw_ioctl->firmware_command;
707
708		/* Load request id and sglist for both command types */
709		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
710
711		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
712
713		/* Now post the command packet to the controller */
714		twa_post_command_packet(tw_dev, request_id, 1);
715		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
716
717		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
718
719		/* Now wait for command to complete */
720		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
721
722		/* We timed out, and didn't get an interrupt */
723		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
724			/* Now we need to reset the board */
725			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
726			       tw_dev->host->host_no, TW_DRIVER, 0x37,
727			       cmd);
728			retval = TW_IOCTL_ERROR_OS_EIO;
729			twa_reset_device_extension(tw_dev);
730			goto out3;
731		}
732
733		/* Now copy in the command packet response */
734		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
735
736		/* Now complete the io */
737		spin_lock_irqsave(tw_dev->host->host_lock, flags);
738		tw_dev->posted_request_count--;
739		tw_dev->state[request_id] = TW_S_COMPLETED;
740		twa_free_request_id(tw_dev, request_id);
741		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
742		break;
743	case TW_IOCTL_GET_COMPATIBILITY_INFO:
744		tw_ioctl->driver_command.status = 0;
745		/* Copy compatibility struct into ioctl data buffer */
746		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
747		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
748		break;
749	case TW_IOCTL_GET_LAST_EVENT:
750		if (tw_dev->event_queue_wrapped) {
751			if (tw_dev->aen_clobber) {
752				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
753				tw_dev->aen_clobber = 0;
754			} else
755				tw_ioctl->driver_command.status = 0;
756		} else {
757			if (!tw_dev->error_index) {
758				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
759				break;
760			}
761			tw_ioctl->driver_command.status = 0;
762		}
763		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
764		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
765		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
766		break;
767	case TW_IOCTL_GET_FIRST_EVENT:
768		if (tw_dev->event_queue_wrapped) {
769			if (tw_dev->aen_clobber) {
770				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
771				tw_dev->aen_clobber = 0;
772			} else
773				tw_ioctl->driver_command.status = 0;
774			event_index = tw_dev->error_index;
775		} else {
776			if (!tw_dev->error_index) {
777				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
778				break;
779			}
780			tw_ioctl->driver_command.status = 0;
781			event_index = 0;
782		}
783		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
784		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
785		break;
786	case TW_IOCTL_GET_NEXT_EVENT:
787		event = (TW_Event *)tw_ioctl->data_buffer;
788		sequence_id = event->sequence_id;
789		tw_ioctl->driver_command.status = 0;
790
791		if (tw_dev->event_queue_wrapped) {
792			if (tw_dev->aen_clobber) {
793				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
794				tw_dev->aen_clobber = 0;
795			}
796			start_index = tw_dev->error_index;
797		} else {
798			if (!tw_dev->error_index) {
799				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
800				break;
801			}
802			start_index = 0;
803		}
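		/*
		 * Map the caller's sequence_id to a queue slot relative to the
		 * oldest entry (start_index); the entry found must be newer than
		 * sequence_id, otherwise there are no more events to return.
		 */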
804		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
805
806		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
807			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
808				tw_dev->aen_clobber = 1;
809			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
810			break;
811		}
812		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
813		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
814		break;
815	case TW_IOCTL_GET_PREVIOUS_EVENT:
816		event = (TW_Event *)tw_ioctl->data_buffer;
817		sequence_id = event->sequence_id;
818		tw_ioctl->driver_command.status = 0;
819
820		if (tw_dev->event_queue_wrapped) {
821			if (tw_dev->aen_clobber) {
822				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
823				tw_dev->aen_clobber = 0;
824			}
825			start_index = tw_dev->error_index;
826		} else {
827			if (!tw_dev->error_index) {
828				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
829				break;
830			}
831			start_index = 0;
832		}
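		/*
		 * Same mapping as GET_NEXT_EVENT, but step one slot back and
		 * require an entry older than the caller's sequence_id.
		 */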
833		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
834
835		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
836			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
837				tw_dev->aen_clobber = 1;
838			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
839			break;
840		}
841		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
842		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
843		break;
844	case TW_IOCTL_GET_LOCK:
845		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
846		current_time = ktime_get();
847
848		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
849		    ktime_after(current_time, tw_dev->ioctl_time)) {
850			tw_dev->ioctl_sem_lock = 1;
851			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
852			tw_ioctl->driver_command.status = 0;
853			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
854		} else {
855			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
856			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
857		}
858		break;
859	case TW_IOCTL_RELEASE_LOCK:
860		if (tw_dev->ioctl_sem_lock == 1) {
861			tw_dev->ioctl_sem_lock = 0;
862			tw_ioctl->driver_command.status = 0;
863		} else {
864			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
865		}
866		break;
867	default:
868		retval = TW_IOCTL_ERROR_OS_ENOTTY;
869		goto out3;
870	}
871
872	/* Now copy the entire response to userspace */
873	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
874		retval = 0;
875out3:
876	/* Now free ioctl buf memory */
877	dma_free_coherent(&tw_dev->tw_pci_dev->dev,
878			  sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
879			  cpu_addr, dma_handle);
880out2:
881	mutex_unlock(&tw_dev->ioctl_lock);
882out:
883	mutex_unlock(&twa_chrdev_mutex);
884	return retval;
885} /* End twa_chrdev_ioctl() */
886
887/* This function handles open for the character device */
888/* NOTE that this function will race with remove. */
889static int twa_chrdev_open(struct inode *inode, struct file *file)
890{
891	unsigned int minor_number;
892	int retval = TW_IOCTL_ERROR_OS_ENODEV;
893
894	if (!capable(CAP_SYS_ADMIN)) {
895		retval = -EACCES;
896		goto out;
897	}
898
899	minor_number = iminor(inode);
900	if (minor_number >= twa_device_extension_count)
901		goto out;
902	retval = 0;
903out:
904	return retval;
905} /* End twa_chrdev_open() */
906
/* This function will print readable messages for status register errors */
908static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
909{
910	int retval = 1;
911
912	/* Check for various error conditions and handle them appropriately */
913	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
914		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
915		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
916	}
917
918	if (status_reg_value & TW_STATUS_PCI_ABORT) {
919		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
920		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
921		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
922	}
923
924	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
925		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
926		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
927		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
928			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
929		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
930	}
931
932	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
933		if (tw_dev->reset_print == 0) {
934			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
935			tw_dev->reset_print = 1;
936		}
937		goto out;
938	}
939	retval = 0;
940out:
941	return retval;
942} /* End twa_decode_bits() */
943
944/* This function will empty the response queue */
945static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
946{
947	u32 status_reg_value;
948	int count = 0, retval = 1;
949
950	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
951
952	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
953		readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
954		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
955		count++;
956	}
957	if (count == TW_MAX_RESPONSE_DRAIN)
958		goto out;
959
960	retval = 0;
961out:
962	return retval;
963} /* End twa_empty_response_queue() */
964
965/* This function will clear the pchip/response queue on 9550SX */
966static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
967{
968	u32 response_que_value = 0;
969	unsigned long before;
970	int retval = 1;
971
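	/*
	 * Only the original 9000 controller skips this drain; on 9550SX and
	 * newer the large response queue register is read until the pchip
	 * reports the drain complete (30 second limit), then allowed to settle.
	 */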
972	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
973		before = jiffies;
974		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
975			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
976			msleep(1);
977			if (time_after(jiffies, before + HZ * 30))
978				goto out;
979		}
980		/* P-chip settle time */
981		msleep(500);
982		retval = 0;
983	} else
984		retval = 0;
985out:
986	return retval;
987} /* End twa_empty_response_queue_large() */
988
989/* This function passes sense keys from firmware to scsi layer */
990static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
991{
992	TW_Command_Full *full_command_packet;
993	unsigned short error;
994	int retval = 1;
995	char *error_str;
996
997	full_command_packet = tw_dev->command_packet_virt[request_id];
998
999	/* Check for embedded error string */
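	/*
	 * A second NUL-terminated string may follow err_specific_desc in the
	 * same buffer; when present it is printed instead of the table lookup.
	 */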
1000	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
1001
1002	/* Don't print error for Logical unit not supported during rollcall */
1003	error = le16_to_cpu(full_command_packet->header.status_block.error);
1004	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1005		if (print_host)
1006			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1007			       tw_dev->host->host_no,
1008			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
1009			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
1010			       full_command_packet->header.err_specific_desc);
1011		else
1012			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1013			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
1014			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
1015			       full_command_packet->header.err_specific_desc);
1016	}
1017
1018	if (copy_sense) {
1019		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1020		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1021		retval = TW_ISR_DONT_RESULT;
1022		goto out;
1023	}
1024	retval = 0;
1025out:
1026	return retval;
1027} /* End twa_fill_sense() */
1028
1029/* This function will free up device extension resources */
1030static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1031{
1032	if (tw_dev->command_packet_virt[0])
1033		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1034				sizeof(TW_Command_Full) * TW_Q_LENGTH,
1035				tw_dev->command_packet_virt[0],
1036				tw_dev->command_packet_phys[0]);
1037
1038	if (tw_dev->generic_buffer_virt[0])
1039		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1040				TW_SECTOR_SIZE * TW_Q_LENGTH,
1041				tw_dev->generic_buffer_virt[0],
1042				tw_dev->generic_buffer_phys[0]);
1043
1044	kfree(tw_dev->event_queue[0]);
1045} /* End twa_free_device_extension() */
1046
1047/* This function will free a request id */
1048static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1049{
1050	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1051	tw_dev->state[request_id] = TW_S_FINISHED;
1052	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1053} /* End twa_free_request_id() */
1054
1055/* This function will get parameter table entries from the firmware */
1056static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1057{
1058	TW_Command_Full *full_command_packet;
1059	TW_Command *command_packet;
1060	TW_Param_Apache *param;
1061	void *retval = NULL;
1062
1063	/* Setup the command packet */
1064	full_command_packet = tw_dev->command_packet_virt[request_id];
1065	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1066	command_packet = &full_command_packet->command.oldcommand;
1067
1068	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1069	command_packet->size		  = TW_COMMAND_SIZE;
1070	command_packet->request_id	  = request_id;
1071	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1072
1073	/* Now setup the param */
1074	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1075	memset(param, 0, TW_SECTOR_SIZE);
1076	param->table_id = cpu_to_le16(table_id | 0x8000);
1077	param->parameter_id = cpu_to_le16(parameter_id);
1078	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1079
1080	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1081	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1082
1083	/* Post the command packet to the board */
1084	twa_post_command_packet(tw_dev, request_id, 1);
1085
1086	/* Poll for completion */
1087	if (twa_poll_response(tw_dev, request_id, 30))
1088		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1089	else
1090		retval = (void *)&(param->data[0]);
1091
1092	tw_dev->posted_request_count--;
1093	tw_dev->state[request_id] = TW_S_INITIAL;
1094
1095	return retval;
1096} /* End twa_get_param() */
1097
1098/* This function will assign an available request id */
1099static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1100{
1101	*request_id = tw_dev->free_queue[tw_dev->free_head];
1102	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1103	tw_dev->state[*request_id] = TW_S_STARTED;
1104} /* End twa_get_request_id() */
1105
1106/* This function will send an initconnection command to controller */
1107static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1108			      u32 set_features, unsigned short current_fw_srl,
1109			      unsigned short current_fw_arch_id,
1110			      unsigned short current_fw_branch,
1111			      unsigned short current_fw_build,
1112			      unsigned short *fw_on_ctlr_srl,
1113			      unsigned short *fw_on_ctlr_arch_id,
1114			      unsigned short *fw_on_ctlr_branch,
1115			      unsigned short *fw_on_ctlr_build,
1116			      u32 *init_connect_result)
1117{
1118	TW_Command_Full *full_command_packet;
1119	TW_Initconnect *tw_initconnect;
1120	int request_id = 0, retval = 1;
1121
1122	/* Initialize InitConnection command packet */
1123	full_command_packet = tw_dev->command_packet_virt[request_id];
1124	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1125	full_command_packet->header.header_desc.size_header = 128;
1126
1127	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1128	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1129	tw_initconnect->request_id = request_id;
1130	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1131
1132	/* Turn on 64-bit sgl support if we need to */
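	/* Bit 0 of the features word selects 64-bit SG addresses */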
1133	set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1134
1135	tw_initconnect->features = cpu_to_le32(set_features);
1136
1137	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1138		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1139		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1140		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1141		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1142		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1143	} else
1144		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1145
1146	/* Send command packet to the board */
1147	twa_post_command_packet(tw_dev, request_id, 1);
1148
1149	/* Poll for completion */
1150	if (twa_poll_response(tw_dev, request_id, 30)) {
1151		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1152	} else {
1153		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1154			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1155			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1156			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1157			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1158			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1159		}
1160		retval = 0;
1161	}
1162
1163	tw_dev->posted_request_count--;
1164	tw_dev->state[request_id] = TW_S_INITIAL;
1165
1166	return retval;
1167} /* End twa_initconnection() */
1168
1169/* This function will initialize the fields of a device extension */
1170static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1171{
1172	int i, retval = 1;
1173
1174	/* Initialize command packet buffers */
1175	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1176		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1177		goto out;
1178	}
1179
1180	/* Initialize generic buffer */
1181	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1182		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1183		goto out;
1184	}
1185
1186	/* Allocate event info space */
1187	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1188	if (!tw_dev->event_queue[0]) {
1189		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1190		goto out;
1191	}
1192
1193
1194	for (i = 0; i < TW_Q_LENGTH; i++) {
1195		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1196		tw_dev->free_queue[i] = i;
1197		tw_dev->state[i] = TW_S_INITIAL;
1198	}
1199
1200	tw_dev->pending_head = TW_Q_START;
1201	tw_dev->pending_tail = TW_Q_START;
1202	tw_dev->free_head = TW_Q_START;
1203	tw_dev->free_tail = TW_Q_START;
1204	tw_dev->error_sequence_id = 1;
1205	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1206
1207	mutex_init(&tw_dev->ioctl_lock);
1208	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1209
1210	retval = 0;
1211out:
1212	return retval;
1213} /* End twa_initialize_device_extension() */
1214
1215/* This function is the interrupt service routine */
1216static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1217{
1218	int request_id, error = 0;
1219	u32 status_reg_value;
1220	TW_Response_Queue response_que;
1221	TW_Command_Full *full_command_packet;
1222	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1223	int handled = 0;
1224
1225	/* Get the per adapter lock */
1226	spin_lock(tw_dev->host->host_lock);
1227
1228	/* Read the registers */
1229	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1230
1231	/* Check if this is our interrupt, otherwise bail */
1232	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1233		goto twa_interrupt_bail;
1234
1235	handled = 1;
1236
1237	/* If we are resetting, bail */
1238	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1239		goto twa_interrupt_bail;
1240
1241	/* Check controller for errors */
1242	if (twa_check_bits(status_reg_value)) {
1243		if (twa_decode_bits(tw_dev, status_reg_value)) {
1244			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1245			goto twa_interrupt_bail;
1246		}
1247	}
1248
1249	/* Handle host interrupt */
1250	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1251		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1252
1253	/* Handle attention interrupt */
1254	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1255		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1256		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1257			twa_get_request_id(tw_dev, &request_id);
1258
1259			error = twa_aen_read_queue(tw_dev, request_id);
1260			if (error) {
1261				tw_dev->state[request_id] = TW_S_COMPLETED;
1262				twa_free_request_id(tw_dev, request_id);
1263				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1264			}
1265		}
1266	}
1267
1268	/* Handle command interrupt */
1269	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1270		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1271		/* Drain as many pending commands as we can */
1272		while (tw_dev->pending_request_count > 0) {
1273			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1274			if (tw_dev->state[request_id] != TW_S_PENDING) {
1275				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1276				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1277				goto twa_interrupt_bail;
1278			}
1279			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1280				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1281				tw_dev->pending_request_count--;
1282			} else {
1283				/* If we get here, we will continue re-posting on the next command interrupt */
1284				break;
1285			}
1286		}
1287	}
1288
1289	/* Handle response interrupt */
1290	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1291
1292		/* Drain the response queue from the board */
1293		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1294			/* Complete the response */
1295			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1296			request_id = TW_RESID_OUT(response_que.response_id);
1297			full_command_packet = tw_dev->command_packet_virt[request_id];
1298			error = 0;
1299			/* Check for command packet errors */
1300			if (full_command_packet->command.newcommand.status != 0) {
1301				if (tw_dev->srb[request_id] != NULL) {
1302					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1303				} else {
1304					/* Skip ioctl error prints */
1305					if (request_id != tw_dev->chrdev_request_id) {
1306						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1307					}
1308				}
1309			}
1310
1311			/* Check for correct state */
1312			if (tw_dev->state[request_id] != TW_S_POSTED) {
1313				if (tw_dev->srb[request_id] != NULL) {
1314					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1315					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1316					goto twa_interrupt_bail;
1317				}
1318			}
1319
1320			/* Check for internal command completion */
1321			if (tw_dev->srb[request_id] == NULL) {
1322				if (request_id != tw_dev->chrdev_request_id) {
1323					if (twa_aen_complete(tw_dev, request_id))
1324						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1325				} else {
1326					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1327					wake_up(&tw_dev->ioctl_wqueue);
1328				}
1329			} else {
1330				struct scsi_cmnd *cmd;
1331
1332				cmd = tw_dev->srb[request_id];
1333
1334				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If there was no error, the command was a success */
1336				if (error == 0) {
1337					cmd->result = (DID_OK << 16);
1338				}
1339
				/* If there was an error, the command failed */
1341				if (error == 1) {
1342					/* Ask for a host reset */
1343					cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
1344				}
1345
1346				/* Report residual bytes for single sgl */
1347				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1348					u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);
1349
1350					if (length < scsi_bufflen(cmd))
1351						scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
1352				}
1353
1354				/* Now complete the io */
1355				if (twa_command_mapped(cmd))
1356					scsi_dma_unmap(cmd);
1357				scsi_done(cmd);
1358				tw_dev->state[request_id] = TW_S_COMPLETED;
1359				twa_free_request_id(tw_dev, request_id);
1360				tw_dev->posted_request_count--;
1361			}
1362
1363			/* Check for valid status after each drain */
1364			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1365			if (twa_check_bits(status_reg_value)) {
1366				if (twa_decode_bits(tw_dev, status_reg_value)) {
1367					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1368					goto twa_interrupt_bail;
1369				}
1370			}
1371		}
1372	}
1373
1374twa_interrupt_bail:
1375	spin_unlock(tw_dev->host->host_lock);
1376	return IRQ_RETVAL(handled);
1377} /* End twa_interrupt() */
1378
1379/* This function will load the request id and various sgls for ioctls */
1380static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1381{
1382	TW_Command *oldcommand;
1383	TW_Command_Apache *newcommand;
1384	TW_SG_Entry *sgl;
1385	unsigned int pae = 0;
1386
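	/*
	 * A 32-bit kernel with a 64-bit dma_addr_t (PAE) uses SG entries that
	 * are one 32-bit word larger, so the SGL offset and command size below
	 * are adjusted by one word.
	 */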
1387	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1388		pae = 1;
1389
1390	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1391		newcommand = &full_command_packet->command.newcommand;
1392		newcommand->request_id__lunl =
1393			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
1394		if (length) {
1395			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1396			newcommand->sg_list[0].length = cpu_to_le32(length);
1397		}
1398		newcommand->sgl_entries__lunh =
1399			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
1400	} else {
1401		oldcommand = &full_command_packet->command.oldcommand;
1402		oldcommand->request_id = request_id;
1403
1404		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1405			/* Load the sg list */
1406			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1407				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1408			else
1409				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1410			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1411			sgl->length = cpu_to_le32(length);
1412
1413			oldcommand->size += pae;
1414		}
1415	}
1416} /* End twa_load_sgl() */
1417
1418/* This function will poll for a response interrupt of a request */
1419static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1420{
1421	int retval = 1, found = 0, response_request_id;
1422	TW_Response_Queue response_queue;
1423	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1424
1425	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1426		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1427		response_request_id = TW_RESID_OUT(response_queue.response_id);
1428		if (request_id != response_request_id) {
1429			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1430			goto out;
1431		}
1432		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1433			if (full_command_packet->command.newcommand.status != 0) {
1434				/* bad response */
1435				twa_fill_sense(tw_dev, request_id, 0, 0);
1436				goto out;
1437			}
1438			found = 1;
1439		} else {
1440			if (full_command_packet->command.oldcommand.status != 0) {
1441				/* bad response */
1442				twa_fill_sense(tw_dev, request_id, 0, 0);
1443				goto out;
1444			}
1445			found = 1;
1446		}
1447	}
1448
1449	if (found)
1450		retval = 0;
1451out:
1452	return retval;
1453} /* End twa_poll_response() */
1454
1455/* This function will poll the status register for a flag */
1456static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1457{
1458	u32 status_reg_value;
1459	unsigned long before;
1460	int retval = 1;
1461
1462	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1463	before = jiffies;
1464
1465	if (twa_check_bits(status_reg_value))
1466		twa_decode_bits(tw_dev, status_reg_value);
1467
1468	while ((status_reg_value & flag) != flag) {
1469		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1470
1471		if (twa_check_bits(status_reg_value))
1472			twa_decode_bits(tw_dev, status_reg_value);
1473
1474		if (time_after(jiffies, before + HZ * seconds))
1475			goto out;
1476
1477		msleep(50);
1478	}
1479	retval = 0;
1480out:
1481	return retval;
1482} /* End twa_poll_status() */
1483
1484/* This function will poll the status register for disappearance of a flag */
1485static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1486{
1487	u32 status_reg_value;
1488	unsigned long before;
1489	int retval = 1;
1490
1491	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1492	before = jiffies;
1493
1494	if (twa_check_bits(status_reg_value))
1495		twa_decode_bits(tw_dev, status_reg_value);
1496
1497	while ((status_reg_value & flag) != 0) {
1498		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1499		if (twa_check_bits(status_reg_value))
1500			twa_decode_bits(tw_dev, status_reg_value);
1501
1502		if (time_after(jiffies, before + HZ * seconds))
1503			goto out;
1504
1505		msleep(50);
1506	}
1507	retval = 0;
1508out:
1509	return retval;
1510} /* End twa_poll_status_gone() */
1511
1512/* This function will attempt to post a command packet to the board */
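/*
 * Posting sketch: the command packet's bus address (plus
 * TW_COMMAND_OFFSET) is written to the controller's command queue
 * register; on 9650SE/9690SA the low dword goes to the large register
 * first and the high dword only after the status check, while other
 * controllers get one or two writes depending on dma_addr_t width.
 * If the controller queue is full or other requests are already
 * pending, internal commands are parked on the software pending queue
 * (with the command interrupt unmasked so they can be posted later)
 * and host commands are returned as SCSI_MLQUEUE_HOST_BUSY.
 */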
1513static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1514{
1515	u32 status_reg_value;
1516	dma_addr_t command_que_value;
1517	int retval = 1;
1518
1519	command_que_value = tw_dev->command_packet_phys[request_id];
1520
1521	/* For 9650SE write low 4 bytes first */
1522	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1523	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1524		command_que_value += TW_COMMAND_OFFSET;
1525		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1526	}
1527
1528	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1529
1530	if (twa_check_bits(status_reg_value))
1531		twa_decode_bits(tw_dev, status_reg_value);
1532
1533	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1534
1535		/* Only pend internal driver commands */
1536		if (!internal) {
1537			retval = SCSI_MLQUEUE_HOST_BUSY;
1538			goto out;
1539		}
1540
1541		/* Couldn't post the command packet, so we do it later */
1542		if (tw_dev->state[request_id] != TW_S_PENDING) {
1543			tw_dev->state[request_id] = TW_S_PENDING;
1544			tw_dev->pending_request_count++;
1545			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1546				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1547			}
1548			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1549			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1550		}
1551		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1552		goto out;
1553	} else {
1554		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1555		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1556			/* Now write upper 4 bytes */
1557			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1558		} else {
1559			if (sizeof(dma_addr_t) > 4) {
1560				command_que_value += TW_COMMAND_OFFSET;
1561				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1562				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1563			} else {
1564				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1565			}
1566		}
1567		tw_dev->state[request_id] = TW_S_POSTED;
1568		tw_dev->posted_request_count++;
1569		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1570			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1571		}
1572	}
1573	retval = 0;
1574out:
1575	return retval;
1576} /* End twa_post_command_packet() */
1577
1578/* This function will reset a device extension */
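/*
 * Reset sketch: interrupts are masked and, under the host lock, every
 * request that is still in flight is failed back to the midlayer with
 * DID_RESET and the free/pending queues and counters are
 * re-initialized; the controller is then put through
 * twa_reset_sequence() with a soft reset before interrupts are
 * re-enabled and the ioctl request id is marked free.
 */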
1579static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1580{
1581	int i = 0;
1582	int retval = 1;
1583	unsigned long flags = 0;
1584
1585	set_bit(TW_IN_RESET, &tw_dev->flags);
1586	TW_DISABLE_INTERRUPTS(tw_dev);
1587	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1588	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1589
1590	/* Abort all requests that are in progress */
1591	for (i = 0; i < TW_Q_LENGTH; i++) {
1592		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1593		    (tw_dev->state[i] != TW_S_INITIAL) &&
1594		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1595			if (tw_dev->srb[i]) {
1596				struct scsi_cmnd *cmd = tw_dev->srb[i];
1597
1598				cmd->result = (DID_RESET << 16);
1599				if (twa_command_mapped(cmd))
1600					scsi_dma_unmap(cmd);
1601				scsi_done(cmd);
1602			}
1603		}
1604	}
1605
1606	/* Reset queues and counts */
1607	for (i = 0; i < TW_Q_LENGTH; i++) {
1608		tw_dev->free_queue[i] = i;
1609		tw_dev->state[i] = TW_S_INITIAL;
1610	}
1611	tw_dev->free_head = TW_Q_START;
1612	tw_dev->free_tail = TW_Q_START;
1613	tw_dev->posted_request_count = 0;
1614	tw_dev->pending_request_count = 0;
1615	tw_dev->pending_head = TW_Q_START;
1616	tw_dev->pending_tail = TW_Q_START;
1617	tw_dev->reset_print = 0;
1618
1619	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1620
1621	if (twa_reset_sequence(tw_dev, 1))
1622		goto out;
1623
1624	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1625	clear_bit(TW_IN_RESET, &tw_dev->flags);
1626	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1627
1628	retval = 0;
1629out:
1630	return retval;
1631} /* End twa_reset_device_extension() */
1632
1633/* This function will reset a controller */
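/*
 * The loop below makes up to TW_MAX_RESET_TRIES attempts. Each attempt
 * may issue a soft reset and clear the pchip/response queue, then waits
 * for the microcontroller-ready status, empties the response queue,
 * runs the compatibility/flash check and drains the AEN queue; any
 * failure forces another soft reset and another pass, and only a fully
 * clean pass returns 0.
 */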
1634static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1635{
1636	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1637
1638	while (tries < TW_MAX_RESET_TRIES) {
1639		if (do_soft_reset) {
1640			TW_SOFT_RESET(tw_dev);
1641			/* Clear pchip/response queue on 9550SX */
1642			if (twa_empty_response_queue_large(tw_dev)) {
1643				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1644				do_soft_reset = 1;
1645				tries++;
1646				continue;
1647			}
1648		}
1649
1650		/* Make sure controller is in a good state */
1651		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1652			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1653			do_soft_reset = 1;
1654			tries++;
1655			continue;
1656		}
1657
1658		/* Empty response queue */
1659		if (twa_empty_response_queue(tw_dev)) {
1660			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1661			do_soft_reset = 1;
1662			tries++;
1663			continue;
1664		}
1665
1666		flashed = 0;
1667
1668		/* Check for compatibility/flash */
1669		if (twa_check_srl(tw_dev, &flashed)) {
1670			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1671			do_soft_reset = 1;
1672			tries++;
1673			continue;
1674		} else {
1675			if (flashed) {
1676				tries++;
1677				continue;
1678			}
1679		}
1680
1681		/* Drain the AEN queue */
1682		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1683			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1684			do_soft_reset = 1;
1685			tries++;
1686			continue;
1687		}
1688
1689		/* If we got here, controller is in a good state */
1690		retval = 0;
1691		goto out;
1692	}
1693out:
1694	return retval;
1695} /* End twa_reset_sequence() */
1696
1697/* This function returns unit geometry in heads/sectors/cylinders */
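/*
 * Geometry note: units of at least 0x200000 sectors (1 GiB at 512-byte
 * sectors) are reported with a 255-head/63-sector translation, smaller
 * units with 64/32, and the cylinder count is meant to come from the
 * capacity divided by heads * sectors via sector_div(). As a rough
 * illustration, a 4294967296-sector (2 TB) unit under 255/63 gives
 * about 4294967296 / (255 * 63) ~= 267349 cylinders.
 */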
1698static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1699{
1700	int heads, sectors, cylinders;
1701
1702	if (capacity >= 0x200000) {
1703		heads = 255;
1704		sectors = 63;
1705		cylinders = sector_div(capacity, heads * sectors);
1706	} else {
1707		heads = 64;
1708		sectors = 32;
1709		cylinders = sector_div(capacity, heads * sectors);
1710	}
1711
1712	geom[0] = heads;
1713	geom[1] = sectors;
1714	geom[2] = cylinders;
1715
1716	return 0;
1717} /* End twa_scsi_biosparam() */
1718
1719/* This is the new scsi eh reset function */
1720static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1721{
1722	TW_Device_Extension *tw_dev = NULL;
1723	int retval = FAILED;
1724
1725	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1726
1727	tw_dev->num_resets++;
1728
1729	sdev_printk(KERN_WARNING, SCpnt->device,
1730		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1731		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1732
1733	/* Make sure we are not issuing an ioctl or resetting from ioctl */
1734	mutex_lock(&tw_dev->ioctl_lock);
1735
1736	/* Now reset the card and some of the device extension data */
1737	if (twa_reset_device_extension(tw_dev)) {
1738		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1739		goto out;
1740	}
1741
1742	retval = SUCCESS;
1743out:
1744	mutex_unlock(&tw_dev->ioctl_lock);
1745	return retval;
1746} /* End twa_scsi_eh_reset() */
1747
1748/* This is the main scsi queue function to handle scsi opcodes */
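/*
 * Queueing sketch: commands are bounced with SCSI_MLQUEUE_HOST_BUSY
 * while a reset is in flight, non-zero luns are failed with
 * DID_BAD_TARGET on firmware older than TW_FW_SRL_LUNS_SUPPORTED, and
 * everything else gets a free request id and is handed to
 * twa_scsiop_execute_scsi(). A busy return releases the mapping and
 * the request id; a hard failure additionally completes the command
 * with DID_ERROR.
 */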
1749static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
1750{
1751	void (*done)(struct scsi_cmnd *) = scsi_done;
1752	int request_id, retval;
1753	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1754
1755	/* If we are resetting due to a timed-out ioctl, report as busy */
1756	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1757		retval = SCSI_MLQUEUE_HOST_BUSY;
1758		goto out;
1759	}
1760
1761	/* Check if this FW supports luns */
1762	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1763		SCpnt->result = (DID_BAD_TARGET << 16);
1764		done(SCpnt);
1765		retval = 0;
1766		goto out;
1767	}
1768
1769	/* Get a free request id */
1770	twa_get_request_id(tw_dev, &request_id);
1771
1772	/* Save the scsi command for use by the ISR */
1773	tw_dev->srb[request_id] = SCpnt;
1774
1775	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1776	switch (retval) {
1777	case SCSI_MLQUEUE_HOST_BUSY:
1778		if (twa_command_mapped(SCpnt))
1779			scsi_dma_unmap(SCpnt);
1780		twa_free_request_id(tw_dev, request_id);
1781		break;
1782	case 1:
1783		SCpnt->result = (DID_ERROR << 16);
1784		if (twa_command_mapped(SCpnt))
1785			scsi_dma_unmap(SCpnt);
1786		done(SCpnt);
1787		tw_dev->state[request_id] = TW_S_COMPLETED;
1788		twa_free_request_id(tw_dev, request_id);
1789		retval = 0;
1790	}
1791out:
1792	return retval;
1793} /* End twa_scsi_queue_lck() */
1794
1795static DEF_SCSI_QCMD(twa_scsi_queue)
1796
1797/* This function hands scsi cdb's to the firmware */
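/*
 * Packet construction sketch: the Apache-style command packet is filled
 * with the 16-byte CDB, unit, lun and request id, plus a scatter-gather
 * list. Commands that twa_command_mapped() declines to DMA-map are
 * bounced through the per-request generic buffer instead, unaligned SG
 * addresses abort the command, and internal posts (sglistarg != NULL)
 * carry a pre-built SG list. Sector and SG-count statistics are updated
 * before the packet is posted to the board.
 */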
1798static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1799				   unsigned char *cdb, int use_sg,
1800				   TW_SG_Entry *sglistarg)
1801{
1802	TW_Command_Full *full_command_packet;
1803	TW_Command_Apache *command_packet;
1804	u32 num_sectors = 0x0;
1805	int i, sg_count;
1806	struct scsi_cmnd *srb = NULL;
1807	struct scatterlist *sg;
1808	int retval = 1;
1809
1810	if (tw_dev->srb[request_id])
1811		srb = tw_dev->srb[request_id];
1812
1813	/* Initialize command packet */
1814	full_command_packet = tw_dev->command_packet_virt[request_id];
1815	full_command_packet->header.header_desc.size_header = 128;
1816	full_command_packet->header.status_block.error = 0;
1817	full_command_packet->header.status_block.severity__reserved = 0;
1818
1819	command_packet = &full_command_packet->command.newcommand;
1820	command_packet->status = 0;
1821	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1822
1823	/* We forced 16 byte cdb use earlier */
1824	if (!cdb)
1825		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1826	else
1827		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1828
1829	if (srb) {
1830		command_packet->unit = srb->device->id;
1831		command_packet->request_id__lunl =
1832			TW_REQ_LUN_IN(srb->device->lun, request_id);
1833	} else {
1834		command_packet->request_id__lunl =
1835			TW_REQ_LUN_IN(0, request_id);
1836		command_packet->unit = 0;
1837	}
1838
1839	command_packet->sgl_offset = 16;
1840
1841	if (!sglistarg) {
1842		/* Map sglist from scsi layer to cmd packet */
1843
1844		if (scsi_sg_count(srb)) {
1845			if (!twa_command_mapped(srb)) {
1846				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1847				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1848					scsi_sg_copy_to_buffer(srb,
1849							       tw_dev->generic_buffer_virt[request_id],
1850							       TW_SECTOR_SIZE);
1851				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1852				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1853			} else {
1854				sg_count = scsi_dma_map(srb);
1855				if (sg_count < 0)
1856					goto out;
1857
1858				scsi_for_each_sg(srb, sg, sg_count, i) {
1859					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1860					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1861					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1862						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1863						goto out;
1864					}
1865				}
1866			}
1867			command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
1868		}
1869	} else {
1870		/* Internal cdb post */
1871		for (i = 0; i < use_sg; i++) {
1872			command_packet->sg_list[i].address = sglistarg[i].address;
1873			command_packet->sg_list[i].length = sglistarg[i].length;
1874			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1875				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1876				goto out;
1877			}
1878		}
1879		command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
1880	}
1881
1882	if (srb) {
1883		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1884			num_sectors = (u32)srb->cmnd[4];
1885
1886		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1887			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1888	}
1889
1890	/* Update sector statistic */
1891	tw_dev->sector_count = num_sectors;
1892	if (tw_dev->sector_count > tw_dev->max_sector_count)
1893		tw_dev->max_sector_count = tw_dev->sector_count;
1894
1895	/* Update SG statistics */
1896	if (srb) {
1897		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1898		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1899			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1900	}
1901
1902	/* Now post the command to the board */
1903	if (srb) {
1904		retval = twa_post_command_packet(tw_dev, request_id, 0);
1905	} else {
1906		twa_post_command_packet(tw_dev, request_id, 1);
1907		retval = 0;
1908	}
1909out:
1910	return retval;
1911} /* End twa_scsiop_execute_scsi() */
1912
1913/* This function completes an execute scsi operation */
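/*
 * Completion note: commands that were never DMA-mapped and moved data
 * toward the host (single-element scatterlists only) get their data
 * copied back here from the per-request generic bounce buffer.
 */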
1914static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1915{
1916	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1917
1918	if (!twa_command_mapped(cmd) &&
1919	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1920	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1921		if (scsi_sg_count(cmd) == 1) {
1922			void *buf = tw_dev->generic_buffer_virt[request_id];
1923
1924			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1925		}
1926	}
1927} /* End twa_scsiop_execute_scsi_complete() */
1928
1929/* This function tells the controller to shut down */
1930static void __twa_shutdown(TW_Device_Extension *tw_dev)
1931{
1932	/* Disable interrupts */
1933	TW_DISABLE_INTERRUPTS(tw_dev);
1934
1935	/* Free up the IRQ */
1936	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1937
1938	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1939
1940	/* Tell the card we are shutting down */
1941	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1942		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1943	} else {
1944		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1945	}
1946
1947	/* Clear all interrupts just before exit */
1948	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1949} /* End __twa_shutdown() */
1950
1951/* Wrapper for __twa_shutdown */
1952static void twa_shutdown(struct pci_dev *pdev)
1953{
1954	struct Scsi_Host *host = pci_get_drvdata(pdev);
1955	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1956
1957	__twa_shutdown(tw_dev);
1958} /* End twa_shutdown() */
1959
1960/* This function will look up the message text for a given code */
1961static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1962{
1963	int index;
1964
1965	for (index = 0; ((code != table[index].code) &&
1966		      (table[index].text != NULL)); index++);
1967	return table[index].text;
1968} /* End twa_string_lookup() */
1969
1970/* This function gets called when a disk is coming on-line */
1971static int twa_slave_configure(struct scsi_device *sdev)
1972{
1973	/* Force 60 second timeout */
1974	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1975
1976	return 0;
1977} /* End twa_slave_configure() */
1978
1979static const struct scsi_host_template driver_template = {
1980	.module			= THIS_MODULE,
1981	.name			= "3ware 9000 Storage Controller",
1982	.queuecommand		= twa_scsi_queue,
1983	.eh_host_reset_handler	= twa_scsi_eh_reset,
1984	.bios_param		= twa_scsi_biosparam,
1985	.change_queue_depth	= scsi_change_queue_depth,
1986	.can_queue		= TW_Q_LENGTH-2,
1987	.slave_configure	= twa_slave_configure,
1988	.this_id		= -1,
1989	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
1990	.max_sectors		= TW_MAX_SECTORS,
1991	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
1992	.shost_groups		= twa_host_groups,
1993	.emulated		= 1,
1994	.no_write_same		= 1,
1995};
1996
1997/* This function will probe and initialize a card */
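/*
 * Probe sketch: enable the PCI device, select a 64-bit DMA mask (with a
 * 32-bit fallback), allocate the Scsi_Host and device extension, map
 * the controller registers (BAR 1 on 9000, BAR 2 otherwise), run the
 * controller reset sequence, register with the SCSI midlayer,
 * optionally enable MSI, hook up the shared interrupt handler,
 * re-enable controller interrupts, scan the host and finally register
 * the "twa" character device used for the ioctl interface. Error paths
 * unwind in reverse order.
 */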
1998static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1999{
2000	struct Scsi_Host *host = NULL;
2001	TW_Device_Extension *tw_dev;
2002	unsigned long mem_addr, mem_len;
2003	int retval;
2004
2005	retval = pci_enable_device(pdev);
2006	if (retval) {
2007		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2008		return -ENODEV;
2009	}
2010
2011	pci_set_master(pdev);
2012	pci_try_set_mwi(pdev);
2013
2014	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2015	if (retval)
2016		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2017	if (retval) {
2018		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2019		retval = -ENODEV;
2020		goto out_disable_device;
2021	}
2022
2023	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2024	if (!host) {
2025		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2026		retval = -ENOMEM;
2027		goto out_disable_device;
2028	}
2029	tw_dev = (TW_Device_Extension *)host->hostdata;
2030
2031	/* Save values to device extension */
2032	tw_dev->host = host;
2033	tw_dev->tw_pci_dev = pdev;
2034
2035	if (twa_initialize_device_extension(tw_dev)) {
2036		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2037		retval = -ENOMEM;
2038		goto out_free_device_extension;
2039	}
2040
2041	/* Request IO regions */
2042	retval = pci_request_regions(pdev, "3w-9xxx");
2043	if (retval) {
2044		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2045		goto out_free_device_extension;
2046	}
2047
2048	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2049		mem_addr = pci_resource_start(pdev, 1);
2050		mem_len = pci_resource_len(pdev, 1);
2051	} else {
2052		mem_addr = pci_resource_start(pdev, 2);
2053		mem_len = pci_resource_len(pdev, 2);
2054	}
2055
2056	/* Save base address */
2057	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2058	if (!tw_dev->base_addr) {
2059		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2060		retval = -ENOMEM;
2061		goto out_release_mem_region;
2062	}
2063
2064	/* Disable interrupts on the card */
2065	TW_DISABLE_INTERRUPTS(tw_dev);
2066
2067	/* Initialize the card */
2068	if (twa_reset_sequence(tw_dev, 0)) {
2069		retval = -ENOMEM;
2070		goto out_iounmap;
2071	}
2072
2073	/* Set host specific parameters */
2074	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2075	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2076		host->max_id = TW_MAX_UNITS_9650SE;
2077	else
2078		host->max_id = TW_MAX_UNITS;
2079
2080	host->max_cmd_len = TW_MAX_CDB_LEN;
2081
2082	/* Channels aren't supported by the adapter */
2083	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2084	host->max_channel = 0;
2085
2086	/* Register the card with the kernel SCSI layer */
2087	retval = scsi_add_host(host, &pdev->dev);
2088	if (retval) {
2089		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2090		goto out_iounmap;
2091	}
2092
2093	pci_set_drvdata(pdev, host);
2094
2095	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2096	       host->host_no, mem_addr, pdev->irq);
2097	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2098	       host->host_no,
2099	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2100				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2101	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2102				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2103	       le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2104				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2105
2106	/* Try to enable MSI */
2107	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2108	    !pci_enable_msi(pdev))
2109		set_bit(TW_USING_MSI, &tw_dev->flags);
2110
2111	/* Now setup the interrupt handler */
2112	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2113	if (retval) {
2114		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2115		goto out_remove_host;
2116	}
2117
2118	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2119	twa_device_extension_count++;
2120
2121	/* Re-enable interrupts on the card */
2122	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2123
2124	/* Finally, scan the host */
2125	scsi_scan_host(host);
2126
2127	if (twa_major == -1) {
2128		if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
2129			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2130	}
2131	return 0;
2132
2133out_remove_host:
2134	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2135		pci_disable_msi(pdev);
2136	scsi_remove_host(host);
2137out_iounmap:
2138	iounmap(tw_dev->base_addr);
2139out_release_mem_region:
2140	pci_release_regions(pdev);
2141out_free_device_extension:
2142	twa_free_device_extension(tw_dev);
2143	scsi_host_put(host);
2144out_disable_device:
2145	pci_disable_device(pdev);
2146
2147	return retval;
2148} /* End twa_probe() */
2149
2150/* This function is called to remove a device */
2151static void twa_remove(struct pci_dev *pdev)
2152{
2153	struct Scsi_Host *host = pci_get_drvdata(pdev);
2154	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2155
2156	scsi_remove_host(tw_dev->host);
2157
2158	/* Unregister character device */
2159	if (twa_major >= 0) {
2160		unregister_chrdev(twa_major, "twa");
2161		twa_major = -1;
2162	}
2163
2164	/* Shutdown the card */
2165	__twa_shutdown(tw_dev);
2166
2167	/* Disable MSI if enabled */
2168	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2169		pci_disable_msi(pdev);
2170
2171	/* Free IO remapping */
2172	iounmap(tw_dev->base_addr);
2173
2174	/* Free up the mem region */
2175	pci_release_regions(pdev);
2176
2177	/* Free up device extension resources */
2178	twa_free_device_extension(tw_dev);
2179
2180	scsi_host_put(tw_dev->host);
2181	pci_disable_device(pdev);
2182	twa_device_extension_count--;
2183} /* End twa_remove() */
2184
2185/* This function is called on PCI suspend */
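/*
 * Suspend sketch: controller interrupts are disabled, the IRQ (and MSI
 * vector, if used) is released, and an init-connection call tells the
 * firmware the host is shutting down before all interrupts are cleared.
 */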
2186static int __maybe_unused twa_suspend(struct device *dev)
2187{
2188	struct pci_dev *pdev = to_pci_dev(dev);
2189	struct Scsi_Host *host = pci_get_drvdata(pdev);
2190	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2191
2192	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2193
2194	TW_DISABLE_INTERRUPTS(tw_dev);
2195	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2196
2197	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2198		pci_disable_msi(pdev);
2199
2200	/* Tell the card we are shutting down */
2201	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2202		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2203	} else {
2204		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2205	}
2206	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2207
2208	return 0;
2209} /* End twa_suspend() */
2210
2211/* This function is called on PCI resume */
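/*
 * Resume sketch: mirror the probe-time bring-up by re-asserting MWI,
 * re-selecting the DMA mask, running the reset sequence, re-requesting
 * the IRQ, re-enabling MSI if it was in use, and turning controller
 * interrupts back on; any failure removes the SCSI host.
 */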
2212static int __maybe_unused twa_resume(struct device *dev)
2213{
2214	int retval = 0;
2215	struct pci_dev *pdev = to_pci_dev(dev);
2216	struct Scsi_Host *host = pci_get_drvdata(pdev);
2217	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2218
2219	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2220
2221	pci_try_set_mwi(pdev);
2222
2223	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2224	if (retval)
2225		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2226	if (retval) {
2227		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2228		retval = -ENODEV;
2229		goto out_disable_device;
2230	}
2231
2232	/* Initialize the card */
2233	if (twa_reset_sequence(tw_dev, 0)) {
2234		retval = -ENODEV;
2235		goto out_disable_device;
2236	}
2237
2238	/* Now setup the interrupt handler */
2239	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2240	if (retval) {
2241		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2242		retval = -ENODEV;
2243		goto out_disable_device;
2244	}
2245
2246	/* Re-enable MSI if it was enabled before suspend */
2247	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2248		pci_enable_msi(pdev);
2249
2250	/* Re-enable interrupts on the card */
2251	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2252
2253	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2254	return 0;
2255
2256out_disable_device:
2257	scsi_remove_host(host);
2258
2259	return retval;
2260} /* End twa_resume() */
2261
2262/* PCI Devices supported by this driver */
2263static struct pci_device_id twa_pci_tbl[] = {
2264	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2265	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2266	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2267	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2268	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2269	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2270	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2271	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2272	{ }
2273};
2274MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2275
2276static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);
2277
2278/* pci_driver initializer */
2279static struct pci_driver twa_driver = {
2280	.name		= "3w-9xxx",
2281	.id_table	= twa_pci_tbl,
2282	.probe		= twa_probe,
2283	.remove		= twa_remove,
2284	.driver.pm	= &twa_pm_ops,
2285	.shutdown	= twa_shutdown
2286};
2287
2288/* This function is called on driver initialization */
2289static int __init twa_init(void)
2290{
2291	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2292
2293	return pci_register_driver(&twa_driver);
2294} /* End twa_init() */
2295
2296/* This function is called on driver exit */
2297static void __exit twa_exit(void)
2298{
2299	pci_unregister_driver(&twa_driver);
2300} /* End twa_exit() */
2301
2302module_init(twa_init);
2303module_exit(twa_exit);
2304