/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

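/*
 * Interrupt service routines and response-queue handling for the
 * qla4xxx iSCSI HBA: SCSI status and sense-data processing, mailbox
 * and AEN decoding, and the INTx, MSI, and MSI-X interrupt handlers.
 */
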
/**
 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 * @srb: Pointer to srb structure.
 **/
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
                               struct status_entry *sts_entry,
                               struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0)
		return;

	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
		"ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		cmd->device->channel, cmd->device->id,
		cmd->device->lun, __func__,
		sts_entry->senseData[2] & 0x0f,
		sts_entry->senseData[7],
		sts_entry->senseData[12],
		sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}

/**
 * qla4xxx_status_cont_entry - Process a Status Continuation entry.
 * @ha: SCSI driver HA context
 * @sts_cont: Entry pointer
 *
 * Copies the next chunk of extended sense data into the command
 * tracked by ha->status_srb.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint8_t sense_len;

	if (srb == NULL)
		return;

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			"back to OS srb=%p srb->state:%d\n", ha->host_no,
			__func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

	/* Copy sense data. */
	sense_len = min_t(uint16_t, srb->req_sense_len,
			  IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));

	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Place command on done queue. */
	if (srb->req_sense_len == 0) {
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		ha->status_srb = NULL;
	}
}

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

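	/*
	 * The firmware echoes back the driver-assigned IOCB handle;
	 * use it to locate and remove the srb from the active array.
	 */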
	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
		    " handle=0x%x\n", __func__, sts_entry->handle);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
		    " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

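	/* Transfer-length shortfall reported by the firmware. */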
	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

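		/*
		 * An underrun is only an error when fewer bytes than
		 * cmd->underflow (the midlayer's minimum acceptable
		 * transfer) arrived and no SCSI status explains it.
		 */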
		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
				cmd->underflow)) {
				cmd->result = DID_ERROR << 16;
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun0, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

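		/* Report aborts as DID_RESET so the midlayer retries. */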
		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		     (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * the underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			qla4xxx_copy_sense(ha, sts_entry, srb);
		} else {
			/*
			 * If the RISC reports an underrun and the target does
			 * not report it, then we must have a lost frame, so
			 * tell the upper layer to retry by reporting a bus
			 * busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow.
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun1, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
		    "state: 0x%x\n", ha->host_no,
		    cmd->device->channel, cmd->device->id,
		    cmd->device->lun, sts_entry->completionStatus));
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP.  Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
					le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* RETRY normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
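		/*
		 * Mark the entry consumed; the barrier makes the
		 * signature write visible before we move on.
		 */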
		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/*
	 * Tell ISP we're done with response(s). This also clears the interrupt.
	 */
	ha->isp_ops->complete_iocb(ha);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	ha->isp_ops->complete_iocb(ha);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];

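	/*
	 * The top nibble of the status selects its class:
	 * MBOX_COMPLETION_STATUS for command completions and
	 * MBOX_ASYNC_EVENT_STATUS for asynchronous events (AENs).
	 */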
	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 0; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] = is_qla8022(ha)
				    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
				    : readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
				complete(&ha->mbx_intr_comp);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = is_qla8022(ha)
			    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
			    : readl(&ha->reg->mailbox[i]);

		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
				    mbox_sts[i];
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
			if (ql4xdontresethba) {
				DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
				    ha->host_no, __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			set_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
			break;

		case MBOX_ASTS_LINK_DOWN:
			clear_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			printk(KERN_INFO "scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
			    "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
			    mbox_sts[2], mbox_sts[3]);

			/* mbox_sts[2] = old ACB state
			 * mbox_sts[3] = new ACB state */
			if ((mbox_sts[3] == ACB_STATE_VALID) &&
			    (mbox_sts[2] == ACB_STATE_TENTATIVE))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
			    (mbox_sts[2] == ACB_STATE_VALID))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_sts[0],
				      mbox_sts[1], mbox_sts[2]));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_sts[0], mbox_sts[1],
				      mbox_sts[2], mbox_sts[3]));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
					    mbox_sts[i];

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
				    " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
				    ha->host_no, ha->aen_in, mbox_sts[0],
				    mbox_sts[1], mbox_sts[2], mbox_sts[3],
				    mbox_sts[4]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_sts[0]));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_sts[0]));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4_8xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Local interrupt status/type.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. Runs in interrupt context.
 **/
void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
    uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & HSRX_RISC_IOCB_INT)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/async event interrupt. */
	if (intr_status & HSRX_RISC_MB_INT)
		qla4xxx_isr_decode_mailbox(ha,
		    readl(&ha->qla4_8xxx_reg->mailbox_out[0]));

	/* clear the interrupt */
	writel(0, &ha->qla4_8xxx_reg->host_int);
	readl(&ha->qla4_8xxx_reg->host_int);
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Local interrupt status/type.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. Runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/async event interrupt. */
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4_8xxx_spurious_interrupt - processes spurious interrupt
 * @ha: pointer to host adapter structure.
 * @reqs_count: Requests serviced so far; the interrupt only counts
 *	as spurious when this is zero.
 **/
static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
    uint8_t reqs_count)
{
	if (reqs_count)
		return;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
	if (is_qla8022(ha)) {
		writel(0, &ha->qla4_8xxx_reg->host_int);
		if (test_bit(AF_INTx_ENABLED, &ha->flags))
			qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
			    0xfbff);
	}
	ha->spurious_int_count++;
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		    (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!test_bit(AF_HBA_GOING_AWAY, &ha->flags))
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			ha->isp_ops->interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4_8xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t intr_status;
	uint32_t status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	ha->isr_count++;
	status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	if (!(status & ha->nx_legacy_intr.int_vec_bit))
		return IRQ_NONE;

	status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}

	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}
		intr_status = readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		/* Enable Interrupt */
		qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

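/**
 * qla4_8xxx_msi_handler - MSI interrupt handler.
 * @irq: Interrupt number, passed through to the default handler.
 * @dev_id: Pointer to host adapter structure
 *
 * Clears the interrupt, then defers to qla4_8xxx_default_intr_handler.
 **/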
irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
		    "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	ha->isr_count++;
	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	return qla4_8xxx_default_intr_handler(irq, dev_id);
}

/**
 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * This interrupt handler is called directly for MSI-X, and
 * called indirectly for MSI.
 **/
irqreturn_t
qla4_8xxx_default_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t intr_status;
	uint8_t reqs_count = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		intr_status = readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	ha->isr_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

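/**
 * qla4_8xxx_msix_rsp_q - MSI-X response-queue vector handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/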
irqreturn_t
qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_process_response_queue(ha);
	writel(0, &ha->qla4_8xxx_reg->host_int);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->isr_count++;
	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

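		/*
		 * Drop the hardware lock while handling the AEN; the
		 * DDB processing below can be lengthy. It is reacquired
		 * at the bottom of the loop.
		 */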
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			(ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
			mbox_sts[0], mbox_sts[1], mbox_sts[2],
			mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* For use during init time; we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
								mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);

				DEBUG2(printk("scsi%ld: ddb [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}

			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
						mbox_sts[3], mbox_sts[4]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

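/**
 * qla4xxx_request_irqs - attach an interrupt to the host adapter
 * @ha: Pointer to host adapter structure.
 *
 * On ISP82xx, tries MSI-X, then MSI, then INTx, as allowed by the
 * ql4xenablemsix module parameter; other ISPs use INTx directly.
 **/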
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
	int ret;

	if (!is_qla8022(ha))
		goto try_intx;

	if (ql4xenablemsix == 2)
		goto try_msi;

	if (ql4xenablemsix != 1)
		goto try_intx;

	/* Trying MSI-X */
	ret = qla4_8xxx_enable_msix(ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X).\n", ha->revision_id));
		goto irq_attached;
	}

	ql4_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);

try_msi:
	/* Trying MSI */
	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
			IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
		if (!ret) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
			set_bit(AF_MSI_ENABLED, &ha->flags);
			goto irq_attached;
		} else {
			ql4_printk(KERN_WARNING, ha,
			    "MSI: Failed to reserve interrupt %d, "
			    "already in use.\n", ha->pdev->irq);
			pci_disable_msi(ha->pdev);
		}
	}
	ql4_printk(KERN_WARNING, ha,
	    "MSI: Falling back to INTx mode -- %d.\n", ret);

try_intx:
	/* Trying INTx */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
		set_bit(AF_INTx_ENABLED, &ha->flags);
		goto irq_attached;
	} else {
		ql4_printk(KERN_WARNING, ha,
		    "INTx: Failed to reserve interrupt %d, already in"
		    " use.\n", ha->pdev->irq);
		return ret;
	}

irq_attached:
	set_bit(AF_IRQ_ATTACHED, &ha->flags);
	ha->host->irq = ha->pdev->irq;
	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
	    __func__, ha->pdev->irq);
	return ret;
}

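/**
 * qla4xxx_free_irqs - release the attached interrupt
 * @ha: Pointer to host adapter structure.
 *
 * Frees the irq and disables MSI or MSI-X if either was enabled.
 **/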
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
	if (test_bit(AF_MSIX_ENABLED, &ha->flags))
		qla4_8xxx_disable_msix(ha);
	else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
		free_irq(ha->pdev->irq, ha);
		pci_disable_msi(ha->pdev);
	} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
		free_irq(ha->pdev->irq, ha);
}