1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c)  2003-2010 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8#include "qla_gbl.h"
9
10#include <linux/delay.h>
11#include <linux/slab.h>
12#include <linux/vmalloc.h>
13
14#include "qla_devtbl.h"
15
16#ifdef CONFIG_SPARC
17#include <asm/prom.h>
18#endif
19
20/*
21*  QLogic ISP2x00 Hardware Support Function Prototypes.
22*/
23static int qla2x00_isp_firmware(scsi_qla_host_t *);
24static int qla2x00_setup_chip(scsi_qla_host_t *);
25static int qla2x00_init_rings(scsi_qla_host_t *);
26static int qla2x00_fw_ready(scsi_qla_host_t *);
27static int qla2x00_configure_hba(scsi_qla_host_t *);
28static int qla2x00_configure_loop(scsi_qla_host_t *);
29static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30static int qla2x00_configure_fabric(scsi_qla_host_t *);
31static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
32static int qla2x00_device_resync(scsi_qla_host_t *);
33static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
34    uint16_t *);
35
36static int qla2x00_restart_isp(scsi_qla_host_t *);
37
38static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39
40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41static int qla84xx_init_chip(scsi_qla_host_t *);
42static int qla25xx_init_queues(struct qla_hw_data *);
43
44/* SRB Extensions ---------------------------------------------------------- */
45
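/**
 * qla2x00_ctx_sp_timeout() - Timeout handler for a context SRB.
 * @__data: SRB pointer, cast to unsigned long by the timer core.
 *
 * Clears the SRB from the base request queue's outstanding-command array
 * under the hardware lock, then invokes the IOCB's timeout and free
 * callbacks.
 */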
46static void
47qla2x00_ctx_sp_timeout(unsigned long __data)
48{
49	srb_t *sp = (srb_t *)__data;
50	struct srb_ctx *ctx;
51	struct srb_iocb *iocb;
52	fc_port_t *fcport = sp->fcport;
53	struct qla_hw_data *ha = fcport->vha->hw;
54	struct req_que *req;
55	unsigned long flags;
56
57	spin_lock_irqsave(&ha->hardware_lock, flags);
58	req = ha->req_q_map[0];
59	req->outstanding_cmds[sp->handle] = NULL;
60	ctx = sp->ctx;
61	iocb = ctx->u.iocb_cmd;
62	iocb->timeout(sp);
63	iocb->free(sp);
64	spin_unlock_irqrestore(&ha->hardware_lock, flags);
65}
66
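/**
 * qla2x00_ctx_sp_free() - Release a context SRB.
 * @sp: SRB to free.
 *
 * Stops the IOCB timer, frees the IOCB and context structures, returns the
 * SRB to the srb mempool and drops the vha busy reference.
 */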
67static void
68qla2x00_ctx_sp_free(srb_t *sp)
69{
70	struct srb_ctx *ctx = sp->ctx;
71	struct srb_iocb *iocb = ctx->u.iocb_cmd;
72	struct scsi_qla_host *vha = sp->fcport->vha;
73
74	del_timer_sync(&iocb->timer);
75	kfree(iocb);
76	kfree(ctx);
77	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
78
79	QLA_VHA_MARK_NOT_BUSY(vha);
80}
81
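/**
 * qla2x00_get_ctx_sp() - Allocate a context SRB with an optional timer.
 * @vha: HA context
 * @fcport: port the SRB is issued on behalf of
 * @size: size of the srb_ctx structure to allocate
 * @tmo: timeout in seconds; the timer is armed only when non-zero
 *
 * Returns a zeroed SRB with its context and IOCB allocated, or NULL on
 * failure (in which case the vha busy reference is dropped).
 */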
82inline srb_t *
83qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
84    unsigned long tmo)
85{
86	srb_t *sp = NULL;
87	struct qla_hw_data *ha = vha->hw;
88	struct srb_ctx *ctx;
89	struct srb_iocb *iocb;
90	uint8_t bail;
91
92	QLA_VHA_MARK_BUSY(vha, bail);
93	if (bail)
94		return NULL;
95
96	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
97	if (!sp)
98		goto done;
99	ctx = kzalloc(size, GFP_KERNEL);
100	if (!ctx) {
101		mempool_free(sp, ha->srb_mempool);
102		sp = NULL;
103		goto done;
104	}
105	iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
106	if (!iocb) {
107		mempool_free(sp, ha->srb_mempool);
108		sp = NULL;
109		kfree(ctx);
110		goto done;
111	}
112
113	memset(sp, 0, sizeof(*sp));
114	sp->fcport = fcport;
115	sp->ctx = ctx;
116	ctx->u.iocb_cmd = iocb;
117	iocb->free = qla2x00_ctx_sp_free;
118
119	init_timer(&iocb->timer);
120	if (!tmo)
121		goto done;
122	iocb->timer.expires = jiffies + tmo * HZ;
123	iocb->timer.data = (unsigned long)sp;
124	iocb->timer.function = qla2x00_ctx_sp_timeout;
125	add_timer(&iocb->timer);
126done:
127	if (!sp)
128		QLA_VHA_MARK_NOT_BUSY(vha);
129	return sp;
130}
131
132/* Asynchronous Login/Logout Routines -------------------------------------- */
133
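/**
 * qla2x00_get_async_timeout() - Timeout, in seconds, for async logio IOCBs.
 * @vha: HA context
 *
 * FWI2-capable ISPs derive the timeout from the switch-negotiated R_A_TOV;
 * earlier ISPs use the login timeout seeded from the initialization control
 * block.
 */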
134static inline unsigned long
135qla2x00_get_async_timeout(struct scsi_qla_host *vha)
136{
137	unsigned long tmo;
138	struct qla_hw_data *ha = vha->hw;
139
140	/* Firmware should use switch negotiated r_a_tov for timeout. */
141	tmo = ha->r_a_tov / 10 * 2;
142	if (!IS_FWI2_CAPABLE(ha)) {
143		/*
144		 * Except for earlier ISPs where the timeout is seeded from the
145		 * initialization control block.
146		 */
147		tmo = ha->login_timeout;
148	}
149	return tmo;
150}
151
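/**
 * qla2x00_async_iocb_timeout() - Handle expiry of an async logio IOCB.
 * @sp: timed-out SRB.
 *
 * Clears FCF_ASYNC_SENT on the port; for a timed-out login, queues a logout
 * and completes the login with MBS_COMMAND_ERROR so that it can be retried.
 */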
152static void
153qla2x00_async_iocb_timeout(srb_t *sp)
154{
155	fc_port_t *fcport = sp->fcport;
156	struct srb_ctx *ctx = sp->ctx;
157
158	DEBUG2(printk(KERN_WARNING
159		"scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
160		fcport->vha->host_no, sp->handle,
161		ctx->name, fcport->d_id.b.domain,
162		fcport->d_id.b.area, fcport->d_id.b.al_pa));
163
164	fcport->flags &= ~FCF_ASYNC_SENT;
165	if (ctx->type == SRB_LOGIN_CMD) {
166		struct srb_iocb *lio = ctx->u.iocb_cmd;
167		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
168		/* Retry as needed. */
169		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
170		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
171			QLA_LOGIO_LOGIN_RETRIED : 0;
172		qla2x00_post_async_login_done_work(fcport->vha, fcport,
173			lio->u.logio.data);
174	}
175}
176
177static void
178qla2x00_async_login_ctx_done(srb_t *sp)
179{
180	struct srb_ctx *ctx = sp->ctx;
181	struct srb_iocb *lio = ctx->u.iocb_cmd;
182
183	qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
184		lio->u.logio.data);
185	lio->free(sp);
186}
187
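/**
 * qla2x00_async_login() - Issue an asynchronous login to a remote port.
 * @vha: HA context
 * @fcport: port to log in to
 * @data: logio data; data[1] may carry QLA_LOGIO_LOGIN_RETRIED
 *
 * Allocates a context SRB, arms it with the async timeout plus two seconds
 * and starts the login IOCB.  Returns QLA_SUCCESS when the IOCB has been
 * queued, QLA_FUNCTION_FAILED otherwise.
 */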
188int
189qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
190    uint16_t *data)
191{
192	srb_t *sp;
193	struct srb_ctx *ctx;
194	struct srb_iocb *lio;
195	int rval;
196
197	rval = QLA_FUNCTION_FAILED;
198	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
199	    qla2x00_get_async_timeout(vha) + 2);
200	if (!sp)
201		goto done;
202
203	ctx = sp->ctx;
204	ctx->type = SRB_LOGIN_CMD;
205	ctx->name = "login";
206	lio = ctx->u.iocb_cmd;
207	lio->timeout = qla2x00_async_iocb_timeout;
208	lio->done = qla2x00_async_login_ctx_done;
209	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
210	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
211		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
212	rval = qla2x00_start_sp(sp);
213	if (rval != QLA_SUCCESS)
214		goto done_free_sp;
215
216	DEBUG2(printk(KERN_DEBUG
217	    "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
218	    "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
219	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
220	    fcport->login_retry));
221	return rval;
222
223done_free_sp:
224	lio->free(sp);
225done:
226	return rval;
227}
228
229static void
230qla2x00_async_logout_ctx_done(srb_t *sp)
231{
232	struct srb_ctx *ctx = sp->ctx;
233	struct srb_iocb *lio = ctx->u.iocb_cmd;
234
235	qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
236	    lio->u.logio.data);
237	lio->free(sp);
238}
239
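/**
 * qla2x00_async_logout() - Issue an asynchronous logout to a remote port.
 * @vha: HA context
 * @fcport: port to log out of
 *
 * Returns QLA_SUCCESS when the logout IOCB has been queued,
 * QLA_FUNCTION_FAILED otherwise.
 */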
240int
241qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
242{
243	srb_t *sp;
244	struct srb_ctx *ctx;
245	struct srb_iocb *lio;
246	int rval;
247
248	rval = QLA_FUNCTION_FAILED;
249	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
250	    qla2x00_get_async_timeout(vha) + 2);
251	if (!sp)
252		goto done;
253
254	ctx = sp->ctx;
255	ctx->type = SRB_LOGOUT_CMD;
256	ctx->name = "logout";
257	lio = ctx->u.iocb_cmd;
258	lio->timeout = qla2x00_async_iocb_timeout;
259	lio->done = qla2x00_async_logout_ctx_done;
260	rval = qla2x00_start_sp(sp);
261	if (rval != QLA_SUCCESS)
262		goto done_free_sp;
263
264	DEBUG2(printk(KERN_DEBUG
265	    "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
266	    fcport->vha->host_no, sp->handle, fcport->loop_id,
267	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
268	return rval;
269
270done_free_sp:
271	lio->free(sp);
272done:
273	return rval;
274}
275
276static void
277qla2x00_async_adisc_ctx_done(srb_t *sp)
278{
279	struct srb_ctx *ctx = sp->ctx;
280	struct srb_iocb *lio = ctx->u.iocb_cmd;
281
282	qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
283	    lio->u.logio.data);
284	lio->free(sp);
285}
286
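/**
 * qla2x00_async_adisc() - Issue an asynchronous ADISC to a remote port.
 * @vha: HA context
 * @fcport: port to revalidate
 * @data: logio data; data[1] may carry QLA_LOGIO_LOGIN_RETRIED
 *
 * Returns QLA_SUCCESS when the ADISC IOCB has been queued,
 * QLA_FUNCTION_FAILED otherwise.
 */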
287int
288qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
289    uint16_t *data)
290{
291	srb_t *sp;
292	struct srb_ctx *ctx;
293	struct srb_iocb *lio;
294	int rval;
295
296	rval = QLA_FUNCTION_FAILED;
297	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
298	    qla2x00_get_async_timeout(vha) + 2);
299	if (!sp)
300		goto done;
301
302	ctx = sp->ctx;
303	ctx->type = SRB_ADISC_CMD;
304	ctx->name = "adisc";
305	lio = ctx->u.iocb_cmd;
306	lio->timeout = qla2x00_async_iocb_timeout;
307	lio->done = qla2x00_async_adisc_ctx_done;
308	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
309		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
310	rval = qla2x00_start_sp(sp);
311	if (rval != QLA_SUCCESS)
312		goto done_free_sp;
313
314	DEBUG2(printk(KERN_DEBUG
315	    "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
316	    fcport->vha->host_no, sp->handle, fcport->loop_id,
317	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
318
319	return rval;
320
321done_free_sp:
322	lio->free(sp);
323done:
324	return rval;
325}
326
327static void
328qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
329{
330	struct srb_ctx *ctx = sp->ctx;
331	struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
332
333	qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
334	iocb->free(sp);
335}
336
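/**
 * qla2x00_async_tm_cmd() - Issue an asynchronous task-management IOCB.
 * @fcport: target port
 * @flags: task-management flags (e.g. TCF_LUN_RESET)
 * @lun: LUN the command applies to
 * @tag: tag recorded in the TMF data field
 *
 * Returns QLA_SUCCESS when the TMF IOCB has been queued,
 * QLA_FUNCTION_FAILED otherwise.
 */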
337int
338qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
339	uint32_t tag)
340{
341	struct scsi_qla_host *vha = fcport->vha;
342	srb_t *sp;
343	struct srb_ctx *ctx;
344	struct srb_iocb *tcf;
345	int rval;
346
347	rval = QLA_FUNCTION_FAILED;
348	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
349	    qla2x00_get_async_timeout(vha) + 2);
350	if (!sp)
351		goto done;
352
353	ctx = sp->ctx;
354	ctx->type = SRB_TM_CMD;
355	ctx->name = "tmf";
356	tcf = ctx->u.iocb_cmd;
357	tcf->u.tmf.flags = flags;
358	tcf->u.tmf.lun = lun;
359	tcf->u.tmf.data = tag;
360	tcf->timeout = qla2x00_async_iocb_timeout;
361	tcf->done = qla2x00_async_tm_cmd_ctx_done;
362
363	rval = qla2x00_start_sp(sp);
364	if (rval != QLA_SUCCESS)
365		goto done_free_sp;
366
367	DEBUG2(printk(KERN_DEBUG
368	    "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
369	    fcport->vha->host_no, sp->handle, fcport->loop_id,
370	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
371
372	return rval;
373
374done_free_sp:
375	tcf->free(sp);
376done:
377	return rval;
378}
379
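/**
 * qla2x00_async_login_done() - Process completion of an async login.
 * @vha: HA context
 * @fcport: port the login was issued to
 * @data: completion status; data[0] holds the MBS_* code
 *
 * On success the fcport is updated (FCP2 devices are sent an ADISC first);
 * on error the port is scheduled for relogin or marked lost; port-ID and
 * loop-ID collisions trigger the corresponding retry paths.
 */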
380void
381qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
382    uint16_t *data)
383{
384	int rval;
385
386	switch (data[0]) {
387	case MBS_COMMAND_COMPLETE:
388		if (fcport->flags & FCF_FCP2_DEVICE) {
389			fcport->flags |= FCF_ASYNC_SENT;
390			qla2x00_post_async_adisc_work(vha, fcport, data);
391			break;
392		}
393		qla2x00_update_fcport(vha, fcport);
394		break;
395	case MBS_COMMAND_ERROR:
396		fcport->flags &= ~FCF_ASYNC_SENT;
397		if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
398			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
399		else
400			qla2x00_mark_device_lost(vha, fcport, 1, 1);
401		break;
402	case MBS_PORT_ID_USED:
403		fcport->loop_id = data[1];
404		qla2x00_post_async_logout_work(vha, fcport, NULL);
405		qla2x00_post_async_login_work(vha, fcport, NULL);
406		break;
407	case MBS_LOOP_ID_USED:
408		fcport->loop_id++;
409		rval = qla2x00_find_new_loop_id(vha, fcport);
410		if (rval != QLA_SUCCESS) {
411			fcport->flags &= ~FCF_ASYNC_SENT;
412			qla2x00_mark_device_lost(vha, fcport, 1, 1);
413			break;
414		}
415		qla2x00_post_async_login_work(vha, fcport, NULL);
416		break;
417	}
418	return;
419}
420
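/**
 * qla2x00_async_logout_done() - Process completion of an async logout.
 * @vha: HA context
 * @fcport: port that was logged out
 * @data: completion status (unused)
 *
 * Marks the remote port as lost.
 */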
421void
422qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
423    uint16_t *data)
424{
425	qla2x00_mark_device_lost(vha, fcport, 1, 0);
426	return;
427}
428
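/**
 * qla2x00_async_adisc_done() - Process completion of an async ADISC.
 * @vha: HA context
 * @fcport: port the ADISC was issued to
 * @data: completion status; data[0] holds the MBS_* code
 *
 * On success the fcport is updated; otherwise a relogin is scheduled or the
 * device is marked lost.
 */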
429void
430qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
431    uint16_t *data)
432{
433	if (data[0] == MBS_COMMAND_COMPLETE) {
434		qla2x00_update_fcport(vha, fcport);
435
436		return;
437	}
438
439	/* Retry login. */
440	fcport->flags &= ~FCF_ASYNC_SENT;
441	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
442		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
443	else
444		qla2x00_mark_device_lost(vha, fcport, 1, 1);
445
446	return;
447}
448
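/**
 * qla2x00_async_tm_cmd_done() - Process completion of an async TMF.
 * @vha: HA context
 * @fcport: port the TMF was issued to
 * @iocb: completed TMF IOCB
 *
 * Issues the corresponding marker IOCB and logs a warning if either the
 * marker or the TMF itself failed.
 */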
449void
450qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
451    struct srb_iocb *iocb)
452{
453	int rval;
454	uint32_t flags;
455	uint16_t lun;
456
457	flags = iocb->u.tmf.flags;
458	lun = (uint16_t)iocb->u.tmf.lun;
459
460	/* Issue Marker IOCB */
461	rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
462		vha->hw->rsp_q_map[0], fcport->loop_id, lun,
463		flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
464
465	if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
466		DEBUG2_3_11(printk(KERN_WARNING
467			"%s(%ld): TM IOCB failed (%x).\n",
468			__func__, vha->host_no, rval));
469	}
470
471	return;
472}
473
474/****************************************************************************/
475/*                QLogic ISP2x00 Hardware Support Functions.                */
476/****************************************************************************/
477
478/*
479* qla2x00_initialize_adapter
480*      Initialize board.
481*
482* Input:
483*      ha = adapter block pointer.
484*
485* Returns:
486*      0 = success
487*/
488int
489qla2x00_initialize_adapter(scsi_qla_host_t *vha)
490{
491	int	rval;
492	struct qla_hw_data *ha = vha->hw;
493	struct req_que *req = ha->req_q_map[0];
494
495	/* Clear adapter flags. */
496	vha->flags.online = 0;
497	ha->flags.chip_reset_done = 0;
498	vha->flags.reset_active = 0;
499	ha->flags.pci_channel_io_perm_failure = 0;
500	ha->flags.eeh_busy = 0;
501	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
502	atomic_set(&vha->loop_state, LOOP_DOWN);
503	vha->device_flags = DFLG_NO_CABLE;
504	vha->dpc_flags = 0;
505	vha->flags.management_server_logged_in = 0;
506	vha->marker_needed = 0;
507	ha->isp_abort_cnt = 0;
508	ha->beacon_blink_led = 0;
509
510	set_bit(0, ha->req_qid_map);
511	set_bit(0, ha->rsp_qid_map);
512
513	qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
514	rval = ha->isp_ops->pci_config(vha);
515	if (rval) {
516		DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
517		    vha->host_no));
518		return (rval);
519	}
520
521	ha->isp_ops->reset_chip(vha);
522
523	rval = qla2xxx_get_flash_info(vha);
524	if (rval) {
525		DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
526		    vha->host_no));
527		return (rval);
528	}
529
530	ha->isp_ops->get_flash_version(vha, req->ring);
531
532	qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
533
534	ha->isp_ops->nvram_config(vha);
535
536	if (ha->flags.disable_serdes) {
537		/* Mask HBA via NVRAM settings? */
538		qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
539		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
540		    vha->port_name[0], vha->port_name[1],
541		    vha->port_name[2], vha->port_name[3],
542		    vha->port_name[4], vha->port_name[5],
543		    vha->port_name[6], vha->port_name[7]);
544		return QLA_FUNCTION_FAILED;
545	}
546
547	qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
548
549	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
550		rval = ha->isp_ops->chip_diag(vha);
551		if (rval)
552			return (rval);
553		rval = qla2x00_setup_chip(vha);
554		if (rval)
555			return (rval);
556	}
557
558	if (IS_QLA84XX(ha)) {
559		ha->cs84xx = qla84xx_get_chip(vha);
560		if (!ha->cs84xx) {
561			qla_printk(KERN_ERR, ha,
562			    "Unable to configure ISP84XX.\n");
563			return QLA_FUNCTION_FAILED;
564		}
565	}
566	rval = qla2x00_init_rings(vha);
567	ha->flags.chip_reset_done = 1;
568
569	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
570		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
571		rval = qla84xx_init_chip(vha);
572		if (rval != QLA_SUCCESS) {
573			qla_printk(KERN_ERR, ha,
574				"Unable to initialize ISP84XX.\n");
575			qla84xx_put_chip(vha);
576		}
577	}
578
579	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
580		qla24xx_read_fcp_prio_cfg(vha);
581
582	return (rval);
583}
584
585/**
586 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
587 * @ha: HA context
588 *
589 * Returns 0 on success.
590 */
591int
592qla2100_pci_config(scsi_qla_host_t *vha)
593{
594	uint16_t w;
595	unsigned long flags;
596	struct qla_hw_data *ha = vha->hw;
597	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
598
599	pci_set_master(ha->pdev);
600	pci_try_set_mwi(ha->pdev);
601
602	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
603	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
604	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
605
606	pci_disable_rom(ha->pdev);
607
608	/* Get PCI bus information. */
609	spin_lock_irqsave(&ha->hardware_lock, flags);
610	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
611	spin_unlock_irqrestore(&ha->hardware_lock, flags);
612
613	return QLA_SUCCESS;
614}
615
616/**
617 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
618 * @ha: HA context
619 *
620 * Returns 0 on success.
621 */
622int
623qla2300_pci_config(scsi_qla_host_t *vha)
624{
625	uint16_t	w;
626	unsigned long   flags = 0;
627	uint32_t	cnt;
628	struct qla_hw_data *ha = vha->hw;
629	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
630
631	pci_set_master(ha->pdev);
632	pci_try_set_mwi(ha->pdev);
633
634	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
635	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
636
637	if (IS_QLA2322(ha) || IS_QLA6322(ha))
638		w &= ~PCI_COMMAND_INTX_DISABLE;
639	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
640
641	/*
642	 * If this is a 2300 card and not 2312, reset the
643	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
644	 * the 2310 also reports itself as a 2300 so we need to get the
645	 * fb revision level -- a 6 indicates it really is a 2300 and
646	 * not a 2310.
647	 */
648	if (IS_QLA2300(ha)) {
649		spin_lock_irqsave(&ha->hardware_lock, flags);
650
651		/* Pause RISC. */
652		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
653		for (cnt = 0; cnt < 30000; cnt++) {
654			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
655				break;
656
657			udelay(10);
658		}
659
660		/* Select FPM registers. */
661		WRT_REG_WORD(&reg->ctrl_status, 0x20);
662		RD_REG_WORD(&reg->ctrl_status);
663
664		/* Get the fb rev level */
665		ha->fb_rev = RD_FB_CMD_REG(ha, reg);
666
667		if (ha->fb_rev == FPM_2300)
668			pci_clear_mwi(ha->pdev);
669
670		/* Deselect FPM registers. */
671		WRT_REG_WORD(&reg->ctrl_status, 0x0);
672		RD_REG_WORD(&reg->ctrl_status);
673
674		/* Release RISC module. */
675		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
676		for (cnt = 0; cnt < 30000; cnt++) {
677			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
678				break;
679
680			udelay(10);
681		}
682
683		spin_unlock_irqrestore(&ha->hardware_lock, flags);
684	}
685
686	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
687
688	pci_disable_rom(ha->pdev);
689
690	/* Get PCI bus information. */
691	spin_lock_irqsave(&ha->hardware_lock, flags);
692	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
693	spin_unlock_irqrestore(&ha->hardware_lock, flags);
694
695	return QLA_SUCCESS;
696}
697
698/**
699 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
700 * @ha: HA context
701 *
702 * Returns 0 on success.
703 */
704int
705qla24xx_pci_config(scsi_qla_host_t *vha)
706{
707	uint16_t w;
708	unsigned long flags = 0;
709	struct qla_hw_data *ha = vha->hw;
710	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
711
712	pci_set_master(ha->pdev);
713	pci_try_set_mwi(ha->pdev);
714
715	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
716	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
717	w &= ~PCI_COMMAND_INTX_DISABLE;
718	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
719
720	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
721
722	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
723	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
724		pcix_set_mmrbc(ha->pdev, 2048);
725
726	/* PCIe -- adjust Maximum Read Request Size (2048). */
727	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
728		pcie_set_readrq(ha->pdev, 2048);
729
730	pci_disable_rom(ha->pdev);
731
732	ha->chip_revision = ha->pdev->revision;
733
734	/* Get PCI bus information. */
735	spin_lock_irqsave(&ha->hardware_lock, flags);
736	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
737	spin_unlock_irqrestore(&ha->hardware_lock, flags);
738
739	return QLA_SUCCESS;
740}
741
742/**
743 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
744 * @ha: HA context
745 *
746 * Returns 0 on success.
747 */
748int
749qla25xx_pci_config(scsi_qla_host_t *vha)
750{
751	uint16_t w;
752	struct qla_hw_data *ha = vha->hw;
753
754	pci_set_master(ha->pdev);
755	pci_try_set_mwi(ha->pdev);
756
757	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
758	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
759	w &= ~PCI_COMMAND_INTX_DISABLE;
760	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
761
762	/* PCIe -- adjust Maximum Read Request Size (2048). */
763	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
764		pcie_set_readrq(ha->pdev, 2048);
765
766	pci_disable_rom(ha->pdev);
767
768	ha->chip_revision = ha->pdev->revision;
769
770	return QLA_SUCCESS;
771}
772
773/**
774 * qla2x00_isp_firmware() - Choose firmware image.
775 * @ha: HA context
776 *
777 * Returns 0 on success.
778 */
779static int
780qla2x00_isp_firmware(scsi_qla_host_t *vha)
781{
782	int  rval;
783	uint16_t loop_id, topo, sw_cap;
784	uint8_t domain, area, al_pa;
785	struct qla_hw_data *ha = vha->hw;
786
787	/* Assume loading risc code */
788	rval = QLA_FUNCTION_FAILED;
789
790	if (ha->flags.disable_risc_code_load) {
791		DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
792		    vha->host_no));
793		qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
794
795		/* Verify checksum of loaded RISC code. */
796		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
797		if (rval == QLA_SUCCESS) {
798			/* And, verify we are not in ROM code. */
799			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
800			    &area, &domain, &topo, &sw_cap);
801		}
802	}
803
804	if (rval) {
805		DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
806		    vha->host_no));
807	}
808
809	return (rval);
810}
811
812/**
813 * qla2x00_reset_chip() - Reset ISP chip.
814 * @ha: HA context
815 *
816 * Returns 0 on success.
817 */
818void
819qla2x00_reset_chip(scsi_qla_host_t *vha)
820{
821	unsigned long   flags = 0;
822	struct qla_hw_data *ha = vha->hw;
823	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
824	uint32_t	cnt;
825	uint16_t	cmd;
826
827	if (unlikely(pci_channel_offline(ha->pdev)))
828		return;
829
830	ha->isp_ops->disable_intrs(ha);
831
832	spin_lock_irqsave(&ha->hardware_lock, flags);
833
834	/* Turn off master enable */
835	cmd = 0;
836	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
837	cmd &= ~PCI_COMMAND_MASTER;
838	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
839
840	if (!IS_QLA2100(ha)) {
841		/* Pause RISC. */
842		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
843		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
844			for (cnt = 0; cnt < 30000; cnt++) {
845				if ((RD_REG_WORD(&reg->hccr) &
846				    HCCR_RISC_PAUSE) != 0)
847					break;
848				udelay(100);
849			}
850		} else {
851			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
852			udelay(10);
853		}
854
855		/* Select FPM registers. */
856		WRT_REG_WORD(&reg->ctrl_status, 0x20);
857		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
858
859		/* FPM Soft Reset. */
860		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
861		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */
862
863		/* Toggle Fpm Reset. */
864		if (!IS_QLA2200(ha)) {
865			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
866			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
867		}
868
869		/* Select frame buffer registers. */
870		WRT_REG_WORD(&reg->ctrl_status, 0x10);
871		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
872
873		/* Reset frame buffer FIFOs. */
874		if (IS_QLA2200(ha)) {
875			WRT_FB_CMD_REG(ha, reg, 0xa000);
876			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
877		} else {
878			WRT_FB_CMD_REG(ha, reg, 0x00fc);
879
880			/* Read back fb_cmd until zero or 3 seconds max */
881			for (cnt = 0; cnt < 3000; cnt++) {
882				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
883					break;
884				udelay(100);
885			}
886		}
887
888		/* Select RISC module registers. */
889		WRT_REG_WORD(&reg->ctrl_status, 0);
890		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
891
892		/* Reset RISC processor. */
893		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
894		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
895
896		/* Release RISC processor. */
897		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
898		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
899	}
900
901	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
902	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
903
904	/* Reset ISP chip. */
905	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
906
907	/* Wait for RISC to recover from reset. */
908	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
909		/*
910		 * It is necessary to for a delay here since the card doesn't
911		 * respond to PCI reads during a reset. On some architectures
912		 * this will result in an MCA.
913		 */
914		udelay(20);
915		for (cnt = 30000; cnt; cnt--) {
916			if ((RD_REG_WORD(&reg->ctrl_status) &
917			    CSR_ISP_SOFT_RESET) == 0)
918				break;
919			udelay(100);
920		}
921	} else
922		udelay(10);
923
924	/* Reset RISC processor. */
925	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
926
927	WRT_REG_WORD(&reg->semaphore, 0);
928
929	/* Release RISC processor. */
930	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
931	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
932
933	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
934		for (cnt = 0; cnt < 30000; cnt++) {
935			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
936				break;
937
938			udelay(100);
939		}
940	} else
941		udelay(100);
942
943	/* Turn on master enable */
944	cmd |= PCI_COMMAND_MASTER;
945	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
946
947	/* Disable RISC pause on FPM parity error. */
948	if (!IS_QLA2100(ha)) {
949		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
950		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
951	}
952
953	spin_unlock_irqrestore(&ha->hardware_lock, flags);
954}
955
956/**
957 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
958 * @ha: HA context
959 *
960 * Returns 0 on success.
961 */
962static inline void
963qla24xx_reset_risc(scsi_qla_host_t *vha)
964{
965	unsigned long flags = 0;
966	struct qla_hw_data *ha = vha->hw;
967	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
968	uint32_t cnt, d2;
969	uint16_t wd;
970
971	spin_lock_irqsave(&ha->hardware_lock, flags);
972
973	/* Reset RISC. */
974	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
975	for (cnt = 0; cnt < 30000; cnt++) {
976		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
977			break;
978
979		udelay(10);
980	}
981
982	WRT_REG_DWORD(&reg->ctrl_status,
983	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
984	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
985
986	udelay(100);
987	/* Wait for firmware to complete NVRAM accesses. */
988	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
989	for (cnt = 10000 ; cnt && d2; cnt--) {
990		udelay(5);
991		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
992		barrier();
993	}
994
995	/* Wait for soft-reset to complete. */
996	d2 = RD_REG_DWORD(&reg->ctrl_status);
997	for (cnt = 6000000 ; cnt && (d2 & CSRX_ISP_SOFT_RESET); cnt--) {
998		udelay(5);
999		d2 = RD_REG_DWORD(&reg->ctrl_status);
1000		barrier();
1001	}
1002
1003	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
1004	RD_REG_DWORD(&reg->hccr);
1005
1006	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
1007	RD_REG_DWORD(&reg->hccr);
1008
1009	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1010	RD_REG_DWORD(&reg->hccr);
1011
1012	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1013	for (cnt = 6000000 ; cnt && d2; cnt--) {
1014		udelay(5);
1015		d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
1016		barrier();
1017	}
1018
1019	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1020
1021	if (IS_NOPOLLING_TYPE(ha))
1022		ha->isp_ops->enable_intrs(ha);
1023}
1024
1025/**
1026 * qla24xx_reset_chip() - Reset ISP24xx chip.
1027 * @ha: HA context
1028 *
1029 * Returns 0 on success.
1030 */
1031void
1032qla24xx_reset_chip(scsi_qla_host_t *vha)
1033{
1034	struct qla_hw_data *ha = vha->hw;
1035
1036	if (pci_channel_offline(ha->pdev) &&
1037	    ha->flags.pci_channel_io_perm_failure) {
1038		return;
1039	}
1040
1041	ha->isp_ops->disable_intrs(ha);
1042
1043	/* Perform RISC reset. */
1044	qla24xx_reset_risc(vha);
1045}
1046
1047/**
1048 * qla2x00_chip_diag() - Test chip for proper operation.
1049 * @ha: HA context
1050 *
1051 * Returns 0 on success.
1052 */
1053int
1054qla2x00_chip_diag(scsi_qla_host_t *vha)
1055{
1056	int		rval;
1057	struct qla_hw_data *ha = vha->hw;
1058	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1059	unsigned long	flags = 0;
1060	uint16_t	data;
1061	uint32_t	cnt;
1062	uint16_t	mb[5];
1063	struct req_que *req = ha->req_q_map[0];
1064
1065	/* Assume a failed state */
1066	rval = QLA_FUNCTION_FAILED;
1067
1068	DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
1069	    vha->host_no, (u_long)&reg->flash_address));
1070
1071	spin_lock_irqsave(&ha->hardware_lock, flags);
1072
1073	/* Reset ISP chip. */
1074	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1075
1076	/*
1077	 * We need to have a delay here since the card will not respond while
1078	 * in reset, which can cause an MCA on some architectures.
1079	 */
1080	udelay(20);
1081	data = qla2x00_debounce_register(&reg->ctrl_status);
1082	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
1083		udelay(5);
1084		data = RD_REG_WORD(&reg->ctrl_status);
1085		barrier();
1086	}
1087
1088	if (!cnt)
1089		goto chip_diag_failed;
1090
1091	DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
1092	    vha->host_no));
1093
1094	/* Reset RISC processor. */
1095	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1096	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1097
1098	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1099		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
1100		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
1101			udelay(5);
1102			data = RD_MAILBOX_REG(ha, reg, 0);
1103			barrier();
1104		}
1105	} else
1106		udelay(10);
1107
1108	if (!cnt)
1109		goto chip_diag_failed;
1110
1111	/* Check product ID of chip */
1112	DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
1113
1114	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1115	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
1116	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
1117	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1118	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1119	    mb[3] != PROD_ID_3) {
1120		qla_printk(KERN_WARNING, ha,
1121		    "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
1122
1123		goto chip_diag_failed;
1124	}
1125	ha->product_id[0] = mb[1];
1126	ha->product_id[1] = mb[2];
1127	ha->product_id[2] = mb[3];
1128	ha->product_id[3] = mb[4];
1129
1130	/* Adjust fw RISC transfer size */
1131	if (req->length > 1024)
1132		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
1133	else
1134		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
1135		    req->length;
1136
1137	if (IS_QLA2200(ha) &&
1138	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1139		/* Limit firmware transfer size with a 2200A */
1140		DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
1141		    vha->host_no));
1142
1143		ha->device_type |= DT_ISP2200A;
1144		ha->fw_transfer_size = 128;
1145	}
1146
1147	/* Wrap Incoming Mailboxes Test. */
1148	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1149
1150	DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
1151	rval = qla2x00_mbx_reg_test(vha);
1152	if (rval) {
1153		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1154		    vha->host_no));
1155		qla_printk(KERN_WARNING, ha,
1156		    "Failed mailbox send register test\n");
1157	} else {
1159		/* Flag a successful rval */
1160		rval = QLA_SUCCESS;
1161	}
1162	spin_lock_irqsave(&ha->hardware_lock, flags);
1163
1164chip_diag_failed:
1165	if (rval)
1166		DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
1167		    "****\n", vha->host_no));
1168
1169	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1170
1171	return (rval);
1172}
1173
1174/**
1175 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
1176 * @ha: HA context
1177 *
1178 * Returns 0 on success.
1179 */
1180int
1181qla24xx_chip_diag(scsi_qla_host_t *vha)
1182{
1183	int rval;
1184	struct qla_hw_data *ha = vha->hw;
1185	struct req_que *req = ha->req_q_map[0];
1186
1187	if (IS_QLA82XX(ha))
1188		return QLA_SUCCESS;
1189
1190	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
1191
1192	rval = qla2x00_mbx_reg_test(vha);
1193	if (rval) {
1194		DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
1195		    vha->host_no));
1196		qla_printk(KERN_WARNING, ha,
1197		    "Failed mailbox send register test\n");
1198	} else {
1199		/* Flag a successful rval */
1200		rval = QLA_SUCCESS;
1201	}
1202
1203	return rval;
1204}
1205
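/**
 * qla2x00_alloc_fw_dump() - Allocate the firmware-dump buffer.
 * @vha: HA context
 *
 * Sizes the dump region for the detected ISP type (fixed registers, external
 * memory, request/response queues and, where supported, FCE and EFT trace
 * buffers) and allocates it with vmalloc().  Does nothing if a dump buffer
 * already exists.
 */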
1206void
1207qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1208{
1209	int rval;
1210	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
1211	    eft_size, fce_size, mq_size;
1212	dma_addr_t tc_dma;
1213	void *tc;
1214	struct qla_hw_data *ha = vha->hw;
1215	struct req_que *req = ha->req_q_map[0];
1216	struct rsp_que *rsp = ha->rsp_q_map[0];
1217
1218	if (ha->fw_dump) {
1219		qla_printk(KERN_WARNING, ha,
1220		    "Firmware dump previously allocated.\n");
1221		return;
1222	}
1223
1224	ha->fw_dumped = 0;
1225	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1226	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1227		fixed_size = sizeof(struct qla2100_fw_dump);
1228	} else if (IS_QLA23XX(ha)) {
1229		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
1230		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1231		    sizeof(uint16_t);
1232	} else if (IS_FWI2_CAPABLE(ha)) {
1233		if (IS_QLA81XX(ha))
1234			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1235		else if (IS_QLA25XX(ha))
1236			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1237		else
1238			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1239		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1240		    sizeof(uint32_t);
1241		if (ha->mqenable)
1242			mq_size = sizeof(struct qla2xxx_mq_chain);
1243		/* Allocate memory for Fibre Channel Event Buffer. */
1244		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1245			goto try_eft;
1246
1247		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1248		    GFP_KERNEL);
1249		if (!tc) {
1250			qla_printk(KERN_WARNING, ha, "Unable to allocate "
1251			    "(%d KB) for FCE.\n", FCE_SIZE / 1024);
1252			goto try_eft;
1253		}
1254
1255		memset(tc, 0, FCE_SIZE);
1256		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1257		    ha->fce_mb, &ha->fce_bufs);
1258		if (rval) {
1259			qla_printk(KERN_WARNING, ha, "Unable to initialize "
1260			    "FCE (%d).\n", rval);
1261			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1262			    tc_dma);
1263			ha->flags.fce_enabled = 0;
1264			goto try_eft;
1265		}
1266
1267		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
1268		    FCE_SIZE / 1024);
1269
1270		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1271		ha->flags.fce_enabled = 1;
1272		ha->fce_dma = tc_dma;
1273		ha->fce = tc;
1274try_eft:
1275		/* Allocate memory for Extended Trace Buffer. */
1276		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1277		    GFP_KERNEL);
1278		if (!tc) {
1279			qla_printk(KERN_WARNING, ha, "Unable to allocate "
1280			    "(%d KB) for EFT.\n", EFT_SIZE / 1024);
1281			goto cont_alloc;
1282		}
1283
1284		memset(tc, 0, EFT_SIZE);
1285		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1286		if (rval) {
1287			qla_printk(KERN_WARNING, ha, "Unable to initialize "
1288			    "EFT (%d).\n", rval);
1289			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1290			    tc_dma);
1291			goto cont_alloc;
1292		}
1293
1294		qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
1295		    EFT_SIZE / 1024);
1296
1297		eft_size = EFT_SIZE;
1298		ha->eft_dma = tc_dma;
1299		ha->eft = tc;
1300	}
1301cont_alloc:
1302	req_q_size = req->length * sizeof(request_t);
1303	rsp_q_size = rsp->length * sizeof(response_t);
1304
1305	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
1306	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
1307	ha->chain_offset = dump_size;
1308	dump_size += mq_size + fce_size;
1309
1310	ha->fw_dump = vmalloc(dump_size);
1311	if (!ha->fw_dump) {
1312		qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
1313		    "firmware dump!!!\n", dump_size / 1024);
1314
1315		if (ha->eft) {
1316			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
1317			    ha->eft_dma);
1318			ha->eft = NULL;
1319			ha->eft_dma = 0;
1320		}
1321		return;
1322	}
1323	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
1324	    dump_size / 1024);
1325
1326	ha->fw_dump_len = dump_size;
1327	ha->fw_dump->signature[0] = 'Q';
1328	ha->fw_dump->signature[1] = 'L';
1329	ha->fw_dump->signature[2] = 'G';
1330	ha->fw_dump->signature[3] = 'C';
1331	ha->fw_dump->version = __constant_htonl(1);
1332
1333	ha->fw_dump->fixed_size = htonl(fixed_size);
1334	ha->fw_dump->mem_size = htonl(mem_size);
1335	ha->fw_dump->req_q_size = htonl(req_q_size);
1336	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
1337
1338	ha->fw_dump->eft_size = htonl(eft_size);
1339	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
1340	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
1341
1342	ha->fw_dump->header_size =
1343	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
1344}
1345
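/**
 * qla81xx_mpi_sync() - Synchronize the MPS setting on ISP81xx adapters.
 * @vha: HA context
 *
 * Propagates the MPS bits from PCI configuration space into RISC RAM,
 * guarded by a firmware semaphore.  No-op on other ISP types.
 */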
1346static int
1347qla81xx_mpi_sync(scsi_qla_host_t *vha)
1348{
1349#define MPS_MASK	0xe0
1350	int rval;
1351	uint16_t dc;
1352	uint32_t dw;
1353	struct qla_hw_data *ha = vha->hw;
1354
1355	if (!IS_QLA81XX(vha->hw))
1356		return QLA_SUCCESS;
1357
1358	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1359	if (rval != QLA_SUCCESS) {
1360		DEBUG2(qla_printk(KERN_WARNING, ha,
1361		    "Sync-MPI: Unable to acquire semaphore.\n"));
1362		goto done;
1363	}
1364
1365	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1366	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1367	if (rval != QLA_SUCCESS) {
1368		DEBUG2(qla_printk(KERN_WARNING, ha,
1369		    "Sync-MPI: Unable to read sync.\n"));
1370		goto done_release;
1371	}
1372
1373	dc &= MPS_MASK;
1374	if (dc == (dw & MPS_MASK))
1375		goto done_release;
1376
1377	dw &= ~MPS_MASK;
1378	dw |= dc;
1379	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1380	if (rval != QLA_SUCCESS) {
1381		DEBUG2(qla_printk(KERN_WARNING, ha,
1382		    "Sync-MPI: Unable to gain sync.\n"));
1383	}
1384
1385done_release:
1386	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1387	if (rval != QLA_SUCCESS) {
1388		DEBUG2(qla_printk(KERN_WARNING, ha,
1389		    "Sync-MPI: Unable to release semaphore.\n"));
1390	}
1391
1392done:
1393	return rval;
1394}
1395
1396/**
1397 * qla2x00_setup_chip() - Load and start RISC firmware.
1398 * @ha: HA context
1399 *
1400 * Returns 0 on success.
1401 */
1402static int
1403qla2x00_setup_chip(scsi_qla_host_t *vha)
1404{
1405	int rval;
1406	uint32_t srisc_address = 0;
1407	struct qla_hw_data *ha = vha->hw;
1408	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1409	unsigned long flags;
1410	uint16_t fw_major_version;
1411
1412	if (IS_QLA82XX(ha)) {
1413		rval = ha->isp_ops->load_risc(vha, &srisc_address);
1414		if (rval == QLA_SUCCESS) {
1415			qla2x00_stop_firmware(vha);
1416			goto enable_82xx_npiv;
1417		} else
1418			goto failed;
1419	}
1420
1421	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1422		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
1423		spin_lock_irqsave(&ha->hardware_lock, flags);
1424		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
1425		RD_REG_WORD(&reg->hccr);
1426		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1427	}
1428
1429	qla81xx_mpi_sync(vha);
1430
1431	/* Load firmware sequences */
1432	rval = ha->isp_ops->load_risc(vha, &srisc_address);
1433	if (rval == QLA_SUCCESS) {
1434		DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
1435		    "code.\n", vha->host_no));
1436
1437		rval = qla2x00_verify_checksum(vha, srisc_address);
1438		if (rval == QLA_SUCCESS) {
1439			/* Start firmware execution. */
1440			DEBUG(printk("scsi(%ld): Checksum OK, start "
1441			    "firmware.\n", vha->host_no));
1442
1443			rval = qla2x00_execute_fw(vha, srisc_address);
1444			/* Retrieve firmware information. */
1445			if (rval == QLA_SUCCESS) {
1446enable_82xx_npiv:
1447				fw_major_version = ha->fw_major_version;
1448				rval = qla2x00_get_fw_version(vha,
1449				    &ha->fw_major_version,
1450				    &ha->fw_minor_version,
1451				    &ha->fw_subminor_version,
1452				    &ha->fw_attributes, &ha->fw_memory_size,
1453				    ha->mpi_version, &ha->mpi_capabilities,
1454				    ha->phy_version);
1455				if (rval != QLA_SUCCESS)
1456					goto failed;
1457				ha->flags.npiv_supported = 0;
1458				if (IS_QLA2XXX_MIDTYPE(ha) &&
1459					 (ha->fw_attributes & BIT_2)) {
1460					ha->flags.npiv_supported = 1;
1461					if ((!ha->max_npiv_vports) ||
1462					    ((ha->max_npiv_vports + 1) %
1463					    MIN_MULTI_ID_FABRIC))
1464						ha->max_npiv_vports =
1465						    MIN_MULTI_ID_FABRIC - 1;
1466				}
1467				qla2x00_get_resource_cnts(vha, NULL,
1468				    &ha->fw_xcb_count, NULL, NULL,
1469				    &ha->max_npiv_vports, NULL);
1470
1471				if (!fw_major_version && ql2xallocfwdump) {
1472					if (!IS_QLA82XX(ha))
1473						qla2x00_alloc_fw_dump(vha);
1474				}
1475			}
1476		} else {
1477			DEBUG2(printk(KERN_INFO
1478			    "scsi(%ld): ISP Firmware failed checksum.\n",
1479			    vha->host_no));
1480		}
1481	}
1482
1483	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1484		/* Enable proper parity. */
1485		spin_lock_irqsave(&ha->hardware_lock, flags);
1486		if (IS_QLA2300(ha))
1487			/* SRAM parity */
1488			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
1489		else
1490			/* SRAM, Instruction RAM and GP RAM parity */
1491			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
1492		RD_REG_WORD(&reg->hccr);
1493		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1494	}
1495
1496	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1497		uint32_t size;
1498
1499		rval = qla81xx_fac_get_sector_size(vha, &size);
1500		if (rval == QLA_SUCCESS) {
1501			ha->flags.fac_supported = 1;
1502			ha->fdt_block_size = size << 2;
1503		} else {
1504			qla_printk(KERN_ERR, ha,
1505			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
1506			    ha->fw_major_version, ha->fw_minor_version,
1507			    ha->fw_subminor_version);
1508		}
1509	}
1510failed:
1511	if (rval) {
1512		DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1513		    vha->host_no));
1514	}
1515
1516	return (rval);
1517}
1518
1519/**
1520 * qla2x00_init_response_q_entries() - Initializes response queue entries.
1521 * @rsp: response queue to initialize
1522 *
1523 * Resets the ring pointers and marks every entry in the response ring as
1524 * processed.
1527 */
1528void
1529qla2x00_init_response_q_entries(struct rsp_que *rsp)
1530{
1531	uint16_t cnt;
1532	response_t *pkt;
1533
1534	rsp->ring_ptr = rsp->ring;
1535	rsp->ring_index    = 0;
1536	rsp->status_srb = NULL;
1537	pkt = rsp->ring_ptr;
1538	for (cnt = 0; cnt < rsp->length; cnt++) {
1539		pkt->signature = RESPONSE_PROCESSED;
1540		pkt++;
1541	}
1542}
1543
1544/**
1545 * qla2x00_update_fw_options() - Read and process firmware options.
1546 * @ha: HA context
1547 *
1548 * Returns 0 on success.
1549 */
1550void
1551qla2x00_update_fw_options(scsi_qla_host_t *vha)
1552{
1553	uint16_t swing, emphasis, tx_sens, rx_sens;
1554	struct qla_hw_data *ha = vha->hw;
1555
1556	memset(ha->fw_options, 0, sizeof(ha->fw_options));
1557	qla2x00_get_fw_options(vha, ha->fw_options);
1558
1559	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1560		return;
1561
1562	/* Serial Link options. */
1563	DEBUG3(printk("scsi(%ld): Serial link options:\n",
1564	    vha->host_no));
1565	DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1566	    sizeof(ha->fw_seriallink_options)));
1567
1568	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1569	if (ha->fw_seriallink_options[3] & BIT_2) {
1570		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
1571
1572		/*  1G settings */
1573		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
1574		emphasis = (ha->fw_seriallink_options[2] &
1575		    (BIT_4 | BIT_3)) >> 3;
1576		tx_sens = ha->fw_seriallink_options[0] &
1577		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1578		rx_sens = (ha->fw_seriallink_options[0] &
1579		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1580		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
1581		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1582			if (rx_sens == 0x0)
1583				rx_sens = 0x3;
1584			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
1585		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1586			ha->fw_options[10] |= BIT_5 |
1587			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1588			    (tx_sens & (BIT_1 | BIT_0));
1589
1590		/*  2G settings */
1591		swing = (ha->fw_seriallink_options[2] &
1592		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
1593		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
1594		tx_sens = ha->fw_seriallink_options[1] &
1595		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1596		rx_sens = (ha->fw_seriallink_options[1] &
1597		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
1598		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
1599		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
1600			if (rx_sens == 0x0)
1601				rx_sens = 0x3;
1602			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
1603		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
1604			ha->fw_options[11] |= BIT_5 |
1605			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
1606			    (tx_sens & (BIT_1 | BIT_0));
1607	}
1608
1609	/* FCP2 options. */
1610	/*  Return command IOCBs without waiting for an ABTS to complete. */
1611	ha->fw_options[3] |= BIT_13;
1612
1613	/* LED scheme. */
1614	if (ha->flags.enable_led_scheme)
1615		ha->fw_options[2] |= BIT_12;
1616
1617	/* Detect ISP6312. */
1618	if (IS_QLA6312(ha))
1619		ha->fw_options[2] |= BIT_13;
1620
1621	/* Update firmware options. */
1622	qla2x00_set_fw_options(vha, ha->fw_options);
1623}
1624
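/**
 * qla24xx_update_fw_options() - Update serial link options.
 * @vha: HA context
 *
 * Applies the NVRAM serial link settings via the set-serdes-params mailbox
 * command when they are flagged as valid; no-op on ISP82xx.
 */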
1625void
1626qla24xx_update_fw_options(scsi_qla_host_t *vha)
1627{
1628	int rval;
1629	struct qla_hw_data *ha = vha->hw;
1630
1631	if (IS_QLA82XX(ha))
1632		return;
1633
1634	/* Update Serial Link options. */
1635	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1636		return;
1637
1638	rval = qla2x00_set_serdes_params(vha,
1639	    le16_to_cpu(ha->fw_seriallink_options24[1]),
1640	    le16_to_cpu(ha->fw_seriallink_options24[2]),
1641	    le16_to_cpu(ha->fw_seriallink_options24[3]));
1642	if (rval != QLA_SUCCESS) {
1643		qla_printk(KERN_WARNING, ha,
1644		    "Unable to update Serial Link options (%x).\n", rval);
1645	}
1646}
1647
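/**
 * qla2x00_config_rings() - Program request/response ring parameters.
 * @vha: HA context
 *
 * Copies the base queue lengths and DMA addresses into the initialization
 * control block and zeroes the ring in/out pointer registers.
 */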
1648void
1649qla2x00_config_rings(struct scsi_qla_host *vha)
1650{
1651	struct qla_hw_data *ha = vha->hw;
1652	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1653	struct req_que *req = ha->req_q_map[0];
1654	struct rsp_que *rsp = ha->rsp_q_map[0];
1655
1656	/* Setup ring parameters in initialization control block. */
1657	ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1658	ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1659	ha->init_cb->request_q_length = cpu_to_le16(req->length);
1660	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1661	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1662	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1663	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1664	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1665
1666	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1667	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
1668	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
1669	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
1670	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
1671}
1672
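/**
 * qla24xx_config_rings() - Program ring parameters on FWI2-capable ISPs.
 * @vha: HA context
 *
 * Fills the 24xx initialization control block with the base queue settings
 * and, when multiqueue is enabled, the QoS, RID and MSI-X fields, then
 * resets the queue pointer registers.
 */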
1673void
1674qla24xx_config_rings(struct scsi_qla_host *vha)
1675{
1676	struct qla_hw_data *ha = vha->hw;
1677	device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1678	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1679	struct qla_msix_entry *msix;
1680	struct init_cb_24xx *icb;
1681	uint16_t rid = 0;
1682	struct req_que *req = ha->req_q_map[0];
1683	struct rsp_que *rsp = ha->rsp_q_map[0];
1684
1685	/* Setup ring parameters in initialization control block. */
1686	icb = (struct init_cb_24xx *)ha->init_cb;
1687	icb->request_q_outpointer = __constant_cpu_to_le16(0);
1688	icb->response_q_inpointer = __constant_cpu_to_le16(0);
1689	icb->request_q_length = cpu_to_le16(req->length);
1690	icb->response_q_length = cpu_to_le16(rsp->length);
1691	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1692	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1693	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1694	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1695
1696	if (ha->mqenable) {
1697		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1698		icb->rid = __constant_cpu_to_le16(rid);
1699		if (ha->flags.msix_enabled) {
1700			msix = &ha->msix_entries[1];
1701			DEBUG2_17(printk(KERN_INFO
1702			"Registering vector 0x%x for base que\n", msix->entry));
1703			icb->msix = cpu_to_le16(msix->entry);
1704		}
1705		/* Use alternate PCI bus number */
1706		if (MSB(rid))
1707			icb->firmware_options_2 |=
1708				__constant_cpu_to_le32(BIT_19);
1709		/* Use alternate PCI devfn */
1710		if (LSB(rid))
1711			icb->firmware_options_2 |=
1712				__constant_cpu_to_le32(BIT_18);
1713
1714		/* Use Disable MSIX Handshake mode for capable adapters */
1715		if (IS_MSIX_NACK_CAPABLE(ha)) {
1716			icb->firmware_options_2 &=
1717				__constant_cpu_to_le32(~BIT_22);
1718			ha->flags.disable_msix_handshake = 1;
1719			qla_printk(KERN_INFO, ha,
1720				"MSIX Handshake Disable Mode turned on\n");
1721		} else {
1722			icb->firmware_options_2 |=
1723				__constant_cpu_to_le32(BIT_22);
1724		}
1725		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1726
1727		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1728		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1729		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1730		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1731	} else {
1732		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1733		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1734		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1735		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1736	}
1737	/* PCI posting */
1738	RD_REG_DWORD(&ioreg->hccr);
1739}
1740
1741/**
1742 * qla2x00_init_rings() - Initializes firmware.
1743 * @ha: HA context
1744 *
1745 * Beginning of request ring has initialization control block already built
1746 * by nvram config routine.
1747 *
1748 * Returns 0 on success.
1749 */
1750static int
1751qla2x00_init_rings(scsi_qla_host_t *vha)
1752{
1753	int	rval;
1754	unsigned long flags = 0;
1755	int cnt, que;
1756	struct qla_hw_data *ha = vha->hw;
1757	struct req_que *req;
1758	struct rsp_que *rsp;
1759	struct scsi_qla_host *vp;
1760	struct mid_init_cb_24xx *mid_init_cb =
1761	    (struct mid_init_cb_24xx *) ha->init_cb;
1762
1763	spin_lock_irqsave(&ha->hardware_lock, flags);
1764
1765	/* Clear outstanding commands array. */
1766	for (que = 0; que < ha->max_req_queues; que++) {
1767		req = ha->req_q_map[que];
1768		if (!req)
1769			continue;
1770		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1771			req->outstanding_cmds[cnt] = NULL;
1772
1773		req->current_outstanding_cmd = 1;
1774
1775		/* Initialize firmware. */
1776		req->ring_ptr  = req->ring;
1777		req->ring_index    = 0;
1778		req->cnt      = req->length;
1779	}
1780
1781	for (que = 0; que < ha->max_rsp_queues; que++) {
1782		rsp = ha->rsp_q_map[que];
1783		if (!rsp)
1784			continue;
1785		/* Initialize response queue entries */
1786		qla2x00_init_response_q_entries(rsp);
1787	}
1788
1789	spin_lock_irqsave(&ha->vport_slock, flags);
1790	/* Clear RSCN queue. */
1791	list_for_each_entry(vp, &ha->vp_list, list) {
1792		vp->rscn_in_ptr = 0;
1793		vp->rscn_out_ptr = 0;
1794	}
1795
1796	spin_unlock_irqrestore(&ha->vport_slock, flags);
1797
1798	ha->isp_ops->config_rings(vha);
1799
1800	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1801
1802	/* Update any ISP specific firmware options before initialization. */
1803	ha->isp_ops->update_fw_options(vha);
1804
1805	DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1806
1807	if (ha->flags.npiv_supported) {
1808		if (ha->operating_mode == LOOP)
1809			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1810		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1811	}
1812
1813	if (IS_FWI2_CAPABLE(ha)) {
1814		mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1815		mid_init_cb->init_cb.execution_throttle =
1816		    cpu_to_le16(ha->fw_xcb_count);
1817	}
1818
1819	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1820	if (rval) {
1821		DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1822		    vha->host_no));
1823	} else {
1824		DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1825		    vha->host_no));
1826	}
1827
1828	return (rval);
1829}
1830
1831/**
1832 * qla2x00_fw_ready() - Waits for firmware ready.
1833 * @ha: HA context
1834 *
1835 * Returns 0 on success.
1836 */
1837static int
1838qla2x00_fw_ready(scsi_qla_host_t *vha)
1839{
1840	int		rval;
1841	unsigned long	wtime, mtime, cs84xx_time;
1842	uint16_t	min_wait;	/* Minimum wait time if loop is down */
1843	uint16_t	wait_time;	/* Wait time if loop is coming ready */
1844	uint16_t	state[5];
1845	struct qla_hw_data *ha = vha->hw;
1846
1847	rval = QLA_SUCCESS;
1848
1849	/* 20 seconds for loop down. */
1850	min_wait = 20;
1851
1852	/*
1853	 * Firmware should take at most one RATOV to login, plus 5 seconds for
1854	 * our own processing.
1855	 */
1856	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
1857		wait_time = min_wait;
1858	}
1859
1860	/* Min wait time if loop down */
1861	mtime = jiffies + (min_wait * HZ);
1862
1863	/* wait time before firmware ready */
1864	wtime = jiffies + (wait_time * HZ);
1865
1866	/* Wait for ISP to finish LIP */
1867	if (!vha->flags.init_done)
1868		qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1869
1870	DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1871	    vha->host_no));
1872
1873	do {
1874		rval = qla2x00_get_firmware_state(vha, state);
1875		if (rval == QLA_SUCCESS) {
1876			if (state[0] < FSTATE_LOSS_OF_SYNC) {
1877				vha->device_flags &= ~DFLG_NO_CABLE;
1878			}
1879			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1880				DEBUG16(printk("scsi(%ld): fw_state=%x "
1881				    "84xx=%x.\n", vha->host_no, state[0],
1882				    state[2]));
1883				if ((state[2] & FSTATE_LOGGED_IN) &&
1884				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1885					DEBUG16(printk("scsi(%ld): Sending "
1886					    "verify iocb.\n", vha->host_no));
1887
1888					cs84xx_time = jiffies;
1889					rval = qla84xx_init_chip(vha);
1890					if (rval != QLA_SUCCESS)
1891						break;
1892
1893					/* Add time taken to initialize. */
1894					cs84xx_time = jiffies - cs84xx_time;
1895					wtime += cs84xx_time;
1896					mtime += cs84xx_time;
1897					DEBUG16(printk("scsi(%ld): Increasing "
1898					    "wait time by %ld. New time %ld\n",
1899					    vha->host_no, cs84xx_time, wtime));
1900				}
1901			} else if (state[0] == FSTATE_READY) {
1902				DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1903				    vha->host_no));
1904
1905				qla2x00_get_retry_cnt(vha, &ha->retry_count,
1906				    &ha->login_timeout, &ha->r_a_tov);
1907
1908				rval = QLA_SUCCESS;
1909				break;
1910			}
1911
1912			rval = QLA_FUNCTION_FAILED;
1913
1914			if (atomic_read(&vha->loop_down_timer) &&
1915			    state[0] != FSTATE_READY) {
1916				/* Loop down. Timeout on min_wait for states
1917				 * other than Wait for Login.
1918				 */
1919				if (time_after_eq(jiffies, mtime)) {
1920					qla_printk(KERN_INFO, ha,
1921					    "Cable is unplugged...\n");
1922
1923					vha->device_flags |= DFLG_NO_CABLE;
1924					break;
1925				}
1926			}
1927		} else {
1928			/* Mailbox cmd failed. Timeout on min_wait. */
1929			if (time_after_eq(jiffies, mtime) ||
1930			    (IS_QLA82XX(ha) && ha->flags.fw_hung))
1931				break;
1932		}
1933
1934		if (time_after_eq(jiffies, wtime))
1935			break;
1936
1937		/* Delay for a while */
1938		msleep(500);
1939
1940		DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1941		    vha->host_no, state[0], jiffies));
1942	} while (1);
1943
1944	DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1945	    vha->host_no, state[0], state[1], state[2], state[3], state[4],
1946	    jiffies));
1947
1948	if (rval) {
1949		DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1950		    vha->host_no));
1951	}
1952
1953	return (rval);
1954}
1955
1956/*
1957*  qla2x00_configure_hba
1958*      Setup adapter context.
1959*
1960* Input:
1961*      ha = adapter state pointer.
1962*
1963* Returns:
1964*      0 = success
1965*
1966* Context:
1967*      Kernel context.
1968*/
1969static int
1970qla2x00_configure_hba(scsi_qla_host_t *vha)
1971{
1972	int       rval;
1973	uint16_t      loop_id;
1974	uint16_t      topo;
1975	uint16_t      sw_cap;
1976	uint8_t       al_pa;
1977	uint8_t       area;
1978	uint8_t       domain;
1979	char		connect_type[22];
1980	struct qla_hw_data *ha = vha->hw;
1981
1982	/* Get host addresses. */
1983	rval = qla2x00_get_adapter_id(vha,
1984	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1985	if (rval != QLA_SUCCESS) {
1986		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1987		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1988			DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1989			    __func__, vha->host_no));
1990		} else {
1991			qla_printk(KERN_WARNING, ha,
1992			    "ERROR -- Unable to get host loop ID.\n");
1993			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1994		}
1995		return (rval);
1996	}
1997
1998	if (topo == 4) {
1999		qla_printk(KERN_INFO, ha,
2000			"Cannot get topology - retrying.\n");
2001		return (QLA_FUNCTION_FAILED);
2002	}
2003
2004	vha->loop_id = loop_id;
2005
2006	/* initialize */
2007	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
2008	ha->operating_mode = LOOP;
2009	ha->switch_cap = 0;
2010
2011	switch (topo) {
2012	case 0:
2013		DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
2014		    vha->host_no));
2015		ha->current_topology = ISP_CFG_NL;
2016		strcpy(connect_type, "(Loop)");
2017		break;
2018
2019	case 1:
2020		DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
2021		    vha->host_no));
2022		ha->switch_cap = sw_cap;
2023		ha->current_topology = ISP_CFG_FL;
2024		strcpy(connect_type, "(FL_Port)");
2025		break;
2026
2027	case 2:
2028		DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
2029		    vha->host_no));
2030		ha->operating_mode = P2P;
2031		ha->current_topology = ISP_CFG_N;
2032		strcpy(connect_type, "(N_Port-to-N_Port)");
2033		break;
2034
2035	case 3:
2036		DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
2037		    vha->host_no));
2038		ha->switch_cap = sw_cap;
2039		ha->operating_mode = P2P;
2040		ha->current_topology = ISP_CFG_F;
2041		strcpy(connect_type, "(F_Port)");
2042		break;
2043
2044	default:
2045		DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
2046		    "Using NL.\n",
2047		    vha->host_no, topo));
2048		ha->current_topology = ISP_CFG_NL;
2049		strcpy(connect_type, "(Loop)");
2050		break;
2051	}
2052
2053	/* Save Host port and loop ID. */
2054	/* byte order - Big Endian */
2055	vha->d_id.b.domain = domain;
2056	vha->d_id.b.area = area;
2057	vha->d_id.b.al_pa = al_pa;
2058
2059	if (!vha->flags.init_done)
2060		qla_printk(KERN_INFO, ha,
2061		    "Topology - %s, Host Loop address 0x%x\n",
2062		    connect_type, vha->loop_id);
2063
2064	if (rval) {
2065		DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
2066	} else {
2067		DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
2068	}
2069
2070	return(rval);
2071}
2072
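/*
 * Derive the adapter model number and description: prefer the model string
 * supplied in NVRAM/VPD (trimming trailing blanks and NULs), fall back on
 * older ISPs to the qla2x00_model_name table keyed by the PCI subsystem
 * device ID, or to the caller-supplied default.  FWI2-capable adapters also
 * refresh the description from VPD field 0x82 (the identifier string).
 */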
2073inline void
2074qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2075	char *def)
2076{
2077	char *st, *en;
2078	uint16_t index;
2079	struct qla_hw_data *ha = vha->hw;
2080	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2081	    !IS_QLA8XXX_TYPE(ha);
2082
2083	if (memcmp(model, BINZERO, len) != 0) {
2084		strncpy(ha->model_number, model, len);
2085		st = en = ha->model_number;
2086		en += len - 1;
2087		while (en > st) {
2088			if (*en != 0x20 && *en != 0x00)
2089				break;
2090			*en-- = '\0';
2091		}
2092
2093		index = (ha->pdev->subsystem_device & 0xff);
2094		if (use_tbl &&
2095		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2096		    index < QLA_MODEL_NAMES)
2097			strncpy(ha->model_desc,
2098			    qla2x00_model_name[index * 2 + 1],
2099			    sizeof(ha->model_desc) - 1);
2100	} else {
2101		index = (ha->pdev->subsystem_device & 0xff);
2102		if (use_tbl &&
2103		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
2104		    index < QLA_MODEL_NAMES) {
2105			strcpy(ha->model_number,
2106			    qla2x00_model_name[index * 2]);
2107			strncpy(ha->model_desc,
2108			    qla2x00_model_name[index * 2 + 1],
2109			    sizeof(ha->model_desc) - 1);
2110		} else {
2111			strcpy(ha->model_number, def);
2112		}
2113	}
2114	if (IS_FWI2_CAPABLE(ha))
2115		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
2116		    sizeof(ha->model_desc));
2117}
2118
2119/* On sparc systems, obtain port and node WWN from firmware
2120 * properties.
2121 */
2122static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
2123{
2124#ifdef CONFIG_SPARC
2125	struct qla_hw_data *ha = vha->hw;
2126	struct pci_dev *pdev = ha->pdev;
2127	struct device_node *dp = pci_device_to_OF_node(pdev);
2128	const u8 *val;
2129	int len;
2130
2131	val = of_get_property(dp, "port-wwn", &len);
2132	if (val && len >= WWN_SIZE)
2133		memcpy(nv->port_name, val, WWN_SIZE);
2134
2135	val = of_get_property(dp, "node-wwn", &len);
2136	if (val && len >= WWN_SIZE)
2137		memcpy(nv->node_name, val, WWN_SIZE);
2138#endif
2139}
2140
2141/*
2142* NVRAM configuration for ISP 2xxx
2143*
2144* Input:
2145*      ha                = adapter block pointer.
2146*
2147* Output:
2148*      initialization control block in response_ring
2149*      host adapters parameters in host adapter block
2150*
2151* Returns:
2152*      0 = success.
2153*/
2154int
2155qla2x00_nvram_config(scsi_qla_host_t *vha)
2156{
2157	int             rval;
2158	uint8_t         chksum = 0;
2159	uint16_t        cnt;
2160	uint8_t         *dptr1, *dptr2;
2161	struct qla_hw_data *ha = vha->hw;
2162	init_cb_t       *icb = ha->init_cb;
2163	nvram_t         *nv = ha->nvram;
2164	uint8_t         *ptr = ha->nvram;
2165	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2166
2167	rval = QLA_SUCCESS;
2168
2169	/* Determine NVRAM starting address. */
2170	ha->nvram_size = sizeof(nvram_t);
2171	ha->nvram_base = 0;
2172	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
2173		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
2174			ha->nvram_base = 0x80;
2175
2176	/* Get NVRAM data and calculate checksum. */
2177	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
2178	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2179		chksum += *ptr++;
2180
2181	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
2182	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
2183
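	/*
	 * The NVRAM image is considered valid only when the 8-bit additive
	 * checksum over the entire image is zero, the "ISP " signature is
	 * present, and the NVRAM version is at least 1.
	 */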
2184	/* Bad NVRAM data, set default parameters. */
2185	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2186	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2187		/* Reset NVRAM data. */
2188		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
2189		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
2190		    nv->nvram_version);
2191		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
2192		    "invalid -- WWPN) defaults.\n");
2193
2194		/*
2195		 * Set default initialization control block.
2196		 */
2197		memset(nv, 0, ha->nvram_size);
2198		nv->parameter_block_version = ICB_VERSION;
2199
2200		if (IS_QLA23XX(ha)) {
2201			nv->firmware_options[0] = BIT_2 | BIT_1;
2202			nv->firmware_options[1] = BIT_7 | BIT_5;
2203			nv->add_firmware_options[0] = BIT_5;
2204			nv->add_firmware_options[1] = BIT_5 | BIT_4;
2205			nv->frame_payload_size = __constant_cpu_to_le16(2048);
2206			nv->special_options[1] = BIT_7;
2207		} else if (IS_QLA2200(ha)) {
2208			nv->firmware_options[0] = BIT_2 | BIT_1;
2209			nv->firmware_options[1] = BIT_7 | BIT_5;
2210			nv->add_firmware_options[0] = BIT_5;
2211			nv->add_firmware_options[1] = BIT_5 | BIT_4;
2212			nv->frame_payload_size = __constant_cpu_to_le16(1024);
2213		} else if (IS_QLA2100(ha)) {
2214			nv->firmware_options[0] = BIT_3 | BIT_1;
2215			nv->firmware_options[1] = BIT_5;
2216			nv->frame_payload_size = __constant_cpu_to_le16(1024);
2217		}
2218
2219		nv->max_iocb_allocation = __constant_cpu_to_le16(256);
2220		nv->execution_throttle = __constant_cpu_to_le16(16);
2221		nv->retry_count = 8;
2222		nv->retry_delay = 1;
2223
2224		nv->port_name[0] = 33;
2225		nv->port_name[3] = 224;
2226		nv->port_name[4] = 139;
2227
2228		qla2xxx_nvram_wwn_from_ofw(vha, nv);
2229
2230		nv->login_timeout = 4;
2231
2232		/*
2233		 * Set default host adapter parameters
2234		 */
2235		nv->host_p[1] = BIT_2;
2236		nv->reset_delay = 5;
2237		nv->port_down_retry_count = 8;
2238		nv->max_luns_per_target = __constant_cpu_to_le16(8);
2239		nv->link_down_timeout = 60;
2240
2241		rval = 1;
2242	}
2243
2244#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2245	/*
2246	 * The SN2 does not provide BIOS emulation which means you can't change
2247	 * potentially bogus BIOS settings. Force the use of default settings
2248	 * for link rate and frame size.  Hope that the rest of the settings
2249	 * are valid.
2250	 */
2251	if (ia64_platform_is("sn2")) {
2252		nv->frame_payload_size = __constant_cpu_to_le16(2048);
2253		if (IS_QLA23XX(ha))
2254			nv->special_options[1] = BIT_7;
2255	}
2256#endif
2257
2258	/* Reset Initialization control block */
2259	memset(icb, 0, ha->init_cb_size);
2260
2261	/*
2262	 * Setup driver NVRAM options.
2263	 */
2264	nv->firmware_options[0] |= (BIT_6 | BIT_1);
2265	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
2266	nv->firmware_options[1] |= (BIT_5 | BIT_0);
2267	nv->firmware_options[1] &= ~BIT_4;
2268
2269	if (IS_QLA23XX(ha)) {
2270		nv->firmware_options[0] |= BIT_2;
2271		nv->firmware_options[0] &= ~BIT_3;
2272		nv->firmware_options[0] &= ~BIT_6;
2273		nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2274
2275		if (IS_QLA2300(ha)) {
2276			if (ha->fb_rev == FPM_2310) {
2277				strcpy(ha->model_number, "QLA2310");
2278			} else {
2279				strcpy(ha->model_number, "QLA2300");
2280			}
2281		} else {
2282			qla2x00_set_model_info(vha, nv->model_number,
2283			    sizeof(nv->model_number), "QLA23xx");
2284		}
2285	} else if (IS_QLA2200(ha)) {
2286		nv->firmware_options[0] |= BIT_2;
2287		/*
2288		 * 'Point-to-point preferred, else loop' is not a safe
2289		 * connection mode setting.
2290		 */
2291		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
2292		    (BIT_5 | BIT_4)) {
2293			/* Force 'loop preferred, else point-to-point'. */
2294			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
2295			nv->add_firmware_options[0] |= BIT_5;
2296		}
2297		strcpy(ha->model_number, "QLA22xx");
2298	} else /*if (IS_QLA2100(ha))*/ {
2299		strcpy(ha->model_number, "QLA2100");
2300	}
2301
2302	/*
2303	 * Copy over NVRAM RISC parameter block to initialization control block.
2304	 */
2305	dptr1 = (uint8_t *)icb;
2306	dptr2 = (uint8_t *)&nv->parameter_block_version;
2307	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
2308	while (cnt--)
2309		*dptr1++ = *dptr2++;
2310
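	/*
	 * The ICB fields between request_q_outpointer and add_firmware_options
	 * (the request/response queue setup) are skipped -- they are not taken
	 * from NVRAM.  The NVRAM source pointer simply continues where the
	 * first copy stopped.
	 */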
2311	/* Copy 2nd half. */
2312	dptr1 = (uint8_t *)icb->add_firmware_options;
2313	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
2314	while (cnt--)
2315		*dptr1++ = *dptr2++;
2316
2317	/* Use alternate WWN? */
2318	if (nv->host_p[1] & BIT_7) {
2319		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
2320		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
2321	}
2322
2323	/* Prepare nodename */
2324	if ((icb->firmware_options[1] & BIT_6) == 0) {
2325		/*
2326		 * Firmware will apply the following mask if the nodename was
2327		 * not provided.
2328		 */
2329		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
2330		icb->node_name[0] &= 0xF0;
2331	}
2332
2333	/*
2334	 * Set host adapter parameters.
2335	 */
2336	if (nv->host_p[0] & BIT_7)
2337		ql2xextended_error_logging = 1;
2338	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2339	/* Always load RISC code on non ISP2[12]00 chips. */
2340	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
2341		ha->flags.disable_risc_code_load = 0;
2342	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
2343	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
2344	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
2345	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
2346	ha->flags.disable_serdes = 0;
2347
2348	ha->operating_mode =
2349	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
2350
2351	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
2352	    sizeof(ha->fw_seriallink_options));
2353
2354	/* save HBA serial number */
2355	ha->serial0 = icb->port_name[5];
2356	ha->serial1 = icb->port_name[6];
2357	ha->serial2 = icb->port_name[7];
2358	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2359	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2360
2361	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
2362
2363	ha->retry_count = nv->retry_count;
2364
2365	/* Set minimum login_timeout to 4 seconds. */
2366	if (nv->login_timeout != ql2xlogintimeout)
2367		nv->login_timeout = ql2xlogintimeout;
2368	if (nv->login_timeout < 4)
2369		nv->login_timeout = 4;
2370	ha->login_timeout = nv->login_timeout;
2371	icb->login_timeout = nv->login_timeout;
2372
2373	/* Set minimum RATOV to 100 tenths of a second. */
2374	ha->r_a_tov = 100;
2375
2376	ha->loop_reset_delay = nv->reset_delay;
2377
2378	/* Link Down Timeout = 0:
2379	 *
2380	 * 	When Port Down timer expires we will start returning
2381	 *	I/O's to OS with "DID_NO_CONNECT".
2382	 *
2383	 * Link Down Timeout != 0:
2384	 *
2385	 *	 The driver waits for the link to come up after link down
2386	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
2387	 */
2388	if (nv->link_down_timeout == 0) {
2389		ha->loop_down_abort_time =
2390		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
2391	} else {
2392		ha->link_down_timeout = nv->link_down_timeout;
2393		ha->loop_down_abort_time =
2394		    (LOOP_DOWN_TIME - ha->link_down_timeout);
2395	}
2396
2397	/*
2398	 * Need enough time to try and get the port back.
2399	 */
2400	ha->port_down_retry_count = nv->port_down_retry_count;
2401	if (qlport_down_retry)
2402		ha->port_down_retry_count = qlport_down_retry;
2403	/* Set login_retry_count */
2404	ha->login_retry_count  = nv->retry_count;
2405	if (ha->port_down_retry_count == nv->port_down_retry_count &&
2406	    ha->port_down_retry_count > 3)
2407		ha->login_retry_count = ha->port_down_retry_count;
2408	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
2409		ha->login_retry_count = ha->port_down_retry_count;
2410	if (ql2xloginretrycount)
2411		ha->login_retry_count = ql2xloginretrycount;
2412
2413	icb->lun_enables = __constant_cpu_to_le16(0);
2414	icb->command_resource_count = 0;
2415	icb->immediate_notify_resource_count = 0;
2416	icb->timeout = __constant_cpu_to_le16(0);
2417
2418	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2419		/* Enable RIO */
2420		icb->firmware_options[0] &= ~BIT_3;
2421		icb->add_firmware_options[0] &=
2422		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2423		icb->add_firmware_options[0] |= BIT_2;
2424		icb->response_accumulation_timer = 3;
2425		icb->interrupt_delay_timer = 5;
2426
2427		vha->flags.process_response_queue = 1;
2428	} else {
2429		/* Enable ZIO. */
2430		if (!vha->flags.init_done) {
2431			ha->zio_mode = icb->add_firmware_options[0] &
2432			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
2433			ha->zio_timer = icb->interrupt_delay_timer ?
2434			    icb->interrupt_delay_timer : 2;
2435		}
2436		icb->add_firmware_options[0] &=
2437		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
2438		vha->flags.process_response_queue = 0;
2439		if (ha->zio_mode != QLA_ZIO_DISABLED) {
2440			ha->zio_mode = QLA_ZIO_MODE_6;
2441
2442			DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
2443			    "delay (%d us).\n", vha->host_no, ha->zio_mode,
2444			    ha->zio_timer * 100));
2445			qla_printk(KERN_INFO, ha,
2446			    "ZIO mode %d enabled; timer delay (%d us).\n",
2447			    ha->zio_mode, ha->zio_timer * 100);
2448
2449			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
2450			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
2451			vha->flags.process_response_queue = 1;
2452		}
2453	}
2454
2455	if (rval) {
2456		DEBUG2_3(printk(KERN_WARNING
2457		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
2458	}
2459	return (rval);
2460}
2461
2462static void
2463qla2x00_rport_del(void *data)
2464{
2465	fc_port_t *fcport = data;
2466	struct fc_rport *rport;
2467
2468	spin_lock_irq(fcport->vha->host->host_lock);
2469	rport = fcport->drport ? fcport->drport : fcport->rport;
2470	fcport->drport = NULL;
2471	spin_unlock_irq(fcport->vha->host->host_lock);
2472	if (rport)
2473		fc_remote_port_delete(rport);
2474}
2475
2476/**
2477 * qla2x00_alloc_fcport() - Allocate a generic fcport.
2478 * @vha: HA context
2479 * @flags: allocation flags
2480 *
2481 * Returns a pointer to the allocated fcport, or NULL, if none available.
2482 */
2483fc_port_t *
2484qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2485{
2486	fc_port_t *fcport;
2487
2488	fcport = kzalloc(sizeof(fc_port_t), flags);
2489	if (!fcport)
2490		return NULL;
2491
2492	/* Setup fcport template structure. */
2493	fcport->vha = vha;
2494	fcport->vp_idx = vha->vp_idx;
2495	fcport->port_type = FCT_UNKNOWN;
2496	fcport->loop_id = FC_NO_LOOP_ID;
2497	atomic_set(&fcport->state, FCS_UNCONFIGURED);
2498	fcport->supported_classes = FC_COS_UNSPECIFIED;
2499
2500	return fcport;
2501}
2502
2503/*
2504 * qla2x00_configure_loop
2505 *      Updates Fibre Channel Device Database with what is actually on loop.
2506 *
2507 * Input:
2508 *      ha                = adapter block pointer.
2509 *
2510 * Returns:
2511 *      0 = success.
2512 *      1 = error.
2513 *      2 = database was full and device was not configured.
2514 */
2515static int
2516qla2x00_configure_loop(scsi_qla_host_t *vha)
2517{
2518	int  rval;
2519	unsigned long flags, save_flags;
2520	struct qla_hw_data *ha = vha->hw;
2521	rval = QLA_SUCCESS;
2522
2523	/* Get Initiator ID */
2524	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2525		rval = qla2x00_configure_hba(vha);
2526		if (rval != QLA_SUCCESS) {
2527			DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
2528			    vha->host_no));
2529			return (rval);
2530		}
2531	}
2532
2533	save_flags = flags = vha->dpc_flags;
2534	DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
2535	    vha->host_no, flags));
2536
2537	/*
2538	 * If we have both an RSCN and PORT UPDATE pending then handle them
2539	 * both at the same time.
2540	 */
2541	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2542	clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2543
2544	qla2x00_get_data_rate(vha);
2545
2546	/* Decide whether a local loop scan, a fabric (RSCN) scan, or both are needed. */
2547	if (ha->current_topology == ISP_CFG_FL &&
2548	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2549
2550		vha->flags.rscn_queue_overflow = 1;
2551		set_bit(RSCN_UPDATE, &flags);
2552
2553	} else if (ha->current_topology == ISP_CFG_F &&
2554	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2555
2556		vha->flags.rscn_queue_overflow = 1;
2557		set_bit(RSCN_UPDATE, &flags);
2558		clear_bit(LOCAL_LOOP_UPDATE, &flags);
2559
2560	} else if (ha->current_topology == ISP_CFG_N) {
2561		clear_bit(RSCN_UPDATE, &flags);
2562
2563	} else if (!vha->flags.online ||
2564	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2565
2566		vha->flags.rscn_queue_overflow = 1;
2567		set_bit(RSCN_UPDATE, &flags);
2568		set_bit(LOCAL_LOOP_UPDATE, &flags);
2569	}
2570
2571	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2572		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2573			rval = QLA_FUNCTION_FAILED;
2574		else
2575			rval = qla2x00_configure_local_loop(vha);
2576	}
2577
2578	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2579		if (LOOP_TRANSITION(vha))
2580			rval = QLA_FUNCTION_FAILED;
2581		else
2582			rval = qla2x00_configure_fabric(vha);
2583	}
2584
2585	if (rval == QLA_SUCCESS) {
2586		if (atomic_read(&vha->loop_down_timer) ||
2587		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2588			rval = QLA_FUNCTION_FAILED;
2589		} else {
2590			atomic_set(&vha->loop_state, LOOP_READY);
2591
2592			DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
2593		}
2594	}
2595
2596	if (rval) {
2597		DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2598		    __func__, vha->host_no));
2599	} else {
2600		DEBUG3(printk("%s: exiting normally\n", __func__));
2601	}
2602
2603	/* Restore state if a resync event occurred during processing */
2604	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2605		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2606			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2607		if (test_bit(RSCN_UPDATE, &save_flags)) {
2608			set_bit(RSCN_UPDATE, &vha->dpc_flags);
2609			if (!IS_ALOGIO_CAPABLE(ha))
2610				vha->flags.rscn_queue_overflow = 1;
2611		}
2612	}
2613
2614	return (rval);
2615}
2616
2617
2618
2619/*
2620 * qla2x00_configure_local_loop
2621 *	Updates Fibre Channel Device Database with local loop devices.
2622 *
2623 * Input:
2624 *	ha = adapter block pointer.
2625 *
2626 * Returns:
2627 *	0 = success.
2628 */
2629static int
2630qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2631{
2632	int		rval, rval2;
2633	int		found_devs;
2634	int		found;
2635	fc_port_t	*fcport, *new_fcport;
2636
2637	uint16_t	index;
2638	uint16_t	entries;
2639	char		*id_iter;
2640	uint16_t	loop_id;
2641	uint8_t		domain, area, al_pa;
2642	struct qla_hw_data *ha = vha->hw;
2643
2644	found_devs = 0;
2645	new_fcport = NULL;
2646	entries = MAX_FIBRE_DEVICES;
2647
2648	DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2649	DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2650
2651	/* Get list of logged in devices. */
2652	memset(ha->gid_list, 0, GID_LIST_SIZE);
2653	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2654	    &entries);
2655	if (rval != QLA_SUCCESS)
2656		goto cleanup_allocation;
2657
2658	DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2659	    vha->host_no, entries));
2660	DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2661	    entries * sizeof(struct gid_list_info)));
2662
2663	/* Allocate temporary fcport for any new fcports discovered. */
2664	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2665	if (new_fcport == NULL) {
2666		rval = QLA_MEMORY_ALLOC_FAILED;
2667		goto cleanup_allocation;
2668	}
2669	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2670
2671	/*
2672	 * Mark local devices that were present with FCS_DEVICE_LOST for now.
2673	 */
2674	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2675		if (atomic_read(&fcport->state) == FCS_ONLINE &&
2676		    fcport->port_type != FCT_BROADCAST &&
2677		    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2678
2679			DEBUG(printk("scsi(%ld): Marking port lost, "
2680			    "loop_id=0x%04x\n",
2681			    vha->host_no, fcport->loop_id));
2682
2683			atomic_set(&fcport->state, FCS_DEVICE_LOST);
2684		}
2685	}
2686
2687	/* Add devices to port list. */
2688	id_iter = (char *)ha->gid_list;
2689	for (index = 0; index < entries; index++) {
2690		domain = ((struct gid_list_info *)id_iter)->domain;
2691		area = ((struct gid_list_info *)id_iter)->area;
2692		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
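		/*
		 * ISP2100/ISP2200 firmware reports an 8-bit loop ID; later
		 * ISPs report a little-endian 16-bit value.
		 */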
2693		if (IS_QLA2100(ha) || IS_QLA2200(ha))
2694			loop_id = (uint16_t)
2695			    ((struct gid_list_info *)id_iter)->loop_id_2100;
2696		else
2697			loop_id = le16_to_cpu(
2698			    ((struct gid_list_info *)id_iter)->loop_id);
2699		id_iter += ha->gid_list_info_size;
2700
2701		/* Bypass reserved domain fields. */
2702		if ((domain & 0xf0) == 0xf0)
2703			continue;
2704
2705		/* Bypass if not same domain and area of adapter. */
2706		if (area && domain &&
2707		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2708			continue;
2709
2710		/* Bypass invalid local loop ID. */
2711		if (loop_id > LAST_LOCAL_LOOP_ID)
2712			continue;
2713
2714		/* Fill in member data. */
2715		new_fcport->d_id.b.domain = domain;
2716		new_fcport->d_id.b.area = area;
2717		new_fcport->d_id.b.al_pa = al_pa;
2718		new_fcport->loop_id = loop_id;
2719		new_fcport->vp_idx = vha->vp_idx;
2720		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2721		if (rval2 != QLA_SUCCESS) {
2722			DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2723			    "information -- get_port_database=%x, "
2724			    "loop_id=0x%04x\n",
2725			    vha->host_no, rval2, new_fcport->loop_id));
2726			DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2727			    vha->host_no));
2728			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2729			continue;
2730		}
2731
2732		/* Check for matching device in port list. */
2733		found = 0;
2734		fcport = NULL;
2735		list_for_each_entry(fcport, &vha->vp_fcports, list) {
2736			if (memcmp(new_fcport->port_name, fcport->port_name,
2737			    WWN_SIZE))
2738				continue;
2739
2740			fcport->flags &= ~FCF_FABRIC_DEVICE;
2741			fcport->loop_id = new_fcport->loop_id;
2742			fcport->port_type = new_fcport->port_type;
2743			fcport->d_id.b24 = new_fcport->d_id.b24;
2744			memcpy(fcport->node_name, new_fcport->node_name,
2745			    WWN_SIZE);
2746
2747			found++;
2748			break;
2749		}
2750
2751		if (!found) {
2752			/* New device, add to fcports list. */
2753			if (vha->vp_idx) {
2754				new_fcport->vha = vha;
2755				new_fcport->vp_idx = vha->vp_idx;
2756			}
2757			list_add_tail(&new_fcport->list, &vha->vp_fcports);
2758
2759			/* Allocate a new replacement fcport. */
2760			fcport = new_fcport;
2761			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2762			if (new_fcport == NULL) {
2763				rval = QLA_MEMORY_ALLOC_FAILED;
2764				goto cleanup_allocation;
2765			}
2766			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
2767		}
2768
2769		/* Base iIDMA settings on HBA port speed. */
2770		fcport->fp_speed = ha->link_data_rate;
2771
2772		qla2x00_update_fcport(vha, fcport);
2773
2774		found_devs++;
2775	}
2776
2777cleanup_allocation:
2778	kfree(new_fcport);
2779
2780	if (rval != QLA_SUCCESS) {
2781		DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2782		    "rval=%x\n", vha->host_no, rval));
2783	}
2784
2785	return (rval);
2786}
2787
2788static void
2789qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2790{
2791#define LS_UNKNOWN      2
2792	static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2793	char *link_speed;
2794	int rval;
2795	uint16_t mb[4];
2796	struct qla_hw_data *ha = vha->hw;
2797
2798	if (!IS_IIDMA_CAPABLE(ha))
2799		return;
2800
2801	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
2802	    fcport->fp_speed > ha->link_data_rate)
2803		return;
2804
2805	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2806	    mb);
2807	if (rval != QLA_SUCCESS) {
2808		DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2809		    "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2810		    vha->host_no, fcport->port_name[0], fcport->port_name[1],
2811		    fcport->port_name[2], fcport->port_name[3],
2812		    fcport->port_name[4], fcport->port_name[5],
2813		    fcport->port_name[6], fcport->port_name[7], rval,
2814		    fcport->fp_speed, mb[0], mb[1]));
2815	} else {
2816		link_speed = link_speeds[LS_UNKNOWN];
2817		if (fcport->fp_speed < 5)
2818			link_speed = link_speeds[fcport->fp_speed];
2819		else if (fcport->fp_speed == 0x13)
2820			link_speed = link_speeds[5];
2821		DEBUG2(qla_printk(KERN_INFO, ha,
2822		    "iIDMA adjusted to %s Gb/s on "
2823		    "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2824		    link_speed, fcport->port_name[0],
2825		    fcport->port_name[1], fcport->port_name[2],
2826		    fcport->port_name[3], fcport->port_name[4],
2827		    fcport->port_name[5], fcport->port_name[6],
2828		    fcport->port_name[7]));
2829	}
2830}
2831
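/*
 * Register the fcport with the FC transport as a remote port and publish its
 * initiator/target role via fc_remote_port_rolechg().  Any existing (or
 * deferred) rport for this fcport is removed first.
 */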
2832static void
2833qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2834{
2835	struct fc_rport_identifiers rport_ids;
2836	struct fc_rport *rport;
2837	struct qla_hw_data *ha = vha->hw;
2838
2839	qla2x00_rport_del(fcport);
2840
2841	rport_ids.node_name = wwn_to_u64(fcport->node_name);
2842	rport_ids.port_name = wwn_to_u64(fcport->port_name);
2843	rport_ids.port_id = fcport->d_id.b.domain << 16 |
2844	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2845	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2846	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2847	if (!rport) {
2848		qla_printk(KERN_WARNING, ha,
2849		    "Unable to allocate fc remote port!\n");
2850		return;
2851	}
2852	spin_lock_irq(fcport->vha->host->host_lock);
2853	*((fc_port_t **)rport->dd_data) = fcport;
2854	spin_unlock_irq(fcport->vha->host->host_lock);
2855
2856	rport->supported_classes = fcport->supported_classes;
2857
2858	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2859	if (fcport->port_type == FCT_INITIATOR)
2860		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
2861	if (fcport->port_type == FCT_TARGET)
2862		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
2863	fc_remote_port_rolechg(rport, rport_ids.roles);
2864}
2865
2866/*
2867 * qla2x00_update_fcport
2868 *	Updates device on list.
2869 *
2870 * Input:
2871 *	ha = adapter block pointer.
2872 *	fcport = port structure pointer.
2873 *
2874 * Return:
2875 *	None.
2877 *
2878 * Context:
2879 *	Kernel context.
2880 */
2881void
2882qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2883{
2884	struct qla_hw_data *ha = vha->hw;
2885
2886	fcport->vha = vha;
2887	fcport->login_retry = 0;
2888	fcport->port_login_retry_count = ha->port_down_retry_count *
2889	    PORT_RETRY_TIME;
2890	atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2891	    PORT_RETRY_TIME);
2892	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
2893
2894	qla2x00_iidma_fcport(vha, fcport);
2895
2896	atomic_set(&fcport->state, FCS_ONLINE);
2897
2898	qla2x00_reg_remote_port(vha, fcport);
2899}
2900
2901/*
2902 * qla2x00_configure_fabric
2903 *      Setup SNS devices with loop ID's.
2904 *
2905 * Input:
2906 *      ha = adapter block pointer.
2907 *
2908 * Returns:
2909 *      0 = success.
2910 *      BIT_0 = error
2911 */
2912static int
2913qla2x00_configure_fabric(scsi_qla_host_t *vha)
2914{
2915	int	rval, rval2;
2916	fc_port_t	*fcport, *fcptemp;
2917	uint16_t	next_loopid;
2918	uint16_t	mb[MAILBOX_REGISTER_COUNT];
2919	uint16_t	loop_id;
2920	LIST_HEAD(new_fcports);
2921	struct qla_hw_data *ha = vha->hw;
2922	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2923
2924	/* If FL port exists, then SNS is present */
2925	if (IS_FWI2_CAPABLE(ha))
2926		loop_id = NPH_F_PORT;
2927	else
2928		loop_id = SNS_FL_PORT;
2929	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2930	if (rval != QLA_SUCCESS) {
2931		DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2932		    "Port\n", vha->host_no));
2933
2934		vha->device_flags &= ~SWITCH_FOUND;
2935		return (QLA_SUCCESS);
2936	}
2937	vha->device_flags |= SWITCH_FOUND;
2938
2939	/* Mark devices that need re-synchronization. */
2940	rval2 = qla2x00_device_resync(vha);
2941	if (rval2 == QLA_RSCNS_HANDLED) {
2942		/* No point doing the scan, just continue. */
2943		return (QLA_SUCCESS);
2944	}
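	/*
	 * Single-pass do { ... } while (0) block: any 'break' below abandons
	 * the remainder of fabric configuration for this invocation.
	 */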
2945	do {
2946		/* FDMI support. */
2947		if (ql2xfdmienable &&
2948		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2949			qla2x00_fdmi_register(vha);
2950
2951		/* Ensure we are logged into the SNS. */
2952		if (IS_FWI2_CAPABLE(ha))
2953			loop_id = NPH_SNS;
2954		else
2955			loop_id = SIMPLE_NAME_SERVER;
2956		ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2957		    0xfc, mb, BIT_1 | BIT_0);
2958		if (mb[0] != MBS_COMMAND_COMPLETE) {
2959			DEBUG2(qla_printk(KERN_INFO, ha,
2960			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
2961			    "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
2962			    mb[0], mb[1], mb[2], mb[6], mb[7]));
2963			return (QLA_SUCCESS);
2964		}
2965
2966		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2967			if (qla2x00_rft_id(vha)) {
2968				/* EMPTY */
2969				DEBUG2(printk("scsi(%ld): Register FC-4 "
2970				    "TYPE failed.\n", vha->host_no));
2971			}
2972			if (qla2x00_rff_id(vha)) {
2973				/* EMPTY */
2974				DEBUG2(printk("scsi(%ld): Register FC-4 "
2975				    "Features failed.\n", vha->host_no));
2976			}
2977			if (qla2x00_rnn_id(vha)) {
2978				/* EMPTY */
2979				DEBUG2(printk("scsi(%ld): Register Node Name "
2980				    "failed.\n", vha->host_no));
2981			} else if (qla2x00_rsnn_nn(vha)) {
2982				/* EMPTY */
2983				DEBUG2(printk("scsi(%ld): Register Symbolic "
2984				    "Node Name failed.\n", vha->host_no));
2985			}
2986		}
2987
2988		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2989		if (rval != QLA_SUCCESS)
2990			break;
2991
2992		/*
2993		 * Logout all previous fabric devices marked lost, except
2994		 * FCP2 devices.
2995		 */
2996		list_for_each_entry(fcport, &vha->vp_fcports, list) {
2997			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2998				break;
2999
3000			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3001				continue;
3002
3003			if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3004				qla2x00_mark_device_lost(vha, fcport,
3005				    ql2xplogiabsentdevice, 0);
3006				if (fcport->loop_id != FC_NO_LOOP_ID &&
3007				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3008				    fcport->port_type != FCT_INITIATOR &&
3009				    fcport->port_type != FCT_BROADCAST) {
3010					ha->isp_ops->fabric_logout(vha,
3011					    fcport->loop_id,
3012					    fcport->d_id.b.domain,
3013					    fcport->d_id.b.area,
3014					    fcport->d_id.b.al_pa);
3015					fcport->loop_id = FC_NO_LOOP_ID;
3016				}
3017			}
3018		}
3019
3020		/* Starting free loop ID. */
3021		next_loopid = ha->min_external_loopid;
3022
3023		/*
3024		 * Scan through our port list and login entries that need to be
3025		 * logged in.
3026		 */
3027		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3028			if (atomic_read(&vha->loop_down_timer) ||
3029			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3030				break;
3031
3032			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3033			    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3034				continue;
3035
3036			if (fcport->loop_id == FC_NO_LOOP_ID) {
3037				fcport->loop_id = next_loopid;
3038				rval = qla2x00_find_new_loop_id(
3039				    base_vha, fcport);
3040				if (rval != QLA_SUCCESS) {
3041					/* Ran out of IDs to use */
3042					break;
3043				}
3044			}
3045			/* Login and update database */
3046			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3047		}
3048
3049		/* Exit if out of loop IDs. */
3050		if (rval != QLA_SUCCESS) {
3051			break;
3052		}
3053
3054		/*
3055		 * Login and add the new devices to our port list.
3056		 */
3057		list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3058			if (atomic_read(&vha->loop_down_timer) ||
3059			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3060				break;
3061
3062			/* Find a new loop ID to use. */
3063			fcport->loop_id = next_loopid;
3064			rval = qla2x00_find_new_loop_id(base_vha, fcport);
3065			if (rval != QLA_SUCCESS) {
3066				/* Ran out of IDs to use */
3067				break;
3068			}
3069
3070			/* Login and update database */
3071			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3072
3073			if (vha->vp_idx) {
3074				fcport->vha = vha;
3075				fcport->vp_idx = vha->vp_idx;
3076			}
3077			list_move_tail(&fcport->list, &vha->vp_fcports);
3078		}
3079	} while (0);
3080
3081	/* Free all new device structures not processed. */
3082	list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3083		list_del(&fcport->list);
3084		kfree(fcport);
3085	}
3086
3087	if (rval) {
3088		DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
3089		    "rval=%d\n", vha->host_no, rval));
3090	}
3091
3092	return (rval);
3093}
3094
3095/*
3096 * qla2x00_find_all_fabric_devs
3097 *
3098 * Input:
3099 *	ha = adapter block pointer.
3100 *	new_fcports = list head to receive newly discovered fabric devices.
3101 *
3102 * Returns:
3103 *	0 = success.
3104 *
3105 * Context:
3106 *	Kernel context.
3107 */
3108static int
3109qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3110	struct list_head *new_fcports)
3111{
3112	int		rval;
3113	uint16_t	loop_id;
3114	fc_port_t	*fcport, *new_fcport, *fcptemp;
3115	int		found;
3116
3117	sw_info_t	*swl;
3118	int		swl_idx;
3119	int		first_dev, last_dev;
3120	port_id_t	wrap = {}, nxt_d_id;
3121	struct qla_hw_data *ha = vha->hw;
3122	struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3123	struct scsi_qla_host *tvp;
3124
3125	rval = QLA_SUCCESS;
3126
3127	/* Try GID_PT to get device list, else GAN. */
3128	swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3129	if (!swl) {
3130		/*EMPTY*/
3131		DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
3132		    "on GA_NXT\n", vha->host_no));
3133	} else {
3134		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3135			kfree(swl);
3136			swl = NULL;
3137		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3138			kfree(swl);
3139			swl = NULL;
3140		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3141			kfree(swl);
3142			swl = NULL;
3143		} else if (ql2xiidmaenable &&
3144		    qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
3145			qla2x00_gpsc(vha, swl);
3146		}
3147
3148		/* If other queries succeeded probe for FC-4 type */
3149		if (swl)
3150			qla2x00_gff_id(vha, swl);
3151	}
3152	swl_idx = 0;
3153
3154	/* Allocate temporary fcport for any new fcports discovered. */
3155	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3156	if (new_fcport == NULL) {
3157		kfree(swl);
3158		return (QLA_MEMORY_ALLOC_FAILED);
3159	}
3160	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3161	/* Set start port ID scan at adapter ID. */
3162	first_dev = 1;
3163	last_dev = 0;
3164
3165	/* Starting free loop ID. */
3166	loop_id = ha->min_external_loopid;
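	/*
	 * Walk the fabric: each pass consumes the next entry from the GID_PT
	 * results (swl) or issues a GA_NXT query, and the scan ends once the
	 * returned port ID wraps back to the first one seen.
	 */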
3167	for (; loop_id <= ha->max_loop_id; loop_id++) {
3168		if (qla2x00_is_reserved_id(vha, loop_id))
3169			continue;
3170
3171		if (ha->current_topology == ISP_CFG_FL &&
3172		    (atomic_read(&vha->loop_down_timer) ||
3173		     LOOP_TRANSITION(vha))) {
3174			atomic_set(&vha->loop_down_timer, 0);
3175			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3176			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3177			break;
3178		}
3179
3180		if (swl != NULL) {
3181			if (last_dev) {
3182				wrap.b24 = new_fcport->d_id.b24;
3183			} else {
3184				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
3185				memcpy(new_fcport->node_name,
3186				    swl[swl_idx].node_name, WWN_SIZE);
3187				memcpy(new_fcport->port_name,
3188				    swl[swl_idx].port_name, WWN_SIZE);
3189				memcpy(new_fcport->fabric_port_name,
3190				    swl[swl_idx].fabric_port_name, WWN_SIZE);
3191				new_fcport->fp_speed = swl[swl_idx].fp_speed;
3192				new_fcport->fc4_type = swl[swl_idx].fc4_type;
3193
3194				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
3195					last_dev = 1;
3196				}
3197				swl_idx++;
3198			}
3199		} else {
3200			/* Send GA_NXT to the switch */
3201			rval = qla2x00_ga_nxt(vha, new_fcport);
3202			if (rval != QLA_SUCCESS) {
3203				qla_printk(KERN_WARNING, ha,
3204				    "SNS scan failed -- assuming zero-entry "
3205				    "result...\n");
3206				list_for_each_entry_safe(fcport, fcptemp,
3207				    new_fcports, list) {
3208					list_del(&fcport->list);
3209					kfree(fcport);
3210				}
3211				rval = QLA_SUCCESS;
3212				break;
3213			}
3214		}
3215
3216		/* If wrap on switch device list, exit. */
3217		if (first_dev) {
3218			wrap.b24 = new_fcport->d_id.b24;
3219			first_dev = 0;
3220		} else if (new_fcport->d_id.b24 == wrap.b24) {
3221			DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
3222			    vha->host_no, new_fcport->d_id.b.domain,
3223			    new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
3224			break;
3225		}
3226
3227		/* Bypass if same physical adapter. */
3228		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
3229			continue;
3230
3231		/* Bypass virtual ports of the same host. */
3232		found = 0;
3233		if (ha->num_vhosts) {
3234			unsigned long flags;
3235
3236			spin_lock_irqsave(&ha->vport_slock, flags);
3237			list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3238				if (new_fcport->d_id.b24 == vp->d_id.b24) {
3239					found = 1;
3240					break;
3241				}
3242			}
3243			spin_unlock_irqrestore(&ha->vport_slock, flags);
3244
3245			if (found)
3246				continue;
3247		}
3248
3249		/* Bypass if same domain and area of adapter. */
3250		if (((new_fcport->d_id.b24 & 0xffff00) ==
3251		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
3252			ISP_CFG_FL)
3253			    continue;
3254
3255		/* Bypass reserved domain fields. */
3256		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
3257			continue;
3258
3259		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
3260		if (ql2xgffidenable &&
3261		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
3262		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
3263			continue;
3264
3265		/* Locate matching device in database. */
3266		found = 0;
3267		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3268			if (memcmp(new_fcport->port_name, fcport->port_name,
3269			    WWN_SIZE))
3270				continue;
3271
3272			found++;
3273
3274			/* Update port state. */
3275			memcpy(fcport->fabric_port_name,
3276			    new_fcport->fabric_port_name, WWN_SIZE);
3277			fcport->fp_speed = new_fcport->fp_speed;
3278
3279			/*
3280			 * If address the same and state FCS_ONLINE, nothing
3281			 * changed.
3282			 */
3283			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3284			    atomic_read(&fcport->state) == FCS_ONLINE) {
3285				break;
3286			}
3287
3288			/*
3289			 * If device was not a fabric device before.
3290			 */
3291			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3292				fcport->d_id.b24 = new_fcport->d_id.b24;
3293				fcport->loop_id = FC_NO_LOOP_ID;
3294				fcport->flags |= (FCF_FABRIC_DEVICE |
3295				    FCF_LOGIN_NEEDED);
3296				break;
3297			}
3298
3299			/*
3300			 * Port ID changed or device was marked to be updated;
3301			 * Log it out if still logged in and mark it for
3302			 * relogin later.
3303			 */
3304			fcport->d_id.b24 = new_fcport->d_id.b24;
3305			fcport->flags |= FCF_LOGIN_NEEDED;
3306			if (fcport->loop_id != FC_NO_LOOP_ID &&
3307			    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3308			    fcport->port_type != FCT_INITIATOR &&
3309			    fcport->port_type != FCT_BROADCAST) {
3310				ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3311				    fcport->d_id.b.domain, fcport->d_id.b.area,
3312				    fcport->d_id.b.al_pa);
3313				fcport->loop_id = FC_NO_LOOP_ID;
3314			}
3315
3316			break;
3317		}
3318
3319		if (found)
3320			continue;
3321		/* If device was not in our fcports list, then add it. */
3322		list_add_tail(&new_fcport->list, new_fcports);
3323
3324		/* Allocate a new replacement fcport. */
3325		nxt_d_id.b24 = new_fcport->d_id.b24;
3326		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3327		if (new_fcport == NULL) {
3328			kfree(swl);
3329			return (QLA_MEMORY_ALLOC_FAILED);
3330		}
3331		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3332		new_fcport->d_id.b24 = nxt_d_id.b24;
3333	}
3334
3335	kfree(swl);
3336	kfree(new_fcport);
3337
3338	return (rval);
3339}
3340
3341/*
3342 * qla2x00_find_new_loop_id
3343 *	Scan through our port list and find a new usable loop ID.
3344 *
3345 * Input:
3346 *	ha:	adapter state pointer.
3347 *	dev:	port structure pointer.
3348 *
3349 * Returns:
3350 *	qla2x00 local function return status code.
3351 *
3352 * Context:
3353 *	Kernel context.
3354 */
3355static int
3356qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3357{
3358	int	rval;
3359	int	found;
3360	fc_port_t *fcport;
3361	uint16_t first_loop_id;
3362	struct qla_hw_data *ha = vha->hw;
3363	struct scsi_qla_host *vp;
3364	struct scsi_qla_host *tvp;
3365	unsigned long flags = 0;
3366
3367	rval = QLA_SUCCESS;
3368
3369	/* Save starting loop ID. */
3370	first_loop_id = dev->loop_id;
3371
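	/*
	 * Linear probe for a free loop ID: skip the adapter's own ID and any
	 * reserved IDs, wrap at max_loop_id, and fail if the search comes all
	 * the way back to the starting ID.
	 */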
3372	for (;;) {
3373		/* Skip loop ID if already used by adapter. */
3374		if (dev->loop_id == vha->loop_id)
3375			dev->loop_id++;
3376
3377		/* Skip reserved loop IDs. */
3378		while (qla2x00_is_reserved_id(vha, dev->loop_id))
3379			dev->loop_id++;
3380
3381		/* Reset loop ID if passed the end. */
3382		if (dev->loop_id > ha->max_loop_id) {
3383			/* first loop ID. */
3384			dev->loop_id = ha->min_external_loopid;
3385		}
3386
3387		/* Check for loop ID being already in use. */
3388		found = 0;
3389		fcport = NULL;
3390
3391		spin_lock_irqsave(&ha->vport_slock, flags);
3392		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3393			list_for_each_entry(fcport, &vp->vp_fcports, list) {
3394				if (fcport->loop_id == dev->loop_id &&
3395								fcport != dev) {
3396					/* ID possibly in use */
3397					found++;
3398					break;
3399				}
3400			}
3401			if (found)
3402				break;
3403		}
3404		spin_unlock_irqrestore(&ha->vport_slock, flags);
3405
3406		/* If not in use then it is free to use. */
3407		if (!found) {
3408			break;
3409		}
3410
3411		/* ID in use. Try next value. */
3412		dev->loop_id++;
3413
3414		/* If wrap around. No free ID to use. */
3415		if (dev->loop_id == first_loop_id) {
3416			dev->loop_id = FC_NO_LOOP_ID;
3417			rval = QLA_FUNCTION_FAILED;
3418			break;
3419		}
3420	}
3421
3422	return (rval);
3423}
3424
3425/*
3426 * qla2x00_device_resync
3427 *	Marks devices in the database that need resynchronization.
3428 *
3429 * Input:
3430 *	ha = adapter block pointer.
3431 *
3432 * Context:
3433 *	Kernel context.
3434 */
3435static int
3436qla2x00_device_resync(scsi_qla_host_t *vha)
3437{
3438	int	rval;
3439	uint32_t mask;
3440	fc_port_t *fcport;
3441	uint32_t rscn_entry;
3442	uint8_t rscn_out_iter;
3443	uint8_t format;
3444	port_id_t d_id = {};
3445
3446	rval = QLA_RSCNS_HANDLED;
3447
3448	while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
3449	    vha->flags.rscn_queue_overflow) {
3450
3451		rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
3452		format = MSB(MSW(rscn_entry));
3453		d_id.b.domain = LSB(MSW(rscn_entry));
3454		d_id.b.area = MSB(LSW(rscn_entry));
3455		d_id.b.al_pa = LSB(LSW(rscn_entry));
3456
3457		DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
3458		    "[%02x/%02x%02x%02x].\n",
3459		    vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
3460		    d_id.b.area, d_id.b.al_pa));
3461
3462		vha->rscn_out_ptr++;
3463		if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
3464			vha->rscn_out_ptr = 0;
3465
3466		/* Skip duplicate entries. */
3467		for (rscn_out_iter = vha->rscn_out_ptr;
3468		    !vha->flags.rscn_queue_overflow &&
3469		    rscn_out_iter != vha->rscn_in_ptr;
3470		    rscn_out_iter = (rscn_out_iter ==
3471			(MAX_RSCN_COUNT - 1)) ? 0 : rscn_out_iter + 1) {
3472
3473			if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3474				break;
3475
3476			DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
3477			    "entry found at [%d].\n", vha->host_no,
3478			    rscn_out_iter));
3479
3480			vha->rscn_out_ptr = rscn_out_iter;
3481		}
3482
3483		/* Queue overflow, set switch default case. */
3484		if (vha->flags.rscn_queue_overflow) {
3485			DEBUG(printk("scsi(%ld): device_resync: rscn "
3486			    "overflow.\n", vha->host_no));
3487
3488			format = 3;
3489			vha->flags.rscn_queue_overflow = 0;
3490		}
3491
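		/*
		 * The RSCN address format determines how much of the affected
		 * port ID is significant: 0 matches a single port, 1 an area,
		 * 2 a whole domain, and anything else (including a queue
		 * overflow) matches every fabric device.
		 */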
3492		switch (format) {
3493		case 0:
3494			mask = 0xffffff;
3495			break;
3496		case 1:
3497			mask = 0xffff00;
3498			break;
3499		case 2:
3500			mask = 0xff0000;
3501			break;
3502		default:
3503			mask = 0x0;
3504			d_id.b24 = 0;
3505			vha->rscn_out_ptr = vha->rscn_in_ptr;
3506			break;
3507		}
3508
3509		rval = QLA_SUCCESS;
3510
3511		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3512			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3513			    (fcport->d_id.b24 & mask) != d_id.b24 ||
3514			    fcport->port_type == FCT_BROADCAST)
3515				continue;
3516
3517			if (atomic_read(&fcport->state) == FCS_ONLINE) {
3518				if (format != 3 ||
3519				    fcport->port_type != FCT_INITIATOR) {
3520					qla2x00_mark_device_lost(vha, fcport,
3521					    0, 0);
3522				}
3523			}
3524		}
3525	}
3526	return (rval);
3527}
3528
3529/*
3530 * qla2x00_fabric_dev_login
3531 *	Login fabric target device and update FC port database.
3532 *
3533 * Input:
3534 *	ha:		adapter state pointer.
3535 *	fcport:		port structure list pointer.
3536 *	next_loopid:	contains value of a new loop ID that can be used
3537 *			by the next login attempt.
3538 *
3539 * Returns:
3540 *	qla2x00 local function return status code.
3541 *
3542 * Context:
3543 *	Kernel context.
3544 */
3545static int
3546qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3547    uint16_t *next_loopid)
3548{
3549	int	rval;
3550	int	retry;
3551	uint8_t opts;
3552	struct qla_hw_data *ha = vha->hw;
3553
3554	rval = QLA_SUCCESS;
3555	retry = 0;
3556
3557	if (IS_ALOGIO_CAPABLE(ha)) {
3558		if (fcport->flags & FCF_ASYNC_SENT)
3559			return rval;
3560		fcport->flags |= FCF_ASYNC_SENT;
3561		rval = qla2x00_post_async_login_work(vha, fcport, NULL);
3562		if (!rval)
3563			return rval;
3564	}
3565
3566	fcport->flags &= ~FCF_ASYNC_SENT;
3567	rval = qla2x00_fabric_login(vha, fcport, next_loopid);
3568	if (rval == QLA_SUCCESS) {
3569		/* Send an ADISC to FCP2 devices.*/
3570		opts = 0;
3571		if (fcport->flags & FCF_FCP2_DEVICE)
3572			opts |= BIT_1;
3573		rval = qla2x00_get_port_database(vha, fcport, opts);
3574		if (rval != QLA_SUCCESS) {
3575			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3576			    fcport->d_id.b.domain, fcport->d_id.b.area,
3577			    fcport->d_id.b.al_pa);
3578			qla2x00_mark_device_lost(vha, fcport, 1, 0);
3579		} else {
3580			qla2x00_update_fcport(vha, fcport);
3581		}
3582	}
3583
3584	return (rval);
3585}
3586
3587/*
3588 * qla2x00_fabric_login
3589 *	Issue fabric login command.
3590 *
3591 * Input:
3592 *	ha = adapter block pointer.
3593 *	device = pointer to FC device type structure.
3594 *
3595 * Returns:
3596 *      0 - Login successfully
3597 *      1 - Login failed
3598 *      2 - Initiator device
3599 *      3 - Fatal error
3600 */
3601int
3602qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3603    uint16_t *next_loopid)
3604{
3605	int	rval;
3606	int	retry;
3607	uint16_t tmp_loopid;
3608	uint16_t mb[MAILBOX_REGISTER_COUNT];
3609	struct qla_hw_data *ha = vha->hw;
3610
3611	retry = 0;
3612	tmp_loopid = 0;
3613
3614	for (;;) {
3615		DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3616		    "for port %02x%02x%02x.\n",
3617		    vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3618		    fcport->d_id.b.area, fcport->d_id.b.al_pa));
3619
3620		/* Login fcport on switch. */
3621		ha->isp_ops->fabric_login(vha, fcport->loop_id,
3622		    fcport->d_id.b.domain, fcport->d_id.b.area,
3623		    fcport->d_id.b.al_pa, mb, BIT_0);
3624		if (mb[0] == MBS_PORT_ID_USED) {
3625			/*
3626			 * Device has another loop ID.  The firmware team
3627			 * recommends the driver perform an implicit login with
3628			 * the specified ID again. The ID we just used is saved
3629			 * here so we return with an ID that can be tried by
3630			 * the next login.
3631			 */
3632			retry++;
3633			tmp_loopid = fcport->loop_id;
3634			fcport->loop_id = mb[1];
3635
3636			DEBUG(printk("Fabric Login: port in use - next "
3637			    "loop id=0x%04x, port Id=%02x%02x%02x.\n",
3638			    fcport->loop_id, fcport->d_id.b.domain,
3639			    fcport->d_id.b.area, fcport->d_id.b.al_pa));
3640
3641		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
3642			/*
3643			 * Login succeeded.
3644			 */
3645			if (retry) {
3646				/* A retry occurred before. */
3647				*next_loopid = tmp_loopid;
3648			} else {
3649				/*
3650				 * No retry occurred before. Just increment the
3651				 * ID value for next login.
3652				 */
3653				*next_loopid = (fcport->loop_id + 1);
3654			}
3655
3656			if (mb[1] & BIT_0) {
3657				fcport->port_type = FCT_INITIATOR;
3658			} else {
3659				fcport->port_type = FCT_TARGET;
3660				if (mb[1] & BIT_1) {
3661					fcport->flags |= FCF_FCP2_DEVICE;
3662				}
3663			}
3664
3665			if (mb[10] & BIT_0)
3666				fcport->supported_classes |= FC_COS_CLASS2;
3667			if (mb[10] & BIT_1)
3668				fcport->supported_classes |= FC_COS_CLASS3;
3669
3670			rval = QLA_SUCCESS;
3671			break;
3672		} else if (mb[0] == MBS_LOOP_ID_USED) {
3673			/*
3674			 * Loop ID already used, try next loop ID.
3675			 */
3676			fcport->loop_id++;
3677			rval = qla2x00_find_new_loop_id(vha, fcport);
3678			if (rval != QLA_SUCCESS) {
3679				/* Ran out of loop IDs to use */
3680				break;
3681			}
3682		} else if (mb[0] == MBS_COMMAND_ERROR) {
3683			/*
3684			 * Firmware possibly timed out during login. If no
3685			 * retries remain, the device is declared dead.
3687			 */
3688			*next_loopid = fcport->loop_id;
3689			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3690			    fcport->d_id.b.domain, fcport->d_id.b.area,
3691			    fcport->d_id.b.al_pa);
3692			qla2x00_mark_device_lost(vha, fcport, 1, 0);
3693
3694			rval = 1;
3695			break;
3696		} else {
3697			/*
3698			 * unrecoverable / not handled error
3699			 */
3700			DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3701			    "loop_id=%x jiffies=%lx.\n",
3702			    __func__, vha->host_no, mb[0],
3703			    fcport->d_id.b.domain, fcport->d_id.b.area,
3704			    fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3705
3706			*next_loopid = fcport->loop_id;
3707			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3708			    fcport->d_id.b.domain, fcport->d_id.b.area,
3709			    fcport->d_id.b.al_pa);
3710			fcport->loop_id = FC_NO_LOOP_ID;
3711			fcport->login_retry = 0;
3712
3713			rval = 3;
3714			break;
3715		}
3716	}
3717
3718	return (rval);
3719}
3720
3721/*
3722 * qla2x00_local_device_login
3723 *	Issue local device login command.
3724 *
3725 * Input:
3726 *	ha = adapter block pointer.
3727 *	fcport = port structure pointer of the device to log in to.
3728 *
3729 * Returns (no #define exists for these values):
3730 *      0 - Login successfully
3731 *      1 - Login failed
3732 *      3 - Fatal error
3733 */
3734int
3735qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3736{
3737	int		rval;
3738	uint16_t	mb[MAILBOX_REGISTER_COUNT];
3739
3740	memset(mb, 0, sizeof(mb));
3741	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3742	if (rval == QLA_SUCCESS) {
3743		/* Interrogate mailbox registers for any errors */
3744		if (mb[0] == MBS_COMMAND_ERROR)
3745			rval = 1;
3746		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
3747			/* device not in PCB table */
3748			rval = 3;
3749	}
3750
3751	return (rval);
3752}
3753
3754/*
3755 *  qla2x00_loop_resync
3756 *      Resync with fibre channel devices.
3757 *
3758 * Input:
3759 *      ha = adapter block pointer.
3760 *
3761 * Returns:
3762 *      0 = success
3763 */
3764int
3765qla2x00_loop_resync(scsi_qla_host_t *vha)
3766{
3767	int rval = QLA_SUCCESS;
3768	uint32_t wait_time;
3769	struct req_que *req;
3770	struct rsp_que *rsp;
3771
3772	if (vha->hw->flags.cpu_affinity_enabled)
3773		req = vha->hw->req_q_map[0];
3774	else
3775		req = vha->req;
3776	rsp = req->rsp;
3777
3778	atomic_set(&vha->loop_state, LOOP_UPDATE);
3779	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3780	if (vha->flags.online) {
3781		if (!(rval = qla2x00_fw_ready(vha))) {
3782			/* Wait at most MAX_TARGET RSCNs for a stable link. */
3783			wait_time = 256;
3784			do {
3785				atomic_set(&vha->loop_state, LOOP_UPDATE);
3786
3787				/* Issue a marker after FW becomes ready. */
3788				qla2x00_marker(vha, req, rsp, 0, 0,
3789					MK_SYNC_ALL);
3790				vha->marker_needed = 0;
3791
3792				/* Remap devices on Loop. */
3793				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3794
3795				qla2x00_configure_loop(vha);
3796				wait_time--;
3797			} while (!atomic_read(&vha->loop_down_timer) &&
3798				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3799				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3800				&vha->dpc_flags)));
3801		}
3802	}
3803
3804	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3805		return (QLA_FUNCTION_FAILED);
3806
3807	if (rval)
3808		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3809
3810	return (rval);
3811}
3812
3813void
3814qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3815{
3816	fc_port_t *fcport;
3817	struct scsi_qla_host *vha;
3818	struct qla_hw_data *ha = base_vha->hw;
3819	unsigned long flags;
3820
3821	spin_lock_irqsave(&ha->vport_slock, flags);
3822	/* Go with deferred removal of rport references. */
3823	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
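		/*
		 * Take a vref_count reference so the vport stays valid while
		 * the vport lock is dropped around qla2x00_rport_del().
		 */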
3824		atomic_inc(&vha->vref_count);
3825		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3826			if (fcport && fcport->drport &&
3827			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
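				/*
				 * Drop the vport lock around the rport removal
				 * (which may block); the vref_count taken above
				 * keeps this vha valid while the lock is gone.
				 */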
3828				spin_unlock_irqrestore(&ha->vport_slock, flags);
3829
3830				qla2x00_rport_del(fcport);
3831
3832				spin_lock_irqsave(&ha->vport_slock, flags);
3833			}
3834		}
3835		atomic_dec(&vha->vref_count);
3836	}
3837	spin_unlock_irqrestore(&ha->vport_slock, flags);
3838}
3839
3840void
3841qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3842{
3843	struct qla_hw_data *ha = vha->hw;
3844	struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3845	unsigned long flags;
3846
3847	vha->flags.online = 0;
3848	ha->flags.chip_reset_done = 0;
3849	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3850	ha->qla_stats.total_isp_aborts++;
3851
3852	qla_printk(KERN_INFO, ha,
3853	    "Performing ISP error recovery - ha= %p.\n", ha);
3854
3855	/* Chip reset does not apply to 82XX */
3856	if (!IS_QLA82XX(ha))
3857		ha->isp_ops->reset_chip(vha);
3858
3859	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3860	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3861		atomic_set(&vha->loop_state, LOOP_DOWN);
3862		qla2x00_mark_all_devices_lost(vha, 0);
3863
3864		spin_lock_irqsave(&ha->vport_slock, flags);
3865		list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
3866			atomic_inc(&vp->vref_count);
3867			spin_unlock_irqrestore(&ha->vport_slock, flags);
3868
3869			qla2x00_mark_all_devices_lost(vp, 0);
3870
3871			spin_lock_irqsave(&ha->vport_slock, flags);
3872			atomic_dec(&vp->vref_count);
3873		}
3874		spin_unlock_irqrestore(&ha->vport_slock, flags);
3875	} else {
3876		if (!atomic_read(&vha->loop_down_timer))
3877			atomic_set(&vha->loop_down_timer,
3878			    LOOP_DOWN_TIME);
3879	}
3880
3881	/* Make sure for ISP 82XX IO DMA is complete */
3882	if (IS_QLA82XX(ha)) {
3883		if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3884			WAIT_HOST) == QLA_SUCCESS) {
3885			DEBUG2(qla_printk(KERN_INFO, ha,
3886			"Done waiting for pending commands\n"));
3887		}
3888	}
3889
3890	/* Requeue all commands in outstanding command list. */
3891	qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3892}
3893
3894/*
3895*  qla2x00_abort_isp
3896*      Resets ISP and aborts all outstanding commands.
3897*
3898* Input:
3899*      ha           = adapter block pointer.
3900*
3901* Returns:
3902*      0 = success
3903*/
3904int
3905qla2x00_abort_isp(scsi_qla_host_t *vha)
3906{
3907	int rval;
3908	uint8_t        status = 0;
3909	struct qla_hw_data *ha = vha->hw;
3910	struct scsi_qla_host *vp;
3911	struct req_que *req = ha->req_q_map[0];
3912	unsigned long flags;
3913
3914	if (vha->flags.online) {
3915		qla2x00_abort_isp_cleanup(vha);
3916
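		/*
		 * If the PCI channel is permanently gone there is nothing to
		 * recover; report success so no further abort retries are
		 * scheduled.
		 */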
3917		if (unlikely(pci_channel_offline(ha->pdev) &&
3918		    ha->flags.pci_channel_io_perm_failure)) {
3919			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3920			status = 0;
3921			return status;
3922		}
3923
3924		ha->isp_ops->get_flash_version(vha, req->ring);
3925
3926		ha->isp_ops->nvram_config(vha);
3927
3928		if (!qla2x00_restart_isp(vha)) {
3929			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3930
3931			if (!atomic_read(&vha->loop_down_timer)) {
3932				/*
3933				 * Issue marker command only when we are going
3934				 * to start the I/O.
3935				 */
3936				vha->marker_needed = 1;
3937			}
3938
3939			vha->flags.online = 1;
3940
3941			ha->isp_ops->enable_intrs(ha);
3942
3943			ha->isp_abort_cnt = 0;
3944			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3945
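			/*
			 * 81xx parts also carry MPI/PHY firmware; refresh the
			 * cached firmware version data after the reset.
			 */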
3946			if (IS_QLA81XX(ha))
3947				qla2x00_get_fw_version(vha,
3948				    &ha->fw_major_version,
3949				    &ha->fw_minor_version,
3950				    &ha->fw_subminor_version,
3951				    &ha->fw_attributes, &ha->fw_memory_size,
3952				    ha->mpi_version, &ha->mpi_capabilities,
3953				    ha->phy_version);
3954
3955			if (ha->fce) {
3956				ha->flags.fce_enabled = 1;
3957				memset(ha->fce, 0,
3958				    fce_calc_size(ha->fce_bufs));
3959				rval = qla2x00_enable_fce_trace(vha,
3960				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3961				    &ha->fce_bufs);
3962				if (rval) {
3963					qla_printk(KERN_WARNING, ha,
3964					    "Unable to reinitialize FCE "
3965					    "(%d).\n", rval);
3966					ha->flags.fce_enabled = 0;
3967				}
3968			}
3969
3970			if (ha->eft) {
3971				memset(ha->eft, 0, EFT_SIZE);
3972				rval = qla2x00_enable_eft_trace(vha,
3973				    ha->eft_dma, EFT_NUM_BUFFERS);
3974				if (rval) {
3975					qla_printk(KERN_WARNING, ha,
3976					    "Unable to reinitialize EFT "
3977					    "(%d).\n", rval);
3978				}
3979			}
3980		} else {	/* failed the ISP abort */
3981			vha->flags.online = 1;
3982			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3983				if (ha->isp_abort_cnt == 0) {
3984 					qla_printk(KERN_WARNING, ha,
3985					    "ISP error recovery failed - "
3986					    "board disabled\n");
3987					/*
3988					 * The next call disables the board
3989					 * completely.
3990					 */
3991					ha->isp_ops->reset_adapter(vha);
3992					vha->flags.online = 0;
3993					clear_bit(ISP_ABORT_RETRY,
3994					    &vha->dpc_flags);
3995					status = 0;
3996				} else { /* schedule another ISP abort */
3997					ha->isp_abort_cnt--;
3998					DEBUG(printk("qla%ld: ISP abort - "
3999					    "retry remaining %d\n",
4000					    vha->host_no, ha->isp_abort_cnt));
4001					status = 1;
4002				}
4003			} else {
4004				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4005				DEBUG(printk("qla2x00(%ld): ISP error recovery "
4006				    "- retrying (%d) more times\n",
4007				    vha->host_no, ha->isp_abort_cnt));
4008				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4009				status = 1;
4010			}
4011		}
4012
4013	}
4014
4015	if (!status) {
4016		DEBUG(printk(KERN_INFO
4017				"qla2x00_abort_isp(%ld): succeeded.\n",
4018				vha->host_no));
4019
4020		spin_lock_irqsave(&ha->vport_slock, flags);
4021		list_for_each_entry(vp, &ha->vp_list, list) {
4022			if (vp->vp_idx) {
4023				atomic_inc(&vp->vref_count);
4024				spin_unlock_irqrestore(&ha->vport_slock, flags);
4025
4026				qla2x00_vp_abort_isp(vp);
4027
4028				spin_lock_irqsave(&ha->vport_slock, flags);
4029				atomic_dec(&vp->vref_count);
4030			}
4031		}
4032		spin_unlock_irqrestore(&ha->vport_slock, flags);
4033
4034	} else {
4035		qla_printk(KERN_INFO, ha,
4036			"qla2x00_abort_isp: **** FAILED ****\n");
4037	}
4038
4039	return(status);
4040}
4041
4042/*
4043*  qla2x00_restart_isp
4044*      Restarts the ISP after a reset.
4045*
4046* Input:
4047*      ha = adapter block pointer.
4048*
4049* Returns:
4050*      0 = success
4051*/
4052static int
4053qla2x00_restart_isp(scsi_qla_host_t *vha)
4054{
4055	int status = 0;
4056	uint32_t wait_time;
4057	struct qla_hw_data *ha = vha->hw;
4058	struct req_que *req = ha->req_q_map[0];
4059	struct rsp_que *rsp = ha->rsp_q_map[0];
4060
4061	/* If firmware needs to be loaded */
4062	if (qla2x00_isp_firmware(vha)) {
4063		vha->flags.online = 0;
4064		status = ha->isp_ops->chip_diag(vha);
4065		if (!status)
4066			status = qla2x00_setup_chip(vha);
4067	}
4068
4069	if (!status && !(status = qla2x00_init_rings(vha))) {
4070		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4071		ha->flags.chip_reset_done = 1;
4072		/* Initialize the queues in use */
4073		qla25xx_init_queues(ha);
4074
4075		status = qla2x00_fw_ready(vha);
4076		if (!status) {
4077			DEBUG(printk("%s(): Start configure loop, "
4078			    "status = %d\n", __func__, status));
4079
4080			/* Issue a marker after FW becomes ready. */
4081			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4082
4083			vha->flags.online = 1;
4084			/* Wait at most MAX_TARGET RSCNs for a stable link. */
4085			wait_time = 256;
4086			do {
4087				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4088				qla2x00_configure_loop(vha);
4089				wait_time--;
4090			} while (!atomic_read(&vha->loop_down_timer) &&
4091				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
4092				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
4093				&vha->dpc_flags)));
4094		}
4095
4096		/* if no cable then assume it's good */
4097		if ((vha->device_flags & DFLG_NO_CABLE))
4098			status = 0;
4099
4100		DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
4101				__func__,
4102				status));
4103	}
4104	return (status);
4105}
4106
4107static int
4108qla25xx_init_queues(struct qla_hw_data *ha)
4109{
4110	struct rsp_que *rsp = NULL;
4111	struct req_que *req = NULL;
4112	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4113	int ret = -1;
4114	int i;
4115
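	/*
	 * Index 0 is the base request/response queue pair; only the
	 * additional multiqueue pairs are reinitialized here.
	 */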
4116	for (i = 1; i < ha->max_rsp_queues; i++) {
4117		rsp = ha->rsp_q_map[i];
4118		if (rsp) {
4119			rsp->options &= ~BIT_0;
4120			ret = qla25xx_init_rsp_que(base_vha, rsp);
4121			if (ret != QLA_SUCCESS)
4122				DEBUG2_17(printk(KERN_WARNING
4123					"%s Rsp que:%d init failed\n", __func__,
4124						rsp->id));
4125			else
4126				DEBUG2_17(printk(KERN_INFO
4127					"%s Rsp que:%d inited\n", __func__,
4128						rsp->id));
4129		}
4130	}
4131	for (i = 1; i < ha->max_req_queues; i++) {
4132		req = ha->req_q_map[i];
4133		if (req) {
4134			/* Clear outstanding commands array. */
4135			req->options &= ~BIT_0;
4136			ret = qla25xx_init_req_que(base_vha, req);
4137			if (ret != QLA_SUCCESS)
4138				DEBUG2_17(printk(KERN_WARNING
4139					"%s Req que:%d init failed\n", __func__,
4140						req->id));
4141			else
4142				DEBUG2_17(printk(KERN_INFO
4143					"%s Req que:%d inited\n", __func__,
4144						req->id));
4145		}
4146	}
4147	return ret;
4148}
4149
4150/*
4151* qla2x00_reset_adapter
4152*      Reset adapter.
4153*
4154* Input:
4155*      ha = adapter block pointer.
4156*/
4157void
4158qla2x00_reset_adapter(scsi_qla_host_t *vha)
4159{
4160	unsigned long flags = 0;
4161	struct qla_hw_data *ha = vha->hw;
4162	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4163
4164	vha->flags.online = 0;
4165	ha->isp_ops->disable_intrs(ha);
4166
4167	spin_lock_irqsave(&ha->hardware_lock, flags);
4168	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
4169	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
4170	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
4171	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
4172	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4173}
4174
4175void
4176qla24xx_reset_adapter(scsi_qla_host_t *vha)
4177{
4178	unsigned long flags = 0;
4179	struct qla_hw_data *ha = vha->hw;
4180	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4181
4182	if (IS_QLA82XX(ha))
4183		return;
4184
4185	vha->flags.online = 0;
4186	ha->isp_ops->disable_intrs(ha);
4187
4188	spin_lock_irqsave(&ha->hardware_lock, flags);
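	/* Read back after each write to flush PCI posting. */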
4189	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
4190	RD_REG_DWORD(&reg->hccr);
4191	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
4192	RD_REG_DWORD(&reg->hccr);
4193	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4194
4195	if (IS_NOPOLLING_TYPE(ha))
4196		ha->isp_ops->enable_intrs(ha);
4197}
4198
4199/* On sparc systems, obtain port and node WWN from firmware
4200 * properties.
4201 */
4202static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
4203	struct nvram_24xx *nv)
4204{
4205#ifdef CONFIG_SPARC
4206	struct qla_hw_data *ha = vha->hw;
4207	struct pci_dev *pdev = ha->pdev;
4208	struct device_node *dp = pci_device_to_OF_node(pdev);
4209	const u8 *val;
4210	int len;
4211
4212	val = of_get_property(dp, "port-wwn", &len);
4213	if (val && len >= WWN_SIZE)
4214		memcpy(nv->port_name, val, WWN_SIZE);
4215
4216	val = of_get_property(dp, "node-wwn", &len);
4217	if (val && len >= WWN_SIZE)
4218		memcpy(nv->node_name, val, WWN_SIZE);
4219#endif
4220}
4221
4222int
4223qla24xx_nvram_config(scsi_qla_host_t *vha)
4224{
4225	int   rval;
4226	struct init_cb_24xx *icb;
4227	struct nvram_24xx *nv;
4228	uint32_t *dptr;
4229	uint8_t  *dptr1, *dptr2;
4230	uint32_t chksum;
4231	uint16_t cnt;
4232	struct qla_hw_data *ha = vha->hw;
4233
4234	rval = QLA_SUCCESS;
4235	icb = (struct init_cb_24xx *)ha->init_cb;
4236	nv = ha->nvram;
4237
4238	/* Determine NVRAM starting address. */
4239	if (ha->flags.port0) {
4240		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4241		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4242	} else {
4243		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4244		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4245	}
4246	ha->nvram_size = sizeof(struct nvram_24xx);
4247	ha->vpd_size = FA_NVRAM_VPD_SIZE;
4248	if (IS_QLA82XX(ha))
4249		ha->vpd_size = FA_VPD_SIZE_82XX;
4250
4251	/* Get VPD data into cache */
4252	ha->vpd = ha->nvram + VPD_OFFSET;
4253	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4254	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4255
4256	/* Get NVRAM data into cache and calculate checksum. */
4257	dptr = (uint32_t *)nv;
4258	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4259	    ha->nvram_size);
4260	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4261		chksum += le32_to_cpu(*dptr++);
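	/* A valid NVRAM image checksums to zero across its 32-bit words. */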
4262
4263	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4264	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4265
4266	/* Bad NVRAM data, set default parameters. */
4267	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4268	    || nv->id[3] != ' ' ||
4269	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4270		/* Reset NVRAM data. */
4271		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4272		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4273		    le16_to_cpu(nv->nvram_version));
4274		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4275		    "invalid -- WWPN) defaults.\n");
4276
4277		/*
4278		 * Set default initialization control block.
4279		 */
4280		memset(nv, 0, ha->nvram_size);
4281		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4282		nv->version = __constant_cpu_to_le16(ICB_VERSION);
4283		nv->frame_payload_size = __constant_cpu_to_le16(2048);
4284		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4285		nv->exchange_count = __constant_cpu_to_le16(0);
4286		nv->hard_address = __constant_cpu_to_le16(124);
4287		nv->port_name[0] = 0x21;
4288		nv->port_name[1] = 0x00 + ha->port_no;
4289		nv->port_name[2] = 0x00;
4290		nv->port_name[3] = 0xe0;
4291		nv->port_name[4] = 0x8b;
4292		nv->port_name[5] = 0x1c;
4293		nv->port_name[6] = 0x55;
4294		nv->port_name[7] = 0x86;
4295		nv->node_name[0] = 0x20;
4296		nv->node_name[1] = 0x00;
4297		nv->node_name[2] = 0x00;
4298		nv->node_name[3] = 0xe0;
4299		nv->node_name[4] = 0x8b;
4300		nv->node_name[5] = 0x1c;
4301		nv->node_name[6] = 0x55;
4302		nv->node_name[7] = 0x86;
4303		qla24xx_nvram_wwn_from_ofw(vha, nv);
4304		nv->login_retry_count = __constant_cpu_to_le16(8);
4305		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4306		nv->login_timeout = __constant_cpu_to_le16(0);
4307		nv->firmware_options_1 =
4308		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4309		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4310		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4311		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4312		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4313		nv->efi_parameters = __constant_cpu_to_le32(0);
4314		nv->reset_delay = 5;
4315		nv->max_luns_per_target = __constant_cpu_to_le16(128);
4316		nv->port_down_retry_count = __constant_cpu_to_le16(30);
4317		nv->link_down_timeout = __constant_cpu_to_le16(30);
4318
4319		rval = 1;
4320	}
4321
4322	/* Reset Initialization control block */
4323	memset(icb, 0, ha->init_cb_size);
4324
4325	/* Copy 1st segment. */
4326	dptr1 = (uint8_t *)icb;
4327	dptr2 = (uint8_t *)&nv->version;
4328	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4329	while (cnt--)
4330		*dptr1++ = *dptr2++;
4331
4332	icb->login_retry_count = nv->login_retry_count;
4333	icb->link_down_on_nos = nv->link_down_on_nos;
4334
4335	/* Copy 2nd segment. */
4336	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4337	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4338	cnt = (uint8_t *)&icb->reserved_3 -
4339	    (uint8_t *)&icb->interrupt_delay_timer;
4340	while (cnt--)
4341		*dptr1++ = *dptr2++;
4342
4343	/*
4344	 * Setup driver NVRAM options.
4345	 */
4346	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4347	    "QLA2462");
4348
4349	/* Use alternate WWN? */
4350	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4351		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4352		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4353	}
4354
4355	/* Prepare nodename */
4356	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4357		/*
4358		 * Firmware will apply the following mask if the nodename was
4359		 * not provided.
4360		 */
4361		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4362		icb->node_name[0] &= 0xF0;
4363	}
4364
4365	/* Set host adapter parameters. */
4366	ha->flags.disable_risc_code_load = 0;
4367	ha->flags.enable_lip_reset = 0;
4368	ha->flags.enable_lip_full_login =
4369	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4370	ha->flags.enable_target_reset =
4371	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4372	ha->flags.enable_led_scheme = 0;
4373	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4374
4375	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4376	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
4377
4378	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
4379	    sizeof(ha->fw_seriallink_options24));
4380
4381	/* save HBA serial number */
4382	ha->serial0 = icb->port_name[5];
4383	ha->serial1 = icb->port_name[6];
4384	ha->serial2 = icb->port_name[7];
4385	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4386	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4387
4388	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4389
4390	ha->retry_count = le16_to_cpu(nv->login_retry_count);
4391
4392	/* Set minimum login_timeout to 4 seconds. */
4393	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4394		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4395	if (le16_to_cpu(nv->login_timeout) < 4)
4396		nv->login_timeout = __constant_cpu_to_le16(4);
4397	ha->login_timeout = le16_to_cpu(nv->login_timeout);
4398	icb->login_timeout = nv->login_timeout;
4399
4400	/* Set minimum RATOV to 100 tenths of a second. */
4401	ha->r_a_tov = 100;
4402
4403	ha->loop_reset_delay = nv->reset_delay;
4404
4405	/* Link Down Timeout = 0:
4406	 *
4407	 * 	When Port Down timer expires we will start returning
4408	 *	I/Os to OS with "DID_NO_CONNECT".
4409	 *
4410	 * Link Down Timeout != 0:
4411	 *
4412	 *	 The driver waits for the link to come up after link down
4413	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
4414	 */
4415	if (le16_to_cpu(nv->link_down_timeout) == 0) {
4416		ha->loop_down_abort_time =
4417		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4418	} else {
4419		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4420		ha->loop_down_abort_time =
4421		    (LOOP_DOWN_TIME - ha->link_down_timeout);
4422	}
4423
4424	/* Need enough time to try and get the port back. */
4425	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4426	if (qlport_down_retry)
4427		ha->port_down_retry_count = qlport_down_retry;
4428
4429	/* Set login_retry_count */
4430	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
4431	if (ha->port_down_retry_count ==
4432	    le16_to_cpu(nv->port_down_retry_count) &&
4433	    ha->port_down_retry_count > 3)
4434		ha->login_retry_count = ha->port_down_retry_count;
4435	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4436		ha->login_retry_count = ha->port_down_retry_count;
4437	if (ql2xloginretrycount)
4438		ha->login_retry_count = ql2xloginretrycount;
4439
4440	/* Enable ZIO. */
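	/*
	 * ZIO defers completion interrupts so they can be coalesced; the
	 * interrupt delay timer below is in 100-microsecond units.
	 */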
4441	if (!vha->flags.init_done) {
4442		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4443		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4444		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4445		    le16_to_cpu(icb->interrupt_delay_timer): 2;
4446	}
4447	icb->firmware_options_2 &= __constant_cpu_to_le32(
4448	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4449	vha->flags.process_response_queue = 0;
4450	if (ha->zio_mode != QLA_ZIO_DISABLED) {
4451		ha->zio_mode = QLA_ZIO_MODE_6;
4452
4453		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4454		    "(%d us).\n", vha->host_no, ha->zio_mode,
4455		    ha->zio_timer * 100));
4456		qla_printk(KERN_INFO, ha,
4457		    "ZIO mode %d enabled; timer delay (%d us).\n",
4458		    ha->zio_mode, ha->zio_timer * 100);
4459
4460		icb->firmware_options_2 |= cpu_to_le32(
4461		    (uint32_t)ha->zio_mode);
4462		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4463		vha->flags.process_response_queue = 1;
4464	}
4465
4466	if (rval) {
4467		DEBUG2_3(printk(KERN_WARNING
4468		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4469	}
4470	return (rval);
4471}
4472
4473static int
4474qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4475    uint32_t faddr)
4476{
4477	int	rval = QLA_SUCCESS;
4478	int	segments, fragment;
4479	uint32_t *dcode, dlen;
4480	uint32_t risc_addr;
4481	uint32_t risc_size;
4482	uint32_t i;
4483	struct qla_hw_data *ha = vha->hw;
4484	struct req_que *req = ha->req_q_map[0];
4485
4486	qla_printk(KERN_INFO, ha,
4487	    "FW: Loading from flash (%x)...\n", faddr);
4488
4489	rval = QLA_SUCCESS;
4490
4491	segments = FA_RISC_CODE_SEGMENTS;
4492	dcode = (uint32_t *)req->ring;
4493	*srisc_addr = 0;
4494
4495	/* Validate firmware image by checking version. */
4496	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
4497	for (i = 0; i < 4; i++)
4498		dcode[i] = be32_to_cpu(dcode[i]);
4499	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4500	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4501	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4502		dcode[3] == 0)) {
4503		qla_printk(KERN_WARNING, ha,
4504		    "Unable to verify integrity of flash firmware image!\n");
4505		qla_printk(KERN_WARNING, ha,
4506		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4507		    dcode[1], dcode[2], dcode[3]);
4508
4509		return QLA_FUNCTION_FAILED;
4510	}
4511
4512	while (segments && rval == QLA_SUCCESS) {
4513		/* Read segment's load information. */
4514		qla24xx_read_flash_data(vha, dcode, faddr, 4);
4515
4516		risc_addr = be32_to_cpu(dcode[2]);
4517		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
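		/*
		 * The first segment's load address is recorded and later used
		 * as the RISC firmware start address.
		 */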
4518		risc_size = be32_to_cpu(dcode[3]);
4519
4520		fragment = 0;
4521		while (risc_size > 0 && rval == QLA_SUCCESS) {
4522			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4523			if (dlen > risc_size)
4524				dlen = risc_size;
4525
4526			DEBUG7(printk("scsi(%ld): Loading risc segment @ risc "
4527			    "addr %x, number of dwords 0x%x, offset 0x%x.\n",
4528			    vha->host_no, risc_addr, dlen, faddr));
4529
4530			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4531			for (i = 0; i < dlen; i++)
4532				dcode[i] = swab32(dcode[i]);
4533
4534			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4535			    dlen);
4536			if (rval) {
4537				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4538				    "segment %d of firmware\n", vha->host_no,
4539				    fragment));
4540				qla_printk(KERN_WARNING, ha,
4541				    "[ERROR] Failed to load segment %d of "
4542				    "firmware\n", fragment);
4543				break;
4544			}
4545
4546			faddr += dlen;
4547			risc_addr += dlen;
4548			risc_size -= dlen;
4549			fragment++;
4550		}
4551
4552		/* Next segment. */
4553		segments--;
4554	}
4555
4556	return rval;
4557}
4558
4559#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
4560
4561int
4562qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4563{
4564	int	rval;
4565	int	i, fragment;
4566	uint16_t *wcode, *fwcode;
4567	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
4568	struct fw_blob *blob;
4569	struct qla_hw_data *ha = vha->hw;
4570	struct req_que *req = ha->req_q_map[0];
4571
4572	/* Load firmware blob. */
4573	blob = qla2x00_request_firmware(vha);
4574	if (!blob) {
4575		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4576		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4577		    "from: " QLA_FW_URL ".\n");
4578		return QLA_FUNCTION_FAILED;
4579	}
4580
4581	rval = QLA_SUCCESS;
4582
4583	wcode = (uint16_t *)req->ring;
4584	*srisc_addr = 0;
4585	fwcode = (uint16_t *)blob->fw->data;
4586	fwclen = 0;
4587
4588	/* Validate firmware image by checking version. */
4589	if (blob->fw->size < 8 * sizeof(uint16_t)) {
4590		qla_printk(KERN_WARNING, ha,
4591		    "Unable to verify integrity of firmware image (%Zd)!\n",
4592		    blob->fw->size);
4593		goto fail_fw_integrity;
4594	}
4595	for (i = 0; i < 4; i++)
4596		wcode[i] = be16_to_cpu(fwcode[i + 4]);
4597	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4598	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4599		wcode[2] == 0 && wcode[3] == 0)) {
4600		qla_printk(KERN_WARNING, ha,
4601		    "Unable to verify integrity of firmware image!\n");
4602		qla_printk(KERN_WARNING, ha,
4603		    "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
4604		    wcode[1], wcode[2], wcode[3]);
4605		goto fail_fw_integrity;
4606	}
4607
4608	seg = blob->segs;
4609	while (*seg && rval == QLA_SUCCESS) {
4610		risc_addr = *seg;
4611		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
4612		risc_size = be16_to_cpu(fwcode[3]);
4613
4614		/* Validate firmware image size. */
4615		fwclen += risc_size * sizeof(uint16_t);
4616		if (blob->fw->size < fwclen) {
4617			qla_printk(KERN_WARNING, ha,
4618			    "Unable to verify integrity of firmware image "
4619			    "(%Zd)!\n", blob->fw->size);
4620			goto fail_fw_integrity;
4621		}
4622
4623		fragment = 0;
4624		while (risc_size > 0 && rval == QLA_SUCCESS) {
4625			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4626			if (wlen > risc_size)
4627				wlen = risc_size;
4628
4629			DEBUG7(printk("scsi(%ld): Loading risc segment @ risc "
4630			    "addr %x, number of words 0x%x.\n", vha->host_no,
4631			    risc_addr, wlen));
4632
4633			for (i = 0; i < wlen; i++)
4634				wcode[i] = swab16(fwcode[i]);
4635
4636			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4637			    wlen);
4638			if (rval) {
4639				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4640				    "segment %d of firmware\n", vha->host_no,
4641				    fragment));
4642				qla_printk(KERN_WARNING, ha,
4643				    "[ERROR] Failed to load segment %d of "
4644				    "firmware\n", fragment);
4645				break;
4646			}
4647
4648			fwcode += wlen;
4649			risc_addr += wlen;
4650			risc_size -= wlen;
4651			fragment++;
4652		}
4653
4654		/* Next segment. */
4655		seg++;
4656	}
4657	return rval;
4658
4659fail_fw_integrity:
4660	return QLA_FUNCTION_FAILED;
4661}
4662
4663static int
4664qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4665{
4666	int	rval;
4667	int	segments, fragment;
4668	uint32_t *dcode, dlen;
4669	uint32_t risc_addr;
4670	uint32_t risc_size;
4671	uint32_t i;
4672	struct fw_blob *blob;
4673	uint32_t *fwcode, fwclen;
4674	struct qla_hw_data *ha = vha->hw;
4675	struct req_que *req = ha->req_q_map[0];
4676
4677	/* Load firmware blob. */
4678	blob = qla2x00_request_firmware(vha);
4679	if (!blob) {
4680		qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
4681		qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
4682		    "from: " QLA_FW_URL ".\n");
4683
4684		return QLA_FUNCTION_FAILED;
4685	}
4686
4687	qla_printk(KERN_INFO, ha,
4688	    "FW: Loading via request-firmware...\n");
4689
4690	rval = QLA_SUCCESS;
4691
4692	segments = FA_RISC_CODE_SEGMENTS;
4693	dcode = (uint32_t *)req->ring;
4694	*srisc_addr = 0;
4695	fwcode = (uint32_t *)blob->fw->data;
4696	fwclen = 0;
4697
4698	/* Validate firmware image by checking version. */
4699	if (blob->fw->size < 8 * sizeof(uint32_t)) {
4700		qla_printk(KERN_WARNING, ha,
4701		    "Unable to verify integrity of firmware image (%Zd)!\n",
4702		    blob->fw->size);
4703		goto fail_fw_integrity;
4704	}
4705	for (i = 0; i < 4; i++)
4706		dcode[i] = be32_to_cpu(fwcode[i + 4]);
4707	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
4708	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4709	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4710		dcode[3] == 0)) {
4711		qla_printk(KERN_WARNING, ha,
4712		    "Unable to verify integrity of firmware image!\n");
4713		qla_printk(KERN_WARNING, ha,
4714		    "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
4715		    dcode[1], dcode[2], dcode[3]);
4716		goto fail_fw_integrity;
4717	}
4718
4719	while (segments && rval == QLA_SUCCESS) {
4720		risc_addr = be32_to_cpu(fwcode[2]);
4721		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
4722		risc_size = be32_to_cpu(fwcode[3]);
4723
4724		/* Validate firmware image size. */
4725		fwclen += risc_size * sizeof(uint32_t);
4726		if (blob->fw->size < fwclen) {
4727			qla_printk(KERN_WARNING, ha,
4728			    "Unable to verify integrity of firmware image "
4729			    "(%Zd)!\n", blob->fw->size);
4730
4731			goto fail_fw_integrity;
4732		}
4733
4734		fragment = 0;
4735		while (risc_size > 0 && rval == QLA_SUCCESS) {
4736			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
4737			if (dlen > risc_size)
4738				dlen = risc_size;
4739
4740			DEBUG7(printk("scsi(%ld): Loading risc segment @ risc "
4741			    "addr %x, number of dwords 0x%x.\n", vha->host_no,
4742			    risc_addr, dlen));
4743
4744			for (i = 0; i < dlen; i++)
4745				dcode[i] = swab32(fwcode[i]);
4746
4747			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4748			    dlen);
4749			if (rval) {
4750				DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4751				    "segment %d of firmware\n", vha->host_no,
4752				    fragment));
4753				qla_printk(KERN_WARNING, ha,
4754				    "[ERROR] Failed to load segment %d of "
4755				    "firmware\n", fragment);
4756				break;
4757			}
4758
4759			fwcode += dlen;
4760			risc_addr += dlen;
4761			risc_size -= dlen;
4762			fragment++;
4763		}
4764
4765		/* Next segment. */
4766		segments--;
4767	}
4768	return rval;
4769
4770fail_fw_integrity:
4771	return QLA_FUNCTION_FAILED;
4772}
4773
4774int
4775qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4776{
4777	int rval;
4778
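	/*
	 * ql2xfwloadbin == 1 selects the flash-first policy implemented by
	 * the 81xx load routine.
	 */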
4779	if (ql2xfwloadbin == 1)
4780		return qla81xx_load_risc(vha, srisc_addr);
4781
4782	/*
4783	 * FW Load priority:
4784	 * 1) Firmware via request-firmware interface (.bin file).
4785	 * 2) Firmware residing in flash.
4786	 */
4787	rval = qla24xx_load_risc_blob(vha, srisc_addr);
4788	if (rval == QLA_SUCCESS)
4789		return rval;
4790
4791	return qla24xx_load_risc_flash(vha, srisc_addr,
4792	    vha->hw->flt_region_fw);
4793}
4794
4795int
4796qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4797{
4798	int rval;
4799	struct qla_hw_data *ha = vha->hw;
4800
4801	if (ql2xfwloadbin == 2)
4802		goto try_blob_fw;
4803
4804	/*
4805	 * FW Load priority:
4806	 * 1) Firmware residing in flash.
4807	 * 2) Firmware via request-firmware interface (.bin file).
4808	 * 3) Golden-Firmware residing in flash -- limited operation.
4809	 */
4810	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4811	if (rval == QLA_SUCCESS)
4812		return rval;
4813
4814try_blob_fw:
4815	rval = qla24xx_load_risc_blob(vha, srisc_addr);
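	/*
	 * Done if the blob loaded, or if there is no golden-firmware region
	 * to fall back to.
	 */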
4816	if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4817		return rval;
4818
4819	qla_printk(KERN_ERR, ha,
4820	    "FW: Attempting to fallback to golden firmware...\n");
4821	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4822	if (rval != QLA_SUCCESS)
4823		return rval;
4824
4825	qla_printk(KERN_ERR, ha,
4826	    "FW: Please update operational firmware...\n");
4827	ha->flags.running_gold_fw = 1;
4828
4829	return rval;
4830}
4831
4832void
4833qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4834{
4835	int ret, retries;
4836	struct qla_hw_data *ha = vha->hw;
4837
4838	if (ha->flags.pci_channel_io_perm_failure)
4839		return;
4840	if (!IS_FWI2_CAPABLE(ha))
4841		return;
4842	if (!ha->fw_major_version)
4843		return;
4844
4845	ret = qla2x00_stop_firmware(vha);
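	/*
	 * Unless the command timed out or is unsupported, reset and
	 * reinitialize the chip and retry the stop-firmware command, up to
	 * five times.
	 */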
4846	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4847	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
4848		ha->isp_ops->reset_chip(vha);
4849		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4850			continue;
4851		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4852			continue;
4853		qla_printk(KERN_INFO, ha,
4854		    "Attempting retry of stop-firmware command...\n");
4855		ret = qla2x00_stop_firmware(vha);
4856	}
4857}
4858
4859int
4860qla24xx_configure_vhba(scsi_qla_host_t *vha)
4861{
4862	int rval = QLA_SUCCESS;
4863	uint16_t mb[MAILBOX_REGISTER_COUNT];
4864	struct qla_hw_data *ha = vha->hw;
4865	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4866	struct req_que *req;
4867	struct rsp_que *rsp;
4868
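	/* Only NPIV vports (vp_idx != 0) are configured through this path. */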
4869	if (!vha->vp_idx)
4870		return -EINVAL;
4871
4872	rval = qla2x00_fw_ready(base_vha);
4873	if (ha->flags.cpu_affinity_enabled)
4874		req = ha->req_q_map[0];
4875	else
4876		req = vha->req;
4877	rsp = req->rsp;
4878
4879	if (rval == QLA_SUCCESS) {
4880		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4881		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4882	}
4883
4884	vha->flags.management_server_logged_in = 0;
4885
4886	/* Login to SNS first */
4887	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4888	if (mb[0] != MBS_COMMAND_COMPLETE) {
4889		DEBUG15(qla_printk(KERN_INFO, ha,
4890		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
4891		    "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
4892		    mb[0], mb[1], mb[2], mb[6], mb[7]));
4893		return (QLA_FUNCTION_FAILED);
4894	}
4895
4896	atomic_set(&vha->loop_down_timer, 0);
4897	atomic_set(&vha->loop_state, LOOP_UP);
4898	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4899	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4900	rval = qla2x00_loop_resync(base_vha);
4901
4902	return rval;
4903}
4904
4905/* 84XX Support **************************************************************/
4906
4907static LIST_HEAD(qla_cs84xx_list);
4908static DEFINE_MUTEX(qla_cs84xx_mutex);
4909
4910static struct qla_chip_state_84xx *
4911qla84xx_get_chip(struct scsi_qla_host *vha)
4912{
4913	struct qla_chip_state_84xx *cs84xx;
4914	struct qla_hw_data *ha = vha->hw;
4915
4916	mutex_lock(&qla_cs84xx_mutex);
4917
4918	/* Find any shared 84xx chip. */
4919	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
4920		if (cs84xx->bus == ha->pdev->bus) {
4921			kref_get(&cs84xx->kref);
4922			goto done;
4923		}
4924	}
4925
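	/* No shared state for this chip yet; allocate and register one. */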
4926	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
4927	if (!cs84xx)
4928		goto done;
4929
4930	kref_init(&cs84xx->kref);
4931	spin_lock_init(&cs84xx->access_lock);
4932	mutex_init(&cs84xx->fw_update_mutex);
4933	cs84xx->bus = ha->pdev->bus;
4934
4935	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
4936done:
4937	mutex_unlock(&qla_cs84xx_mutex);
4938	return cs84xx;
4939}
4940
4941static void
4942__qla84xx_chip_release(struct kref *kref)
4943{
4944	struct qla_chip_state_84xx *cs84xx =
4945	    container_of(kref, struct qla_chip_state_84xx, kref);
4946
4947	mutex_lock(&qla_cs84xx_mutex);
4948	list_del(&cs84xx->list);
4949	mutex_unlock(&qla_cs84xx_mutex);
4950	kfree(cs84xx);
4951}
4952
4953void
4954qla84xx_put_chip(struct scsi_qla_host *vha)
4955{
4956	struct qla_hw_data *ha = vha->hw;
4957	if (ha->cs84xx)
4958		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4959}
4960
4961static int
4962qla84xx_init_chip(scsi_qla_host_t *vha)
4963{
4964	int rval;
4965	uint16_t status[2];
4966	struct qla_hw_data *ha = vha->hw;
4967
4968	mutex_lock(&ha->cs84xx->fw_update_mutex);
4969
4970	rval = qla84xx_verify_chip(vha, status);
4971
4972	mutex_unlock(&ha->cs84xx->fw_update_mutex);
4973
4974	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4975	    QLA_SUCCESS;
4976}
4977
4978/* 81XX Support **************************************************************/
4979
4980int
4981qla81xx_nvram_config(scsi_qla_host_t *vha)
4982{
4983	int   rval;
4984	struct init_cb_81xx *icb;
4985	struct nvram_81xx *nv;
4986	uint32_t *dptr;
4987	uint8_t  *dptr1, *dptr2;
4988	uint32_t chksum;
4989	uint16_t cnt;
4990	struct qla_hw_data *ha = vha->hw;
4991
4992	rval = QLA_SUCCESS;
4993	icb = (struct init_cb_81xx *)ha->init_cb;
4994	nv = ha->nvram;
4995
4996	/* Determine NVRAM starting address. */
4997	ha->nvram_size = sizeof(struct nvram_81xx);
4998	ha->vpd_size = FA_NVRAM_VPD_SIZE;
4999
5000	/* Get VPD data into cache */
5001	ha->vpd = ha->nvram + VPD_OFFSET;
5002	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
5003	    ha->vpd_size);
5004
5005	/* Get NVRAM data into cache and calculate checksum. */
5006	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
5007	    ha->nvram_size);
5008	dptr = (uint32_t *)nv;
5009	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5010		chksum += le32_to_cpu(*dptr++);
5011
5012	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
5013	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
5014
5015	/* Bad NVRAM data, set default parameters. */
5016	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5017	    || nv->id[3] != ' ' ||
5018	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5019		/* Reset NVRAM data. */
5020		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
5021		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
5022		    le16_to_cpu(nv->nvram_version));
5023		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
5024		    "invalid -- WWPN) defaults.\n");
5025
5026		/*
5027		 * Set default initialization control block.
5028		 */
5029		memset(nv, 0, ha->nvram_size);
5030		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
5031		nv->version = __constant_cpu_to_le16(ICB_VERSION);
5032		nv->frame_payload_size = __constant_cpu_to_le16(2048);
5033		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5034		nv->exchange_count = __constant_cpu_to_le16(0);
5035		nv->port_name[0] = 0x21;
5036		nv->port_name[1] = 0x00 + ha->port_no;
5037		nv->port_name[2] = 0x00;
5038		nv->port_name[3] = 0xe0;
5039		nv->port_name[4] = 0x8b;
5040		nv->port_name[5] = 0x1c;
5041		nv->port_name[6] = 0x55;
5042		nv->port_name[7] = 0x86;
5043		nv->node_name[0] = 0x20;
5044		nv->node_name[1] = 0x00;
5045		nv->node_name[2] = 0x00;
5046		nv->node_name[3] = 0xe0;
5047		nv->node_name[4] = 0x8b;
5048		nv->node_name[5] = 0x1c;
5049		nv->node_name[6] = 0x55;
5050		nv->node_name[7] = 0x86;
5051		nv->login_retry_count = __constant_cpu_to_le16(8);
5052		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
5053		nv->login_timeout = __constant_cpu_to_le16(0);
5054		nv->firmware_options_1 =
5055		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5056		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
5057		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
5058		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
5059		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
5060		nv->efi_parameters = __constant_cpu_to_le32(0);
5061		nv->reset_delay = 5;
5062		nv->max_luns_per_target = __constant_cpu_to_le16(128);
5063		nv->port_down_retry_count = __constant_cpu_to_le16(30);
5064		nv->link_down_timeout = __constant_cpu_to_le16(30);
5065		nv->enode_mac[0] = 0x00;
5066		nv->enode_mac[1] = 0x02;
5067		nv->enode_mac[2] = 0x03;
5068		nv->enode_mac[3] = 0x04;
5069		nv->enode_mac[4] = 0x05;
5070		nv->enode_mac[5] = 0x06 + ha->port_no;
5071
5072		rval = 1;
5073	}
5074
5075	/* Reset Initialization control block */
5076	memset(icb, 0, sizeof(struct init_cb_81xx));
5077
5078	/* Copy 1st segment. */
5079	dptr1 = (uint8_t *)icb;
5080	dptr2 = (uint8_t *)&nv->version;
5081	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
5082	while (cnt--)
5083		*dptr1++ = *dptr2++;
5084
5085	icb->login_retry_count = nv->login_retry_count;
5086
5087	/* Copy 2nd segment. */
5088	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
5089	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
5090	cnt = (uint8_t *)&icb->reserved_5 -
5091	    (uint8_t *)&icb->interrupt_delay_timer;
5092	while (cnt--)
5093		*dptr1++ = *dptr2++;
5094
5095	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5096	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5097	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
5098		icb->enode_mac[0] = 0x01;
5099		icb->enode_mac[1] = 0x02;
5100		icb->enode_mac[2] = 0x03;
5101		icb->enode_mac[3] = 0x04;
5102		icb->enode_mac[4] = 0x05;
5103		icb->enode_mac[5] = 0x06 + ha->port_no;
5104	}
5105
5106	/* Use extended-initialization control block. */
5107	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
5108
5109	/*
5110	 * Setup driver NVRAM options.
5111	 */
5112	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
5113	    "QLE8XXX");
5114
5115	/* Use alternate WWN? */
5116	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
5117		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5118		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5119	}
5120
5121	/* Prepare nodename */
5122	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
5123		/*
5124		 * Firmware will apply the following mask if the nodename was
5125		 * not provided.
5126		 */
5127		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5128		icb->node_name[0] &= 0xF0;
5129	}
5130
5131	/* Set host adapter parameters. */
5132	ha->flags.disable_risc_code_load = 0;
5133	ha->flags.enable_lip_reset = 0;
5134	ha->flags.enable_lip_full_login =
5135	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
5136	ha->flags.enable_target_reset =
5137	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
5138	ha->flags.enable_led_scheme = 0;
5139	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
5140
5141	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
5142	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
5143
5144	/* save HBA serial number */
5145	ha->serial0 = icb->port_name[5];
5146	ha->serial1 = icb->port_name[6];
5147	ha->serial2 = icb->port_name[7];
5148	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5149	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5150
5151	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5152
5153	ha->retry_count = le16_to_cpu(nv->login_retry_count);
5154
5155	/* Set minimum login_timeout to 4 seconds. */
5156	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5157		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5158	if (le16_to_cpu(nv->login_timeout) < 4)
5159		nv->login_timeout = __constant_cpu_to_le16(4);
5160	ha->login_timeout = le16_to_cpu(nv->login_timeout);
5161	icb->login_timeout = nv->login_timeout;
5162
5163	/* Set minimum RATOV to 100 tenths of a second. */
5164	ha->r_a_tov = 100;
5165
5166	ha->loop_reset_delay = nv->reset_delay;
5167
5168	/* Link Down Timeout = 0:
5169	 *
5170	 * 	When Port Down timer expires we will start returning
5171	 *	I/Os to OS with "DID_NO_CONNECT".
5172	 *
5173	 * Link Down Timeout != 0:
5174	 *
5175	 *	 The driver waits for the link to come up after link down
5176	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
5177	 */
5178	if (le16_to_cpu(nv->link_down_timeout) == 0) {
5179		ha->loop_down_abort_time =
5180		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5181	} else {
5182		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
5183		ha->loop_down_abort_time =
5184		    (LOOP_DOWN_TIME - ha->link_down_timeout);
5185	}
5186
5187	/* Need enough time to try and get the port back. */
5188	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
5189	if (qlport_down_retry)
5190		ha->port_down_retry_count = qlport_down_retry;
5191
5192	/* Set login_retry_count */
5193	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
5194	if (ha->port_down_retry_count ==
5195	    le16_to_cpu(nv->port_down_retry_count) &&
5196	    ha->port_down_retry_count > 3)
5197		ha->login_retry_count = ha->port_down_retry_count;
5198	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5199		ha->login_retry_count = ha->port_down_retry_count;
5200	if (ql2xloginretrycount)
5201		ha->login_retry_count = ql2xloginretrycount;
5202
5203	/* Enable ZIO. */
5204	if (!vha->flags.init_done) {
5205		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
5206		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5207		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5208		    le16_to_cpu(icb->interrupt_delay_timer): 2;
5209	}
5210	icb->firmware_options_2 &= __constant_cpu_to_le32(
5211	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5212	vha->flags.process_response_queue = 0;
5213	if (ha->zio_mode != QLA_ZIO_DISABLED) {
5214		ha->zio_mode = QLA_ZIO_MODE_6;
5215
5216		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
5217		    "(%d us).\n", vha->host_no, ha->zio_mode,
5218		    ha->zio_timer * 100));
5219		qla_printk(KERN_INFO, ha,
5220		    "ZIO mode %d enabled; timer delay (%d us).\n",
5221		    ha->zio_mode, ha->zio_timer * 100);
5222
5223		icb->firmware_options_2 |= cpu_to_le32(
5224		    (uint32_t)ha->zio_mode);
5225		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
5226		vha->flags.process_response_queue = 1;
5227	}
5228
5229	if (rval) {
5230		DEBUG2_3(printk(KERN_WARNING
5231		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
5232	}
5233	return (rval);
5234}
5235
5236int
5237qla82xx_restart_isp(scsi_qla_host_t *vha)
5238{
5239	int status, rval;
5240	uint32_t wait_time;
5241	struct qla_hw_data *ha = vha->hw;
5242	struct req_que *req = ha->req_q_map[0];
5243	struct rsp_que *rsp = ha->rsp_q_map[0];
5244	struct scsi_qla_host *vp;
5245	unsigned long flags;
5246
5247	status = qla2x00_init_rings(vha);
5248	if (!status) {
5249		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5250		ha->flags.chip_reset_done = 1;
5251
5252		status = qla2x00_fw_ready(vha);
5253		if (!status) {
5254			qla_printk(KERN_INFO, ha,
5255			"%s(): Start configure loop, "
5256			"status = %d\n", __func__, status);
5257
5258			/* Issue a marker after FW becomes ready. */
5259			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5260
5261			vha->flags.online = 1;
5262			/* Wait at most MAX_TARGET RSCNs for a stable link. */
5263			wait_time = 256;
5264			do {
5265				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5266				qla2x00_configure_loop(vha);
5267				wait_time--;
5268			} while (!atomic_read(&vha->loop_down_timer) &&
5269			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
5270			    wait_time &&
5271			    (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
5272		}
5273
5274		/* if no cable then assume it's good */
5275		if ((vha->device_flags & DFLG_NO_CABLE))
5276			status = 0;
5277
5278		qla_printk(KERN_INFO, ha,
5279			"%s(): Configure loop done, status = 0x%x\n",
5280			__func__, status);
5281	}
5282
5283	if (!status) {
5284		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5285
5286		if (!atomic_read(&vha->loop_down_timer)) {
5287			/*
5288			 * Issue marker command only when we are going
5289			 * to start the I/O.
5290			 */
5291			vha->marker_needed = 1;
5292		}
5293
5294		vha->flags.online = 1;
5295
5296		ha->isp_ops->enable_intrs(ha);
5297
5298		ha->isp_abort_cnt = 0;
5299		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5300
5301		if (ha->fce) {
5302			ha->flags.fce_enabled = 1;
5303			memset(ha->fce, 0,
5304			    fce_calc_size(ha->fce_bufs));
5305			rval = qla2x00_enable_fce_trace(vha,
5306			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5307			    &ha->fce_bufs);
5308			if (rval) {
5309				qla_printk(KERN_WARNING, ha,
5310				    "Unable to reinitialize FCE "
5311				    "(%d).\n", rval);
5312				ha->flags.fce_enabled = 0;
5313			}
5314		}
5315
5316		if (ha->eft) {
5317			memset(ha->eft, 0, EFT_SIZE);
5318			rval = qla2x00_enable_eft_trace(vha,
5319			    ha->eft_dma, EFT_NUM_BUFFERS);
5320			if (rval) {
5321				qla_printk(KERN_WARNING, ha,
5322				    "Unable to reinitialize EFT "
5323				    "(%d).\n", rval);
5324			}
5325		}
5326	}
5327
5328	if (!status) {
5329		DEBUG(printk(KERN_INFO
5330			"qla82xx_restart_isp(%ld): succeeded.\n",
5331			vha->host_no));
5332
5333		spin_lock_irqsave(&ha->vport_slock, flags);
5334		list_for_each_entry(vp, &ha->vp_list, list) {
5335			if (vp->vp_idx) {
5336				atomic_inc(&vp->vref_count);
5337				spin_unlock_irqrestore(&ha->vport_slock, flags);
5338
5339				qla2x00_vp_abort_isp(vp);
5340
5341				spin_lock_irqsave(&ha->vport_slock, flags);
5342				atomic_dec(&vp->vref_count);
5343			}
5344		}
5345		spin_unlock_irqrestore(&ha->vport_slock, flags);
5346
5347	} else {
5348		qla_printk(KERN_INFO, ha,
5349			"qla82xx_restart_isp: **** FAILED ****\n");
5350	}
5351
5352	return status;
5353}
5354
5355void
5356qla81xx_update_fw_options(scsi_qla_host_t *vha)
5357{
5358	struct qla_hw_data *ha = vha->hw;
5359
5360	if (!ql2xetsenable)
5361		return;
5362
5363	/* Enable ETS Burst. */
5364	memset(ha->fw_options, 0, sizeof(ha->fw_options));
5365	ha->fw_options[2] |= BIT_9;
5366	qla2x00_set_fw_options(vha, ha->fw_options);
5367}
5368
5369/*
5370 * qla24xx_get_fcp_prio
5371 *	Gets the fcp cmd priority value for the logged in port.
5372 *	Looks for a match of the port descriptors within
5373 *	each of the fcp prio config entries. If a match is found,
5374 *	the tag (priority) value is returned.
5375 *
5376 * Input:
5377 *	ha = adapter block pointer.
5378 *	fcport = port structure pointer.
5379 *
5380 * Return:
5381 *	non-zero (if found)
5382 * 	0 (if not found)
5383 *
5384 * Context:
5385 * 	Kernel context
5386 */
5387uint8_t
5388qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5389{
5390	int i, entries;
5391	uint8_t pid_match, wwn_match;
5392	uint8_t priority;
5393	uint32_t pid1, pid2;
5394	uint64_t wwn1, wwn2;
5395	struct qla_fcp_prio_entry *pri_entry;
5396	struct qla_hw_data *ha = vha->hw;
5397
5398	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5399		return 0;
5400
5401	priority = 0;
5402	entries = ha->fcp_prio_cfg->num_entries;
5403	pri_entry = &ha->fcp_prio_cfg->entry[0];
5404
5405	for (i = 0; i < entries; i++) {
5406		pid_match = wwn_match = 0;
5407
5408		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5409			pri_entry++;
5410			continue;
5411		}
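		/*
		 * An entry applies only when both of its source and
		 * destination descriptors match, i.e. pid_match or wwn_match
		 * reaches 2 below.
		 */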
5412
5413		/* check source pid for a match */
5414		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5415			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5416			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5417			if (pid1 == INVALID_PORT_ID)
5418				pid_match++;
5419			else if (pid1 == pid2)
5420				pid_match++;
5421		}
5422
5423		/* check destination pid for a match */
5424		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5425			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5426			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5427			if (pid1 == INVALID_PORT_ID)
5428				pid_match++;
5429			else if (pid1 == pid2)
5430				pid_match++;
5431		}
5432
5433		/* check source WWN for a match */
5434		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
5435			wwn1 = wwn_to_u64(vha->port_name);
5436			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
5437			if (wwn2 == (uint64_t)-1)
5438				wwn_match++;
5439			else if (wwn1 == wwn2)
5440				wwn_match++;
5441		}
5442
5443		/* check destination WWN for a match */
5444		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
5445			wwn1 = wwn_to_u64(fcport->port_name);
5446			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
5447			if (wwn2 == (uint64_t)-1)
5448				wwn_match++;
5449			else if (wwn1 == wwn2)
5450				wwn_match++;
5451		}
5452
5453		if (pid_match == 2 || wwn_match == 2) {
5454			/* Found a matching entry */
5455			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
5456				priority = pri_entry->tag;
5457			break;
5458		}
5459
5460		pri_entry++;
5461	}
5462
5463	return priority;
5464}
5465
5466/*
5467 * qla24xx_update_fcport_fcp_prio
5468 *	Activates fcp priority for the logged in fc port
5469 *
5470 * Input:
5471 *	ha = adapter block pointer.
5472 *	fcport = port structure pointer.
5473 *
5474 * Return:
5475 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
5476 *
5477 * Context:
5478 *	Kernel context.
5479 */
5480int
5481qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
5482{
5483	int ret;
5484	uint8_t priority;
5485	uint16_t mb[5];
5486
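	/* Only logged-in target ports can have a priority programmed. */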
5487	if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
5488		fcport->port_type != FCT_TARGET ||
5489		fcport->loop_id == FC_NO_LOOP_ID)
5490		return QLA_FUNCTION_FAILED;
5491
5492	priority = qla24xx_get_fcp_prio(ha, fcport);
5493	ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
5494	if (ret == QLA_SUCCESS)
5495		fcport->fcp_prio = priority;
5496	else
5497		DEBUG2(printk(KERN_WARNING
5498			"scsi(%ld): Unable to activate fcp priority, "
5499			"ret=0x%x\n", ha->host_no, ret));
5500
5501	return  ret;
5502}
5503
5504/*
5505 * qla24xx_update_all_fcp_prio
5506 *	Activates fcp priority for all the logged in ports
5507 *
5508 * Input:
5509 *	ha = adapter block pointer.
5510 *
5511 * Return:
5512 *	QLA_SUCCESS or QLA_FUNCTION_FAILED
5513 *
5514 * Context:
5515 *	Kernel context.
5516 */
5517int
5518qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
5519{
5520	int ret;
5521	fc_port_t *fcport;
5522
5523	ret = QLA_FUNCTION_FAILED;
5524	/* We need to set priority for all logged in ports */
5525	list_for_each_entry(fcport, &vha->vp_fcports, list)
5526		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
5527
5528	return ret;
5529}
5530