ql_api.c revision 9611:567690184bfa
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/* Copyright 2009 QLogic Corporation */
23
24/*
25 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26 * Use is subject to license terms.
27 */
28
29#pragma ident	"Copyright 2009 QLogic Corporation; ql_api.c"
30
31/*
32 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33 *
34 * ***********************************************************************
35 * *									**
36 * *				NOTICE					**
37 * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38 * *			ALL RIGHTS RESERVED				**
39 * *									**
40 * ***********************************************************************
41 *
42 */
43
44#include <ql_apps.h>
45#include <ql_api.h>
46#include <ql_debug.h>
47#include <ql_init.h>
48#include <ql_iocb.h>
49#include <ql_ioctl.h>
50#include <ql_isr.h>
51#include <ql_mbx.h>
52#include <ql_xioctl.h>
53
54/*
55 * Solaris external defines.
56 */
57extern pri_t minclsyspri;
58extern pri_t maxclsyspri;
59
60/*
61 * dev_ops functions prototypes
62 */
63static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66static int ql_power(dev_info_t *, int, int);
67static int ql_quiesce(dev_info_t *);
68
69/*
70 * FCA functions prototypes exported by means of the transport table
71 */
72static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73    fc_fca_bind_info_t *);
74static void ql_unbind_port(opaque_t);
75static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77static int ql_els_send(opaque_t, fc_packet_t *);
78static int ql_get_cap(opaque_t, char *, void *);
79static int ql_set_cap(opaque_t, char *, void *);
80static int ql_getmap(opaque_t, fc_lilpmap_t *);
81static int ql_transport(opaque_t, fc_packet_t *);
82static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85static int ql_abort(opaque_t, fc_packet_t *, int);
86static int ql_reset(opaque_t, uint32_t);
87static int ql_port_manage(opaque_t, fc_fca_pm_t *);
88static opaque_t ql_get_device(opaque_t, fc_portid_t);
89
90/*
91 * FCA Driver Support Function Prototypes.
92 */
93static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
94static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
95    ql_srb_t *);
96static void ql_task_daemon(void *);
97static void ql_task_thread(ql_adapter_state_t *);
98static void ql_unsol_callback(ql_srb_t *);
99static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
100    fc_unsol_buf_t *);
101static void ql_timer(void *);
102static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
103static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
104    uint32_t *, uint32_t *);
105static void ql_halt(ql_adapter_state_t *, int);
106static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
107static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
108static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
109static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
110static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
111static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
112static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
113static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
114static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
115static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
116static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
117static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
118static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
119static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
120static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
121static int ql_login_port(ql_adapter_state_t *, port_id_t);
122static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
123static int ql_logout_port(ql_adapter_state_t *, port_id_t);
124static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
126static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
127static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
128static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
129static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
130    ql_srb_t *);
131static int ql_kstat_update(kstat_t *, int);
132static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
133static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
134static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
135static void ql_rst_aen(ql_adapter_state_t *);
136static void ql_restart_queues(ql_adapter_state_t *);
137static void ql_abort_queues(ql_adapter_state_t *);
138static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
139static void ql_idle_check(ql_adapter_state_t *);
140static int ql_loop_resync(ql_adapter_state_t *);
141static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
142static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
143static int ql_save_config_regs(dev_info_t *);
144static int ql_restore_config_regs(dev_info_t *);
145static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
146static int ql_handle_rscn_update(ql_adapter_state_t *);
147static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
148static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
149static int ql_dump_firmware(ql_adapter_state_t *);
150static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
151static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
152static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
153static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
154static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
155static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
156    void *);
157static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
158    uint8_t);
159static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
160static int ql_suspend_adapter(ql_adapter_state_t *);
161static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
162static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
163int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
164static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
165static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
166static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
167static int ql_setup_interrupts(ql_adapter_state_t *);
168static int ql_setup_msi(ql_adapter_state_t *);
169static int ql_setup_msix(ql_adapter_state_t *);
170static int ql_setup_fixed(ql_adapter_state_t *);
171static void ql_release_intr(ql_adapter_state_t *);
172static void ql_disable_intr(ql_adapter_state_t *);
173static int ql_legacy_intr(ql_adapter_state_t *);
174static int ql_init_mutex(ql_adapter_state_t *);
175static void ql_destroy_mutex(ql_adapter_state_t *);
176static void ql_iidma(ql_adapter_state_t *);
177
178static int ql_n_port_plogi(ql_adapter_state_t *);
179static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
180    els_descriptor_t *);
181static void ql_isp_els_request_ctor(els_descriptor_t *,
182    els_passthru_entry_t *);
183static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
/*
 * Global data
 *
 * Tunables and state shared by all instances of the driver.  The
 * ql_disable_* flags presumably gate which interrupt delivery modes
 * (AIF/MSI/MSI-X) ql_setup_interrupts() will attempt -- TODO confirm
 * against the interrupt setup routines.
 */
static uint8_t	ql_enable_pm = 1;	/* non-zero presumably enables PM; confirm vs ql_power() */
static int	ql_flash_sbus_fpga = 0;
uint32_t	ql_os_release_level;	/* OS major release ("5.11" -> 11); set in _init() */
uint32_t	ql_disable_aif = 0;
uint32_t	ql_disable_msi = 0;
uint32_t	ql_disable_msix = 0;

/* Timer routine variables (see ql_timer()). */
static timeout_id_t	ql_timer_timeout_id = NULL;
static clock_t		ql_timer_ticks;

/* Soft state head pointer; initialized by ddi_soft_state_init() in _init(). */
void *ql_state = NULL;
200
/*
 * Head adapter link.  Anchor of the linked list of all adapter
 * (ql_adapter_state_t) instances -- both first/last pointers start NULL.
 */
ql_head_t ql_hba = {
	NULL,
	NULL
};

/* Global hba index */
uint32_t ql_gfru_hba_index = 1;
209
/*
 * Some IP defines and globals
 *
 * Tunables for IP-over-FC unsolicited buffer handling; consumed by the
 * IP side of the driver (see ql_fcp_ip_cmd()/ql_ub_alloc()).
 */
uint32_t	ql_ip_buffer_count = 128;	/* number of IP receive buffers */
uint32_t	ql_ip_low_water = 10;
uint8_t		ql_ip_fast_post_count = 5;
static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
217
/*
 * Device AL_PA to Device Head Queue index array.
 * Indexed by the 8-bit FC-AL arbitrated-loop physical address of a
 * device; yields the device head queue index.  256 entries, one per
 * possible AL_PA byte value; entries at byte values that are not valid
 * AL_PAs are presumably placeholders -- do not reorder or edit values.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
247
/*
 * Device loop_id to ALPA array.
 * Indexed by FC-AL loop ID (0-125); yields the corresponding
 * arbitrated-loop physical address.  126 entries -- the inverse
 * direction of ql_alpa_to_index above.  Do not reorder or edit values.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
264
/*
 * 2200 register offsets.
 * Field order follows reg_off_t (declared in a header not visible
 * here); offsets are byte offsets from the mapped register base.
 * Registers the ISP2200 lacks are filled with 0x00 or 0xff sentinels,
 * as noted inline.
 */
static reg_off_t reg_off_2200 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
	0x00, 0x00, /* intr info lo, hi */
	24, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x00,
	/* 2200 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
284
/*
 * 2300 register offsets.
 * Same reg_off_t layout as reg_off_2200 above; the ISP23xx exposes all
 * 32 mailboxes and a host-to-host semaphore register.
 */
static reg_off_t reg_off_2300 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
	0x18, 0x1A, /* intr info lo, hi */
	32, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x1c,
	/* 2300 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
303
/*
 * 2400/2500 register offsets.
 * Shared by both ISP24xx and ISP25xx (hence non-static; referenced
 * outside this file).  Missing registers carry 0xff sentinels.
 */
reg_off_t reg_off_2400_2500 = {
	0x00, 0x04,		/* flash_address, flash_data */
	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
	/* 2400 does not have semaphore, nvram */
	0x14, 0x18,
	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
	0x44, 0x46,		/* intr info lo, hi */
	32,			/* Number of mailboxes */
	/* Mailbox register offsets */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
	0xff, 0xff, 0xff, 0xff,
	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
	0xff,			/* host to host sema */
	0x2c, 0x30,		/* pri_req_in, pri_req_out */
	0x3c, 0x40,		/* atio_req_in, atio_req_out */
	0x54			/* io_base_addr */
};
326
/*
 * Mutexes protecting variables shared by all instances of the driver.
 * Initialized in _init() after a successful ddi_soft_state_init();
 * destroyed in _fini() (and on mod_install() failure in _init()).
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;
331
/*
 * DMA access attribute structure.
 * Little-endian, strictly-ordered access for device register and
 * DMA memory mappings (passed to ddi_regs_map_setup() et al.).
 */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
338
/*
 * I/O DMA attributes structures.
 * 64-bit addressing template; the QL_DMA_* limits come from headers
 * not visible here.  Note: _init() caps dma_attr_count_max to
 * 0x00ffffff when the OS release level is 6.
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
354
/*
 * 32-bit addressing variant of ql_64bit_io_dma_attr above; identical
 * except for the high DMA address limit.
 */
static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
369
/*
 * Load the default dma attributes.
 * Per-traffic-class DMA attribute copies.  _init() initializes each
 * from the 32/64-bit templates above and then overrides
 * dma_attr_sgllen per class (FCSM/FCIP/FCP cmd and rsp); the FCP data
 * attrs keep the template s/g list length.
 */
static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
385
/*
 * Static declarations of cb_ops entry point functions...
 * Only open, close and ioctl are implemented; all other entry points
 * are nodev/nochpoll.  D_MP marks the driver multithread-safe.
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};
407
/*
 * Static declarations of dev_ops entry point functions...
 * Non-const because _init() passes it to fc_fca_init(), which may
 * adjust entries before mod_install().
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};
423
/* ELS command code to text converter (table contents from ELS_CMD_TABLE macro). */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter (table contents from MBOX_CMD_TABLE macro). */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string, exported for external consumers. */
char qlc_driver_version[] = QL_VERSION;
430
/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 * modlinkage is what _init()/_fini()/_info() hand to
 * mod_install()/mod_remove()/mod_info().
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL					/* NULL-terminated list */
};
446
447/* ************************************************************************ */
448/*				Loadable Module Routines.		    */
449/* ************************************************************************ */
450
451/*
452 * _init
453 *	Initializes a loadable module. It is called before any other
454 *	routine in a loadable module.
455 *
456 * Returns:
457 *	0 = success
458 *
459 * Context:
460 *	Kernel context.
461 */
462int
463_init(void)
464{
465	uint16_t	w16;
466	int		rval = 0;
467
468	/* Get OS major release level. */
469	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
470		if (utsname.release[w16] == '.') {
471			w16++;
472			break;
473		}
474	}
475	if (w16 < sizeof (utsname.release)) {
476		(void) ql_bstr_to_dec(&utsname.release[w16],
477		    &ql_os_release_level, 0);
478	} else {
479		ql_os_release_level = 0;
480	}
481	if (ql_os_release_level < 6) {
482		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
483		    QL_NAME, ql_os_release_level);
484		rval = EINVAL;
485	}
486	if (ql_os_release_level == 6) {
487		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
488		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
489	}
490
491	if (rval == 0) {
492		rval = ddi_soft_state_init(&ql_state,
493		    sizeof (ql_adapter_state_t), 0);
494	}
495	if (rval == 0) {
496		/* allow the FC Transport to tweak the dev_ops */
497		fc_fca_init(&ql_devops);
498
499		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
500		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
501		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
502		rval = mod_install(&modlinkage);
503		if (rval != 0) {
504			mutex_destroy(&ql_global_hw_mutex);
505			mutex_destroy(&ql_global_mutex);
506			mutex_destroy(&ql_global_el_mutex);
507			ddi_soft_state_fini(&ql_state);
508		} else {
509			/*EMPTY*/
510			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
511			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
512			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
513			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
514			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
515			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
516			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
517			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
518			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
519			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
520			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
521			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
522			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
523			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
524			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
525			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
526			    QL_FCSM_CMD_SGLLEN;
527			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
528			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
529			    QL_FCSM_RSP_SGLLEN;
530			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
531			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
532			    QL_FCIP_CMD_SGLLEN;
533			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
534			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
535			    QL_FCIP_RSP_SGLLEN;
536			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
537			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
538			    QL_FCP_CMD_SGLLEN;
539			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
540			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
541			    QL_FCP_RSP_SGLLEN;
542		}
543	}
544
545	if (rval != 0) {
546		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
547		    QL_NAME);
548	}
549
550	return (rval);
551}
552
553/*
554 * _fini
555 *	Prepares a module for unloading. It is called when the system
556 *	wants to unload a module. If the module determines that it can
557 *	be unloaded, then _fini() returns the value returned by
558 *	mod_remove(). Upon successful return from _fini() no other
559 *	routine in the module will be called before _init() is called.
560 *
561 * Returns:
562 *	0 = success
563 *
564 * Context:
565 *	Kernel context.
566 */
567int
568_fini(void)
569{
570	int	rval;
571
572	rval = mod_remove(&modlinkage);
573	if (rval == 0) {
574		mutex_destroy(&ql_global_hw_mutex);
575		mutex_destroy(&ql_global_mutex);
576		mutex_destroy(&ql_global_el_mutex);
577		ddi_soft_state_fini(&ql_state);
578	}
579
580	return (rval);
581}
582
583/*
584 * _info
585 *	Returns information about loadable module.
586 *
587 * Input:
588 *	modinfo = pointer to module information structure.
589 *
590 * Returns:
591 *	Value returned by mod_info().
592 *
593 * Context:
594 *	Kernel context.
595 */
int
_info(struct modinfo *modinfop)
{
	/* Pure delegation to mod_info(9F) with this driver's linkage. */
	return (mod_info(&modlinkage, modinfop));
}
601
602/* ************************************************************************ */
603/*			dev_ops functions				    */
604/* ************************************************************************ */
605
606/*
607 * ql_getinfo
608 *	Returns the pointer associated with arg when cmd is
609 *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
610 *	instance number associated with arg when cmd is set
611 *	to DDI_INFO_DEV2INSTANCE.
612 *
613 * Input:
614 *	dip = Do not use.
615 *	cmd = command argument.
616 *	arg = command specific argument.
617 *	resultp = pointer to where request information is stored.
618 *
619 * Returns:
620 *	DDI_SUCCESS or DDI_FAILURE.
621 *
622 * Context:
623 *	Kernel context.
624 */
625/* ARGSUSED */
626static int
627ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
628{
629	ql_adapter_state_t	*ha;
630	int			minor;
631	int			rval = DDI_FAILURE;
632
633	minor = (int)(getminor((dev_t)arg));
634	ha = ddi_get_soft_state(ql_state, minor);
635	if (ha == NULL) {
636		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
637		    getminor((dev_t)arg));
638		*resultp = NULL;
639		return (rval);
640	}
641
642	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
643
644	switch (cmd) {
645	case DDI_INFO_DEVT2DEVINFO:
646		*resultp = ha->dip;
647		rval = DDI_SUCCESS;
648		break;
649	case DDI_INFO_DEVT2INSTANCE:
650		*resultp = (void *)(uintptr_t)(ha->instance);
651		rval = DDI_SUCCESS;
652		break;
653	default:
654		EL(ha, "failed, unsupported cmd=%d\n", cmd);
655		rval = DDI_FAILURE;
656		break;
657	}
658
659	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
660
661	return (rval);
662}
663
664/*
665 * ql_attach
666 *	Configure and attach an instance of the driver
667 *	for a port.
668 *
669 * Input:
670 *	dip = pointer to device information structure.
671 *	cmd = attach type.
672 *
673 * Returns:
674 *	DDI_SUCCESS or DDI_FAILURE.
675 *
676 * Context:
677 *	Kernel context.
678 */
679static int
680ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
681{
682	uint32_t		size;
683	int			rval;
684	int			instance;
685	uint_t			progress = 0;
686	char			*buf;
687	ushort_t		caps_ptr, cap;
688	fc_fca_tran_t		*tran;
689	ql_adapter_state_t	*ha = NULL;
690
691	static char *pmcomps[] = {
692		NULL,
693		PM_LEVEL_D3_STR,		/* Device OFF */
694		PM_LEVEL_D0_STR,		/* Device ON */
695	};
696
697	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
698	    ddi_get_instance(dip), cmd);
699
700	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
701
702	switch (cmd) {
703	case DDI_ATTACH:
704		/* first get the instance */
705		instance = ddi_get_instance(dip);
706
707		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
708		    QL_NAME, instance, QL_VERSION);
709
710		/* Correct OS version? */
711		if (ql_os_release_level != 11) {
712			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
713			    "11", QL_NAME, instance);
714			goto attach_failed;
715		}
716
717		/* Hardware is installed in a DMA-capable slot? */
718		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
719			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
720			    instance);
721			goto attach_failed;
722		}
723
724		/* No support for high-level interrupts */
725		if (ddi_intr_hilevel(dip, 0) != 0) {
726			cmn_err(CE_WARN, "%s(%d): High level interrupt"
727			    " not supported", QL_NAME, instance);
728			goto attach_failed;
729		}
730
731		/* Allocate our per-device-instance structure */
732		if (ddi_soft_state_zalloc(ql_state,
733		    instance) != DDI_SUCCESS) {
734			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
735			    QL_NAME, instance);
736			goto attach_failed;
737		}
738		progress |= QL_SOFT_STATE_ALLOCED;
739
740		ha = ddi_get_soft_state(ql_state, instance);
741		if (ha == NULL) {
742			cmn_err(CE_WARN, "%s(%d): can't get soft state",
743			    QL_NAME, instance);
744			goto attach_failed;
745		}
746		ha->dip = dip;
747		ha->instance = instance;
748		ha->hba.base_address = ha;
749		ha->pha = ha;
750
751		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
752			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
753			    QL_NAME, instance);
754			goto attach_failed;
755		}
756
757		/* Get extended logging and dump flags. */
758		ql_common_properties(ha);
759
760		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
761		    "sbus") == 0) {
762			EL(ha, "%s SBUS card detected", QL_NAME);
763			ha->cfg_flags |= CFG_SBUS_CARD;
764		}
765
766		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
767		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
768
769		ha->outstanding_cmds = kmem_zalloc(
770		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
771		    KM_SLEEP);
772
773		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
774		    QL_UB_LIMIT, KM_SLEEP);
775
776		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
777		    KM_SLEEP);
778
779		(void) ddi_pathname(dip, buf);
780		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
781		if (ha->devpath == NULL) {
782			EL(ha, "devpath mem alloc failed\n");
783		} else {
784			(void) strcpy(ha->devpath, buf);
785			EL(ha, "devpath is: %s\n", ha->devpath);
786		}
787
788		if (CFG_IST(ha, CFG_SBUS_CARD)) {
789			/*
790			 * For cards where PCI is mapped to sbus e.g. Ivory.
791			 *
792			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
793			 *	: 0x100 - 0x3FF PCI IO space for 2200
794			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
795			 *	: 0x100 - 0x3FF PCI IO Space for fpga
796			 */
797			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
798			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
799			    != DDI_SUCCESS) {
800				cmn_err(CE_WARN, "%s(%d): Unable to map device"
801				    " registers", QL_NAME, instance);
802				goto attach_failed;
803			}
804			if (ddi_regs_map_setup(dip, 1,
805			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
806			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
807			    != DDI_SUCCESS) {
808				/* We should not fail attach here */
809				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
810				    QL_NAME, instance);
811				ha->sbus_fpga_iobase = NULL;
812			}
813			progress |= QL_REGS_MAPPED;
814		} else {
815			/*
816			 * Setup the ISP2200 registers address mapping to be
817			 * accessed by this particular driver.
818			 * 0x0   Configuration Space
819			 * 0x1   I/O Space
820			 * 0x2   32-bit Memory Space address
821			 * 0x3   64-bit Memory Space address
822			 */
823			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
824			    0, 0x100, &ql_dev_acc_attr,
825			    &ha->dev_handle) != DDI_SUCCESS) {
826				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
827				    "failed", QL_NAME, instance);
828				goto attach_failed;
829			}
830			progress |= QL_REGS_MAPPED;
831
832			/*
833			 * We need I/O space mappings for 23xx HBAs for
834			 * loading flash (FCode). The chip has a bug due to
835			 * which loading flash fails through mem space
836			 * mappings in PCI-X mode.
837			 */
838			if (ddi_regs_map_setup(dip, 1,
839			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
840			    &ql_dev_acc_attr,
841			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
842				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
843				    " failed", QL_NAME, instance);
844				goto attach_failed;
845			}
846			progress |= QL_IOMAP_IOBASE_MAPPED;
847		}
848
849		/*
850		 * We should map config space before adding interrupt
851		 * So that the chip type (2200 or 2300) can be determined
852		 * before the interrupt routine gets a chance to execute.
853		 */
854		if (CFG_IST(ha, CFG_SBUS_CARD)) {
855			if (ddi_regs_map_setup(dip, 0,
856			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
857			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
858			    DDI_SUCCESS) {
859				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
860				    "config registers", QL_NAME, instance);
861				goto attach_failed;
862			}
863		} else {
864			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
865			    DDI_SUCCESS) {
866				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
867				    "config space", QL_NAME, instance);
868				goto attach_failed;
869			}
870		}
871		progress |= QL_CONFIG_SPACE_SETUP;
872
873		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
874		    PCI_CONF_SUBSYSID);
875		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
876		    PCI_CONF_SUBVENID);
877		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
878		    PCI_CONF_VENID);
879		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
880		    PCI_CONF_DEVID);
881		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
882		    PCI_CONF_REVID);
883
884		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
885		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
886		    ha->subven_id, ha->subsys_id);
887
888		switch (ha->device_id) {
889		case 0x2300:
890		case 0x2312:
891#if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
892		/*
893		 * per marketing, fibre-lite HBA's are not supported
894		 * on sparc platforms
895		 */
896		case 0x6312:
897		case 0x6322:
898#endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
899			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
900				ha->flags |= FUNCTION_1;
901			}
902			if (ha->device_id == 0x6322) {
903				ha->cfg_flags |= CFG_CTRL_6322;
904				ha->fw_class = 0x6322;
905				ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
906			} else {
907				ha->cfg_flags |= CFG_CTRL_2300;
908				ha->fw_class = 0x2300;
909				ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
910			}
911			ha->reg_off = &reg_off_2300;
912			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
913				goto attach_failed;
914			}
915			ha->fcp_cmd = ql_command_iocb;
916			ha->ip_cmd = ql_ip_iocb;
917			ha->ms_cmd = ql_ms_iocb;
918			if (CFG_IST(ha, CFG_SBUS_CARD)) {
919				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
920				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
921			} else {
922				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
923				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
924			}
925			break;
926
927		case 0x2200:
928			ha->cfg_flags |= CFG_CTRL_2200;
929			ha->reg_off = &reg_off_2200;
930			ha->fw_class = 0x2200;
931			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
932				goto attach_failed;
933			}
934			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
935			ha->fcp_cmd = ql_command_iocb;
936			ha->ip_cmd = ql_ip_iocb;
937			ha->ms_cmd = ql_ms_iocb;
938			if (CFG_IST(ha, CFG_SBUS_CARD)) {
939				ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
940				ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
941			} else {
942				ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
943				ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
944			}
945			break;
946
947		case 0x2422:
948		case 0x2432:
949		case 0x5422:
950		case 0x5432:
951		case 0x8432:
952#ifdef __sparc
953			/*
954			 * Per marketing, the QLA/QLE-2440's (which
955			 * also use the 2422 & 2432) are only for the
956			 * x86 platform (SMB market).
957			 */
958			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
959			    ha->subsys_id == 0x13e) {
960				cmn_err(CE_WARN,
961				    "%s(%d): Unsupported HBA ssid: %x",
962				    QL_NAME, instance, ha->subsys_id);
963				goto attach_failed;
964			}
965#endif	/* __sparc */
966			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
967				ha->flags |= FUNCTION_1;
968			}
969			ha->cfg_flags |= CFG_CTRL_2422;
970			if (ha->device_id == 0x8432) {
971				ha->cfg_flags |= CFG_CTRL_MENLO;
972			} else {
973				ha->flags |= VP_ENABLED;
974			}
975
976			ha->reg_off = &reg_off_2400_2500;
977			ha->fw_class = 0x2400;
978			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
979				goto attach_failed;
980			}
981			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
982			ha->fcp_cmd = ql_command_24xx_iocb;
983			ha->ip_cmd = ql_ip_24xx_iocb;
984			ha->ms_cmd = ql_ms_24xx_iocb;
985			ha->els_cmd = ql_els_24xx_iocb;
986			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
987			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
988			break;
989
990		case 0x2522:
991		case 0x2532:
992			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
993				ha->flags |= FUNCTION_1;
994			}
995			ha->cfg_flags |= CFG_CTRL_25XX;
996			ha->flags |= VP_ENABLED;
997			ha->fw_class = 0x2500;
998			ha->reg_off = &reg_off_2400_2500;
999			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1000				goto attach_failed;
1001			}
1002			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1003			ha->fcp_cmd = ql_command_24xx_iocb;
1004			ha->ip_cmd = ql_ip_24xx_iocb;
1005			ha->ms_cmd = ql_ms_24xx_iocb;
1006			ha->els_cmd = ql_els_24xx_iocb;
1007			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1008			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1009			break;
1010
1011		case 0x8001:
1012			if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1013				ha->flags |= FUNCTION_1;
1014			}
1015			ha->cfg_flags |= CFG_CTRL_81XX;
1016			ha->flags |= VP_ENABLED;
1017			ha->fw_class = 0x8100;
1018			ha->reg_off = &reg_off_2400_2500;
1019			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1020				goto attach_failed;
1021			}
1022			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1023			ha->fcp_cmd = ql_command_24xx_iocb;
1024			ha->ip_cmd = ql_ip_24xx_iocb;
1025			ha->ms_cmd = ql_ms_24xx_iocb;
1026			ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1027			ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1028			break;
1029
1030		default:
1031			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1032			    QL_NAME, instance, ha->device_id);
1033			goto attach_failed;
1034		}
1035
1036		/* Setup hba buffer. */
1037
1038		size = CFG_IST(ha, CFG_CTRL_242581) ?
1039		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1040		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1041		    RCVBUF_QUEUE_SIZE);
1042
1043		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1044		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1045			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1046			    "alloc failed", QL_NAME, instance);
1047			goto attach_failed;
1048		}
1049		progress |= QL_HBA_BUFFER_SETUP;
1050
1051		/* Setup buffer pointers. */
1052		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1053		    REQUEST_Q_BUFFER_OFFSET;
1054		ha->request_ring_bp = (struct cmd_entry *)
1055		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1056
1057		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1058		    RESPONSE_Q_BUFFER_OFFSET;
1059		ha->response_ring_bp = (struct sts_entry *)
1060		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1061
1062		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1063		    RCVBUF_Q_BUFFER_OFFSET;
1064		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1065		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1066
1067		/* Allocate resource for QLogic IOCTL */
1068		(void) ql_alloc_xioctl_resource(ha);
1069
1070		/* Setup interrupts */
1071		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1072			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1073			    "rval=%xh", QL_NAME, instance, rval);
1074			goto attach_failed;
1075		}
1076
1077		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1078
1079		/*
1080		 * Allocate an N Port information structure
1081		 * for use when in P2P topology.
1082		 */
1083		ha->n_port = (ql_n_port_info_t *)
1084		    kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1085		if (ha->n_port == NULL) {
1086			cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1087			    QL_NAME, instance);
1088			goto attach_failed;
1089		}
1090
1091		progress |= QL_N_PORT_INFO_CREATED;
1092
1093		/*
1094		 * Determine support for Power Management
1095		 */
1096		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1097
1098		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1099			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1100			if (cap == PCI_CAP_ID_PM) {
1101				ha->pm_capable = 1;
1102				break;
1103			}
1104			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1105			    PCI_CAP_NEXT_PTR);
1106		}
1107
1108		if (ha->pm_capable) {
1109			/*
1110			 * Enable PM for 2200 based HBAs only.
1111			 */
1112			if (ha->device_id != 0x2200) {
1113				ha->pm_capable = 0;
1114			}
1115		}
1116
1117		if (ha->pm_capable) {
1118			ha->pm_capable = ql_enable_pm;
1119		}
1120
1121		if (ha->pm_capable) {
1122			/*
1123			 * Initialize power management bookkeeping;
1124			 * components are created idle.
1125			 */
1126			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1127			pmcomps[0] = buf;
1128
1129			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1130			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1131			    dip, "pm-components", pmcomps,
1132			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1133			    DDI_PROP_SUCCESS) {
1134				cmn_err(CE_WARN, "%s(%d): failed to create"
1135				    " pm-components property", QL_NAME,
1136				    instance);
1137
1138				/* Initialize adapter. */
1139				ha->power_level = PM_LEVEL_D0;
1140				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1141					cmn_err(CE_WARN, "%s(%d): failed to"
1142					    " initialize adapter", QL_NAME,
1143					    instance);
1144					goto attach_failed;
1145				}
1146			} else {
1147				ha->power_level = PM_LEVEL_D3;
1148				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1149				    PM_LEVEL_D0) != DDI_SUCCESS) {
1150					cmn_err(CE_WARN, "%s(%d): failed to"
1151					    " raise power or initialize"
1152					    " adapter", QL_NAME, instance);
1153				}
1154				ASSERT(ha->power_level == PM_LEVEL_D0);
1155			}
1156		} else {
1157			/* Initialize adapter. */
1158			ha->power_level = PM_LEVEL_D0;
1159			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1160				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1161				    " adapter", QL_NAME, instance);
1162			}
1163		}
1164
1165		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1166		    ha->fw_subminor_version == 0) {
1167			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1168			    QL_NAME, ha->instance);
1169		} else {
1170			cmn_err(CE_NOTE, "!%s(%d): Firmware version %d.%d.%d",
1171			    QL_NAME, ha->instance, ha->fw_major_version,
1172			    ha->fw_minor_version, ha->fw_subminor_version);
1173		}
1174
1175		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1176		    "controller", KSTAT_TYPE_RAW,
1177		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1178		if (ha->k_stats == NULL) {
1179			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1180			    QL_NAME, instance);
1181			goto attach_failed;
1182		}
1183		progress |= QL_KSTAT_CREATED;
1184
1185		ha->adapter_stats->version = 1;
1186		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1187		ha->k_stats->ks_private = ha;
1188		ha->k_stats->ks_update = ql_kstat_update;
1189		ha->k_stats->ks_ndata = 1;
1190		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1191		kstat_install(ha->k_stats);
1192
1193		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1194		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1195			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1196			    QL_NAME, instance);
1197			goto attach_failed;
1198		}
1199		progress |= QL_MINOR_NODE_CREATED;
1200
1201		/* Allocate a transport structure for this instance */
1202		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1203		ASSERT(tran != NULL);
1204
1205		progress |= QL_FCA_TRAN_ALLOCED;
1206
1207		/* fill in the structure */
1208		tran->fca_numports = 1;
1209		tran->fca_version = FCTL_FCA_MODREV_5;
1210		if (CFG_IST(ha, CFG_CTRL_2422)) {
1211			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1212		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
1213			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1214		}
1215		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1216		    tran->fca_perm_pwwn.raw_wwn, 8);
1217
1218		EL(ha, "FCA version %d\n", tran->fca_version);
1219
1220		/* Specify the amount of space needed in each packet */
1221		tran->fca_pkt_size = sizeof (ql_srb_t);
1222
1223		/* command limits are usually dictated by hardware */
1224		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1225
1226		/* dmaattr are static, set elsewhere. */
1227		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1228			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1229			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1230			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1231			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1232			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1233			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1234			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1235			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1236		} else {
1237			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1238			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1239			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1240			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1241			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1242			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1243			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1244			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1245		}
1246
1247		tran->fca_acc_attr = &ql_dev_acc_attr;
1248		tran->fca_iblock = &(ha->iblock_cookie);
1249
1250		/* the remaining values are simply function vectors */
1251		tran->fca_bind_port = ql_bind_port;
1252		tran->fca_unbind_port = ql_unbind_port;
1253		tran->fca_init_pkt = ql_init_pkt;
1254		tran->fca_un_init_pkt = ql_un_init_pkt;
1255		tran->fca_els_send = ql_els_send;
1256		tran->fca_get_cap = ql_get_cap;
1257		tran->fca_set_cap = ql_set_cap;
1258		tran->fca_getmap = ql_getmap;
1259		tran->fca_transport = ql_transport;
1260		tran->fca_ub_alloc = ql_ub_alloc;
1261		tran->fca_ub_free = ql_ub_free;
1262		tran->fca_ub_release = ql_ub_release;
1263		tran->fca_abort = ql_abort;
1264		tran->fca_reset = ql_reset;
1265		tran->fca_port_manage = ql_port_manage;
1266		tran->fca_get_device = ql_get_device;
1267
1268		/* give it to the FC transport */
1269		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1270			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1271			    instance);
1272			goto attach_failed;
1273		}
1274		progress |= QL_FCA_ATTACH_DONE;
1275
1276		/* Stash the structure so it can be freed at detach */
1277		ha->tran = tran;
1278
1279		/* Acquire global state lock. */
1280		GLOBAL_STATE_LOCK();
1281
1282		/* Add adapter structure to link list. */
1283		ql_add_link_b(&ql_hba, &ha->hba);
1284
1285		/* Start one second driver timer. */
1286		if (ql_timer_timeout_id == NULL) {
1287			ql_timer_ticks = drv_usectohz(1000000);
1288			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1289			    ql_timer_ticks);
1290		}
1291
1292		/* Release global state lock. */
1293		GLOBAL_STATE_UNLOCK();
1294
1295		/* Determine and populate HBA fru info */
1296		ql_setup_fruinfo(ha);
1297
1298		/* Setup task_daemon thread. */
1299		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1300		    0, &p0, TS_RUN, minclsyspri);
1301
1302		progress |= QL_TASK_DAEMON_STARTED;
1303
1304		ddi_report_dev(dip);
1305
1306		/* Disable link reset in panic path */
1307		ha->lip_on_panic = 1;
1308
1309		rval = DDI_SUCCESS;
1310		break;
1311
1312attach_failed:
1313		if (progress & QL_FCA_ATTACH_DONE) {
1314			(void) fc_fca_detach(dip);
1315			progress &= ~QL_FCA_ATTACH_DONE;
1316		}
1317
1318		if (progress & QL_FCA_TRAN_ALLOCED) {
1319			kmem_free(tran, sizeof (fc_fca_tran_t));
1320			progress &= ~QL_FCA_TRAN_ALLOCED;
1321		}
1322
1323		if (progress & QL_MINOR_NODE_CREATED) {
1324			ddi_remove_minor_node(dip, "devctl");
1325			progress &= ~QL_MINOR_NODE_CREATED;
1326		}
1327
1328		if (progress & QL_KSTAT_CREATED) {
1329			kstat_delete(ha->k_stats);
1330			progress &= ~QL_KSTAT_CREATED;
1331		}
1332
1333		if (progress & QL_N_PORT_INFO_CREATED) {
1334			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1335			progress &= ~QL_N_PORT_INFO_CREATED;
1336		}
1337
1338		if (progress & QL_TASK_DAEMON_STARTED) {
1339			TASK_DAEMON_LOCK(ha);
1340
1341			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1342
1343			cv_signal(&ha->cv_task_daemon);
1344
1345			/* Release task daemon lock. */
1346			TASK_DAEMON_UNLOCK(ha);
1347
			/* Wait for task daemon to stop running. */
1349			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1350				ql_delay(ha, 10000);
1351			}
1352			progress &= ~QL_TASK_DAEMON_STARTED;
1353		}
1354
1355		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1356			ddi_regs_map_free(&ha->iomap_dev_handle);
1357			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1358		}
1359
1360		if (progress & QL_CONFIG_SPACE_SETUP) {
1361			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1362				ddi_regs_map_free(&ha->sbus_config_handle);
1363			} else {
1364				pci_config_teardown(&ha->pci_handle);
1365			}
1366			progress &= ~QL_CONFIG_SPACE_SETUP;
1367		}
1368
1369		if (progress & QL_INTR_ADDED) {
1370			ql_disable_intr(ha);
1371			ql_release_intr(ha);
1372			progress &= ~QL_INTR_ADDED;
1373		}
1374
1375		if (progress & QL_MUTEX_CV_INITED) {
1376			ql_destroy_mutex(ha);
1377			progress &= ~QL_MUTEX_CV_INITED;
1378		}
1379
1380		if (progress & QL_HBA_BUFFER_SETUP) {
1381			ql_free_phys(ha, &ha->hba_buf);
1382			progress &= ~QL_HBA_BUFFER_SETUP;
1383		}
1384
1385		if (progress & QL_REGS_MAPPED) {
1386			ddi_regs_map_free(&ha->dev_handle);
1387			if (ha->sbus_fpga_iobase != NULL) {
1388				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1389			}
1390			progress &= ~QL_REGS_MAPPED;
1391		}
1392
1393		if (progress & QL_SOFT_STATE_ALLOCED) {
1394
1395			ql_fcache_rel(ha->fcache);
1396
1397			ASSERT(ha->dev && ha->outstanding_cmds &&
1398			    ha->ub_array && ha->adapter_stats);
1399
1400			kmem_free(ha->adapter_stats,
1401			    sizeof (*ha->adapter_stats));
1402
1403			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1404			    QL_UB_LIMIT);
1405
1406			kmem_free(ha->outstanding_cmds,
1407			    sizeof (*ha->outstanding_cmds) *
1408			    MAX_OUTSTANDING_COMMANDS);
1409
1410			if (ha->devpath != NULL) {
1411				kmem_free(ha->devpath,
1412				    strlen(ha->devpath) + 1);
1413			}
1414
1415			kmem_free(ha->dev, sizeof (*ha->dev) *
1416			    DEVICE_HEAD_LIST_SIZE);
1417
1418			if (ha->xioctl != NULL) {
1419				ql_free_xioctl_resource(ha);
1420			}
1421
1422			if (ha->fw_module != NULL) {
1423				(void) ddi_modclose(ha->fw_module);
1424			}
1425
1426			ddi_soft_state_free(ql_state, instance);
1427			progress &= ~QL_SOFT_STATE_ALLOCED;
1428		}
1429		ASSERT(progress == 0);
1430
1431		ddi_prop_remove_all(dip);
1432		rval = DDI_FAILURE;
1433		break;
1434
1435	case DDI_RESUME:
1436		rval = DDI_FAILURE;
1437
1438		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1439		if (ha == NULL) {
1440			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1441			    QL_NAME, instance);
1442			break;
1443		}
1444
1445		ha->power_level = PM_LEVEL_D3;
1446		if (ha->pm_capable) {
1447			/*
1448			 * Get ql_power to do power on initialization
1449			 */
1450			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1451			    PM_LEVEL_D0) != DDI_SUCCESS) {
1452				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1453				    " power", QL_NAME, instance);
1454			}
1455		}
1456
1457		/*
1458		 * There is a bug in DR that prevents PM framework
1459		 * from calling ql_power.
1460		 */
1461		if (ha->power_level == PM_LEVEL_D3) {
1462			ha->power_level = PM_LEVEL_D0;
1463
1464			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1465				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1466				    " adapter", QL_NAME, instance);
1467			}
1468
1469			/* Wake up task_daemon. */
1470			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1471			    0);
1472		}
1473
1474		/* Acquire global state lock. */
1475		GLOBAL_STATE_LOCK();
1476
1477		/* Restart driver timer. */
1478		if (ql_timer_timeout_id == NULL) {
1479			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1480			    ql_timer_ticks);
1481		}
1482
1483		/* Release global state lock. */
1484		GLOBAL_STATE_UNLOCK();
1485
1486		/* Wake up command start routine. */
1487		ADAPTER_STATE_LOCK(ha);
1488		ha->flags &= ~ADAPTER_SUSPENDED;
1489		ADAPTER_STATE_UNLOCK(ha);
1490
1491		/*
1492		 * Transport doesn't make FC discovery in polled
1493		 * mode; So we need the daemon thread's services
1494		 * right here.
1495		 */
1496		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1497
1498		rval = DDI_SUCCESS;
1499
1500		/* Restart IP if it was running. */
1501		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1502			(void) ql_initialize_ip(ha);
1503			ql_isp_rcvbuf(ha);
1504		}
1505		break;
1506
1507	default:
1508		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1509		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1510		rval = DDI_FAILURE;
1511		break;
1512	}
1513
1514	kmem_free(buf, MAXPATHLEN);
1515
1516	if (rval != DDI_SUCCESS) {
1517		/*EMPTY*/
1518		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1519		    ddi_get_instance(dip), rval);
1520	} else {
1521		/*EMPTY*/
1522		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1523	}
1524
1525	return (rval);
1526}
1527
1528/*
1529 * ql_detach
1530 *	Used to remove all the states associated with a given
1531 *	instances of a device node prior to the removal of that
1532 *	instance from the system.
1533 *
1534 * Input:
1535 *	dip = pointer to device information structure.
1536 *	cmd = type of detach.
1537 *
1538 * Returns:
1539 *	DDI_SUCCESS or DDI_FAILURE.
1540 *
1541 * Context:
1542 *	Kernel context.
1543 */
1544static int
1545ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1546{
1547	ql_adapter_state_t	*ha, *vha;
1548	ql_tgt_t		*tq;
1549	int			try;
1550	uint16_t		index;
1551	ql_link_t		*link;
1552	char			*buf;
1553	timeout_id_t		timer_id = NULL;
1554	int			rval = DDI_SUCCESS;
1555
1556	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1557	if (ha == NULL) {
1558		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1559		    ddi_get_instance(dip));
1560		return (DDI_FAILURE);
1561	}
1562
1563	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1564
1565	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1566
1567	switch (cmd) {
1568	case DDI_DETACH:
1569		ADAPTER_STATE_LOCK(ha);
1570		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1571		ADAPTER_STATE_UNLOCK(ha);
1572
1573		/* Acquire task daemon lock. */
1574		TASK_DAEMON_LOCK(ha);
1575
1576		ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1577		cv_signal(&ha->cv_task_daemon);
1578
1579		/* Release task daemon lock. */
1580		TASK_DAEMON_UNLOCK(ha);
1581
1582		/*
1583		 * Wait for task daemon to stop running.
1584		 * Internal command timeout is approximately
1585		 * 30 seconds, so it would help in some corner
1586		 * cases to wait that long
1587		 */
1588		try = 0;
1589		while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) &&
1590		    try < 3000) {
1591			ql_delay(ha, 10000);
1592			try++;
1593		}
1594
1595		TASK_DAEMON_LOCK(ha);
1596		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1597			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1598			TASK_DAEMON_UNLOCK(ha);
1599			EL(ha, "failed, could not stop task daemon\n");
1600			return (DDI_FAILURE);
1601		}
1602		TASK_DAEMON_UNLOCK(ha);
1603
1604		/* Acquire global state lock. */
1605		GLOBAL_STATE_LOCK();
1606
1607		/* Disable driver timer if no adapters. */
1608		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1609		    ql_hba.last == &ha->hba) {
1610			timer_id = ql_timer_timeout_id;
1611			ql_timer_timeout_id = NULL;
1612		}
1613		ql_remove_link(&ql_hba, &ha->hba);
1614
1615		GLOBAL_STATE_UNLOCK();
1616
1617		if (timer_id) {
1618			(void) untimeout(timer_id);
1619		}
1620
1621		if (ha->pm_capable) {
1622			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1623			    PM_LEVEL_D3) != DDI_SUCCESS) {
1624				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1625				    " power", QL_NAME, ha->instance);
1626			}
1627		}
1628
1629		/*
1630		 * If pm_lower_power shutdown the adapter, there
1631		 * isn't much else to do
1632		 */
1633		if (ha->power_level != PM_LEVEL_D3) {
1634			ql_halt(ha, PM_LEVEL_D3);
1635		}
1636
1637		/* Remove virtual ports. */
1638		while ((vha = ha->vp_next) != NULL) {
1639			ql_vport_destroy(vha);
1640		}
1641
1642		/* Free target queues. */
1643		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1644			link = ha->dev[index].first;
1645			while (link != NULL) {
1646				tq = link->base_address;
1647				link = link->next;
1648				ql_dev_free(ha, tq);
1649			}
1650		}
1651
1652		/*
1653		 * Free unsolicited buffers.
1654		 * If we are here then there are no ULPs still
1655		 * alive that wish to talk to ql so free up
1656		 * any SRB_IP_UB_UNUSED buffers that are
1657		 * lingering around
1658		 */
1659		QL_UB_LOCK(ha);
1660		for (index = 0; index < QL_UB_LIMIT; index++) {
1661			fc_unsol_buf_t *ubp = ha->ub_array[index];
1662
1663			if (ubp != NULL) {
1664				ql_srb_t *sp = ubp->ub_fca_private;
1665
1666				sp->flags |= SRB_UB_FREE_REQUESTED;
1667
1668				while (!(sp->flags & SRB_UB_IN_FCA) ||
1669				    (sp->flags & (SRB_UB_CALLBACK |
1670				    SRB_UB_ACQUIRED))) {
1671					QL_UB_UNLOCK(ha);
1672					delay(drv_usectohz(100000));
1673					QL_UB_LOCK(ha);
1674				}
1675				ha->ub_array[index] = NULL;
1676
1677				QL_UB_UNLOCK(ha);
1678				ql_free_unsolicited_buffer(ha, ubp);
1679				QL_UB_LOCK(ha);
1680			}
1681		}
1682		QL_UB_UNLOCK(ha);
1683
1684		/* Free any saved RISC code. */
1685		if (ha->risc_code != NULL) {
1686			kmem_free(ha->risc_code, ha->risc_code_size);
1687			ha->risc_code = NULL;
1688			ha->risc_code_size = 0;
1689		}
1690
1691		if (ha->fw_module != NULL) {
1692			(void) ddi_modclose(ha->fw_module);
1693			ha->fw_module = NULL;
1694		}
1695
1696		/* Free resources. */
1697		ddi_prop_remove_all(dip);
1698		(void) fc_fca_detach(dip);
1699		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1700		ddi_remove_minor_node(dip, "devctl");
1701		if (ha->k_stats != NULL) {
1702			kstat_delete(ha->k_stats);
1703		}
1704
1705		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1706			ddi_regs_map_free(&ha->sbus_config_handle);
1707		} else {
1708			ddi_regs_map_free(&ha->iomap_dev_handle);
1709			pci_config_teardown(&ha->pci_handle);
1710		}
1711
1712		ql_disable_intr(ha);
1713		ql_release_intr(ha);
1714
1715		ql_free_xioctl_resource(ha);
1716
1717		ql_destroy_mutex(ha);
1718
1719		ql_free_phys(ha, &ha->hba_buf);
1720		ql_free_phys(ha, &ha->fwexttracebuf);
1721		ql_free_phys(ha, &ha->fwfcetracebuf);
1722
1723		ddi_regs_map_free(&ha->dev_handle);
1724		if (ha->sbus_fpga_iobase != NULL) {
1725			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1726		}
1727
1728		ql_fcache_rel(ha->fcache);
1729		if (ha->vcache != NULL) {
1730			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1731		}
1732
1733		if (ha->pi_attrs != NULL) {
1734			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1735		}
1736
1737		ASSERT(ha->dev && ha->outstanding_cmds && ha->ub_array &&
1738		    ha->adapter_stats);
1739
1740		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1741
1742		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1743
1744		kmem_free(ha->outstanding_cmds,
1745		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1746
1747		if (ha->n_port != NULL) {
1748			kmem_free(&ha->n_port, sizeof (ql_n_port_info_t));
1749		}
1750
1751		if (ha->devpath != NULL) {
1752			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1753		}
1754
1755		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1756
1757		EL(ha, "detached\n");
1758
1759		ddi_soft_state_free(ql_state, (int)ha->instance);
1760
1761		break;
1762
1763	case DDI_SUSPEND:
1764		ADAPTER_STATE_LOCK(ha);
1765
1766		try = 0;
1767		ha->flags |= ADAPTER_SUSPENDED;
1768		while (ha->flags & ADAPTER_TIMER_BUSY && try++ < 10) {
1769			ADAPTER_STATE_UNLOCK(ha);
1770			delay(drv_usectohz(1000000));
1771			ADAPTER_STATE_LOCK(ha);
1772		}
1773		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1774			ha->flags &= ~ADAPTER_SUSPENDED;
1775			ADAPTER_STATE_UNLOCK(ha);
1776			rval = DDI_FAILURE;
1777			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1778			    " busy %xh flags %xh", QL_NAME, ha->instance,
1779			    ha->busy, ha->flags);
1780			break;
1781		}
1782
1783		ADAPTER_STATE_UNLOCK(ha);
1784
1785		if (ha->flags & IP_INITIALIZED) {
1786			(void) ql_shutdown_ip(ha);
1787		}
1788
1789		try = ql_suspend_adapter(ha);
1790		if (try != QL_SUCCESS) {
1791			ADAPTER_STATE_LOCK(ha);
1792			ha->flags &= ~ADAPTER_SUSPENDED;
1793			ADAPTER_STATE_UNLOCK(ha);
1794			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
1795			    QL_NAME, ha->instance, try);
1796
1797			/* Restart IP if it was running. */
1798			if (ha->flags & IP_ENABLED &&
1799			    !(ha->flags & IP_INITIALIZED)) {
1800				(void) ql_initialize_ip(ha);
1801				ql_isp_rcvbuf(ha);
1802			}
1803			rval = DDI_FAILURE;
1804			break;
1805		}
1806
1807		/* Acquire global state lock. */
1808		GLOBAL_STATE_LOCK();
1809
1810		/* Disable driver timer if last adapter. */
1811		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1812		    ql_hba.last == &ha->hba) {
1813			timer_id = ql_timer_timeout_id;
1814			ql_timer_timeout_id = NULL;
1815		}
1816		GLOBAL_STATE_UNLOCK();
1817
1818		if (timer_id) {
1819			(void) untimeout(timer_id);
1820		}
1821
1822		break;
1823
1824	default:
1825		rval = DDI_FAILURE;
1826		break;
1827	}
1828
1829	kmem_free(buf, MAXPATHLEN);
1830
1831	if (rval != DDI_SUCCESS) {
1832		if (ha != NULL) {
1833			EL(ha, "failed, rval = %xh\n", rval);
1834		} else {
1835			/*EMPTY*/
1836			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1837			    ddi_get_instance(dip), rval);
1838		}
1839	} else {
1840		/*EMPTY*/
1841		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1842	}
1843
1844	return (rval);
1845}
1846
1847/*
1848 * ql_power
1849 *	Power a device attached to the system.
1850 *
1851 * Input:
1852 *	dip = pointer to device information structure.
1853 *	component = device.
1854 *	level = power level.
1855 *
1856 * Returns:
1857 *	DDI_SUCCESS or DDI_FAILURE.
1858 *
1859 * Context:
1860 *	Kernel context.
1861 */
1862/* ARGSUSED */
1863static int
1864ql_power(dev_info_t *dip, int component, int level)
1865{
1866	int			rval = DDI_FAILURE;
1867	off_t			csr;
1868	uint8_t			saved_pm_val;
1869	ql_adapter_state_t	*ha;
1870	char			*buf;
1871	char			*path;
1872
1873	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1874	if (ha == NULL || ha->pm_capable == 0) {
1875		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1876		    ddi_get_instance(dip));
1877		return (rval);
1878	}
1879
1880	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1881
1882	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1883	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1884
1885	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1886	    level != PM_LEVEL_D3)) {
1887		EL(ha, "invalid, component=%xh or level=%xh\n",
1888		    component, level);
1889		return (rval);
1890	}
1891
1892	GLOBAL_HW_LOCK();
1893	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1894	GLOBAL_HW_UNLOCK();
1895
1896	ASSERT(csr == QL_PM_CS_REG);
1897
1898	(void) snprintf(buf, sizeof (buf),
1899	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1900	    ddi_pathname(dip, path));
1901
1902	switch (level) {
1903	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1904
1905		QL_PM_LOCK(ha);
1906		if (ha->power_level == PM_LEVEL_D0) {
1907			QL_PM_UNLOCK(ha);
1908			rval = DDI_SUCCESS;
1909			break;
1910		}
1911
1912		/*
1913		 * Enable interrupts now
1914		 */
1915		saved_pm_val = ha->power_level;
1916		ha->power_level = PM_LEVEL_D0;
1917		QL_PM_UNLOCK(ha);
1918
1919		GLOBAL_HW_LOCK();
1920
1921		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1922
1923		/*
1924		 * Delay after reset, for chip to recover.
1925		 * Otherwise causes system PANIC
1926		 */
1927		drv_usecwait(200000);
1928
1929		GLOBAL_HW_UNLOCK();
1930
1931		if (ha->config_saved) {
1932			ha->config_saved = 0;
1933			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1934				QL_PM_LOCK(ha);
1935				ha->power_level = saved_pm_val;
1936				QL_PM_UNLOCK(ha);
1937				cmn_err(CE_WARN, "%s failed to restore "
1938				    "config regs", buf);
1939				break;
1940			}
1941		}
1942
1943		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1944			cmn_err(CE_WARN, "%s adapter initialization failed",
1945			    buf);
1946		}
1947
1948		/* Wake up task_daemon. */
1949		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1950		    TASK_DAEMON_SLEEPING_FLG, 0);
1951
1952		/* Restart IP if it was running. */
1953		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1954			(void) ql_initialize_ip(ha);
1955			ql_isp_rcvbuf(ha);
1956		}
1957
1958		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1959		    ha->instance, QL_NAME);
1960
1961		rval = DDI_SUCCESS;
1962		break;
1963
1964	case PM_LEVEL_D3:	/* power down to D3 state - off */
1965
1966		QL_PM_LOCK(ha);
1967
1968		if (ha->busy || ((ha->task_daemon_flags &
1969		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1970			QL_PM_UNLOCK(ha);
1971			break;
1972		}
1973
1974		if (ha->power_level == PM_LEVEL_D3) {
1975			rval = DDI_SUCCESS;
1976			QL_PM_UNLOCK(ha);
1977			break;
1978		}
1979		QL_PM_UNLOCK(ha);
1980
1981		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1982			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
1983			    " config regs", QL_NAME, ha->instance, buf);
1984			break;
1985		}
1986		ha->config_saved = 1;
1987
1988		/*
1989		 * Don't enable interrupts. Running mailbox commands with
1990		 * interrupts enabled could cause hangs since pm_run_scan()
1991		 * runs out of a callout thread and on single cpu systems
1992		 * cv_timedwait(), called from ql_mailbox_command(), would
1993		 * not get to run.
1994		 */
1995		TASK_DAEMON_LOCK(ha);
1996		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
1997		TASK_DAEMON_UNLOCK(ha);
1998
1999		ql_halt(ha, PM_LEVEL_D3);
2000
2001		/*
2002		 * Setup ql_intr to ignore interrupts from here on.
2003		 */
2004		QL_PM_LOCK(ha);
2005		ha->power_level = PM_LEVEL_D3;
2006		QL_PM_UNLOCK(ha);
2007
2008		/*
2009		 * Wait for ISR to complete.
2010		 */
2011		INTR_LOCK(ha);
2012		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2013		INTR_UNLOCK(ha);
2014
2015		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2016		    ha->instance, QL_NAME);
2017
2018		rval = DDI_SUCCESS;
2019		break;
2020	}
2021
2022	kmem_free(buf, MAXPATHLEN);
2023	kmem_free(path, MAXPATHLEN);
2024
2025	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2026
2027	return (rval);
2028}
2029
2030/*
2031 * ql_quiesce
2032 *	quiesce a device attached to the system.
2033 *
2034 * Input:
2035 *	dip = pointer to device information structure.
2036 *
2037 * Returns:
2038 *	DDI_SUCCESS
2039 *
2040 * Context:
2041 *	Kernel context.
2042 */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * 24xx/25xx/81xx path: issue MBC_STOP_FIRMWARE through
		 * mailbox 0, then raise the host interrupt to make the
		 * RISC process it.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/* Poll up to ~3 seconds (30000 x 100us) for completion. */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, intr_info_lo);
			if (stat & BIT_15) {
				/*
				 * Low byte < 0x12 indicates the mailbox
				 * command completed; ack and stop polling.
				 */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Legacy (pre-24xx) path: quiet the ISP register by register. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* Quiesce always reports success; there is no recovery path here. */
	return (DDI_SUCCESS);
}
2098
2099/* ************************************************************************ */
2100/*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2101/* ************************************************************************ */
2102
2103/*
2104 * ql_bind_port
2105 *	Handling port binding. The FC Transport attempts to bind an FCA port
2106 *	when it is ready to start transactions on the port. The FC Transport
2107 *	will call the fca_bind_port() function specified in the fca_transport
2108 *	structure it receives. The FCA must fill in the port_info structure
2109 *	passed in the call and also stash the information for future calls.
2110 *
2111 * Input:
2112 *	dip = pointer to FCA information structure.
2113 *	port_info = pointer to port information structure.
2114 *	bind_info = pointer to bind information structure.
2115 *
2116 * Returns:
2117 *	NULL = failure
2118 *
2119 * Context:
2120 *	Kernel context.
2121 */
2122static opaque_t
2123ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2124    fc_fca_bind_info_t *bind_info)
2125{
2126	ql_adapter_state_t	*ha, *vha;
2127	opaque_t		fca_handle = NULL;
2128	port_id_t		d_id;
2129	int			port_npiv = bind_info->port_npiv;
2130	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2131	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2132
2133	/* get state info based on the dip */
2134	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2135	if (ha == NULL) {
2136		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2137		    ddi_get_instance(dip));
2138		return (NULL);
2139	}
2140	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2141
2142	/* Verify port number is supported. */
2143	if (port_npiv != 0) {
2144		if (!(ha->flags & VP_ENABLED)) {
2145			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2146			    ha->instance);
2147			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2148			return (NULL);
2149		}
2150		if (!(ha->flags & POINT_TO_POINT)) {
2151			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2152			    ha->instance);
2153			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2154			return (NULL);
2155		}
2156		if (!(ha->flags & FDISC_ENABLED)) {
2157			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2158			    "FDISC\n", ha->instance);
2159			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2160			return (NULL);
2161		}
2162		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2163		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2164			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2165			    "FC_OUTOFBOUNDS\n", ha->instance);
2166			port_info->pi_error = FC_OUTOFBOUNDS;
2167			return (NULL);
2168		}
2169	} else if (bind_info->port_num != 0) {
2170		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2171		    "supported\n", ha->instance, bind_info->port_num);
2172		port_info->pi_error = FC_OUTOFBOUNDS;
2173		return (NULL);
2174	}
2175
2176	/* Locate port context. */
2177	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2178		if (vha->vp_index == bind_info->port_num) {
2179			break;
2180		}
2181	}
2182
2183	/* If virtual port does not exist. */
2184	if (vha == NULL) {
2185		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2186	}
2187
2188	/* make sure this port isn't already bound */
2189	if (vha->flags & FCA_BOUND) {
2190		port_info->pi_error = FC_ALREADY;
2191	} else {
2192		if (vha->vp_index != 0) {
2193			bcopy(port_nwwn,
2194			    vha->loginparams.node_ww_name.raw_wwn, 8);
2195			bcopy(port_pwwn,
2196			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2197		}
2198		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2199			if (ql_vport_enable(vha) != QL_SUCCESS) {
2200				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2201				    "virtual port=%d\n", ha->instance,
2202				    vha->vp_index);
2203				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2204				return (NULL);
2205			}
2206			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2207			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2208			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2209			    QL_NAME, ha->instance, vha->vp_index,
2210			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2211			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2212			    port_pwwn[6], port_pwwn[7],
2213			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2214			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2215			    port_nwwn[6], port_nwwn[7]);
2216		}
2217
2218		/* stash the bind_info supplied by the FC Transport */
2219		vha->bind_info.port_handle = bind_info->port_handle;
2220		vha->bind_info.port_statec_cb =
2221		    bind_info->port_statec_cb;
2222		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2223
2224		/* Set port's source ID. */
2225		port_info->pi_s_id.port_id = vha->d_id.b24;
2226
2227		/* copy out the default login parameters */
2228		bcopy((void *)&vha->loginparams,
2229		    (void *)&port_info->pi_login_params,
2230		    sizeof (la_els_logi_t));
2231
2232		/* Set port's hard address if enabled. */
2233		port_info->pi_hard_addr.hard_addr = 0;
2234		if (bind_info->port_num == 0) {
2235			d_id.b24 = ha->d_id.b24;
2236			if (CFG_IST(ha, CFG_CTRL_242581)) {
2237				if (ha->init_ctrl_blk.cb24.
2238				    firmware_options_1[0] & BIT_0) {
2239					d_id.b.al_pa = ql_index_to_alpa[ha->
2240					    init_ctrl_blk.cb24.
2241					    hard_address[0]];
2242					port_info->pi_hard_addr.hard_addr =
2243					    d_id.b24;
2244				}
2245			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2246			    BIT_0) {
2247				d_id.b.al_pa = ql_index_to_alpa[ha->
2248				    init_ctrl_blk.cb.hard_address[0]];
2249				port_info->pi_hard_addr.hard_addr = d_id.b24;
2250			}
2251
2252			/* Set the node id data */
2253			if (ql_get_rnid_params(ha,
2254			    sizeof (port_info->pi_rnid_params.params),
2255			    (caddr_t)&port_info->pi_rnid_params.params) ==
2256			    QL_SUCCESS) {
2257				port_info->pi_rnid_params.status = FC_SUCCESS;
2258			} else {
2259				port_info->pi_rnid_params.status = FC_FAILURE;
2260			}
2261
2262			/* Populate T11 FC-HBA details */
2263			ql_populate_hba_fru_details(ha, port_info);
2264			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2265			    KM_SLEEP);
2266			if (ha->pi_attrs != NULL) {
2267				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2268				    sizeof (fca_port_attrs_t));
2269			}
2270		} else {
2271			port_info->pi_rnid_params.status = FC_FAILURE;
2272			if (ha->pi_attrs != NULL) {
2273				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2274				    sizeof (fca_port_attrs_t));
2275			}
2276		}
2277
2278		/* Generate handle for this FCA. */
2279		fca_handle = (opaque_t)vha;
2280
2281		ADAPTER_STATE_LOCK(ha);
2282		vha->flags |= FCA_BOUND;
2283		ADAPTER_STATE_UNLOCK(ha);
2284		/* Set port's current state. */
2285		port_info->pi_port_state = vha->state;
2286	}
2287
2288	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2289	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2290	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2291
2292	return (fca_handle);
2293}
2294
2295/*
2296 * ql_unbind_port
2297 *	To unbind a Fibre Channel Adapter from an FC Port driver.
2298 *
2299 * Input:
2300 *	fca_handle = handle setup by ql_bind_port().
2301 *
2302 * Context:
2303 *	Kernel context.
2304 */
2305static void
2306ql_unbind_port(opaque_t fca_handle)
2307{
2308	ql_adapter_state_t	*ha;
2309	ql_tgt_t		*tq;
2310	uint32_t		flgs;
2311
2312	ha = ql_fca_handle_to_state(fca_handle);
2313	if (ha == NULL) {
2314		/*EMPTY*/
2315		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2316		    (void *)fca_handle);
2317	} else {
2318		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2319		    ha->vp_index);
2320
2321		if (!(ha->flags & FCA_BOUND)) {
2322			/*EMPTY*/
2323			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2324			    ha->instance, ha->vp_index);
2325		} else {
2326			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2327				if ((tq = ql_loop_id_to_queue(ha,
2328				    FL_PORT_24XX_HDL)) != NULL) {
2329					(void) ql_logout_fabric_port(ha, tq);
2330				}
2331				(void) ql_vport_control(ha, (uint8_t)
2332				    (CFG_IST(ha, CFG_CTRL_2425) ?
2333				    VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2334				flgs = FCA_BOUND | VP_ENABLED;
2335			} else {
2336				flgs = FCA_BOUND;
2337			}
2338			ADAPTER_STATE_LOCK(ha);
2339			ha->flags &= ~flgs;
2340			ADAPTER_STATE_UNLOCK(ha);
2341		}
2342
2343		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2344		    ha->vp_index);
2345	}
2346}
2347
2348/*
2349 * ql_init_pkt
2350 *	Initialize FCA portion of packet.
2351 *
2352 * Input:
2353 *	fca_handle = handle setup by ql_bind_port().
2354 *	pkt = pointer to fc_packet.
2355 *
2356 * Returns:
2357 *	FC_SUCCESS - the packet has successfully been initialized.
2358 *	FC_UNBOUND - the fca_handle specified is not bound.
2359 *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2360 *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2361 *
2362 * Context:
2363 *	Kernel context.
2364 */
2365/* ARGSUSED */
2366static int
2367ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2368{
2369	ql_adapter_state_t	*ha;
2370	ql_srb_t		*sp;
2371
2372	ha = ql_fca_handle_to_state(fca_handle);
2373	if (ha == NULL) {
2374		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2375		    (void *)fca_handle);
2376		return (FC_UNBOUND);
2377	}
2378	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2379
2380	ASSERT(ha->power_level == PM_LEVEL_D0);
2381
2382	sp = (ql_srb_t *)pkt->pkt_fca_private;
2383	sp->flags = 0;
2384
2385	/* init cmd links */
2386	sp->cmd.base_address = sp;
2387	sp->cmd.prev = NULL;
2388	sp->cmd.next = NULL;
2389	sp->cmd.head = NULL;
2390
2391	/* init watchdog links */
2392	sp->wdg.base_address = sp;
2393	sp->wdg.prev = NULL;
2394	sp->wdg.next = NULL;
2395	sp->wdg.head = NULL;
2396	sp->pkt = pkt;
2397	sp->ha = ha;
2398	sp->magic_number = QL_FCA_BRAND;
2399
2400	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2401
2402	return (FC_SUCCESS);
2403}
2404
2405/*
2406 * ql_un_init_pkt
2407 *	Release all local resources bound to packet.
2408 *
2409 * Input:
2410 *	fca_handle = handle setup by ql_bind_port().
2411 *	pkt = pointer to fc_packet.
2412 *
2413 * Returns:
2414 *	FC_SUCCESS - the packet has successfully been invalidated.
2415 *	FC_UNBOUND - the fca_handle specified is not bound.
2416 *	FC_BADPACKET - the packet has not been initialized or has
2417 *			already been freed by this FCA.
2418 *
2419 * Context:
2420 *	Kernel context.
2421 */
2422static int
2423ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2424{
2425	ql_adapter_state_t *ha;
2426	int rval;
2427	ql_srb_t *sp;
2428
2429	ha = ql_fca_handle_to_state(fca_handle);
2430	if (ha == NULL) {
2431		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2432		    (void *)fca_handle);
2433		return (FC_UNBOUND);
2434	}
2435	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2436
2437	sp = (ql_srb_t *)pkt->pkt_fca_private;
2438	ASSERT(sp->magic_number == QL_FCA_BRAND);
2439
2440	if (sp->magic_number != QL_FCA_BRAND) {
2441		EL(ha, "failed, FC_BADPACKET\n");
2442		rval = FC_BADPACKET;
2443	} else {
2444		sp->magic_number = NULL;
2445
2446		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
2447		    SRB_IN_TOKEN_ARRAY)) == 0);
2448
2449		rval = FC_SUCCESS;
2450	}
2451
2452	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2453
2454	return (rval);
2455}
2456
2457/*
2458 * ql_els_send
 *	Issue an extended link service request.
2460 *
2461 * Input:
2462 *	fca_handle = handle setup by ql_bind_port().
2463 *	pkt = pointer to fc_packet.
2464 *
2465 * Returns:
2466 *	FC_SUCCESS - the command was successful.
2467 *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2468 *	FC_ELS_PREJECT - the command was rejected by an N-port.
2469 *	FC_TRANSPORT_ERROR - a transport error occurred.
2470 *	FC_UNBOUND - the fca_handle specified is not bound.
2471 *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2472 *
2473 * Context:
2474 *	Kernel context.
2475 */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer;
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the resuming path to signal cv_dr_suspended. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Setup response header. */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	/* Response goes back with swapped source/destination IDs. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear residual per-command SRB state, then mark as an ELS packet. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch on the ELS command code extracted from the payload. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2659
2660/*
2661 * ql_get_cap
2662 *	Export FCA hardware and software capabilities.
2663 *
2664 * Input:
2665 *	fca_handle = handle setup by ql_bind_port().
2666 *	cap = pointer to the capabilities string.
2667 *	ptr = buffer pointer for return capability.
2668 *
2669 * Returns:
2670 *	FC_CAP_ERROR - no such capability
2671 *	FC_CAP_FOUND - the capability was returned and cannot be set
2672 *	FC_CAP_SETTABLE - the capability was returned and can be set
2673 *	FC_UNBOUND - the fca_handle specified is not bound.
2674 *
2675 * Context:
2676 *	Kernel context.
2677 */
static int
ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
{
	ql_adapter_state_t	*ha;
	int			rval;
	uint32_t		*rptr = (uint32_t *)ptr;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		/* 8-byte world wide node name from the login parameters. */
		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
		    ptr, 8);
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		bcopy((void *)&ha->loginparams, ptr,
		    sizeof (la_els_logi_t));
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		/* Maximum number of unsolicited buffers this FCA allows. */
		*rptr = (uint32_t)QL_UB_LIMIT;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {

		/* Remains NULL (allow streaming) except on sparc/Psycho. */
		dev_info_t	*psydip = NULL;
#ifdef __sparc
		/*
		 * Disable streaming for certain 2 chip adapters
		 * below Psycho to handle Psycho byte hole issue.
		 */
		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
			/* Walk up the devinfo tree looking for "pcipsy". */
			for (psydip = ddi_get_parent(ha->dip); psydip;
			    psydip = ddi_get_parent(psydip)) {
				if (strcmp(ddi_driver_name(psydip),
				    "pcipsy") == 0) {
					break;
				}
			}
		}
#endif	/* __sparc */

		if (psydip) {
			*rptr = (uint32_t)FC_NO_STREAMING;
			EL(ha, "No Streaming\n");
		} else {
			*rptr = (uint32_t)FC_ALLOW_STREAMING;
			EL(ha, "Allow Streaming\n");
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		/* Max frame length lives in a chip-generation-specific ICB. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		*rptr = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		*rptr = FC_NO_DVMA_SPACE;
		rval = FC_CAP_FOUND;
	} else {
		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
		rval = FC_CAP_ERROR;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2758
2759/*
2760 * ql_set_cap
2761 *	Allow the FC Transport to set FCA capabilities if possible.
2762 *
2763 * Input:
2764 *	fca_handle = handle setup by ql_bind_port().
2765 *	cap = pointer to the capabilities string.
2766 *	ptr = buffer pointer for capability.
2767 *
2768 * Returns:
2769 *	FC_CAP_ERROR - no such capability
2770 *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2771 *	FC_CAP_SETTABLE - the capability was successfully set.
2772 *	FC_UNBOUND - the fca_handle specified is not bound.
2773 *
2774 * Context:
2775 *	Kernel context.
2776 */
2777/* ARGSUSED */
2778static int
2779ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2780{
2781	ql_adapter_state_t	*ha;
2782	int			rval;
2783
2784	ha = ql_fca_handle_to_state(fca_handle);
2785	if (ha == NULL) {
2786		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2787		    (void *)fca_handle);
2788		return (FC_UNBOUND);
2789	}
2790	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2791
2792	if (strcmp(cap, FC_NODE_WWN) == 0) {
2793		rval = FC_CAP_FOUND;
2794	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2795		rval = FC_CAP_FOUND;
2796	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2797		rval = FC_CAP_FOUND;
2798	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2799		rval = FC_CAP_FOUND;
2800	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2801		rval = FC_CAP_FOUND;
2802	} else {
2803		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2804		rval = FC_CAP_ERROR;
2805	}
2806
2807	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2808
2809	return (rval);
2810}
2811
2812/*
2813 * ql_getmap
2814 *	Request of Arbitrated Loop (AL-PA) map.
2815 *
2816 * Input:
2817 *	fca_handle = handle setup by ql_bind_port().
 *	mapbuf = buffer pointer for map.
2819 *
2820 * Returns:
2821 *	FC_OLDPORT - the specified port is not operating in loop mode.
2822 *	FC_OFFLINE - the specified port is not online.
2823 *	FC_NOMAP - there is no loop map available for this port.
2824 *	FC_UNBOUND - the fca_handle specified is not bound.
2825 *	FC_SUCCESS - a valid map has been placed in mapbuf.
2826 *
2827 * Context:
2828 *	Kernel context.
2829 */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer;
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		/* Ask the resuming path to signal cv_dr_suspended. */
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Firmware fills lilp_length and the ALPA list that follows it. */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this as an
		 * offline condition, let's wait for few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
2903
2904/*
2905 * ql_transport
2906 *	Issue an I/O request. Handles all regular requests.
2907 *
2908 * Input:
2909 *	fca_handle = handle setup by ql_bind_port().
2910 *	pkt = pointer to fc_packet.
2911 *
2912 * Returns:
2913 *	FC_SUCCESS - the packet was accepted for transport.
2914 *	FC_TRANSPORT_ERROR - a transport error occurred.
2915 *	FC_BADPACKET - the packet to be transported had not been
2916 *			initialized by this FCA.
2917 *	FC_UNBOUND - the fca_handle specified is not bound.
2918 *
2919 * Context:
2920 *	Kernel context.
2921 */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif
	/* While suspended, only crash-dump traffic is expected through here. */
	if (ha->flags & ADAPTER_SUSPENDED) {
		ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
	}

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/* Reset SRB flags. */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/* Pre-fill the response header from the command header. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Route the packet by its frame routing control and FC-4 type. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		/* Non-FCP commands fall out with FC_TRANSPORT_ERROR. */
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			/* IP-over-FC traffic. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			/* Generic services (name server, management). */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			/* Anything else is rejected as unsupported. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
3014
3015/*
3016 * ql_ub_alloc
3017 *	Allocate buffers for unsolicited exchanges.
3018 *
3019 * Input:
3020 *	fca_handle = handle setup by ql_bind_port().
3021 *	tokens = token array for each buffer.
3022 *	size = size of each buffer.
3023 *	count = pointer to number of buffers.
3024 *	type = the FC-4 type the buffers are reserved for.
3025 *		1 = Extended Link Services, 5 = LLC/SNAP
3026 *
3027 * Returns:
3028 *	FC_FAILURE - buffers could not be allocated.
3029 *	FC_TOOMANY - the FCA could not allocate the requested
3030 *			number of buffers.
3031 *	FC_SUCCESS - unsolicited buffers were allocated.
3032 *	FC_UNBOUND - the fca_handle specified is not bound.
3033 *
3034 * Context:
3035 *	Kernel context.
3036 */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse the request unless the adapter is at full power (D0). */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check the count.  The cap is enforced over the total of already
	 * allocated buffers plus this request; on overflow the caller is
	 * told (via *count = 0) that nothing was allocated.
	 */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/*
		 * Allocate all memory needed.
		 * NOTE(review): kmem_zalloc(KM_SLEEP) should not return
		 * NULL on Solaris; the NULL checks below look defensive.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				/*
				 * IP (LLC/SNAP) buffers must be DMA-able so
				 * the ISP can deposit frames directly; the
				 * DMA endianness differs per platform.
				 */
				if (type == FC_TYPE_IS8802_SNAP) {
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP buffers are plain kmem. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/*
			 * Find next available slot.  The slot index doubles
			 * as the token handed back to the transport.
			 */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * IP buffer.  If at least one IP buffer was installed and this
	 * controller type does not handle IP itself, enable the IP path
	 * and (once only) initialize the firmware's IP control block.
	 */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/*
				 * MTU, buffer size and buffer count are
				 * stored in the control block as byte
				 * pairs, LSB first.  cb24 is the 24xx-era
				 * layout, cb the legacy one.
				 */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					/* Grow buffer count, never shrink. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					/* Grow buffer count, never shrink. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Post the new receive buffers to the ISP. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3258
3259/*
3260 * ql_ub_free
3261 *	Free unsolicited buffers.
3262 *
3263 * Input:
3264 *	fca_handle = handle setup by ql_bind_port().
3265 *	count = number of buffers.
3266 *	tokens = token array for each buffer.
3267 *
3268 * Returns:
3269 *	FC_SUCCESS - the requested buffers have been freed.
3270 *	FC_UNBOUND - the fca_handle specified is not bound.
3271 *	FC_UB_BADTOKEN - an invalid token was encountered.
3272 *			 No buffers have been released.
3273 *
3274 * Context:
3275 *	Kernel context.
3276 */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.  Note: tokens validated so far are
	 * freed even if a later token turns out to be bad (the loop
	 * breaks, leaving earlier buffers already released).
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/*
		 * Check the state of the unsolicited buffer.  Mark it
		 * free-requested, then poll (100 ms per iteration) until
		 * it is back in the FCA and neither a callback nor an
		 * acquisition is in flight.  Both locks are dropped while
		 * sleeping so the ISR/daemon can make progress.
		 */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		/* Detach from the array, then release the memory. */
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3358
3359/*
3360 * ql_ub_release
3361 *	Release unsolicited buffers from FC Transport
3362 *	to FCA for future use.
3363 *
3364 * Input:
3365 *	fca_handle = handle setup by ql_bind_port().
3366 *	count = number of buffers.
3367 *	tokens = token array for each buffer.
3368 *
3369 * Returns:
3370 *	FC_SUCCESS - the requested buffers have been released.
3371 *	FC_UNBOUND - the fca_handle specified is not bound.
3372 *	FC_UB_BADTOKEN - an invalid token was encountered.
3373 *		No buffers have been released.
3374 *
3375 * Context:
3376 *	Kernel context.
3377 */
3378static int
3379ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3380{
3381	ql_adapter_state_t	*ha;
3382	ql_srb_t		*sp;
3383	uint32_t		index;
3384	uint64_t		ub_array_index;
3385	int			rval = FC_SUCCESS;
3386	int			ub_ip_updated = FALSE;
3387
3388	/* Check handle. */
3389	ha = ql_fca_handle_to_state(fca_handle);
3390	if (ha == NULL) {
3391		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3392		    (void *)fca_handle);
3393		return (FC_UNBOUND);
3394	}
3395	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3396
3397	/* Acquire adapter state lock. */
3398	ADAPTER_STATE_LOCK(ha);
3399	QL_UB_LOCK(ha);
3400
3401	/* Check all returned tokens. */
3402	for (index = 0; index < count; index++) {
3403		/* Check the token range. */
3404		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3405			EL(ha, "failed, FC_UB_BADTOKEN\n");
3406			rval = FC_UB_BADTOKEN;
3407			break;
3408		}
3409
3410		/* Check the unsolicited buffer array. */
3411		if (ha->ub_array[ub_array_index] == NULL) {
3412			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3413			rval = FC_UB_BADTOKEN;
3414			break;
3415		}
3416
3417		/* Check the state of the unsolicited buffer. */
3418		sp = ha->ub_array[ub_array_index]->ub_fca_private;
3419		if (sp->flags & SRB_UB_IN_FCA) {
3420			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3421			rval = FC_UB_BADTOKEN;
3422			break;
3423		}
3424	}
3425
3426	/* If all tokens checkout, release the buffers. */
3427	if (rval == FC_SUCCESS) {
3428		/* Check all returned tokens. */
3429		for (index = 0; index < count; index++) {
3430			fc_unsol_buf_t	*ubp;
3431
3432			ub_array_index = tokens[index];
3433			ubp = ha->ub_array[ub_array_index];
3434			sp = ubp->ub_fca_private;
3435
3436			ubp->ub_resp_flags = 0;
3437			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3438			sp->flags |= SRB_UB_IN_FCA;
3439
3440			/* IP buffer. */
3441			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3442				ub_ip_updated = TRUE;
3443			}
3444		}
3445	}
3446
3447	QL_UB_UNLOCK(ha);
3448	/* Release adapter state lock. */
3449	ADAPTER_STATE_UNLOCK(ha);
3450
3451	/*
3452	 * XXX: We should call ql_isp_rcvbuf() to return a
3453	 * buffer to ISP only if the number of buffers fall below
3454	 * the low water mark.
3455	 */
3456	if (ub_ip_updated) {
3457		ql_isp_rcvbuf(ha);
3458	}
3459
3460	if (rval != FC_SUCCESS) {
3461		EL(ha, "failed, rval = %xh\n", rval);
3462	} else {
3463		/*EMPTY*/
3464		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3465	}
3466	return (rval);
3467}
3468
3469/*
3470 * ql_abort
3471 *	Abort a packet.
3472 *
3473 * Input:
3474 *	fca_handle = handle setup by ql_bind_port().
3475 *	pkt = pointer to fc_packet.
3476 *	flags = KM_SLEEP flag.
3477 *
3478 * Returns:
3479 *	FC_SUCCESS - the packet has successfully aborted.
3480 *	FC_ABORTED - the packet has successfully aborted.
3481 *	FC_ABORTING - the packet is being aborted.
3482 *	FC_ABORT_FAILED - the packet could not be aborted.
3483 *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3484 *		to abort the packet.
3485 *	FC_BADEXCHANGE - no packet found.
3486 *	FC_UNBOUND - the fca_handle specified is not bound.
3487 *
3488 * Context:
3489 *	Kernel context.
3490 */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	ASSERT(pha->power_level == PM_LEVEL_D0);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No abort possible without a target or while the loop is down. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/*
	 * Case 1: command has not reached the ISP yet.  It may still be
	 * sitting on the adapter's pending queue or the LUN's device
	 * queue; pull it off whichever queue holds it and complete it
	 * locally with CS_ABORTED.
	 */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			/* Not found on either queue. */
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Case 2: already completed by the ISP; nothing to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * Case 3: if here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quiet then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Case 4: command is in flight with the ISP.  Invalidate
		 * its request-ring entry (if still present) and then ask
		 * the firmware to abort it via mailbox command.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			/* Scan the request ring for this command's entry. */
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3638
3639/*
3640 * ql_reset
3641 *	Reset link or hardware.
3642 *
3643 * Input:
3644 *	fca_handle = handle setup by ql_bind_port().
3645 *	cmd = reset type command.
3646 *
3647 * Returns:
3648 *	FC_SUCCESS - reset has successfully finished.
3649 *	FC_UNBOUND - the fca_handle specified is not bound.
3650 *	FC_FAILURE - reset failed.
3651 *
3652 * Context:
3653 *	Kernel context.
3654 */
3655static int
3656ql_reset(opaque_t fca_handle, uint32_t cmd)
3657{
3658	ql_adapter_state_t	*ha;
3659	int			rval = FC_SUCCESS, rval2;
3660
3661	ha = ql_fca_handle_to_state(fca_handle);
3662	if (ha == NULL) {
3663		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3664		    (void *)fca_handle);
3665		return (FC_UNBOUND);
3666	}
3667
3668	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
3669	    ha->vp_index, cmd);
3670
3671	ASSERT(ha->power_level == PM_LEVEL_D0);
3672
3673	switch (cmd) {
3674	case FC_FCA_CORE:
3675		/* dump firmware core if specified. */
3676		if (ha->vp_index == 0) {
3677			if (ql_dump_firmware(ha) != QL_SUCCESS) {
3678				EL(ha, "failed, FC_FAILURE\n");
3679				rval = FC_FAILURE;
3680			}
3681		}
3682		break;
3683	case FC_FCA_LINK_RESET:
3684		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
3685			if (ql_loop_reset(ha) != QL_SUCCESS) {
3686				EL(ha, "failed, FC_FAILURE-2\n");
3687				rval = FC_FAILURE;
3688			}
3689		}
3690		break;
3691	case FC_FCA_RESET_CORE:
3692	case FC_FCA_RESET:
3693		/* if dump firmware core if specified. */
3694		if (cmd == FC_FCA_RESET_CORE) {
3695			if (ha->vp_index != 0) {
3696				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
3697				    ? QL_SUCCESS : ql_loop_reset(ha);
3698			} else {
3699				rval2 = ql_dump_firmware(ha);
3700			}
3701			if (rval2 != QL_SUCCESS) {
3702				EL(ha, "failed, FC_FAILURE-3\n");
3703				rval = FC_FAILURE;
3704			}
3705		}
3706
3707		/* Free up all unsolicited buffers. */
3708		if (ha->ub_allocated != 0) {
3709			/* Inform to release buffers. */
3710			ha->state = FC_PORT_SPEED_MASK(ha->state);
3711			ha->state |= FC_STATE_RESET_REQUESTED;
3712			if (ha->flags & FCA_BOUND) {
3713				(ha->bind_info.port_statec_cb)
3714				    (ha->bind_info.port_handle,
3715				    ha->state);
3716			}
3717		}
3718
3719		ha->state = FC_PORT_SPEED_MASK(ha->state);
3720
3721		/* All buffers freed */
3722		if (ha->ub_allocated == 0) {
3723			/* Hardware reset. */
3724			if (cmd == FC_FCA_RESET) {
3725				if (ha->vp_index == 0) {
3726					(void) ql_abort_isp(ha);
3727				} else if (!(ha->pha->task_daemon_flags &
3728				    LOOP_DOWN)) {
3729					(void) ql_loop_reset(ha);
3730				}
3731			}
3732
3733			/* Inform that the hardware has been reset */
3734			ha->state |= FC_STATE_RESET;
3735		} else {
3736			/*
3737			 * the port driver expects an online if
3738			 * buffers are not freed.
3739			 */
3740			if (ha->topology & QL_LOOP_CONNECTION) {
3741				ha->state |= FC_STATE_LOOP;
3742			} else {
3743				ha->state |= FC_STATE_ONLINE;
3744			}
3745		}
3746
3747		TASK_DAEMON_LOCK(ha);
3748		ha->task_daemon_flags |= FC_STATE_CHANGE;
3749		TASK_DAEMON_UNLOCK(ha);
3750
3751		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
3752
3753		break;
3754	default:
3755		EL(ha, "unknown cmd=%xh\n", cmd);
3756		break;
3757	}
3758
3759	if (rval != FC_SUCCESS) {
3760		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
3761	} else {
3762		/*EMPTY*/
3763		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3764		    ha->vp_index);
3765	}
3766
3767	return (rval);
3768}
3769
3770/*
3771 * ql_port_manage
3772 *	Perform port management or diagnostics.
3773 *
3774 * Input:
3775 *	fca_handle = handle setup by ql_bind_port().
3776 *	cmd = pointer to command structure.
3777 *
3778 * Returns:
3779 *	FC_SUCCESS - the request completed successfully.
3780 *	FC_FAILURE - the request did not complete successfully.
3781 *	FC_UNBOUND - the fca_handle specified is not bound.
3782 *
3783 * Context:
3784 *	Kernel context.
3785 */
3786static int
3787ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3788{
3789	clock_t			timer;
3790	uint16_t		index;
3791	uint32_t		*bp;
3792	port_id_t		d_id;
3793	ql_link_t		*link;
3794	ql_adapter_state_t	*ha, *pha;
3795	ql_tgt_t		*tq;
3796	dma_mem_t		buffer_xmt, buffer_rcv;
3797	size_t			length;
3798	uint32_t		cnt;
3799	char			buf[80];
3800	lbp_t			*lb;
3801	ql_mbx_data_t		mr;
3802	app_mbx_cmd_t		*mcp;
3803	int			i0;
3804	uint8_t			*bptr;
3805	int			rval2, rval = FC_SUCCESS;
3806	uint32_t		opcode;
3807
3808	ha = ql_fca_handle_to_state(fca_handle);
3809	if (ha == NULL) {
3810		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3811		    (void *)fca_handle);
3812		return (FC_UNBOUND);
3813	}
3814	pha = ha->pha;
3815
3816	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3817	    cmd->pm_cmd_code);
3818
3819	ASSERT(pha->power_level == PM_LEVEL_D0);
3820
3821	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3822
3823	/*
3824	 * Wait for all outstanding commands to complete
3825	 */
3826	index = (uint16_t)ql_wait_outstanding(ha);
3827
3828	if (index != MAX_OUTSTANDING_COMMANDS) {
3829		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3830		ql_restart_queues(ha);
3831		EL(ha, "failed, FC_TRAN_BUSY\n");
3832		return (FC_TRAN_BUSY);
3833	}
3834
3835	switch (cmd->pm_cmd_code) {
3836	case FC_PORT_BYPASS:
3837		d_id.b24 = *cmd->pm_cmd_buf;
3838		tq = ql_d_id_to_queue(ha, d_id);
3839		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3840			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3841			rval = FC_FAILURE;
3842		}
3843		break;
3844	case FC_PORT_UNBYPASS:
3845		d_id.b24 = *cmd->pm_cmd_buf;
3846		tq = ql_d_id_to_queue(ha, d_id);
3847		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3848			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3849			rval = FC_FAILURE;
3850		}
3851		break;
3852	case FC_PORT_GET_FW_REV:
3853		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3854		    pha->fw_minor_version, pha->fw_subminor_version);
3855		length = strlen(buf) + 1;
3856		if (cmd->pm_data_len < length) {
3857			cmd->pm_data_len = length;
3858			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3859			rval = FC_FAILURE;
3860		} else {
3861			(void) strcpy(cmd->pm_data_buf, buf);
3862		}
3863		break;
3864
3865	case FC_PORT_GET_FCODE_REV: {
3866		caddr_t		fcode_ver_buf = NULL;
3867
3868		i0 = 0;
3869		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3870		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3871		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3872		    (caddr_t)&fcode_ver_buf, &i0);
3873		length = (uint_t)i0;
3874
3875		if (rval2 != DDI_PROP_SUCCESS) {
3876			EL(ha, "failed, getting version = %xh\n", rval2);
3877			length = 20;
3878			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3879			if (fcode_ver_buf != NULL) {
3880				(void) sprintf(fcode_ver_buf,
3881				    "NO FCODE FOUND");
3882			}
3883		}
3884
3885		if (cmd->pm_data_len < length) {
3886			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3887			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3888			cmd->pm_data_len = length;
3889			rval = FC_FAILURE;
3890		} else if (fcode_ver_buf != NULL) {
3891			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3892			    length);
3893		}
3894
3895		if (fcode_ver_buf != NULL) {
3896			kmem_free(fcode_ver_buf, length);
3897		}
3898		break;
3899	}
3900
3901	case FC_PORT_GET_DUMP:
3902		QL_DUMP_LOCK(pha);
3903		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3904			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3905			    "length=%lxh\n", cmd->pm_data_len);
3906			cmd->pm_data_len = pha->risc_dump_size;
3907			rval = FC_FAILURE;
3908		} else if (pha->ql_dump_state & QL_DUMPING) {
3909			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3910			rval = FC_TRAN_BUSY;
3911		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3912			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3913			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3914		} else {
3915			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3916			rval = FC_FAILURE;
3917		}
3918		QL_DUMP_UNLOCK(pha);
3919		break;
3920	case FC_PORT_FORCE_DUMP:
3921		PORTMANAGE_LOCK(ha);
3922		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3923			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3924			rval = FC_FAILURE;
3925		}
3926		PORTMANAGE_UNLOCK(ha);
3927		break;
3928	case FC_PORT_DOWNLOAD_FW:
3929		PORTMANAGE_LOCK(ha);
3930		if (CFG_IST(ha, CFG_CTRL_242581)) {
3931			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3932			    (uint32_t)cmd->pm_data_len,
3933			    ha->flash_fw_addr << 2) != QL_SUCCESS) {
3934				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3935				rval = FC_FAILURE;
3936			}
3937			ql_reset_chip(ha);
3938			(void) ql_abort_isp(ha);
3939		} else {
3940			/* Save copy of the firmware. */
3941			if (pha->risc_code != NULL) {
3942				kmem_free(pha->risc_code, pha->risc_code_size);
3943				pha->risc_code = NULL;
3944				pha->risc_code_size = 0;
3945			}
3946
3947			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3948			    KM_SLEEP);
3949			if (pha->risc_code != NULL) {
3950				pha->risc_code_size =
3951				    (uint32_t)cmd->pm_data_len;
3952				bcopy(cmd->pm_data_buf, pha->risc_code,
3953				    cmd->pm_data_len);
3954
3955				/* Do abort to force reload. */
3956				ql_reset_chip(ha);
3957				if (ql_abort_isp(ha) != QL_SUCCESS) {
3958					kmem_free(pha->risc_code,
3959					    pha->risc_code_size);
3960					pha->risc_code = NULL;
3961					pha->risc_code_size = 0;
3962					ql_reset_chip(ha);
3963					(void) ql_abort_isp(ha);
3964					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3965					    " FC_FAILURE\n");
3966					rval = FC_FAILURE;
3967				}
3968			}
3969		}
3970		PORTMANAGE_UNLOCK(ha);
3971		break;
3972	case FC_PORT_GET_DUMP_SIZE:
3973		bp = (uint32_t *)cmd->pm_data_buf;
3974		*bp = pha->risc_dump_size;
3975		break;
3976	case FC_PORT_DIAG:
3977		/*
3978		 * Prevents concurrent diags
3979		 */
3980		PORTMANAGE_LOCK(ha);
3981
3982		/* Wait for suspension to end. */
3983		for (timer = 0; timer < 3000 &&
3984		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3985			ql_delay(ha, 10000);
3986		}
3987
3988		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3989			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3990			rval = FC_TRAN_BUSY;
3991			PORTMANAGE_UNLOCK(ha);
3992			break;
3993		}
3994
3995		switch (cmd->pm_cmd_flags) {
3996		case QL_DIAG_EXEFMW:
3997			if (ql_start_firmware(ha) != QL_SUCCESS) {
3998				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
3999				rval = FC_FAILURE;
4000			}
4001			break;
4002		case QL_DIAG_CHKCMDQUE:
4003			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4004			    i0++) {
4005				cnt += (pha->outstanding_cmds[i0] != NULL);
4006			}
4007			if (cnt != 0) {
4008				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4009				    "FC_FAILURE\n");
4010				rval = FC_FAILURE;
4011			}
4012			break;
4013		case QL_DIAG_FMWCHKSUM:
4014			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4015				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4016				    "FC_FAILURE\n");
4017				rval = FC_FAILURE;
4018			}
4019			break;
4020		case QL_DIAG_SLFTST:
4021			if (ql_online_selftest(ha) != QL_SUCCESS) {
4022				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4023				rval = FC_FAILURE;
4024			}
4025			ql_reset_chip(ha);
4026			(void) ql_abort_isp(ha);
4027			break;
4028		case QL_DIAG_REVLVL:
4029			if (cmd->pm_stat_len <
4030			    sizeof (ql_adapter_revlvl_t)) {
4031				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4032				    "slen=%lxh, rlvllen=%lxh\n",
4033				    cmd->pm_stat_len,
4034				    sizeof (ql_adapter_revlvl_t));
4035				rval = FC_NOMEM;
4036			} else {
4037				bcopy((void *)&(pha->adapter_stats->revlvl),
4038				    cmd->pm_stat_buf,
4039				    (size_t)cmd->pm_stat_len);
4040				cmd->pm_stat_len =
4041				    sizeof (ql_adapter_revlvl_t);
4042			}
4043			break;
4044		case QL_DIAG_LPBMBX:
4045
4046			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4047				EL(ha, "failed, QL_DIAG_LPBMBX "
4048				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4049				    "reqd=%lxh\n", cmd->pm_data_len,
4050				    sizeof (struct app_mbx_cmd));
4051				rval = FC_INVALID_REQUEST;
4052				break;
4053			}
4054			/*
4055			 * Don't do the wrap test on a 2200 when the
4056			 * firmware is running.
4057			 */
4058			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4059				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4060				mr.mb[1] = mcp->mb[1];
4061				mr.mb[2] = mcp->mb[2];
4062				mr.mb[3] = mcp->mb[3];
4063				mr.mb[4] = mcp->mb[4];
4064				mr.mb[5] = mcp->mb[5];
4065				mr.mb[6] = mcp->mb[6];
4066				mr.mb[7] = mcp->mb[7];
4067
4068				bcopy(&mr.mb[0], &mr.mb[10],
4069				    sizeof (uint16_t) * 8);
4070				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4071					EL(ha, "failed, QL_DIAG_LPBMBX "
4072					    "FC_FAILURE\n");
4073					rval = FC_FAILURE;
4074					break;
4075				}
4076				if (mr.mb[i0] != mr.mb[i0 + 10]) {
4077					EL(ha, "failed, QL_DIAG_LPBMBX "
4078					    "FC_FAILURE-2\n");
4079
4080					(void) ql_flash_errlog(ha,
4081					    FLASH_ERRLOG_ISP_ERR, 0,
4082					    RD16_IO_REG(ha, hccr),
4083					    RD16_IO_REG(ha, istatus));
4084
4085					rval = FC_FAILURE;
4086					break;
4087				}
4088			}
4089			(void) ql_abort_isp(ha);
4090			break;
4091		case QL_DIAG_LPBDTA:
4092			/*
4093			 * For loopback data, we receive the
4094			 * data back in pm_stat_buf. This provides
4095			 * the user an opportunity to compare the
4096			 * transmitted and received data.
4097			 *
4098			 * NB: lb->options are:
4099			 *	0 --> Ten bit loopback
4100			 *	1 --> One bit loopback
4101			 *	2 --> External loopback
4102			 */
4103			if (cmd->pm_data_len > 65536) {
4104				rval = FC_TOOMANY;
4105				EL(ha, "failed, QL_DIAG_LPBDTA "
4106				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4107				break;
4108			}
4109			if (ql_get_dma_mem(ha, &buffer_xmt,
4110			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4111			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4112				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4113				rval = FC_NOMEM;
4114				break;
4115			}
4116			if (ql_get_dma_mem(ha, &buffer_rcv,
4117			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4118			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4119				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4120				rval = FC_NOMEM;
4121				break;
4122			}
4123			ddi_rep_put8(buffer_xmt.acc_handle,
4124			    (uint8_t *)cmd->pm_data_buf,
4125			    (uint8_t *)buffer_xmt.bp,
4126			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4127
4128			/* 22xx's adapter must be in loop mode for test. */
4129			if (CFG_IST(ha, CFG_CTRL_2200)) {
4130				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4131				if (ha->flags & POINT_TO_POINT ||
4132				    (ha->task_daemon_flags & LOOP_DOWN &&
4133				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4134					cnt = *bptr;
4135					*bptr = (uint8_t)
4136					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4137					(void) ql_abort_isp(ha);
4138					*bptr = (uint8_t)cnt;
4139				}
4140			}
4141
4142			/* Shutdown IP. */
4143			if (pha->flags & IP_INITIALIZED) {
4144				(void) ql_shutdown_ip(pha);
4145			}
4146
4147			lb = (lbp_t *)cmd->pm_cmd_buf;
4148			lb->transfer_count =
4149			    (uint32_t)cmd->pm_data_len;
4150			lb->transfer_segment_count = 0;
4151			lb->receive_segment_count = 0;
4152			lb->transfer_data_address =
4153			    buffer_xmt.cookie.dmac_address;
4154			lb->receive_data_address =
4155			    buffer_rcv.cookie.dmac_address;
4156
4157			if ((lb->options & 7) == 2 &&
4158			    pha->task_daemon_flags &
4159			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4160				/* Loop must be up for external */
4161				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4162				rval = FC_TRAN_BUSY;
4163			} else if (ql_loop_back(ha, 0, lb,
4164			    buffer_xmt.cookie.dmac_notused,
4165			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4166				bzero((void *)cmd->pm_stat_buf,
4167				    cmd->pm_stat_len);
4168				ddi_rep_get8(buffer_rcv.acc_handle,
4169				    (uint8_t *)cmd->pm_stat_buf,
4170				    (uint8_t *)buffer_rcv.bp,
4171				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4172			} else {
4173				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4174				rval = FC_FAILURE;
4175			}
4176
4177			ql_free_phys(ha, &buffer_xmt);
4178			ql_free_phys(ha, &buffer_rcv);
4179
4180			/* Needed to recover the f/w */
4181			(void) ql_abort_isp(ha);
4182
4183			/* Restart IP if it was shutdown. */
4184			if (pha->flags & IP_ENABLED &&
4185			    !(pha->flags & IP_INITIALIZED)) {
4186				(void) ql_initialize_ip(pha);
4187				ql_isp_rcvbuf(pha);
4188			}
4189
4190			break;
4191		case QL_DIAG_ECHO: {
4192			/*
4193			 * issue an echo command with a user supplied
4194			 * data pattern and destination address
4195			 */
4196			echo_t		echo;		/* temp echo struct */
4197
4198			/* Setup echo cmd & adjust for platform */
4199			opcode = QL_ECHO_CMD;
4200			BIG_ENDIAN_32(&opcode);
4201
4202			/*
4203			 * due to limitations in the ql
4204			 * firmaware the echo data field is
4205			 * limited to 220
4206			 */
4207			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4208			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4209				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4210				    "cmdl1=%lxh, statl2=%lxh\n",
4211				    cmd->pm_cmd_len, cmd->pm_stat_len);
4212				rval = FC_TOOMANY;
4213				break;
4214			}
4215
4216			/*
4217			 * the input data buffer has the user
4218			 * supplied data pattern.  The "echoed"
4219			 * data will be DMAed into the output
4220			 * data buffer.  Therefore the length
4221			 * of the output buffer must be equal
4222			 * to or greater then the input buffer
4223			 * length
4224			 */
4225			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4226				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4227				    " cmdl1=%lxh, statl2=%lxh\n",
4228				    cmd->pm_cmd_len, cmd->pm_stat_len);
4229				rval = FC_TOOMANY;
4230				break;
4231			}
4232			/* add four bytes for the opcode */
4233			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4234
4235			/*
4236			 * are we 32 or 64 bit addressed???
4237			 * We need to get the appropriate
4238			 * DMA and set the command options;
4239			 * 64 bit (bit 6) or 32 bit
4240			 * (no bit 6) addressing.
4241			 * while we are at it lets ask for
4242			 * real echo (bit 15)
4243			 */
4244			echo.options = BIT_15;
4245			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4246			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
4247				echo.options = (uint16_t)
4248				    (echo.options | BIT_6);
4249			}
4250
4251			/*
4252			 * Set up the DMA mappings for the
4253			 * output and input data buffers.
4254			 * First the output buffer
4255			 */
4256			if (ql_get_dma_mem(ha, &buffer_xmt,
4257			    (uint32_t)(cmd->pm_data_len + 4),
4258			    LITTLE_ENDIAN_DMA,
4259			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4260				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4261				rval = FC_NOMEM;
4262				break;
4263			}
4264			echo.transfer_data_address = buffer_xmt.cookie;
4265
4266			/* Next the input buffer */
4267			if (ql_get_dma_mem(ha, &buffer_rcv,
4268			    (uint32_t)(cmd->pm_data_len + 4),
4269			    LITTLE_ENDIAN_DMA,
4270			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4271				/*
4272				 * since we could not allocate
4273				 * DMA space for the input
4274				 * buffer we need to clean up
4275				 * by freeing the DMA space
4276				 * we allocated for the output
4277				 * buffer
4278				 */
4279				ql_free_phys(ha, &buffer_xmt);
4280				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4281				rval = FC_NOMEM;
4282				break;
4283			}
4284			echo.receive_data_address = buffer_rcv.cookie;
4285
4286			/*
4287			 * copy the 4 byte ECHO op code to the
4288			 * allocated DMA space
4289			 */
4290			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4291			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4292
4293			/*
4294			 * copy the user supplied data to the
4295			 * allocated DMA space
4296			 */
4297			ddi_rep_put8(buffer_xmt.acc_handle,
4298			    (uint8_t *)cmd->pm_cmd_buf,
4299			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4300			    DDI_DEV_AUTOINCR);
4301
4302			/* Shutdown IP. */
4303			if (pha->flags & IP_INITIALIZED) {
4304				(void) ql_shutdown_ip(pha);
4305			}
4306
4307			/* send the echo */
4308			if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4309				ddi_rep_put8(buffer_rcv.acc_handle,
4310				    (uint8_t *)buffer_rcv.bp + 4,
4311				    (uint8_t *)cmd->pm_stat_buf,
4312				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4313			} else {
4314				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4315				rval = FC_FAILURE;
4316			}
4317
4318			/* Restart IP if it was shutdown. */
4319			if (pha->flags & IP_ENABLED &&
4320			    !(pha->flags & IP_INITIALIZED)) {
4321				(void) ql_initialize_ip(pha);
4322				ql_isp_rcvbuf(pha);
4323			}
4324			/* free up our DMA buffers */
4325			ql_free_phys(ha, &buffer_xmt);
4326			ql_free_phys(ha, &buffer_rcv);
4327			break;
4328		}
4329		default:
4330			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4331			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4332			rval = FC_INVALID_REQUEST;
4333			break;
4334		}
4335		PORTMANAGE_UNLOCK(ha);
4336		break;
4337	case FC_PORT_LINK_STATE:
4338		/* Check for name equal to null. */
4339		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4340		    index++) {
4341			if (cmd->pm_cmd_buf[index] != 0) {
4342				break;
4343			}
4344		}
4345
4346		/* If name not null. */
4347		if (index < 8 && cmd->pm_cmd_len >= 8) {
4348			/* Locate device queue. */
4349			tq = NULL;
4350			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4351			    tq == NULL; index++) {
4352				for (link = ha->dev[index].first; link != NULL;
4353				    link = link->next) {
4354					tq = link->base_address;
4355
4356					if (bcmp((void *)&tq->port_name[0],
4357					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4358						break;
4359					} else {
4360						tq = NULL;
4361					}
4362				}
4363			}
4364
4365			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4366				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4367				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4368			} else {
4369				cnt = FC_PORT_SPEED_MASK(ha->state) |
4370				    FC_STATE_OFFLINE;
4371				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4372				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4373			}
4374		} else {
4375			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4376			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4377		}
4378		break;
4379	case FC_PORT_INITIALIZE:
4380		if (cmd->pm_cmd_len >= 8) {
4381			tq = NULL;
4382			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4383			    tq == NULL; index++) {
4384				for (link = ha->dev[index].first; link != NULL;
4385				    link = link->next) {
4386					tq = link->base_address;
4387
4388					if (bcmp((void *)&tq->port_name[0],
4389					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4390						if (!VALID_DEVICE_ID(ha,
4391						    tq->loop_id)) {
4392							tq = NULL;
4393						}
4394						break;
4395					} else {
4396						tq = NULL;
4397					}
4398				}
4399			}
4400
4401			if (tq == NULL || ql_target_reset(ha, tq,
4402			    ha->loop_reset_delay) != QL_SUCCESS) {
4403				EL(ha, "failed, FC_PORT_INITIALIZE "
4404				    "FC_FAILURE\n");
4405				rval = FC_FAILURE;
4406			}
4407		} else {
4408			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4409			    "clen=%lxh\n", cmd->pm_cmd_len);
4410
4411			rval = FC_FAILURE;
4412		}
4413		break;
4414	case FC_PORT_RLS:
4415		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4416			EL(ha, "failed, buffer size passed: %lxh, "
4417			    "req: %lxh\n", cmd->pm_data_len,
4418			    (sizeof (fc_rls_acc_t)));
4419			rval = FC_FAILURE;
4420		} else if (LOOP_NOT_READY(pha)) {
4421			EL(ha, "loop NOT ready\n");
4422			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4423		} else if (ql_get_link_status(ha, ha->loop_id,
4424		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4425			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4426			rval = FC_FAILURE;
4427#ifdef _BIG_ENDIAN
4428		} else {
4429			fc_rls_acc_t		*rls;
4430
4431			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4432			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4433			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4434			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4435			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4436#endif /* _BIG_ENDIAN */
4437		}
4438		break;
4439	case FC_PORT_GET_NODE_ID:
4440		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4441		    cmd->pm_data_buf) != QL_SUCCESS) {
4442			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4443			rval = FC_FAILURE;
4444		}
4445		break;
4446	case FC_PORT_SET_NODE_ID:
4447		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4448		    cmd->pm_data_buf) != QL_SUCCESS) {
4449			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4450			rval = FC_FAILURE;
4451		}
4452		break;
4453	case FC_PORT_DOWNLOAD_FCODE:
4454		PORTMANAGE_LOCK(ha);
4455		if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
4456			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4457			    (uint32_t)cmd->pm_data_len);
4458		} else {
4459			if (cmd->pm_data_buf[0] == 4 &&
4460			    cmd->pm_data_buf[8] == 0 &&
4461			    cmd->pm_data_buf[9] == 0x10 &&
4462			    cmd->pm_data_buf[10] == 0 &&
4463			    cmd->pm_data_buf[11] == 0) {
4464				rval = ql_24xx_load_flash(ha,
4465				    (uint8_t *)cmd->pm_data_buf,
4466				    (uint32_t)cmd->pm_data_len,
4467				    ha->flash_fw_addr << 2);
4468			} else {
4469				rval = ql_24xx_load_flash(ha,
4470				    (uint8_t *)cmd->pm_data_buf,
4471				    (uint32_t)cmd->pm_data_len, 0);
4472			}
4473		}
4474
4475		if (rval != QL_SUCCESS) {
4476			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4477			rval = FC_FAILURE;
4478		} else {
4479			rval = FC_SUCCESS;
4480		}
4481		ql_reset_chip(ha);
4482		(void) ql_abort_isp(ha);
4483		PORTMANAGE_UNLOCK(ha);
4484		break;
4485	default:
4486		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4487		rval = FC_BADCMD;
4488		break;
4489	}
4490
4491	/* Wait for suspension to end. */
4492	ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4493	timer = 0;
4494
4495	while (timer++ < 3000 &&
4496	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4497		ql_delay(ha, 10000);
4498	}
4499
4500	ql_restart_queues(ha);
4501
4502	if (rval != FC_SUCCESS) {
4503		EL(ha, "failed, rval = %xh\n", rval);
4504	} else {
4505		/*EMPTY*/
4506		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4507	}
4508
4509	return (rval);
4510}
4511
4512static opaque_t
4513ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4514{
4515	port_id_t		id;
4516	ql_adapter_state_t	*ha;
4517	ql_tgt_t		*tq;
4518
4519	id.r.rsvd_1 = 0;
4520	id.b24 = d_id.port_id;
4521
4522	ha = ql_fca_handle_to_state(fca_handle);
4523	if (ha == NULL) {
4524		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4525		    (void *)fca_handle);
4526		return (NULL);
4527	}
4528	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4529
4530	tq = ql_d_id_to_queue(ha, id);
4531
4532	if (tq == NULL) {
4533		EL(ha, "failed, tq=NULL\n");
4534	} else {
4535		/*EMPTY*/
4536		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4537	}
4538	return (tq);
4539}
4540
4541/* ************************************************************************ */
4542/*			FCA Driver Local Support Functions.		    */
4543/* ************************************************************************ */
4544
4545/*
4546 * ql_cmd_setup
4547 *	Verifies proper command.
4548 *
4549 * Input:
4550 *	fca_handle = handle setup by ql_bind_port().
4551 *	pkt = pointer to fc_packet.
4552 *	rval = pointer for return value.
4553 *
4554 * Returns:
4555 *	Adapter state pointer, NULL = failure.
4556 *
4557 * Context:
4558 *	Kernel context.
4559 */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* Start each command with clean residual counts. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* During a panic/crash dump, skip all further state checks. */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Reject commands while the physical adapter is offline. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For SCSI FCP commands, resolve (and cache) the device queue
	 * and fail fast if the target needs re-authentication or has a
	 * pending RSCN.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			/* Cached queue is stale/missing; look up by d_id. */
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 * Each present handle is cleared and then verified with
	 * ddi_check_dma_handle()/ddi_check_acc_handle(); *rval carries
	 * the DDI status between the three checks.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * The bitwise '&' of the three comparison results is deliberate:
	 * it evaluates all operands without short-circuit branches.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	/* Any failed handle check fails the whole packet. */
	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must have been initialized by this driver. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4688
4689/*
4690 * ql_els_plogi
4691 *	Issue a extended link service port login request.
4692 *
4693 * Input:
4694 *	ha = adapter state pointer.
4695 *	pkt = pointer to fc_packet.
4696 *
4697 * Returns:
4698 *	FC_SUCCESS - the packet was accepted for transport.
4699 *	FC_TRANSPORT_ERROR - a transport error occurred.
4700 *
4701 * Context:
4702 *	Kernel context.
4703 */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* Bail out immediately if the adapter state is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In p2p topology the port sends a PLOGI after determining
		 * it has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	/* ql_p2p_plogi() queued the iocb itself; nothing more to do here. */
	if (ret == QL_CONSUMED) {
		return (ret);
	}

	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* Loop ID collision: retry the login once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database (with ADISC) on success. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		/* Receive buffer size from the 24xx-family or 2xxx init blk. */
		CFG_IST(ha, CFG_CTRL_242581) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Report the remote port's WWPN/WWNN in the ACC. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Exchange busy: convert the ACC into an RJT. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			/* Ask the task daemon to negotiate iiDMA (24xx+). */
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map firmware status to an FC packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4878
4879/*
4880 * ql_p2p_plogi
4881 *	Start an extended link service port login request using
4882 *	an ELS Passthru iocb.
4883 *
4884 * Input:
4885 *	ha = adapter state pointer.
4886 *	pkt = pointer to fc_packet.
4887 *
4888 * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
4890 *
4891 * Context:
4892 *	Kernel context.
4893 */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;		/* scratch target entry for lookups */
	ql_tgt_t	*tq = &tmp;
	int		rval;

	/* Start with a zeroed 24-bit port ID in the scratch entry. */
	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary.  This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				/* A pending PLOGI entry: adopt its handle. */
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Stale handle: step it forward
					 * via a temp (and two separate
					 * increments) to avoid a lint
					 * error.  NOTE(review): net
					 * effect is handle += 2 —
					 * presumably to skip the
					 * unavailable ID; confirm
					 * before changing.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* Logged-in, non-initiator entry: adopt its handle. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the command buffer to the device before queuing the iocb. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);

	return (QL_CONSUMED);
}
4970
4971
4972/*
4973 * ql_els_flogi
 *	Issue an extended link service fabric login request.
4975 *
4976 * Input:
4977 *	ha = adapter state pointer.
4978 *	pkt = pointer to fc_packet.
4979 *
4980 * Returns:
4981 *	FC_SUCCESS - the packet was accepted for transport.
4982 *	FC_TRANSPORT_ERROR - a transport error occurred.
4983 *
4984 * Context:
4985 *	Kernel context.
4986 */
4987static int
4988ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4989{
4990	ql_tgt_t		*tq = NULL;
4991	port_id_t		d_id;
4992	la_els_logi_t		acc;
4993	class_svc_param_t	*class3_param;
4994	int			rval = FC_SUCCESS;
4995	int			accept = 0;
4996
4997	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4998	    pkt->pkt_cmd_fhdr.d_id);
4999
5000	bzero(&acc, sizeof (acc));
5001	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5002
5003	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5004		/*
5005		 * d_id of zero in a FLOGI accept response in a point to point
5006		 * topology triggers evulation of N Port login initiative.
5007		 */
5008		pkt->pkt_resp_fhdr.d_id = 0;
5009		/*
5010		 * An N_Port already logged in with the firmware
5011		 * will have the only database entry.
5012		 */
5013		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5014			tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5015		}
5016
5017		if (tq != NULL) {
5018			/*
5019			 * If the target port has initiative send
5020			 * up a PLOGI about the new device.
5021			 */
5022			if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5023			    (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5024			    &ha->init_ctrl_blk.cb24.port_name[0] :
5025			    &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5026				ha->send_plogi_timer = 3;
5027			} else {
5028				ha->send_plogi_timer = 0;
5029			}
5030			pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5031		} else {
5032			/*
5033			 * An N_Port not logged in with the firmware will not
5034			 * have a database entry.  We accept anyway and rely
5035			 * on a PLOGI from the upper layers to set the d_id
5036			 * and s_id.
5037			 */
5038			accept = 1;
5039		}
5040	} else {
5041		tq = ql_d_id_to_queue(ha, d_id);
5042	}
5043	if ((tq != NULL) || (accept != NULL)) {
5044		/* Build ACC. */
5045		pkt->pkt_state = FC_PKT_SUCCESS;
5046		class3_param = (class_svc_param_t *)&acc.class_3;
5047
5048		acc.ls_code.ls_code = LA_ELS_ACC;
5049		acc.common_service.fcph_version = 0x2006;
5050		if (ha->topology & QL_N_PORT) {
5051			/* clear F_Port indicator */
5052			acc.common_service.cmn_features = 0x0800;
5053		} else {
5054			acc.common_service.cmn_features = 0x1b00;
5055		}
5056		CFG_IST(ha, CFG_CTRL_242581) ?
5057		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5058		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5059		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5060		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5061		    ha->init_ctrl_blk.cb.max_frame_length[0],
5062		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5063		acc.common_service.conc_sequences = 0xff;
5064		acc.common_service.relative_offset = 0x03;
5065		acc.common_service.e_d_tov = 0x7d0;
5066		if (accept) {
5067			/* Use the saved N_Port WWNN and WWPN */
5068			if (ha->n_port != NULL) {
5069				bcopy((void *)&ha->n_port->port_name[0],
5070				    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5071				bcopy((void *)&ha->n_port->node_name[0],
5072				    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5073				/* mark service options invalid */
5074				class3_param->class_valid_svc_opt = 0x0800;
5075			} else {
5076				EL(ha, "ha->n_port is NULL\n");
5077				/* Build RJT. */
5078				acc.ls_code.ls_code = LA_ELS_RJT;
5079
5080				pkt->pkt_state = FC_PKT_TRAN_ERROR;
5081				pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5082			}
5083		} else {
5084			bcopy((void *)&tq->port_name[0],
5085			    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5086			bcopy((void *)&tq->node_name[0],
5087			    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5088
5089			class3_param = (class_svc_param_t *)&acc.class_3;
5090			class3_param->class_valid_svc_opt = 0x8800;
5091			class3_param->recipient_ctl = tq->class3_recipient_ctl;
5092			class3_param->rcv_data_size = tq->class3_rcv_data_size;
5093			class3_param->conc_sequences =
5094			    tq->class3_conc_sequences;
5095			class3_param->open_sequences_per_exch =
5096			    tq->class3_open_sequences_per_exch;
5097		}
5098	} else {
5099		/* Build RJT. */
5100		acc.ls_code.ls_code = LA_ELS_RJT;
5101
5102		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5103		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5104		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5105	}
5106
5107	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5108	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5109
5110	if (rval != FC_SUCCESS) {
5111		EL(ha, "failed, rval = %xh\n", rval);
5112	} else {
5113		/*EMPTY*/
5114		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5115	}
5116	return (rval);
5117}
5118
5119/*
5120 * ql_els_logo
5121 *	Issue a extended link service logout request.
5122 *
5123 * Input:
5124 *	ha = adapter state pointer.
5125 *	pkt = pointer to fc_packet.
5126 *
5127 * Returns:
5128 *	FC_SUCCESS - the packet was accepted for transport.
5129 *	FC_TRANSPORT_ERROR - a transport error occurred.
5130 *
5131 * Context:
5132 *	Kernel context.
5133 */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never log out the broadcast address; just succeed. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require re-authentication before new I/O to this port. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		/*
		 * Abort outstanding commands and poll until the queue's
		 * outstanding count drains.  The device lock is dropped
		 * around the abort/delay and retaken for the outcnt test.
		 * NOTE(review): loop has no iteration bound; it relies on
		 * ql_abort_device()/firmware to drain outcnt.
		 */
		do {
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5199
5200/*
5201 * ql_els_prli
5202 *	Issue a extended link service process login request.
5203 *
5204 * Input:
5205 *	ha = adapter state pointer.
5206 *	pkt = pointer to fc_packet.
5207 *
5208 * Returns:
5209 *	FC_SUCCESS - the packet was accepted for transport.
5210 *	FC_TRANSPORT_ERROR - a transport error occurred.
5211 *
5212 * Context:
5213 *	Kernel context.
5214 */
static int
ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq;
	port_id_t		d_id;
	la_els_prli_t		acc;
	prli_svc_param_t	*param;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		/* Refresh login state from the firmware. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		/*
		 * On an N_Port (point-to-point) topology with PLOGI
		 * already complete, hand the PRLI IOCB directly to the
		 * firmware; QL_CONSUMED tells the caller the packet
		 * will complete asynchronously.
		 */
		if ((ha->topology & QL_N_PORT) &&
		    (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
			ql_start_iocb(ha, (ql_srb_t *)pkt->pkt_fca_private);
			rval = QL_CONSUMED;
		} else {
			/* Build ACC. */
			bzero(&acc, sizeof (acc));
			acc.ls_code = LA_ELS_ACC;
			acc.page_length = 0x10;
			acc.payload_length = tq->prli_payload_length;

			/*
			 * Echo back the service parameters cached from
			 * the target's port database.
			 */
			param = (prli_svc_param_t *)&acc.service_params[0];
			param->type = 0x08;	/* presumably FCP TYPE code -- confirm vs FC-LS */
			param->rsvd = 0x00;
			param->process_assoc_flags = tq->prli_svc_param_word_0;
			param->process_flags = tq->prli_svc_param_word_3;

			ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
			    (uint8_t *)pkt->pkt_resp, sizeof (acc),
			    DDI_DEV_AUTOINCR);

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		la_els_rjt_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5279
5280/*
5281 * ql_els_prlo
 *	Issue an extended link service process logout request.
5283 *
5284 * Input:
5285 *	ha = adapter state pointer.
5286 *	pkt = pointer to fc_packet.
5287 *
5288 * Returns:
5289 *	FC_SUCCESS - the packet was accepted for transport.
5290 *	FC_TRANSPORT_ERROR - a transport error occurred.
5291 *
5292 * Context:
5293 *	Kernel context.
5294 */
5295/* ARGSUSED */
5296static int
5297ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5298{
5299	la_els_prli_t	acc;
5300	int		rval = FC_SUCCESS;
5301
5302	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5303	    pkt->pkt_cmd_fhdr.d_id);
5304
5305	/* Build ACC. */
5306	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5307	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5308
5309	acc.ls_code = LA_ELS_ACC;
5310	acc.service_params[2] = 1;
5311
5312	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5313	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5314
5315	pkt->pkt_state = FC_PKT_SUCCESS;
5316
5317	if (rval != FC_SUCCESS) {
5318		EL(ha, "failed, rval = %xh\n", rval);
5319	} else {
5320		/*EMPTY*/
5321		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5322	}
5323	return (rval);
5324}
5325
5326/*
5327 * ql_els_adisc
 *	Issue an extended link service address discovery request.
5329 *
5330 * Input:
5331 *	ha = adapter state pointer.
5332 *	pkt = pointer to fc_packet.
5333 *
5334 * Returns:
5335 *	FC_SUCCESS - the packet was accepted for transport.
5336 *	FC_TRANSPORT_ERROR - a transport error occurred.
5337 *
5338 * Context:
5339 *	Kernel context.
5340 */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Locate the target queue for this D_ID on its AL_PA hash chain. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * No valid loop ID yet: fetch the firmware's device ID list
	 * and try to recover the loop ID by matching the D_ID.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the number of entries returned. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no valid loop ID; give up on this target. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Restart any commands queued on each LUN. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * NOTE(review): ql_next() appears
					 * to drop the device queue lock,
					 * hence the re-acquire here --
					 * confirm against ql_next().
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		/* Report the target's WWPN/WWNN and port ID in the ACC. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5458
5459/*
5460 * ql_els_linit
 *	Issue an extended link service loop initialize request.
5462 *
5463 * Input:
5464 *	ha = adapter state pointer.
5465 *	pkt = pointer to fc_packet.
5466 *
5467 * Returns:
5468 *	FC_SUCCESS - the packet was accepted for transport.
5469 *	FC_TRANSPORT_ERROR - a transport error occurred.
5470 *
5471 * Context:
5472 *	Kernel context.
5473 */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* LINIT is only forwarded when fabric (SNS) attached. */
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * DMA address of the response buffer, byte-swapped
		 * to little-endian for the firmware.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID and the LINIT payload bytes. */
		lfa.subcommand_length[0] = 4;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x70;	/* LFA subcommand for LINIT */
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5553
5554/*
5555 * ql_els_lpc
 *	Issue an extended link service loop control request.
5557 *
5558 * Input:
5559 *	ha = adapter state pointer.
5560 *	pkt = pointer to fc_packet.
5561 *
5562 * Returns:
5563 *	FC_SUCCESS - the packet was accepted for transport.
5564 *	FC_TRANSPORT_ERROR - a transport error occurred.
5565 *
5566 * Context:
5567 *	Kernel context.
5568 */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* LPC is only forwarded when fabric (SNS) attached. */
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * DMA address of the response buffer, byte-swapped to
		 * little-endian for the firmware.
		 *
		 * NOTE(review): the 32-bit path uses cp->dmac_address
		 * while ql_els_linit() uses LSD(cp->dmac_laddress) --
		 * confirm the two are equivalent here.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID and the LPC payload. */
		lfa.subcommand_length[0] = 20;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x71;	/* LFA subcommand for LPC */
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5646
5647/*
5648 * ql_els_lsts
 *	Issue an extended link service loop status request.
5650 *
5651 * Input:
5652 *	ha = adapter state pointer.
5653 *	pkt = pointer to fc_packet.
5654 *
5655 * Returns:
5656 *	FC_SUCCESS - the packet was accepted for transport.
5657 *	FC_TRANSPORT_ERROR - a transport error occurred.
5658 *
5659 * Context:
5660 *	Kernel context.
5661 */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	/* LSTS is only forwarded when fabric (SNS) attached. */
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		/*
		 * DMA address of the response buffer, byte-swapped to
		 * little-endian for the firmware (32-bit path assumes
		 * the address fits in 32 bits).
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID; LSTS carries no extra payload. */
		lfa.subcommand_length[0] = 2;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		lfa.subcommand[1] = 0x72;	/* LFA subcommand for LSTS */

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5737
5738/*
5739 * ql_els_scr
 *	Issue an extended link service state change registration request.
5741 *
5742 * Input:
5743 *	ha = adapter state pointer.
5744 *	pkt = pointer to fc_packet.
5745 *
5746 * Returns:
5747 *	FC_SUCCESS - the packet was accepted for transport.
5748 *	FC_TRANSPORT_ERROR - a transport error occurred.
5749 *
5750 * Context:
5751 *	Kernel context.
5752 */
5753static int
5754ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5755{
5756	fc_scr_resp_t	acc;
5757	int		rval = FC_SUCCESS;
5758
5759	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5760
5761	bzero(&acc, sizeof (acc));
5762	if (ha->topology & QL_SNS_CONNECTION) {
5763		fc_scr_req_t els;
5764
5765		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5766		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5767
5768		if (ql_send_change_request(ha, els.scr_func) ==
5769		    QL_SUCCESS) {
5770			/* Build ACC. */
5771			acc.scr_acc = LA_ELS_ACC;
5772
5773			pkt->pkt_state = FC_PKT_SUCCESS;
5774		} else {
5775			/* Build RJT. */
5776			acc.scr_acc = LA_ELS_RJT;
5777
5778			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5779			pkt->pkt_reason = FC_REASON_HW_ERROR;
5780			EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5781		}
5782	} else {
5783		/* Build RJT. */
5784		acc.scr_acc = LA_ELS_RJT;
5785
5786		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5787		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5788		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5789	}
5790
5791	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5792	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5793
5794	if (rval != FC_SUCCESS) {
5795		EL(ha, "failed, rval = %xh\n", rval);
5796	} else {
5797		/*EMPTY*/
5798		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5799	}
5800	return (rval);
5801}
5802
5803/*
5804 * ql_els_rscn
 *	Issue an extended link service register state
5806 *	change notification request.
5807 *
5808 * Input:
5809 *	ha = adapter state pointer.
5810 *	pkt = pointer to fc_packet.
5811 *
5812 * Returns:
5813 *	FC_SUCCESS - the packet was accepted for transport.
5814 *	FC_TRANSPORT_ERROR - a transport error occurred.
5815 *
5816 * Context:
5817 *	Kernel context.
5818 */
5819static int
5820ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5821{
5822	ql_rscn_resp_t	acc;
5823	int		rval = FC_SUCCESS;
5824
5825	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5826
5827	bzero(&acc, sizeof (acc));
5828	if (ha->topology & QL_SNS_CONNECTION) {
5829		/* Build ACC. */
5830		acc.scr_acc = LA_ELS_ACC;
5831
5832		pkt->pkt_state = FC_PKT_SUCCESS;
5833	} else {
5834		/* Build RJT. */
5835		acc.scr_acc = LA_ELS_RJT;
5836
5837		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5838		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5839		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5840	}
5841
5842	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5843	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5844
5845	if (rval != FC_SUCCESS) {
5846		EL(ha, "failed, rval = %xh\n", rval);
5847	} else {
5848		/*EMPTY*/
5849		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5850	}
5851	return (rval);
5852}
5853
5854/*
5855 * ql_els_farp_req
5856 *	Issue FC Address Resolution Protocol (FARP)
5857 *	extended link service request.
5858 *
5859 *	Note: not supported.
5860 *
5861 * Input:
5862 *	ha = adapter state pointer.
5863 *	pkt = pointer to fc_packet.
5864 *
5865 * Returns:
5866 *	FC_SUCCESS - the packet was accepted for transport.
5867 *	FC_TRANSPORT_ERROR - a transport error occurred.
5868 *
5869 * Context:
5870 *	Kernel context.
5871 */
5872static int
5873ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5874{
5875	ql_acc_rjt_t	acc;
5876	int		rval = FC_SUCCESS;
5877
5878	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5879
5880	bzero(&acc, sizeof (acc));
5881
5882	/* Build ACC. */
5883	acc.ls_code.ls_code = LA_ELS_ACC;
5884
5885	pkt->pkt_state = FC_PKT_SUCCESS;
5886
5887	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5888	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5889
5890	if (rval != FC_SUCCESS) {
5891		EL(ha, "failed, rval = %xh\n", rval);
5892	} else {
5893		/*EMPTY*/
5894		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5895	}
5896	return (rval);
5897}
5898
5899/*
5900 * ql_els_farp_reply
5901 *	Issue FC Address Resolution Protocol (FARP)
5902 *	extended link service reply.
5903 *
5904 *	Note: not supported.
5905 *
5906 * Input:
5907 *	ha = adapter state pointer.
5908 *	pkt = pointer to fc_packet.
5909 *
5910 * Returns:
5911 *	FC_SUCCESS - the packet was accepted for transport.
5912 *	FC_TRANSPORT_ERROR - a transport error occurred.
5913 *
5914 * Context:
5915 *	Kernel context.
5916 */
5917/* ARGSUSED */
5918static int
5919ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5920{
5921	ql_acc_rjt_t	acc;
5922	int		rval = FC_SUCCESS;
5923
5924	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5925
5926	bzero(&acc, sizeof (acc));
5927
5928	/* Build ACC. */
5929	acc.ls_code.ls_code = LA_ELS_ACC;
5930
5931	pkt->pkt_state = FC_PKT_SUCCESS;
5932
5933	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5934	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5935
5936	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5937
5938	return (rval);
5939}
5940
/*
 * ql_els_rnid
 *	Process a Request Node Identification Data (RNID) ELS.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the accept response was built.
 *	FC_FAILURE - target unknown, no valid loop ID, or the
 *		     firmware RNID request failed (RJT built).
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Locate the target queue for this D_ID on its hash chain. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
	ASSERT(rnid_acc != NULL);	/* KM_SLEEP never returns NULL */

	bzero(&acc, sizeof (acc));

	/*
	 * NOTE(review): the command payload is dereferenced directly
	 * rather than copied in via ddi_rep_get8() as the other ELS
	 * handlers do -- confirm pkt_cmd is host-accessible here.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/*
	 * NOTE(review): copies req_len bytes into acc.hdr -- verify
	 * la_els_rnid_acc_t reserves at least req_len bytes after
	 * ls_code so the accept structure cannot be overrun.
	 */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6005
/*
 * ql_els_rls
 *	Process a Read Link Error Status (RLS) ELS.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the accept response was built.
 *	FC_FAILURE - target unknown, no valid loop ID, or the
 *		     link status fetch failed (RJT built).
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Locate the target queue for this D_ID on its hash chain. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
	ASSERT(rls_acc != NULL);	/* KM_SLEEP never returns NULL */

	bzero(&acc, sizeof (la_els_rls_acc_t));

	/* Unknown target, bad loop ID, or status fetch failure: RJT. */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Byte-swap the counters between firmware and host order. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build ACC carrying the link error status block. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6076
/*
 * ql_busy_plogi
 *	Determine whether a PLOGI must be reported busy because
 *	commands or callbacks for the target are still outstanding.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - outstanding work remains; transport should retry.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;	/* drain retries, 10ms delay each */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer callback: match on S_ID. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			/* A callback for this port is still pending. */
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6160
6161/*
6162 * ql_login_port
6163 *	Logs in a device if not already logged in.
6164 *
6165 * Input:
6166 *	ha = adapter state pointer.
6167 *	d_id = 24 bit port ID.
6168 *	DEVICE_QUEUE_LOCK must be released.
6169 *
6170 * Returns:
6171 *	QL local function return status code.
6172 *
6173 * Context:
6174 *	Kernel context.
6175 */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/*
	 * A lost local-loop device keeps its old loop ID with the
	 * PORT_LOST_ID bit set; strip the bit to retry that ID.
	 */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		/* Name server uses a fixed, controller-specific handle. */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		rval = ql_login_fabric_port(ha, tq, loop_id);
		if (rval == QL_SUCCESS) {
			tq->loop_id = loop_id;
			tq->flags |= TQF_FABRIC_DEVICE;
			(void) ql_get_port_database(ha, tq, PDF_NONE);
			/* Reaching the name server implies a fabric. */
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_PORT_ID_USED) {
				/* Already logged in under this port ID. */
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			/* Initiators get LLF_NONE; targets a full PLOGI. */
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		/*
		 * Scan the shared free-loop-ID space (at most one full
		 * cycle of the range) for an ID no physical or virtual
		 * port is using, and try to log in with it.
		 */
		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				/* Wrap back to the start of the range. */
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				index = 1;	/* terminate the scan */
				break;

			case QL_LOOP_ID_USED:
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;	/* terminate the scan */
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;	/* terminate the scan */
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6367
6368/*
6369 * ql_login_fabric_port
6370 *	Issue login fabric port mailbox command.
6371 *
6372 * Input:
6373 *	ha:		adapter state pointer.
6374 *	tq:		target queue pointer.
6375 *	loop_id:	FC Loop ID.
6376 *
6377 * Returns:
6378 *	ql local function return status code.
6379 *
6380 * Context:
6381 *	Kernel context.
6382 */
6383static int
6384ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6385{
6386	int		rval;
6387	int		index;
6388	int		retry = 0;
6389	port_id_t	d_id;
6390	ql_tgt_t	*newq;
6391	ql_mbx_data_t	mr;
6392
6393	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6394	    tq->d_id.b24);
6395
6396	/*
6397	 * QL_PARAMETER_ERROR also means the firmware is
6398	 * not able to allocate PCB entry due to resource
6399	 * issues, or collision.
6400	 */
6401	do {
6402		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6403		if ((rval == QL_PARAMETER_ERROR) ||
6404		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6405		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6406			retry++;
6407			drv_usecwait(10 * MILLISEC);
6408		} else {
6409			break;
6410		}
6411	} while (retry < 5);
6412
6413	switch (rval) {
6414	case QL_SUCCESS:
6415		tq->loop_id = loop_id;
6416		break;
6417
6418	case QL_PORT_ID_USED:
6419		/*
6420		 * This Loop ID should NOT be in use in drivers
6421		 */
6422		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6423
6424		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6425			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6426			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6427			    newq->loop_id, newq->d_id.b24);
6428			ql_send_logo(ha, newq, NULL);
6429		}
6430
6431		tq->loop_id = mr.mb[1];
6432		break;
6433
6434	case QL_LOOP_ID_USED:
6435		d_id.b.al_pa = LSB(mr.mb[2]);
6436		d_id.b.area = MSB(mr.mb[2]);
6437		d_id.b.domain = LSB(mr.mb[1]);
6438
6439		newq = ql_d_id_to_queue(ha, d_id);
6440		if (newq && (newq->loop_id != loop_id)) {
6441			/*
6442			 * This should NEVER ever happen; but this
6443			 * code is needed to bail out when the worst
6444			 * case happens - or as used to happen before
6445			 */
6446			ASSERT(newq->d_id.b24 == d_id.b24);
6447
6448			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6449			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6450			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6451			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6452			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6453			    newq->d_id.b24, loop_id);
6454
6455			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6456				ADAPTER_STATE_LOCK(ha);
6457
6458				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6459				ql_add_link_b(&ha->dev[index], &newq->device);
6460
6461				newq->d_id.b24 = d_id.b24;
6462
6463				index = ql_alpa_to_index[d_id.b.al_pa];
6464				ql_add_link_b(&ha->dev[index], &newq->device);
6465
6466				ADAPTER_STATE_UNLOCK(ha);
6467			}
6468
6469			(void) ql_get_port_database(ha, newq, PDF_NONE);
6470
6471		}
6472
6473		/*
6474		 * Invalidate the loop ID for the
6475		 * us to obtain a new one.
6476		 */
6477		tq->loop_id = PORT_NO_LOOP_ID;
6478		break;
6479
6480	case QL_ALL_IDS_IN_USE:
6481		rval = QL_FUNCTION_FAILED;
6482		EL(ha, "no loop id's available\n");
6483		break;
6484
6485	default:
6486		if (rval == QL_COMMAND_ERROR) {
6487			switch (mr.mb[1]) {
6488			case 2:
6489			case 3:
6490				rval = QL_MEMORY_ALLOC_FAILED;
6491				break;
6492
6493			case 4:
6494				rval = QL_FUNCTION_TIMEOUT;
6495				break;
6496			case 7:
6497				rval = QL_FABRIC_NOT_INITIALIZED;
6498				break;
6499			default:
6500				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6501				break;
6502			}
6503		} else {
6504			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6505			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6506			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6507		}
6508		break;
6509	}
6510
6511	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6512	    rval != QL_LOOP_ID_USED) {
6513		EL(ha, "failed=%xh\n", rval);
6514	} else {
6515		/*EMPTY*/
6516		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6517	}
6518	return (rval);
6519}
6520
6521/*
6522 * ql_logout_port
6523 *	Logs out a device if possible.
6524 *
6525 * Input:
6526 *	ha:	adapter state pointer.
6527 *	d_id:	24 bit port ID.
6528 *
6529 * Returns:
6530 *	QL local function return status code.
6531 *
6532 * Context:
6533 *	Kernel context.
6534 */
6535static int
6536ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6537{
6538	ql_link_t	*link;
6539	ql_tgt_t	*tq;
6540	uint16_t	index;
6541
6542	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6543
6544	/* Get head queue index. */
6545	index = ql_alpa_to_index[d_id.b.al_pa];
6546
6547	/* Get device queue. */
6548	tq = NULL;
6549	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6550		tq = link->base_address;
6551		if (tq->d_id.b24 == d_id.b24) {
6552			break;
6553		} else {
6554			tq = NULL;
6555		}
6556	}
6557
6558	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6559		(void) ql_logout_fabric_port(ha, tq);
6560		tq->loop_id = PORT_NO_LOOP_ID;
6561	}
6562
6563	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6564
6565	return (QL_SUCCESS);
6566}
6567
6568/*
6569 * ql_dev_init
6570 *	Initialize/allocate device queue.
6571 *
6572 * Input:
6573 *	ha:		adapter state pointer.
6574 *	d_id:		device destination ID
6575 *	loop_id:	device loop ID
6576 *	ADAPTER_STATE_LOCK must be already obtained.
6577 *
6578 * Returns:
6579 *	NULL = failure
6580 *
6581 * Context:
6582 *	Kernel context.
6583 */
6584ql_tgt_t *
6585ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6586{
6587	ql_link_t	*link;
6588	uint16_t	index;
6589	ql_tgt_t	*tq;
6590
6591	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6592	    ha->instance, d_id.b24, loop_id);
6593
6594	index = ql_alpa_to_index[d_id.b.al_pa];
6595
6596	/* If device queue exists, set proper loop ID. */
6597	tq = NULL;
6598	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6599		tq = link->base_address;
6600		if (tq->d_id.b24 == d_id.b24) {
6601			tq->loop_id = loop_id;
6602
6603			/* Reset port down retry count. */
6604			tq->port_down_retry_count = ha->port_down_retry_count;
6605			tq->qfull_retry_count = ha->qfull_retry_count;
6606
6607			break;
6608		} else {
6609			tq = NULL;
6610		}
6611	}
6612
6613	/* If device does not have queue. */
6614	if (tq == NULL) {
6615		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6616		if (tq != NULL) {
6617			/*
6618			 * mutex to protect the device queue,
6619			 * does not block interrupts.
6620			 */
6621			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6622			    (ha->iflags & IFLG_INTR_AIF) ?
6623			    (void *)(uintptr_t)ha->intr_pri :
6624			    (void *)(uintptr_t)ha->iblock_cookie);
6625
6626			tq->d_id.b24 = d_id.b24;
6627			tq->loop_id = loop_id;
6628			tq->device.base_address = tq;
6629			tq->iidma_rate = IIDMA_RATE_INIT;
6630
6631			/* Reset port down retry count. */
6632			tq->port_down_retry_count = ha->port_down_retry_count;
6633			tq->qfull_retry_count = ha->qfull_retry_count;
6634
6635			/* Add device to device queue. */
6636			ql_add_link_b(&ha->dev[index], &tq->device);
6637		}
6638	}
6639
6640	if (tq == NULL) {
6641		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6642	} else {
6643		/*EMPTY*/
6644		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6645	}
6646	return (tq);
6647}
6648
6649/*
6650 * ql_dev_free
6651 *	Remove queue from device list and frees resources used by queue.
6652 *
6653 * Input:
6654 *	ha:	adapter state pointer.
6655 *	tq:	target queue pointer.
6656 *	ADAPTER_STATE_LOCK must be already obtained.
6657 *
6658 * Context:
6659 *	Kernel context.
6660 */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * If any LUN queue still has commands waiting, the target is
	 * still in use; bail out without freeing anything.  (This early
	 * return intentionally skips the "done" trace below.)
	 */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only tear down when no commands are outstanding on the target. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				/* Unlink the target from its hash bucket. */
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue; the iterator is
				 * advanced before the node is freed.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6704
6705/*
6706 * ql_lun_queue
6707 *	Allocate LUN queue if does not exists.
6708 *
6709 * Input:
6710 *	ha:	adapter state pointer.
6711 *	tq:	target queue.
6712 *	lun:	LUN number.
6713 *
6714 * Returns:
6715 *	NULL = failure
6716 *
6717 * Context:
6718 *	Kernel context.
6719 */
6720static ql_lun_t *
6721ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6722{
6723	ql_lun_t	*lq;
6724	ql_link_t	*link;
6725
6726	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6727
6728	/* Fast path. */
6729	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6730		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6731		return (tq->last_lun_queue);
6732	}
6733
6734	if (lun >= MAX_LUNS) {
6735		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6736		return (NULL);
6737	}
6738	/* If device queue exists, set proper loop ID. */
6739	lq = NULL;
6740	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6741		lq = link->base_address;
6742		if (lq->lun_no == lun) {
6743			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6744			tq->last_lun_queue = lq;
6745			return (lq);
6746		}
6747	}
6748
6749	/* If queue does exist. */
6750	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6751
6752	/* Initialize LUN queue. */
6753	if (lq != NULL) {
6754		lq->link.base_address = lq;
6755
6756		lq->lun_no = lun;
6757		lq->target_queue = tq;
6758
6759		DEVICE_QUEUE_LOCK(tq);
6760		ql_add_link_b(&tq->lun_queues, &lq->link);
6761		DEVICE_QUEUE_UNLOCK(tq);
6762		tq->last_lun_queue = lq;
6763	}
6764
6765	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6766
6767	return (lq);
6768}
6769
6770/*
6771 * ql_fcp_scsi_cmd
6772 *	Process fibre channel (FCP) SCSI protocol commands.
6773 *
6774 * Input:
6775 *	ha = adapter state pointer.
6776 *	pkt = pointer to fc_packet.
6777 *	sp = srb pointer.
6778 *
6779 * Returns:
6780 *	FC_SUCCESS - the packet was accepted for transport.
6781 *	FC_TRANSPORT_ERROR - a transport error occurred.
6782 *
6783 * Context:
6784 *	Kernel context.
6785 */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Use the device queue cached in the packet, else look it up. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* LUN number is carried in the FCP entry address field. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 * NOTE(review): assumes pkt_resp is 8-byte aligned and at
		 * least 24 bytes long — confirm against ULP allocation.
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Accumulate I/O statistics for the xioctl layer. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					/*
					 * Cookies beyond the command IOCB's
					 * capacity spill into continuation
					 * IOCBs: divide, round up, and add
					 * one for the command IOCB itself.
					 */
					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No device or LUN queue: reject with no-connection. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6878
6879/*
6880 * ql_task_mgmt
6881 *	Task management function processor.
6882 *
6883 * Input:
6884 *	ha:	adapter state pointer.
6885 *	tq:	target queue pointer.
6886 *	pkt:	pointer to fc_packet.
6887 *	sp:	SRB pointer.
6888 *
6889 * Context:
6890 *	Kernel context.
6891 */
6892static void
6893ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6894    ql_srb_t *sp)
6895{
6896	fcp_rsp_t		*fcpr;
6897	struct fcp_rsp_info	*rsp;
6898	uint16_t		lun;
6899
6900	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6901
6902	ASSERT(pkt->pkt_cmd_dma == NULL && pkt->pkt_resp_dma == NULL);
6903
6904	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6905	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6906
6907	bzero(fcpr, pkt->pkt_rsplen);
6908
6909	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6910	fcpr->fcp_response_len = 8;
6911	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6912	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6913
6914	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6915		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6916			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6917		}
6918	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6919		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6920			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6921		}
6922	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6923		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6924		    QL_SUCCESS) {
6925			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6926		}
6927	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6928		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6929			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6930		}
6931	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6932		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6933			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6934		}
6935	} else {
6936		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6937	}
6938
6939	pkt->pkt_state = FC_PKT_SUCCESS;
6940
6941	/* Do command callback. */
6942	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6943		ql_awaken_task_daemon(ha, sp, 0, 0);
6944	}
6945
6946	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6947}
6948
6949/*
6950 * ql_fcp_ip_cmd
6951 *	Process fibre channel (FCP) Internet (IP) protocols commands.
6952 *
6953 * Input:
6954 *	ha:	adapter state pointer.
6955 *	pkt:	pointer to fc_packet.
6956 *	sp:	SRB pointer.
6957 *
6958 * Returns:
6959 *	FC_SUCCESS - the packet was accepted for transport.
6960 *	FC_TRANSPORT_ERROR - a transport error occurred.
6961 *
6962 * Context:
6963 *	Kernel context.
6964 */
6965static int
6966ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6967{
6968	port_id_t	d_id;
6969	ql_tgt_t	*tq;
6970
6971	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6972
6973	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6974	if (tq == NULL) {
6975		d_id.r.rsvd_1 = 0;
6976		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6977		tq = ql_d_id_to_queue(ha, d_id);
6978	}
6979
6980	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6981		/*
6982		 * IP data is bound to pkt_cmd_dma
6983		 */
6984		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6985		    0, 0, DDI_DMA_SYNC_FORDEV);
6986
6987		/* Setup IOCB count. */
6988		sp->iocb = ha->ip_cmd;
6989		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6990			uint32_t	cnt;
6991
6992			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6993			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6994			if (cnt % ha->cmd_cont_segs) {
6995				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6996			} else {
6997				sp->req_cnt++;
6998			}
6999		} else {
7000			sp->req_cnt = 1;
7001		}
7002		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7003
7004		return (ql_start_cmd(ha, tq, pkt, sp));
7005	} else {
7006		pkt->pkt_state = FC_PKT_LOCAL_RJT;
7007		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7008
7009		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7010			ql_awaken_task_daemon(ha, sp, 0, 0);
7011	}
7012
7013	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7014
7015	return (FC_SUCCESS);
7016}
7017
7018/*
7019 * ql_fc_services
7020 *	Process fibre channel services (name server).
7021 *
7022 * Input:
7023 *	ha:	adapter state pointer.
7024 *	pkt:	pointer to fc_packet.
7025 *
7026 * Returns:
7027 *	FC_SUCCESS - the packet was accepted for transport.
7028 *	FC_TRANSPORT_ERROR - a transport error occurred.
7029 *
7030 * Context:
7031 *	Kernel context.
7032 */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the DMA command buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/*
	 * Do some sanity checks
	 * NOTE(review): cnt derives from the command AIU size but is
	 * compared against pkt_rsplen — confirm that is the intended
	 * bound and not pkt_cmdlen.
	 */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	ASSERT(cnt <= (uint32_t)pkt->pkt_rsplen);
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/*
			 * No path to the name/management server;
			 * rval stays QL_SUCCESS so no RJT is built,
			 * only the callback below fires.
			 */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			/*
			 * Response cookies beyond the MS IOCB spill into
			 * continuation IOCBs: divide, round up, add one
			 * for the MS IOCB itself.
			 */
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7137
7138/*
7139 * ql_cthdr_endian
7140 *	Change endianess of ct passthrough header and payload.
7141 *
7142 * Input:
7143 *	acc_handle:	DMA buffer access handle.
7144 *	ct_hdr:		Pointer to header.
7145 *	restore:	Restore first flag.
7146 *
7147 * Context:
7148 *	Interrupt or Kernel context, no mailbox commands allowed.
7149 */
7150void
7151ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7152    boolean_t restore)
7153{
7154	uint8_t		i, *bp;
7155	fc_ct_header_t	hdr;
7156	uint32_t	*hdrp = (uint32_t *)&hdr;
7157
7158	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7159	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7160
7161	if (restore) {
7162		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7163			*hdrp = BE_32(*hdrp);
7164			hdrp++;
7165		}
7166	}
7167
7168	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7169		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7170
7171		switch (hdr.ct_cmdrsp) {
7172		case NS_GA_NXT:
7173		case NS_GPN_ID:
7174		case NS_GNN_ID:
7175		case NS_GCS_ID:
7176		case NS_GFT_ID:
7177		case NS_GSPN_ID:
7178		case NS_GPT_ID:
7179		case NS_GID_FT:
7180		case NS_GID_PT:
7181		case NS_RPN_ID:
7182		case NS_RNN_ID:
7183		case NS_RSPN_ID:
7184		case NS_DA_ID:
7185			BIG_ENDIAN_32(bp);
7186			break;
7187		case NS_RFT_ID:
7188		case NS_RCS_ID:
7189		case NS_RPT_ID:
7190			BIG_ENDIAN_32(bp);
7191			bp += 4;
7192			BIG_ENDIAN_32(bp);
7193			break;
7194		case NS_GNN_IP:
7195		case NS_GIPA_IP:
7196			BIG_ENDIAN(bp, 16);
7197			break;
7198		case NS_RIP_NN:
7199			bp += 8;
7200			BIG_ENDIAN(bp, 16);
7201			break;
7202		case NS_RIPA_NN:
7203			bp += 8;
7204			BIG_ENDIAN_64(bp);
7205			break;
7206		default:
7207			break;
7208		}
7209	}
7210
7211	if (restore == B_FALSE) {
7212		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7213			*hdrp = BE_32(*hdrp);
7214			hdrp++;
7215		}
7216	}
7217
7218	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7219	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7220}
7221
7222/*
7223 * ql_start_cmd
7224 *	Finishes starting fibre channel protocol (FCP) command.
7225 *
7226 * Input:
7227 *	ha:	adapter state pointer.
7228 *	tq:	target queue pointer.
7229 *	pkt:	pointer to fc_packet.
7230 *	sp:	SRB pointer.
7231 *
7232 * Context:
7233 *	Kernel context.
7234 */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
		ASSERT(poll_wait != 0);
	}

	/*
	 * With link-down reporting enabled and the loop-down timer
	 * expired, fail the command immediately instead of queuing it.
	 */
	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			ASSERT(ha->pha->outstanding_cmds[0] == NULL);

			DEVICE_QUEUE_UNLOCK(tq);

			/* Only the first panic-time command kicks the ISP. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass the device queue entirely while panicking. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		ASSERT(sp->flags & SRB_POLL);

		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/* Timed out: abort, and dequeue if abort failed. */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				ASSERT(res == FC_OFFLINE ||
				    res == FC_ABORT_FAILED);

				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
		    SRB_IN_TOKEN_ARRAY)) == 0);

		if (ddi_in_panic()) {
			ASSERT(ha->pha->outstanding_cmds[0] == NULL);
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7406
7407/*
7408 * ql_poll_cmd
7409 *	Polls commands for completion.
7410 *
7411 * Input:
7412 *	ha = adapter state pointer.
7413 *	sp = SRB command pointer.
7414 *	poll_wait = poll wait time in seconds.
7415 *
7416 * Returns:
7417 *	QL local function return status code.
7418 *
7419 * Context:
7420 *	Kernel context.
7421 */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Spin until the ISR clears SRB_POLL or the wait time expires. */
	while (sp->flags & SRB_POLL) {

		/*
		 * Service the hardware by hand when interrupt delivery
		 * appears unavailable (disabled, stalled, or panicking).
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Poll RISC interrupt status and run the ISR. */
			if ((CFG_IST(ha, CFG_CTRL_242581) ?
			    RD32_IO_REG(ha, istatus) :
			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		/* Out of budget: report a timeout to the caller. */
		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7494
7495/*
7496 * ql_next
7497 *	Retrieve and process next job in the device queue.
7498 *
7499 * Input:
7500 *	ha:	adapter state pointer.
7501 *	lq:	LUN queue pointer.
7502 *	DEVICE_QUEUE_LOCK must be already obtained.
7503 *
7504 * Output:
7505 *	Releases DEVICE_QUEUE_LOCK upon exit.
7506 *
7507 * Context:
7508 *	Interrupt or Kernel context, no mailbox commands allowed.
7509 */
void
ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
{
	ql_srb_t		*sp;
	ql_link_t		*link;
	ql_tgt_t		*tq = lq->target_queue;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* During panic, commands are issued directly; do not drain here. */
	if (ddi_in_panic()) {
		DEVICE_QUEUE_UNLOCK(tq);
		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
		    ha->instance);
		return;
	}

	/* Drain the LUN command queue while the adapter can accept work. */
	while ((link = lq->cmd.first) != NULL) {
		sp = link->base_address;

		/* Exit if can not start commands. */
		if (DRIVER_SUSPENDED(ha) ||
		    (ha->flags & ONLINE) == 0 ||
		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
		    sp->flags & SRB_ABORT ||
		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
		    TQF_QUEUE_SUSPENDED)) {
			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
			    ha->task_daemon_flags, tq->flags, sp->flags,
			    ha->flags, tq->loop_id);
			break;
		}

		/*
		 * Find out the LUN number for untagged command use.
		 * If there is an untagged command pending for the LUN,
		 * we would not submit another untagged command
		 * or if reached LUN execution throttle.
		 */
		if (sp->flags & SRB_FCP_CMD_PKT) {
			if (lq->flags & LQF_UNTAGGED_PENDING ||
			    lq->lun_outcnt >= ha->execution_throttle) {
				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
				break;
			}
			if (sp->fcp->fcp_cntl.cntl_qtype ==
			    FCP_QTYPE_UNTAGGED) {
				/*
				 * Set the untagged-flag for the LUN
				 * so that no more untagged commands
				 * can be submitted for this LUN.
				 */
				lq->flags |= LQF_UNTAGGED_PENDING;
			}

			/* Count command as sent. */
			lq->lun_outcnt++;
		}

		/* Remove srb from device queue. */
		ql_remove_link(&lq->cmd, &sp->cmd);
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		tq->outcnt++;

		/* Hand the SRB to the ISP request queue. */
		ql_start_iocb(vha, sp);
	}

	/* Release device queue lock. */
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7586
7587/*
7588 * ql_done
7589 *	Process completed commands.
7590 *
7591 * Input:
7592 *	link:	first command link in chain.
7593 *
7594 * Context:
7595 *	Interrupt or Kernel context, no mailbox commands allowed.
7596 */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	/*
	 * Walk the completion chain.  Each srb may be re-linked onto
	 * another queue below, so the next pointer is captured first.
	 */
	for (; link != NULL; link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer completion. */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				/* Lock dropped across ql_isp_rcvbuf(). */
				QL_UB_UNLOCK(ha);
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			/* Defer the ULP callback to the task daemon. */
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}

			/*
			 * Place request back on top of target command queue.
			 * Retry only if the watchdog is running with time
			 * remaining; otherwise complete the packet below.
			 */
			if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_242581) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				/* Requeue at the head for immediate retry. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				/* ql_next() releases the device queue lock. */
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				if (lq->cmd.first != NULL) {
					/* Lock released by ql_next(). */
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					/* Emulate a LOGO toward the ULP. */
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					/*
					 * Aborts that race an RSCN or pending
					 * re-authentication are reported as
					 * offline so the ULP re-logins.
					 */
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_PKT_TRAN_ERROR;
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					/* Poller reaps the packet itself. */
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						/* Defer to task daemon. */
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
7828
7829/*
7830 * ql_awaken_task_daemon
7831 *	Adds command completion callback to callback queue and/or
7832 *	awakens task daemon thread.
7833 *
7834 * Input:
7835 *	ha:		adapter state pointer.
7836 *	sp:		srb pointer.
7837 *	set_flags:	task daemon flags to set.
7838 *	reset_flags:	task daemon flags to reset.
7839 *
7840 * Context:
7841 *	Interrupt or Kernel context, no mailbox commands allowed.
7842 */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	/* Task daemon state lives on the physical adapter. */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is in progress. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		/*
		 * Daemon can't run; do the work inline instead of
		 * queueing it.  Note: lock is released on both paths.
		 */
		if (sp != NULL) {
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/*
			 * Run the task thread directly, but never from
			 * interrupt context and never re-entrantly.
			 */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue the completion and wake the daemon if asleep. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7896
7897/*
7898 * ql_task_daemon
7899 *	Thread that is awaken by the driver when a
7900 *	background needs to be done.
7901 *
7902 * Input:
7903 *	arg = adapter state pointer.
7904 *
7905 * Context:
7906 *	Kernel context.
7907 */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Register with CPR so suspend/resume can park this thread. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	/* Work loop: process tasks, then sleep until awakened. */
	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT drops task_daemon_mutex for us. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
7965
7966/*
7967 * ql_task_thread
7968 *	Thread run by daemon.
7969 *
7970 * Input:
7971 *	ha = adapter state pointer.
7972 *	TASK_DAEMON_LOCK must be acquired prior to call.
7973 *
7974 * Context:
7975 *	Kernel context.
7976 */
7977static void
7978ql_task_thread(ql_adapter_state_t *ha)
7979{
7980	int			loop_again, rval;
7981	ql_srb_t		*sp;
7982	ql_head_t		*head;
7983	ql_link_t		*link;
7984	caddr_t			msg;
7985	ql_adapter_state_t	*vha;
7986
7987	do {
7988		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
7989		    ha->instance, ha->task_daemon_flags);
7990
7991		loop_again = FALSE;
7992
7993		QL_PM_LOCK(ha);
7994		if (ha->power_level != PM_LEVEL_D0) {
7995			QL_PM_UNLOCK(ha);
7996			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
7997			break;
7998		}
7999		QL_PM_UNLOCK(ha);
8000
8001		/* IDC acknowledge needed. */
8002		if (ha->task_daemon_flags & IDC_ACK_NEEDED) {
8003			ha->task_daemon_flags &= ~IDC_ACK_NEEDED;
8004			ADAPTER_STATE_LOCK(ha);
8005			switch (ha->idc_mb[2]) {
8006			case IDC_OPC_DRV_START:
8007				if (ha->idc_restart_mpi != 0) {
8008					ha->idc_restart_mpi--;
8009					if (ha->idc_restart_mpi == 0) {
8010						ha->restart_mpi_timer = 0;
8011						ha->task_daemon_flags &=
8012						    ~TASK_DAEMON_STALLED_FLG;
8013					}
8014				}
8015				if (ha->idc_flash_acc != 0) {
8016					ha->idc_flash_acc--;
8017					if (ha->idc_flash_acc == 0) {
8018						ha->flash_acc_timer = 0;
8019						GLOBAL_HW_LOCK();
8020					}
8021				}
8022				break;
8023			case IDC_OPC_FLASH_ACC:
8024				ha->flash_acc_timer = 30;
8025				if (ha->idc_flash_acc == 0) {
8026					GLOBAL_HW_UNLOCK();
8027				}
8028				ha->idc_flash_acc++;
8029				break;
8030			case IDC_OPC_RESTART_MPI:
8031				ha->restart_mpi_timer = 30;
8032				ha->idc_restart_mpi++;
8033				ha->task_daemon_flags |=
8034				    TASK_DAEMON_STALLED_FLG;
8035				break;
8036			default:
8037				EL(ha, "Unknown IDC opcode=%xh\n",
8038				    ha->idc_mb[2]);
8039				break;
8040			}
8041			ADAPTER_STATE_UNLOCK(ha);
8042
8043			if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
8044				TASK_DAEMON_UNLOCK(ha);
8045				rval = ql_idc_ack(ha);
8046				if (rval != QL_SUCCESS) {
8047					EL(ha, "idc_ack status=%xh\n", rval);
8048				}
8049				TASK_DAEMON_LOCK(ha);
8050				loop_again = TRUE;
8051			}
8052		}
8053
8054		if (ha->flags & ADAPTER_SUSPENDED ||
8055		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
8056		    DRIVER_STALL) ||
8057		    (ha->flags & ONLINE) == 0) {
8058			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8059			break;
8060		}
8061		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8062
8063		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8064			TASK_DAEMON_UNLOCK(ha);
8065			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8066			TASK_DAEMON_LOCK(ha);
8067			loop_again = TRUE;
8068		}
8069
8070		/* Idle Check. */
8071		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8072			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8073			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8074				TASK_DAEMON_UNLOCK(ha);
8075				ql_idle_check(ha);
8076				TASK_DAEMON_LOCK(ha);
8077				loop_again = TRUE;
8078			}
8079		}
8080
8081		/* Crystal+ port#0 bypass transition */
8082		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8083			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8084			TASK_DAEMON_UNLOCK(ha);
8085			(void) ql_initiate_lip(ha);
8086			TASK_DAEMON_LOCK(ha);
8087			loop_again = TRUE;
8088		}
8089
8090		/* Abort queues needed. */
8091		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8092			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8093			TASK_DAEMON_UNLOCK(ha);
8094			ql_abort_queues(ha);
8095			TASK_DAEMON_LOCK(ha);
8096		}
8097
8098		/* Not suspended, awaken waiting routines. */
8099		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8100		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8101			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8102			cv_broadcast(&ha->cv_dr_suspended);
8103			loop_again = TRUE;
8104		}
8105
8106		/* Handle RSCN changes. */
8107		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8108			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8109				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8110				TASK_DAEMON_UNLOCK(ha);
8111				(void) ql_handle_rscn_update(vha);
8112				TASK_DAEMON_LOCK(ha);
8113				loop_again = TRUE;
8114			}
8115		}
8116
8117		/* Handle state changes. */
8118		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8119			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8120			    !(ha->task_daemon_flags &
8121			    TASK_DAEMON_POWERING_DOWN)) {
8122				/* Report state change. */
8123				EL(vha, "state change = %xh\n", vha->state);
8124				vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8125
8126				if (vha->task_daemon_flags &
8127				    COMMAND_WAIT_NEEDED) {
8128					vha->task_daemon_flags &=
8129					    ~COMMAND_WAIT_NEEDED;
8130					if (!(ha->task_daemon_flags &
8131					    COMMAND_WAIT_ACTIVE)) {
8132						ha->task_daemon_flags |=
8133						    COMMAND_WAIT_ACTIVE;
8134						TASK_DAEMON_UNLOCK(ha);
8135						ql_cmd_wait(ha);
8136						TASK_DAEMON_LOCK(ha);
8137						ha->task_daemon_flags &=
8138						    ~COMMAND_WAIT_ACTIVE;
8139					}
8140				}
8141
8142				msg = NULL;
8143				if (FC_PORT_STATE_MASK(vha->state) ==
8144				    FC_STATE_OFFLINE) {
8145					if (vha->task_daemon_flags &
8146					    STATE_ONLINE) {
8147						if (ha->topology &
8148						    QL_LOOP_CONNECTION) {
8149							msg = "Loop OFFLINE";
8150						} else {
8151							msg = "Link OFFLINE";
8152						}
8153					}
8154					vha->task_daemon_flags &=
8155					    ~STATE_ONLINE;
8156				} else if (FC_PORT_STATE_MASK(vha->state) ==
8157				    FC_STATE_LOOP) {
8158					if (!(vha->task_daemon_flags &
8159					    STATE_ONLINE)) {
8160						msg = "Loop ONLINE";
8161					}
8162					vha->task_daemon_flags |= STATE_ONLINE;
8163				} else if (FC_PORT_STATE_MASK(vha->state) ==
8164				    FC_STATE_ONLINE) {
8165					if (!(vha->task_daemon_flags &
8166					    STATE_ONLINE)) {
8167						msg = "Link ONLINE";
8168					}
8169					vha->task_daemon_flags |= STATE_ONLINE;
8170				} else {
8171					msg = "Unknown Link state";
8172				}
8173
8174				if (msg != NULL) {
8175					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8176					    "%s", QL_NAME, ha->instance,
8177					    vha->vp_index, msg);
8178				}
8179
8180				if (vha->flags & FCA_BOUND) {
8181					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8182					    "cb state=%xh\n", ha->instance,
8183					    vha->vp_index, vha->state);
8184					TASK_DAEMON_UNLOCK(ha);
8185					(vha->bind_info.port_statec_cb)
8186					    (vha->bind_info.port_handle,
8187					    vha->state);
8188					TASK_DAEMON_LOCK(ha);
8189				}
8190				loop_again = TRUE;
8191			}
8192		}
8193
8194		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8195		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8196			EL(ha, "processing LIP reset\n");
8197			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8198			TASK_DAEMON_UNLOCK(ha);
8199			for (vha = ha; vha != NULL; vha = vha->vp_next) {
8200				if (vha->flags & FCA_BOUND) {
8201					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8202					    "cb reset\n", ha->instance,
8203					    vha->vp_index);
8204					(vha->bind_info.port_statec_cb)
8205					    (vha->bind_info.port_handle,
8206					    FC_STATE_TARGET_PORT_RESET);
8207				}
8208			}
8209			TASK_DAEMON_LOCK(ha);
8210			loop_again = TRUE;
8211		}
8212
8213		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8214		    FIRMWARE_UP)) {
8215			/*
8216			 * The firmware needs more unsolicited
8217			 * buffers. We cannot allocate any new
8218			 * buffers unless the ULP module requests
8219			 * for new buffers. All we can do here is
8220			 * to give received buffers from the pool
8221			 * that is already allocated
8222			 */
8223			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8224			TASK_DAEMON_UNLOCK(ha);
8225			ql_isp_rcvbuf(ha);
8226			TASK_DAEMON_LOCK(ha);
8227			loop_again = TRUE;
8228		}
8229
8230		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8231			TASK_DAEMON_UNLOCK(ha);
8232			(void) ql_abort_isp(ha);
8233			TASK_DAEMON_LOCK(ha);
8234			loop_again = TRUE;
8235		}
8236
8237		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8238		    COMMAND_WAIT_NEEDED))) {
8239			if (QL_IS_SET(ha->task_daemon_flags,
8240			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8241				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8242				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8243					ha->task_daemon_flags |= RESET_ACTIVE;
8244					TASK_DAEMON_UNLOCK(ha);
8245					for (vha = ha; vha != NULL;
8246					    vha = vha->vp_next) {
8247						ql_rst_aen(vha);
8248					}
8249					TASK_DAEMON_LOCK(ha);
8250					ha->task_daemon_flags &= ~RESET_ACTIVE;
8251					loop_again = TRUE;
8252				}
8253			}
8254
8255			if (QL_IS_SET(ha->task_daemon_flags,
8256			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8257				if (!(ha->task_daemon_flags &
8258				    LOOP_RESYNC_ACTIVE)) {
8259					ha->task_daemon_flags |=
8260					    LOOP_RESYNC_ACTIVE;
8261					TASK_DAEMON_UNLOCK(ha);
8262					(void) ql_loop_resync(ha);
8263					TASK_DAEMON_LOCK(ha);
8264					loop_again = TRUE;
8265				}
8266			}
8267		}
8268
8269		/* Port retry needed. */
8270		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8271			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8272			ADAPTER_STATE_LOCK(ha);
8273			ha->port_retry_timer = 0;
8274			ADAPTER_STATE_UNLOCK(ha);
8275
8276			TASK_DAEMON_UNLOCK(ha);
8277			ql_restart_queues(ha);
8278			TASK_DAEMON_LOCK(ha);
8279			loop_again = B_TRUE;
8280		}
8281
8282		/* iiDMA setting needed? */
8283		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8284			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8285
8286			TASK_DAEMON_UNLOCK(ha);
8287			ql_iidma(ha);
8288			TASK_DAEMON_LOCK(ha);
8289			loop_again = B_TRUE;
8290		}
8291
8292		if (ha->task_daemon_flags & SEND_PLOGI) {
8293			ha->task_daemon_flags &= ~SEND_PLOGI;
8294			TASK_DAEMON_UNLOCK(ha);
8295			ql_n_port_plogi(ha);
8296			TASK_DAEMON_LOCK(ha);
8297		}
8298
8299		head = &ha->callback_queue;
8300		if (head->first != NULL) {
8301			sp = head->first->base_address;
8302			link = &sp->cmd;
8303
8304			/* Dequeue command. */
8305			ql_remove_link(head, link);
8306
8307			/* Release task daemon lock. */
8308			TASK_DAEMON_UNLOCK(ha);
8309
8310			ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
8311			    SRB_IN_TOKEN_ARRAY)) == 0);
8312
8313			/* Do callback. */
8314			if (sp->flags & SRB_UB_CALLBACK) {
8315				ql_unsol_callback(sp);
8316			} else {
8317				(*sp->pkt->pkt_comp)(sp->pkt);
8318			}
8319
8320			/* Acquire task daemon lock. */
8321			TASK_DAEMON_LOCK(ha);
8322
8323			loop_again = TRUE;
8324		}
8325
8326	} while (loop_again);
8327}
8328
8329/*
8330 * ql_idle_check
8331 *	Test for adapter is alive and well.
8332 *
8333 * Input:
8334 *	ha:	adapter state pointer.
8335 *
8336 * Context:
8337 *	Kernel context.
8338 */
8339static void
8340ql_idle_check(ql_adapter_state_t *ha)
8341{
8342	ddi_devstate_t	state;
8343	int		rval;
8344	ql_mbx_data_t	mr;
8345
8346	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8347
8348	/* Firmware Ready Test. */
8349	rval = ql_get_firmware_state(ha, &mr);
8350	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8351	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8352		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8353		state = ddi_get_devstate(ha->dip);
8354		if (state == DDI_DEVSTATE_UP) {
8355			/*EMPTY*/
8356			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8357			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8358		}
8359		TASK_DAEMON_LOCK(ha);
8360		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8361			EL(ha, "fstate_ready, isp_abort_needed\n");
8362			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8363		}
8364		TASK_DAEMON_UNLOCK(ha);
8365	}
8366
8367	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8368}
8369
8370/*
8371 * ql_unsol_callback
8372 *	Handle unsolicited buffer callbacks.
8373 *
8374 * Input:
8375 *	ha = adapter state pointer.
8376 *	sp = srb pointer.
8377 *
8378 * Context:
8379 *	Kernel context.
8380 */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* sp->handle indexes the adapter's unsolicited buffer array. */
	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	/* Buffer being reclaimed or adapter powering down: return it. */
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/* Re-queue ourselves to be retried later. */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Same deferral scheme as the RSCN case above. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	/* Buffer passes to the ULP now; clear driver-ownership flags. */
	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* IP frames: sync DMA so the CPU sees the received payload. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Deliver the unsolicited buffer to the bound transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8482
8483/*
8484 * ql_send_logo
8485 *
8486 * Input:
8487 *	ha:	adapter state pointer.
8488 *	tq:	target queue pointer.
8489 *	done_q:	done queue pointer.
8490 *
8491 * Context:
8492 *	Interrupt or Kernel context, no mailbox commands allowed.
8493 */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Skip unassigned (0) and broadcast (0xffffff) destination ids. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Only emulate one LOGO: not while an RSCN or PLOGI is in
	 * progress, not if one was already sent, not while loop down.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Use the first LUN queue, creating LUN 0 if none exist. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Hand off: caller's done queue or the task daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8572
8573static int
8574ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8575{
8576	port_id_t	d_id;
8577	ql_srb_t	*sp;
8578	ql_link_t	*link;
8579	int		sendup = 1;
8580
8581	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8582
8583	DEVICE_QUEUE_LOCK(tq);
8584	if (tq->outcnt) {
8585		DEVICE_QUEUE_UNLOCK(tq);
8586		sendup = 0;
8587		(void) ql_abort_device(ha, tq, 1);
8588		ql_delay(ha, 10000);
8589	} else {
8590		DEVICE_QUEUE_UNLOCK(tq);
8591		TASK_DAEMON_LOCK(ha);
8592
8593		for (link = ha->pha->callback_queue.first; link != NULL;
8594		    link = link->next) {
8595			sp = link->base_address;
8596			if (sp->flags & SRB_UB_CALLBACK) {
8597				continue;
8598			}
8599			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8600
8601			if (tq->d_id.b24 == d_id.b24) {
8602				sendup = 0;
8603				break;
8604			}
8605		}
8606
8607		TASK_DAEMON_UNLOCK(ha);
8608	}
8609
8610	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8611
8612	return (sendup);
8613}
8614
8615static int
8616ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8617{
8618	fc_unsol_buf_t		*ubp;
8619	ql_srb_t		*sp;
8620	la_els_logi_t		*payload;
8621	class_svc_param_t	*class3_param;
8622
8623	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8624
8625	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8626	    LOOP_DOWN)) {
8627		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8628		return (QL_FUNCTION_FAILED);
8629	}
8630
8631	/* Locate a buffer to use. */
8632	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8633	if (ubp == NULL) {
8634		EL(ha, "Failed\n");
8635		return (QL_FUNCTION_FAILED);
8636	}
8637
8638	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8639	    ha->instance, tq->d_id.b24);
8640
8641	EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8642
8643	sp = ubp->ub_fca_private;
8644
8645	/* Set header. */
8646	ubp->ub_frame.d_id = ha->d_id.b24;
8647	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8648	ubp->ub_frame.s_id = tq->d_id.b24;
8649	ubp->ub_frame.rsvd = 0;
8650	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8651	    F_CTL_SEQ_INITIATIVE;
8652	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8653	ubp->ub_frame.seq_cnt = 0;
8654	ubp->ub_frame.df_ctl = 0;
8655	ubp->ub_frame.seq_id = 0;
8656	ubp->ub_frame.rx_id = 0xffff;
8657	ubp->ub_frame.ox_id = 0xffff;
8658
8659	/* set payload. */
8660	payload = (la_els_logi_t *)ubp->ub_buffer;
8661	bzero(payload, sizeof (payload));
8662
8663	payload->ls_code.ls_code = LA_ELS_PLOGI;
8664	payload->common_service.fcph_version = 0x2006;
8665	payload->common_service.cmn_features = 0x8800;
8666
8667	CFG_IST(ha, CFG_CTRL_242581) ?
8668	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8669	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8670	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8671	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8672	    ha->init_ctrl_blk.cb.max_frame_length[0],
8673	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8674
8675	payload->common_service.conc_sequences = 0xff;
8676	payload->common_service.relative_offset = 0x03;
8677	payload->common_service.e_d_tov = 0x7d0;
8678
8679	bcopy((void *)&tq->port_name[0],
8680	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8681
8682	bcopy((void *)&tq->node_name[0],
8683	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8684
8685	class3_param = (class_svc_param_t *)&payload->class_3;
8686	class3_param->class_valid_svc_opt = 0x8000;
8687	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8688	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8689	class3_param->conc_sequences = tq->class3_conc_sequences;
8690	class3_param->open_sequences_per_exch =
8691	    tq->class3_open_sequences_per_exch;
8692
8693	QL_UB_LOCK(ha);
8694	sp->flags |= SRB_UB_CALLBACK;
8695	QL_UB_UNLOCK(ha);
8696
8697	ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8698
8699	if (done_q) {
8700		ql_add_link_b(done_q, &sp->cmd);
8701	} else {
8702		ql_awaken_task_daemon(ha, sp, 0, 0);
8703	}
8704
8705	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8706
8707	return (QL_SUCCESS);
8708}
8709
8710/*
8711 * Abort outstanding commands in the Firmware, clear internally
8712 * queued commands in the driver, Synchronize the target with
8713 * the Firmware
8714 */
8715int
8716ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8717{
8718	ql_link_t	*link, *link2;
8719	ql_lun_t	*lq;
8720	int		rval = QL_SUCCESS;
8721	ql_srb_t	*sp;
8722	ql_head_t	done_q = { NULL, NULL };
8723
8724	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8725
8726	/*
8727	 * First clear, internally queued commands
8728	 */
8729	DEVICE_QUEUE_LOCK(tq);
8730	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8731		lq = link->base_address;
8732
8733		link2 = lq->cmd.first;
8734		while (link2 != NULL) {
8735			sp = link2->base_address;
8736			link2 = link2->next;
8737
8738			if (sp->flags & SRB_ABORT) {
8739				continue;
8740			}
8741
8742			/* Remove srb from device command queue. */
8743			ql_remove_link(&lq->cmd, &sp->cmd);
8744			sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8745
8746			/* Set ending status. */
8747			sp->pkt->pkt_reason = CS_ABORTED;
8748
8749			/* Call done routine to handle completions. */
8750			ql_add_link_b(&done_q, &sp->cmd);
8751		}
8752	}
8753	DEVICE_QUEUE_UNLOCK(tq);
8754
8755	if (done_q.first != NULL) {
8756		ql_done(done_q.first);
8757	}
8758
8759	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8760		rval = ql_abort_target(ha, tq, 0);
8761	}
8762
8763	if (rval != QL_SUCCESS) {
8764		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8765	} else {
8766		/*EMPTY*/
8767		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8768		    ha->vp_index);
8769	}
8770
8771	return (rval);
8772}
8773
8774/*
8775 * ql_rcv_rscn_els
8776 *	Processes received RSCN extended link service.
8777 *
8778 * Input:
8779 *	ha:	adapter state pointer.
8780 *	mb:	array containing input mailbox registers.
8781 *	done_q:	done queue pointer.
8782 *
8783 * Context:
8784 *	Interrupt or Kernel context, no mailbox commands allowed.
8785 */
8786void
8787ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8788{
8789	fc_unsol_buf_t		*ubp;
8790	ql_srb_t		*sp;
8791	fc_rscn_t		*rn;
8792	fc_affected_id_t	*af;
8793	port_id_t		d_id;
8794
8795	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8796
8797	/* Locate a buffer to use. */
8798	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8799	if (ubp != NULL) {
8800		sp = ubp->ub_fca_private;
8801
8802		/* Set header. */
8803		ubp->ub_frame.d_id = ha->d_id.b24;
8804		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8805		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8806		ubp->ub_frame.rsvd = 0;
8807		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8808		    F_CTL_SEQ_INITIATIVE;
8809		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8810		ubp->ub_frame.seq_cnt = 0;
8811		ubp->ub_frame.df_ctl = 0;
8812		ubp->ub_frame.seq_id = 0;
8813		ubp->ub_frame.rx_id = 0xffff;
8814		ubp->ub_frame.ox_id = 0xffff;
8815
8816		/* set payload. */
8817		rn = (fc_rscn_t *)ubp->ub_buffer;
8818		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8819
8820		rn->rscn_code = LA_ELS_RSCN;
8821		rn->rscn_len = 4;
8822		rn->rscn_payload_len = 8;
8823		d_id.b.al_pa = LSB(mb[2]);
8824		d_id.b.area = MSB(mb[2]);
8825		d_id.b.domain =	LSB(mb[1]);
8826		af->aff_d_id = d_id.b24;
8827		af->aff_format = MSB(mb[1]);
8828
8829		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8830		    af->aff_d_id);
8831
8832		ql_update_rscn(ha, af);
8833
8834		QL_UB_LOCK(ha);
8835		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8836		QL_UB_UNLOCK(ha);
8837		ql_add_link_b(done_q, &sp->cmd);
8838	}
8839
8840	if (ubp == NULL) {
8841		EL(ha, "Failed, get_unsolicited_buffer\n");
8842	} else {
8843		/*EMPTY*/
8844		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8845	}
8846}
8847
8848/*
8849 * ql_update_rscn
8850 *	Update devices from received RSCN.
8851 *
8852 * Input:
8853 *	ha:	adapter state pointer.
8854 *	af:	pointer to RSCN data.
8855 *
8856 * Context:
8857 *	Interrupt or Kernel context, no mailbox commands allowed.
8858 */
8859static void
8860ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8861{
8862	ql_link_t	*link;
8863	uint16_t	index;
8864	ql_tgt_t	*tq;
8865
8866	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8867
8868	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8869		port_id_t d_id;
8870
8871		d_id.r.rsvd_1 = 0;
8872		d_id.b24 = af->aff_d_id;
8873
8874		tq = ql_d_id_to_queue(ha, d_id);
8875		if (tq) {
8876			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
8877			DEVICE_QUEUE_LOCK(tq);
8878			tq->flags |= TQF_RSCN_RCVD;
8879			DEVICE_QUEUE_UNLOCK(tq);
8880		}
8881		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
8882		    ha->instance);
8883
8884		return;
8885	}
8886
8887	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8888		for (link = ha->dev[index].first; link != NULL;
8889		    link = link->next) {
8890			tq = link->base_address;
8891
8892			switch (af->aff_format) {
8893			case FC_RSCN_FABRIC_ADDRESS:
8894				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8895					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
8896					    tq->d_id.b24);
8897					DEVICE_QUEUE_LOCK(tq);
8898					tq->flags |= TQF_RSCN_RCVD;
8899					DEVICE_QUEUE_UNLOCK(tq);
8900				}
8901				break;
8902
8903			case FC_RSCN_AREA_ADDRESS:
8904				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
8905					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
8906					    tq->d_id.b24);
8907					DEVICE_QUEUE_LOCK(tq);
8908					tq->flags |= TQF_RSCN_RCVD;
8909					DEVICE_QUEUE_UNLOCK(tq);
8910				}
8911				break;
8912
8913			case FC_RSCN_DOMAIN_ADDRESS:
8914				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
8915					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
8916					    tq->d_id.b24);
8917					DEVICE_QUEUE_LOCK(tq);
8918					tq->flags |= TQF_RSCN_RCVD;
8919					DEVICE_QUEUE_UNLOCK(tq);
8920				}
8921				break;
8922
8923			default:
8924				break;
8925			}
8926		}
8927	}
8928	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8929}
8930
8931/*
8932 * ql_process_rscn
8933 *
8934 * Input:
8935 *	ha:	adapter state pointer.
8936 *	af:	RSCN payload pointer.
8937 *
8938 * Context:
8939 *	Kernel context.
8940 */
8941static int
8942ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8943{
8944	int		sendit;
8945	int		sendup = 1;
8946	ql_link_t	*link;
8947	uint16_t	index;
8948	ql_tgt_t	*tq;
8949
8950	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8951
8952	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8953		port_id_t d_id;
8954
8955		d_id.r.rsvd_1 = 0;
8956		d_id.b24 = af->aff_d_id;
8957
8958		tq = ql_d_id_to_queue(ha, d_id);
8959		if (tq) {
8960			sendup = ql_process_rscn_for_device(ha, tq);
8961		}
8962
8963		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8964
8965		return (sendup);
8966	}
8967
8968	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8969		for (link = ha->dev[index].first; link != NULL;
8970		    link = link->next) {
8971
8972			tq = link->base_address;
8973			if (tq == NULL) {
8974				continue;
8975			}
8976
8977			switch (af->aff_format) {
8978			case FC_RSCN_FABRIC_ADDRESS:
8979				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8980					sendit = ql_process_rscn_for_device(
8981					    ha, tq);
8982					if (sendup) {
8983						sendup = sendit;
8984					}
8985				}
8986				break;
8987
8988			case FC_RSCN_AREA_ADDRESS:
8989				if ((tq->d_id.b24 & 0xffff00) ==
8990				    af->aff_d_id) {
8991					sendit = ql_process_rscn_for_device(
8992					    ha, tq);
8993
8994					if (sendup) {
8995						sendup = sendit;
8996					}
8997				}
8998				break;
8999
9000			case FC_RSCN_DOMAIN_ADDRESS:
9001				if ((tq->d_id.b24 & 0xff0000) ==
9002				    af->aff_d_id) {
9003					sendit = ql_process_rscn_for_device(
9004					    ha, tq);
9005
9006					if (sendup) {
9007						sendup = sendit;
9008					}
9009				}
9010				break;
9011
9012			default:
9013				break;
9014			}
9015		}
9016	}
9017
9018	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9019
9020	return (sendup);
9021}
9022
9023/*
9024 * ql_process_rscn_for_device
9025 *
9026 * Input:
9027 *	ha:	adapter state pointer.
9028 *	tq:	target queue pointer.
9029 *
9030 * Context:
9031 *	Kernel context.
9032 */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* Non-zero means the RSCN may be sent up to the transport. */
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		/* Lock dropped: ql_get_port_database may block. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		/* Lock dropped around the abort; reacquired below. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Defer sending up while commands are still outstanding. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* No loop id: nothing to abort, just clear the flag. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	/* Any device being reported upward must re-authenticate. */
	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9090
/*
 * ql_handle_rscn_update
 *	Reconciles device queues with the firmware d_id list after an
 *	RSCN, emulating a PLOGI for each newly discovered device.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context (uses KM_SLEEP allocation and mailbox commands).
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL; check is defensive. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known? Then nothing to do. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Lock dropped: port database read may block. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* Complete the queued PLOGI callbacks outside the lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9180
9181/*
9182 * ql_free_unsolicited_buffer
9183 *	Frees allocated buffer.
9184 *
9185 * Input:
9186 *	ha = adapter state pointer.
9187 *	index = buffer array index.
9188 *	ADAPTER_STATE_LOCK must be already obtained.
9189 *
9190 * Context:
9191 *	Kernel context.
9192 */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* Lock dropped: ql_shutdown_ip may block. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/* Buffer intentionally not freed on error. */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory, not kmem. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Guard against underflow of the allocation count. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9232
9233/*
9234 * ql_get_unsolicited_buffer
9235 *	Locates a free unsolicited buffer.
9236 *
9237 * Input:
9238 *	ha = adapter state pointer.
9239 *	type = buffer type.
9240 *
9241 * Returns:
9242 *	Unsolicited buffer pointer.
9243 *
9244 * Context:
9245 *	Interrupt or Kernel context, no mailbox commands allowed.
9246 */
9247fc_unsol_buf_t *
9248ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9249{
9250	fc_unsol_buf_t	*ubp;
9251	ql_srb_t	*sp;
9252	uint16_t	index;
9253
9254	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9255
9256	/* Locate a buffer to use. */
9257	ubp = NULL;
9258
9259	QL_UB_LOCK(ha);
9260	for (index = 0; index < QL_UB_LIMIT; index++) {
9261		ubp = ha->ub_array[index];
9262		if (ubp != NULL) {
9263			sp = ubp->ub_fca_private;
9264			if ((sp->ub_type == type) &&
9265			    (sp->flags & SRB_UB_IN_FCA) &&
9266			    (!(sp->flags & (SRB_UB_CALLBACK |
9267			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9268				sp->flags |= SRB_UB_ACQUIRED;
9269				ubp->ub_resp_flags = 0;
9270				break;
9271			}
9272			ubp = NULL;
9273		}
9274	}
9275	QL_UB_UNLOCK(ha);
9276
9277	if (ubp) {
9278		ubp->ub_resp_token = NULL;
9279		ubp->ub_class = FC_TRAN_CLASS3;
9280	}
9281
9282	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9283
9284	return (ubp);
9285}
9286
9287/*
9288 * ql_ub_frame_hdr
9289 *	Processes received unsolicited buffers from ISP.
9290 *
9291 * Input:
9292 *	ha:	adapter state pointer.
9293 *	tq:	target queue pointer.
9294 *	index:	unsolicited buffer array index.
9295 *	done_q:	done queue pointer.
9296 *
9297 * Returns:
9298 *	ql local function return status code.
9299 *
9300 * Context:
9301 *	Interrupt or Kernel context, no mailbox commands allowed.
9302 */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the index and look up the unsolicited buffer. */
	QL_UB_LOCK(ha);
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		/* Buffer is being torn down; return it to the FCA. */
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Accept only an ISP-owned IP buffer matching this index. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Build the frame header from the target sequence state. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp frame size to what remains of the sequence. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance the running sequence bookkeeping. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				/* Single-frame sequence: first and last. */
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				/* Last frame of a multi-frame sequence. */
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			/* First frame of a multi-frame sequence. */
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Queue the buffer for upper-layer callback. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Diagnose exactly which acceptance check failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9426
9427/*
9428 * ql_timer
9429 *	One second timer function.
9430 *
9431 * Input:
9432 *	ql_hba.first = first link in adapter list.
9433 *
9434 * Context:
9435 *	Interrupt context, no mailbox commands allowed.
9436 */
9437static void
9438ql_timer(void *arg)
9439{
9440	ql_link_t		*link;
9441	uint32_t		set_flags;
9442	uint32_t		reset_flags;
9443	ql_adapter_state_t	*ha = NULL, *vha;
9444
9445	QL_PRINT_6(CE_CONT, "started\n");
9446
9447	/* Acquire global state lock. */
9448	GLOBAL_STATE_LOCK();
9449	if (ql_timer_timeout_id == NULL) {
9450		/* Release global state lock. */
9451		GLOBAL_STATE_UNLOCK();
9452		return;
9453	}
9454
9455	for (link = ql_hba.first; link != NULL; link = link->next) {
9456		ha = link->base_address;
9457
9458		/* Skip adapter if suspended of stalled. */
9459		ADAPTER_STATE_LOCK(ha);
9460		if (ha->flags & ADAPTER_SUSPENDED ||
9461		    ha->task_daemon_flags & DRIVER_STALL) {
9462			ADAPTER_STATE_UNLOCK(ha);
9463			continue;
9464		}
9465		ha->flags |= ADAPTER_TIMER_BUSY;
9466		ADAPTER_STATE_UNLOCK(ha);
9467
9468		QL_PM_LOCK(ha);
9469		if (ha->power_level != PM_LEVEL_D0) {
9470			QL_PM_UNLOCK(ha);
9471
9472			ADAPTER_STATE_LOCK(ha);
9473			ha->flags &= ~ADAPTER_TIMER_BUSY;
9474			ADAPTER_STATE_UNLOCK(ha);
9475			continue;
9476		}
9477		ha->busy++;
9478		QL_PM_UNLOCK(ha);
9479
9480		set_flags = 0;
9481		reset_flags = 0;
9482
9483		/* Port retry timer handler. */
9484		if (LOOP_READY(ha)) {
9485			ADAPTER_STATE_LOCK(ha);
9486			if (ha->port_retry_timer != 0) {
9487				ha->port_retry_timer--;
9488				if (ha->port_retry_timer == 0) {
9489					set_flags |= PORT_RETRY_NEEDED;
9490				}
9491			}
9492			ADAPTER_STATE_UNLOCK(ha);
9493		}
9494
9495		/* Loop down timer handler. */
9496		if (LOOP_RECONFIGURE(ha) == 0) {
9497			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9498				ha->loop_down_timer--;
9499				/*
9500				 * give the firmware loop down dump flag
9501				 * a chance to work.
9502				 */
9503				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9504					if (CFG_IST(ha,
9505					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9506						(void) ql_binary_fw_dump(ha,
9507						    TRUE);
9508					}
9509					EL(ha, "loop_down_reset, "
9510					    "isp_abort_needed\n");
9511					set_flags |= ISP_ABORT_NEEDED;
9512				}
9513			}
9514			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9515				/* Command abort time handler. */
9516				if (ha->loop_down_timer ==
9517				    ha->loop_down_abort_time) {
9518					ADAPTER_STATE_LOCK(ha);
9519					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9520					ADAPTER_STATE_UNLOCK(ha);
9521					set_flags |= ABORT_QUEUES_NEEDED;
9522					EL(ha, "loop_down_abort_time, "
9523					    "abort_queues_needed\n");
9524				}
9525
9526				/* Watchdog timer handler. */
9527				if (ha->watchdog_timer == 0) {
9528					ha->watchdog_timer = WATCHDOG_TIME;
9529				} else if (LOOP_READY(ha)) {
9530					ha->watchdog_timer--;
9531					if (ha->watchdog_timer == 0) {
9532						for (vha = ha; vha != NULL;
9533						    vha = vha->vp_next) {
9534							ql_watchdog(vha,
9535							    &set_flags,
9536							    &reset_flags);
9537						}
9538						ha->watchdog_timer =
9539						    WATCHDOG_TIME;
9540					}
9541				}
9542			}
9543		}
9544
9545		/* Idle timer handler. */
9546		if (!DRIVER_SUSPENDED(ha)) {
9547			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9548#if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9549				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9550#endif
9551				ha->idle_timer = 0;
9552			}
9553			if (ha->send_plogi_timer != NULL) {
9554				ha->send_plogi_timer--;
9555				if (ha->send_plogi_timer == NULL) {
9556					set_flags |= SEND_PLOGI;
9557				}
9558			}
9559		}
9560		ADAPTER_STATE_LOCK(ha);
9561		if (ha->restart_mpi_timer != 0) {
9562			ha->restart_mpi_timer--;
9563			if (ha->restart_mpi_timer == 0 &&
9564			    ha->idc_restart_mpi != 0) {
9565				ha->idc_restart_mpi = 0;
9566				reset_flags |= TASK_DAEMON_STALLED_FLG;
9567			}
9568		}
9569		if (ha->flash_acc_timer != 0) {
9570			ha->flash_acc_timer--;
9571			if (ha->flash_acc_timer == 0 &&
9572			    ha->idc_flash_acc != 0) {
9573				ha->idc_flash_acc = 1;
9574				ha->idc_mb[1] = 0;
9575				ha->idc_mb[2] = IDC_OPC_DRV_START;
9576				set_flags |= IDC_ACK_NEEDED;
9577			}
9578		}
9579		ADAPTER_STATE_UNLOCK(ha);
9580
9581		if (set_flags != 0 || reset_flags != 0) {
9582			ql_awaken_task_daemon(ha, NULL, set_flags,
9583			    reset_flags);
9584		}
9585
9586		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9587			ql_blink_led(ha);
9588		}
9589
9590		/* Update the IO stats */
9591		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9592			ha->xioctl->IOInputMByteCnt +=
9593			    (ha->xioctl->IOInputByteCnt / 0x100000);
9594			ha->xioctl->IOInputByteCnt %= 0x100000;
9595		}
9596
9597		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9598			ha->xioctl->IOOutputMByteCnt +=
9599			    (ha->xioctl->IOOutputByteCnt / 0x100000);
9600			ha->xioctl->IOOutputByteCnt %= 0x100000;
9601		}
9602
9603		ADAPTER_STATE_LOCK(ha);
9604		ha->flags &= ~ADAPTER_TIMER_BUSY;
9605		ADAPTER_STATE_UNLOCK(ha);
9606
9607		QL_PM_LOCK(ha);
9608		ha->busy--;
9609		QL_PM_UNLOCK(ha);
9610	}
9611
9612	/* Restart timer, if not being stopped. */
9613	if (ql_timer_timeout_id != NULL) {
9614		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9615	}
9616
9617	/* Release global state lock. */
9618	GLOBAL_STATE_UNLOCK();
9619
9620	QL_PRINT_6(CE_CONT, "done\n");
9621}
9622
9623/*
9624 * ql_timeout_insert
9625 *	Function used to insert a command block onto the
9626 *	watchdog timer queue.
9627 *
9628 *	Note: Must insure that pkt_time is not zero
9629 *			before calling ql_timeout_insert.
9630 *
9631 * Input:
9632 *	ha:	adapter state pointer.
9633 *	tq:	target queue pointer.
9634 *	sp:	SRB pointer.
9635 *	DEVICE_QUEUE_LOCK must be already obtained.
9636 *
9637 * Context:
9638 *	Kernel context.
9639 */
9640/* ARGSUSED */
9641static void
9642ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9643{
9644	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9645
9646	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9647		/* Make sure timeout >= 2 * R_A_TOV */
9648		sp->isp_timeout = (uint16_t)
9649		    (sp->pkt->pkt_timeout < ha->r_a_tov ? ha->r_a_tov :
9650		    sp->pkt->pkt_timeout);
9651
9652		/*
9653		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9654		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9655		 * will expire in the next watchdog call, which could be in
9656		 * 1 microsecond.
9657		 *
9658		 */
9659		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9660		    WATCHDOG_TIME;
9661		/*
9662		 * Added an additional 10 to account for the
9663		 * firmware timer drift which can occur with
9664		 * very long timeout values.
9665		 */
9666		sp->wdg_q_time += 10;
9667
9668		/*
9669		 * Add 6 more to insure watchdog does not timeout at the same
9670		 * time as ISP RISC code timeout.
9671		 */
9672		sp->wdg_q_time += 6;
9673
9674		/* Save initial time for resetting watchdog time. */
9675		sp->init_wdg_q_time = sp->wdg_q_time;
9676
9677		/* Insert command onto watchdog queue. */
9678		ql_add_link_b(&tq->wdg, &sp->wdg);
9679
9680		sp->flags |= SRB_WATCHDOG_ENABLED;
9681	} else {
9682		sp->isp_timeout = 0;
9683		sp->wdg_q_time = 0;
9684		sp->init_wdg_q_time = 0;
9685	}
9686
9687	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9688}
9689
9690/*
9691 * ql_watchdog
9692 *	Timeout handler that runs in interrupt context. The
9693 *	ql_adapter_state_t * argument is the parameter set up when the
9694 *	timeout was initialized (state structure pointer).
9695 *	Function used to update timeout values and if timeout
9696 *	has occurred command will be aborted.
9697 *
9698 * Input:
9699 *	ha:		adapter state pointer.
9700 *	set_flags:	task daemon flags to set.
9701 *	reset_flags:	task daemon flags to reset.
9702 *
9703 * Context:
9704 *	Interrupt context, no mailbox commands allowed.
9705 */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/* Try to acquire device queue lock. */
			/* Busy devices are simply skipped this tick. */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						/*
						 * Firmware-started command
						 * timed out: stop scanning
						 * entirely by terminating
						 * both loops via the index
						 * and NULL sentinels below.
						 */
						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock. */
			/* tq is NULL when already unlocked in abort path. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9805
9806/*
9807 * ql_cmd_timeout
9808 *	Command timeout handler.
9809 *
9810 * Input:
9811 *	ha:		adapter state pointer.
9812 *	tq:		target queue pointer.
9813 *	sp:		SRB pointer.
9814 *	set_flags:	task daemon flags to set.
9815 *	reset_flags:	task daemon flags to reset.
9816 *
9817 * Context:
9818 *	Interrupt context, no mailbox commands allowed.
9819 */
9820/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Command never reached the firmware: complete it here. */

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		/* ql_done must not be called with the device lock held. */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		/* Reacquire for the caller, which expects it held. */
		DEVICE_QUEUE_LOCK(tq);
	} else {
		/* Command is in the firmware: force an ISP reset. */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Reacquire for the caller, which expects it held. */
		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9885
9886/*
9887 * ql_rst_aen
9888 *	Processes asynchronous reset.
9889 *
9890 * Input:
9891 *	ha = adapter state pointer.
9892 *
9893 * Context:
9894 *	Kernel context.
9895 */
9896static void
9897ql_rst_aen(ql_adapter_state_t *ha)
9898{
9899	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9900
9901	/* Issue marker command. */
9902	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
9903
9904	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9905}
9906
9907/*
9908 * ql_cmd_wait
9909 *	Stall driver until all outstanding commands are returned.
9910 *
9911 * Input:
9912 *	ha = adapter state pointer.
9913 *
9914 * Context:
9915 *	Kernel context.
9916 */
9917void
9918ql_cmd_wait(ql_adapter_state_t *ha)
9919{
9920	uint16_t		index;
9921	ql_link_t		*link;
9922	ql_tgt_t		*tq;
9923	ql_adapter_state_t	*vha;
9924
9925	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9926
9927	/* Wait for all outstanding commands to be returned. */
9928	(void) ql_wait_outstanding(ha);
9929
9930	/*
9931	 * clear out internally queued commands
9932	 */
9933	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9934		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9935			for (link = vha->dev[index].first; link != NULL;
9936			    link = link->next) {
9937				tq = link->base_address;
9938				if (tq &&
9939				    (!(tq->prli_svc_param_word_3 &
9940				    PRLI_W3_RETRY))) {
9941					(void) ql_abort_device(vha, tq, 0);
9942				}
9943			}
9944		}
9945	}
9946
9947	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9948}
9949
9950/*
9951 * ql_wait_outstanding
9952 *	Wait for all outstanding commands to complete.
9953 *
9954 * Input:
9955 *	ha = adapter state pointer.
9956 *
9957 * Returns:
9958 *	index - the index for ql_srb into outstanding_cmds.
9959 *
9960 * Context:
9961 *	Kernel context.
9962 */
9963static uint16_t
9964ql_wait_outstanding(ql_adapter_state_t *ha)
9965{
9966	ql_srb_t	*sp;
9967	uint16_t	index, count;
9968
9969	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9970
9971	count = 3000;
9972	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
9973		if (ha->pha->pending_cmds.first != NULL) {
9974			ql_start_iocb(ha, NULL);
9975			index = 1;
9976		}
9977		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
9978		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
9979			if (count-- != 0) {
9980				ql_delay(ha, 10000);
9981				index = 0;
9982			} else {
9983				EL(ha, "failed, sp=%ph\n", (void *)sp);
9984				break;
9985			}
9986		}
9987	}
9988
9989	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9990
9991	return (index);
9992}
9993
9994/*
9995 * ql_restart_queues
9996 *	Restart device queues.
9997 *
9998 * Input:
9999 *	ha = adapter state pointer.
10000 *	DEVICE_QUEUE_LOCK must be released.
10001 *
10002 * Context:
10003 *	Interrupt or Kernel context, no mailbox commands allowed.
10004 */
10005static void
10006ql_restart_queues(ql_adapter_state_t *ha)
10007{
10008	ql_link_t		*link, *link2;
10009	ql_tgt_t		*tq;
10010	ql_lun_t		*lq;
10011	uint16_t		index;
10012	ql_adapter_state_t	*vha;
10013
10014	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10015
10016	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10017		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10018			for (link = vha->dev[index].first; link != NULL;
10019			    link = link->next) {
10020				tq = link->base_address;
10021
10022				/* Acquire device queue lock. */
10023				DEVICE_QUEUE_LOCK(tq);
10024
10025				tq->flags &= ~TQF_QUEUE_SUSPENDED;
10026
10027				for (link2 = tq->lun_queues.first;
10028				    link2 != NULL; link2 = link2->next) {
10029					lq = link2->base_address;
10030
10031					if (lq->cmd.first != NULL) {
10032						ql_next(vha, lq);
10033						DEVICE_QUEUE_LOCK(tq);
10034					}
10035				}
10036
10037				/* Release device queue lock. */
10038				DEVICE_QUEUE_UNLOCK(tq);
10039			}
10040		}
10041	}
10042
10043	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10044}
10045
10046/*
10047 * ql_iidma
10048 *	Setup iiDMA parameters to firmware
10049 *
10050 * Input:
10051 *	ha = adapter state pointer.
10052 *	DEVICE_QUEUE_LOCK must be released.
10053 *
10054 * Context:
10055 *	Interrupt or Kernel context, no mailbox commands allowed.
10056 */
10057static void
10058ql_iidma(ql_adapter_state_t *ha)
10059{
10060	ql_link_t	*link;
10061	ql_tgt_t	*tq;
10062	uint16_t	index;
10063	char		buf[256];
10064	uint32_t	data;
10065
10066	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10067
10068	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
10069		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10070		return;
10071	}
10072
10073	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10074		for (link = ha->dev[index].first; link != NULL;
10075		    link = link->next) {
10076			tq = link->base_address;
10077
10078			/* Acquire device queue lock. */
10079			DEVICE_QUEUE_LOCK(tq);
10080
10081			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10082				DEVICE_QUEUE_UNLOCK(tq);
10083				continue;
10084			}
10085
10086			tq->flags &= ~TQF_IIDMA_NEEDED;
10087
10088			if ((tq->loop_id > LAST_N_PORT_HDL) ||
10089			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10090				DEVICE_QUEUE_UNLOCK(tq);
10091				continue;
10092			}
10093
10094			/* Get the iiDMA persistent data */
10095			if (tq->iidma_rate == IIDMA_RATE_INIT) {
10096				(void) sprintf(buf,
10097				    "iidma-rate-%02x%02x%02x%02x%02x"
10098				    "%02x%02x%02x", tq->port_name[0],
10099				    tq->port_name[1], tq->port_name[2],
10100				    tq->port_name[3], tq->port_name[4],
10101				    tq->port_name[5], tq->port_name[6],
10102				    tq->port_name[7]);
10103
10104				if ((data = ql_get_prop(ha, buf)) ==
10105				    0xffffffff) {
10106					tq->iidma_rate = IIDMA_RATE_NDEF;
10107				} else {
10108					switch (data) {
10109					case IIDMA_RATE_1GB:
10110					case IIDMA_RATE_2GB:
10111					case IIDMA_RATE_4GB:
10112					case IIDMA_RATE_10GB:
10113						tq->iidma_rate = data;
10114						break;
10115					case IIDMA_RATE_8GB:
10116						if (CFG_IST(ha,
10117						    CFG_CTRL_25XX)) {
10118							tq->iidma_rate = data;
10119						} else {
10120							tq->iidma_rate =
10121							    IIDMA_RATE_4GB;
10122						}
10123						break;
10124					default:
10125						EL(ha, "invalid data for "
10126						    "parameter: %s: %xh\n",
10127						    buf, data);
10128						tq->iidma_rate =
10129						    IIDMA_RATE_NDEF;
10130						break;
10131					}
10132				}
10133			}
10134
10135			/* Set the firmware's iiDMA rate */
10136			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10137			    !(CFG_IST(ha, CFG_CTRL_81XX))) {
10138				data = ql_iidma_rate(ha, tq->loop_id,
10139				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10140				if (data != QL_SUCCESS) {
10141					EL(ha, "mbx failed: %xh\n", data);
10142				}
10143			}
10144
10145			/* Release device queue lock. */
10146			DEVICE_QUEUE_UNLOCK(tq);
10147		}
10148	}
10149
10150	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10151}
10152
10153/*
10154 * ql_abort_queues
10155 *	Abort all commands on device queues.
10156 *
10157 * Input:
10158 *	ha = adapter state pointer.
10159 *
10160 * Context:
10161 *	Interrupt or Kernel context, no mailbox commands allowed.
10162 */
10163static void
10164ql_abort_queues(ql_adapter_state_t *ha)
10165{
10166	ql_link_t		*link;
10167	ql_tgt_t		*tq;
10168	ql_srb_t		*sp;
10169	uint16_t		index;
10170	ql_adapter_state_t	*vha;
10171
10172	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10173
10174	/* Return all commands in outstanding command list. */
10175	INTR_LOCK(ha);
10176
10177	/* Place all commands in outstanding cmd list on device queue. */
10178	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10179		if (ha->pending_cmds.first != NULL) {
10180			INTR_UNLOCK(ha);
10181			ql_start_iocb(ha, NULL);
10182			/* Delay for system */
10183			ql_delay(ha, 10000);
10184			INTR_LOCK(ha);
10185			index = 1;
10186		}
10187		sp = ha->outstanding_cmds[index];
10188
10189		/* skip devices capable of FCP2 retrys */
10190		if ((sp != NULL) &&
10191		    ((tq = sp->lun_queue->target_queue) != NULL) &&
10192		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
10193			ha->outstanding_cmds[index] = NULL;
10194			sp->handle = 0;
10195			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10196
10197			INTR_UNLOCK(ha);
10198
10199			/* Set ending status. */
10200			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10201			sp->flags |= SRB_ISP_COMPLETED;
10202
10203			/* Call done routine to handle completions. */
10204			sp->cmd.next = NULL;
10205			ql_done(&sp->cmd);
10206
10207			INTR_LOCK(ha);
10208		}
10209	}
10210	INTR_UNLOCK(ha);
10211
10212	for (vha = ha; vha != NULL; vha = vha->vp_next) {
10213		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
10214		    vha->instance, vha->vp_index);
10215		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10216			for (link = vha->dev[index].first; link != NULL;
10217			    link = link->next) {
10218				tq = link->base_address;
10219				/* skip devices capable of FCP2 retrys */
10220				if (!(tq->prli_svc_param_word_3 &
10221				    PRLI_W3_RETRY)) {
10222					/*
10223					 * Set port unavailable status and
10224					 * return all commands on a devices
10225					 * queues.
10226					 */
10227					ql_abort_device_queues(ha, tq);
10228				}
10229			}
10230		}
10231	}
10232	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10233}
10234
10235/*
10236 * ql_abort_device_queues
10237 *	Abort all commands on device queues.
10238 *
10239 * Input:
10240 *	ha = adapter state pointer.
10241 *
10242 * Context:
10243 *	Interrupt or Kernel context, no mailbox commands allowed.
10244 */
10245static void
10246ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10247{
10248	ql_link_t	*lun_link, *cmd_link;
10249	ql_srb_t	*sp;
10250	ql_lun_t	*lq;
10251
10252	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10253
10254	DEVICE_QUEUE_LOCK(tq);
10255
10256	for (lun_link = tq->lun_queues.first; lun_link != NULL;
10257	    lun_link = lun_link->next) {
10258		lq = lun_link->base_address;
10259
10260		cmd_link = lq->cmd.first;
10261		while (cmd_link != NULL) {
10262			sp = cmd_link->base_address;
10263
10264			if (sp->flags & SRB_ABORT) {
10265				cmd_link = cmd_link->next;
10266				continue;
10267			}
10268
10269			/* Remove srb from device cmd queue. */
10270			ql_remove_link(&lq->cmd, &sp->cmd);
10271
10272			sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10273
10274			DEVICE_QUEUE_UNLOCK(tq);
10275
10276			/* Set ending status. */
10277			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10278
10279			/* Call done routine to handle completion. */
10280			ql_done(&sp->cmd);
10281
10282			/* Delay for system */
10283			ql_delay(ha, 10000);
10284
10285			DEVICE_QUEUE_LOCK(tq);
10286			cmd_link = lq->cmd.first;
10287		}
10288	}
10289	DEVICE_QUEUE_UNLOCK(tq);
10290
10291	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10292}
10293
10294/*
10295 * ql_loop_resync
10296 *	Resync with fibre channel devices.
10297 *
10298 * Input:
10299 *	ha = adapter state pointer.
10300 *	DEVICE_QUEUE_LOCK must be released.
10301 *
10302 * Returns:
10303 *	ql local function return status code.
10304 *
10305 * Context:
10306 *	Kernel context.
10307 */
10308static int
10309ql_loop_resync(ql_adapter_state_t *ha)
10310{
10311	int rval;
10312
10313	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10314
10315	if (ha->flags & IP_INITIALIZED) {
10316		(void) ql_shutdown_ip(ha);
10317	}
10318
10319	rval = ql_fw_ready(ha, 10);
10320
10321	TASK_DAEMON_LOCK(ha);
10322	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10323	TASK_DAEMON_UNLOCK(ha);
10324
10325	/* Set loop online, if it really is. */
10326	if (rval == QL_SUCCESS) {
10327		ql_loop_online(ha);
10328		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10329	} else {
10330		EL(ha, "failed, rval = %xh\n", rval);
10331	}
10332
10333	return (rval);
10334}
10335
10336/*
10337 * ql_loop_online
10338 *	Set loop online status if it really is online.
10339 *
10340 * Input:
10341 *	ha = adapter state pointer.
10342 *	DEVICE_QUEUE_LOCK must be released.
10343 *
10344 * Context:
10345 *	Kernel context.
10346 */
10347void
10348ql_loop_online(ql_adapter_state_t *ha)
10349{
10350	ql_adapter_state_t	*vha;
10351
10352	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10353
10354	/* Inform the FC Transport that the hardware is online. */
10355	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10356		if (!(vha->task_daemon_flags &
10357		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10358			/* Restart IP if it was shutdown. */
10359			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10360			    !(vha->flags & IP_INITIALIZED)) {
10361				(void) ql_initialize_ip(vha);
10362				ql_isp_rcvbuf(vha);
10363			}
10364
10365			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10366			    FC_PORT_STATE_MASK(vha->state) !=
10367			    FC_STATE_ONLINE) {
10368				vha->state = FC_PORT_SPEED_MASK(vha->state);
10369				if (vha->topology & QL_LOOP_CONNECTION) {
10370					vha->state |= FC_STATE_LOOP;
10371				} else {
10372					vha->state |= FC_STATE_ONLINE;
10373				}
10374				TASK_DAEMON_LOCK(ha);
10375				vha->task_daemon_flags |= FC_STATE_CHANGE;
10376				TASK_DAEMON_UNLOCK(ha);
10377			}
10378		}
10379	}
10380
10381	ql_awaken_task_daemon(ha, NULL, 0, 0);
10382
10383	/* Restart device queues that may have been stopped. */
10384	ql_restart_queues(ha);
10385
10386	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10387}
10388
10389/*
10390 * ql_fca_handle_to_state
10391 *	Verifies handle to be correct.
10392 *
10393 * Input:
10394 *	fca_handle = pointer to state structure.
10395 *
10396 * Returns:
10397 *	NULL = failure
10398 *
10399 * Context:
10400 *	Kernel context.
10401 */
10402static ql_adapter_state_t *
10403ql_fca_handle_to_state(opaque_t fca_handle)
10404{
10405#ifdef	QL_DEBUG_ROUTINES
10406	ql_link_t		*link;
10407	ql_adapter_state_t	*ha = NULL;
10408	ql_adapter_state_t	*vha = NULL;
10409
10410	for (link = ql_hba.first; link != NULL; link = link->next) {
10411		ha = link->base_address;
10412		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10413			if ((opaque_t)vha == fca_handle) {
10414				ha = vha;
10415				break;
10416			}
10417		}
10418		if ((opaque_t)ha == fca_handle) {
10419			break;
10420		} else {
10421			ha = NULL;
10422		}
10423	}
10424
10425	if (ha == NULL) {
10426		/*EMPTY*/
10427		QL_PRINT_2(CE_CONT, "failed\n");
10428	}
10429
10430	ASSERT(ha != NULL);
10431#endif /* QL_DEBUG_ROUTINES */
10432
10433	return ((ql_adapter_state_t *)fca_handle);
10434}
10435
10436/*
10437 * ql_d_id_to_queue
10438 *	Locate device queue that matches destination ID.
10439 *
10440 * Input:
10441 *	ha = adapter state pointer.
10442 *	d_id = destination ID
10443 *
10444 * Returns:
10445 *	NULL = failure
10446 *
10447 * Context:
10448 *	Interrupt or Kernel context, no mailbox commands allowed.
10449 */
10450ql_tgt_t *
10451ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10452{
10453	uint16_t	index;
10454	ql_tgt_t	*tq;
10455	ql_link_t	*link;
10456
10457	/* Get head queue index. */
10458	index = ql_alpa_to_index[d_id.b.al_pa];
10459
10460	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10461		tq = link->base_address;
10462		if (tq->d_id.b24 == d_id.b24 &&
10463		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10464			return (tq);
10465		}
10466	}
10467
10468	return (NULL);
10469}
10470
10471/*
10472 * ql_loop_id_to_queue
10473 *	Locate device queue that matches loop ID.
10474 *
10475 * Input:
10476 *	ha:		adapter state pointer.
10477 *	loop_id:	destination ID
10478 *
10479 * Returns:
10480 *	NULL = failure
10481 *
10482 * Context:
10483 *	Interrupt or Kernel context, no mailbox commands allowed.
10484 */
10485ql_tgt_t *
10486ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10487{
10488	uint16_t	index;
10489	ql_tgt_t	*tq;
10490	ql_link_t	*link;
10491
10492	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10493		for (link = ha->dev[index].first; link != NULL;
10494		    link = link->next) {
10495			tq = link->base_address;
10496			if (tq->loop_id == loop_id) {
10497				return (tq);
10498			}
10499		}
10500	}
10501
10502	return (NULL);
10503}
10504
10505/*
10506 * ql_kstat_update
10507 *	Updates kernel statistics.
10508 *
10509 * Input:
10510 *	ksp - driver kernel statistics structure pointer.
10511 *	rw - function to perform
10512 *
10513 * Returns:
10514 *	0 or EACCES
10515 *
10516 * Context:
10517 *	Kernel context.
10518 */
10519/* ARGSUSED */
10520static int
10521ql_kstat_update(kstat_t *ksp, int rw)
10522{
10523	int			rval;
10524
10525	QL_PRINT_3(CE_CONT, "started\n");
10526
10527	if (rw == KSTAT_WRITE) {
10528		rval = EACCES;
10529	} else {
10530		rval = 0;
10531	}
10532
10533	if (rval != 0) {
10534		/*EMPTY*/
10535		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10536	} else {
10537		/*EMPTY*/
10538		QL_PRINT_3(CE_CONT, "done\n");
10539	}
10540	return (rval);
10541}
10542
10543/*
10544 * ql_load_flash
10545 *	Loads flash.
10546 *
10547 * Input:
10548 *	ha:	adapter state pointer.
10549 *	dp:	data pointer.
10550 *	size:	data length.
10551 *
10552 * Returns:
10553 *	ql local function return status code.
10554 *
10555 * Context:
10556 *	Kernel context.
10557 */
10558int
10559ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10560{
10561	uint32_t	cnt;
10562	int		rval;
10563	uint32_t	size_to_offset;
10564	uint32_t	size_to_compare;
10565	int		erase_all;
10566
10567	if (CFG_IST(ha, CFG_CTRL_242581)) {
10568		return (ql_24xx_load_flash(ha, dp, size, 0));
10569	}
10570
10571	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10572
10573	size_to_compare = 0x20000;
10574	size_to_offset = 0;
10575	erase_all = 0;
10576	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10577		if (size == 0x80000) {
10578			/* Request to flash the entire chip. */
10579			size_to_compare = 0x80000;
10580			erase_all = 1;
10581		} else {
10582			size_to_compare = 0x40000;
10583			if (ql_flash_sbus_fpga) {
10584				size_to_offset = 0x40000;
10585			}
10586		}
10587	}
10588	if (size > size_to_compare) {
10589		rval = QL_FUNCTION_PARAMETER_ERROR;
10590		EL(ha, "failed=%xh\n", rval);
10591		return (rval);
10592	}
10593
10594	GLOBAL_HW_LOCK();
10595
10596	/* Enable Flash Read/Write. */
10597	ql_flash_enable(ha);
10598
10599	/* Erase flash prior to write. */
10600	rval = ql_erase_flash(ha, erase_all);
10601
10602	if (rval == QL_SUCCESS) {
10603		/* Write data to flash. */
10604		for (cnt = 0; cnt < size; cnt++) {
10605			/* Allow other system activity. */
10606			if (cnt % 0x1000 == 0) {
10607				ql_delay(ha, 10000);
10608			}
10609			rval = ql_program_flash_address(ha,
10610			    cnt + size_to_offset, *dp++);
10611			if (rval != QL_SUCCESS) {
10612				break;
10613			}
10614		}
10615	}
10616
10617	ql_flash_disable(ha);
10618
10619	GLOBAL_HW_UNLOCK();
10620
10621	if (rval != QL_SUCCESS) {
10622		EL(ha, "failed=%xh\n", rval);
10623	} else {
10624		/*EMPTY*/
10625		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10626	}
10627	return (rval);
10628}
10629
10630/*
10631 * ql_program_flash_address
10632 *	Program flash address.
10633 *
10634 * Input:
10635 *	ha = adapter state pointer.
10636 *	addr = flash byte address.
10637 *	data = data to be written to flash.
10638 *
10639 * Returns:
10640 *	ql local function return status code.
10641 *
10642 * Context:
10643 *	Kernel context.
10644 */
10645static int
10646ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10647{
10648	int rval;
10649
10650	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10651
10652	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10653		ql_write_flash_byte(ha, 0x5555, 0xa0);
10654		ql_write_flash_byte(ha, addr, data);
10655	} else {
10656		/* Write Program Command Sequence */
10657		ql_write_flash_byte(ha, 0x5555, 0xaa);
10658		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10659		ql_write_flash_byte(ha, 0x5555, 0xa0);
10660		ql_write_flash_byte(ha, addr, data);
10661	}
10662
10663	/* Wait for write to complete. */
10664	rval = ql_poll_flash(ha, addr, data);
10665
10666	if (rval != QL_SUCCESS) {
10667		EL(ha, "failed=%xh\n", rval);
10668	} else {
10669		/*EMPTY*/
10670		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10671	}
10672	return (rval);
10673}
10674
10675/*
10676 * ql_erase_flash
10677 *	Erases entire flash.
10678 *
10679 * Input:
10680 *	ha = adapter state pointer.
10681 *
10682 * Returns:
10683 *	ql local function return status code.
10684 *
10685 * Context:
10686 *	Kernel context.
10687 */
10688int
10689ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10690{
10691	int		rval;
10692	uint32_t	erase_delay = 2000000;
10693	uint32_t	sStartAddr;
10694	uint32_t	ssize;
10695	uint32_t	cnt;
10696	uint8_t		*bfp;
10697	uint8_t		*tmp;
10698
10699	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10700
10701	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10702
10703		if (ql_flash_sbus_fpga == 1) {
10704			ssize = QL_SBUS_FCODE_SIZE;
10705			sStartAddr = QL_FCODE_OFFSET;
10706		} else {
10707			ssize = QL_FPGA_SIZE;
10708			sStartAddr = QL_FPGA_OFFSET;
10709		}
10710
10711		erase_delay = 20000000;
10712
10713		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10714
10715		/* Save the section of flash we're not updating to buffer */
10716		tmp = bfp;
10717		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10718			/* Allow other system activity. */
10719			if (cnt % 0x1000 == 0) {
10720				ql_delay(ha, 10000);
10721			}
10722			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10723		}
10724	}
10725
10726	/* Chip Erase Command Sequence */
10727	ql_write_flash_byte(ha, 0x5555, 0xaa);
10728	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10729	ql_write_flash_byte(ha, 0x5555, 0x80);
10730	ql_write_flash_byte(ha, 0x5555, 0xaa);
10731	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10732	ql_write_flash_byte(ha, 0x5555, 0x10);
10733
10734	ql_delay(ha, erase_delay);
10735
10736	/* Wait for erase to complete. */
10737	rval = ql_poll_flash(ha, 0, 0x80);
10738
10739	if (rval != QL_SUCCESS) {
10740		EL(ha, "failed=%xh\n", rval);
10741		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10742			kmem_free(bfp, ssize);
10743		}
10744		return (rval);
10745	}
10746
10747	/* restore the section we saved in the buffer */
10748	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10749		/* Restore the section we saved off */
10750		tmp = bfp;
10751		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10752			/* Allow other system activity. */
10753			if (cnt % 0x1000 == 0) {
10754				ql_delay(ha, 10000);
10755			}
10756			rval = ql_program_flash_address(ha, cnt, *tmp++);
10757			if (rval != QL_SUCCESS) {
10758				break;
10759			}
10760		}
10761
10762		kmem_free(bfp, ssize);
10763	}
10764
10765	if (rval != QL_SUCCESS) {
10766		EL(ha, "failed=%xh\n", rval);
10767	} else {
10768		/*EMPTY*/
10769		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10770	}
10771	return (rval);
10772}
10773
10774/*
10775 * ql_poll_flash
10776 *	Polls flash for completion.
10777 *
10778 * Input:
10779 *	ha = adapter state pointer.
10780 *	addr = flash byte address.
10781 *	data = data to be polled.
10782 *
10783 * Returns:
10784 *	ql local function return status code.
10785 *
10786 * Context:
10787 *	Kernel context.
10788 */
10789int
10790ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10791{
10792	uint8_t		flash_data;
10793	uint32_t	cnt;
10794	int		rval = QL_FUNCTION_FAILED;
10795
10796	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10797
10798	poll_data = (uint8_t)(poll_data & BIT_7);
10799
10800	/* Wait for 30 seconds for command to finish. */
10801	for (cnt = 30000000; cnt; cnt--) {
10802		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10803
10804		if ((flash_data & BIT_7) == poll_data) {
10805			rval = QL_SUCCESS;
10806			break;
10807		}
10808		if (flash_data & BIT_5 && cnt > 2) {
10809			cnt = 2;
10810		}
10811		drv_usecwait(1);
10812	}
10813
10814	if (rval != QL_SUCCESS) {
10815		EL(ha, "failed=%xh\n", rval);
10816	} else {
10817		/*EMPTY*/
10818		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10819	}
10820	return (rval);
10821}
10822
10823/*
10824 * ql_flash_enable
10825 *	Setup flash for reading/writing.
10826 *
10827 * Input:
10828 *	ha = adapter state pointer.
10829 *
10830 * Context:
10831 *	Kernel context.
10832 */
10833void
10834ql_flash_enable(ql_adapter_state_t *ha)
10835{
10836	uint16_t	data;
10837
10838	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10839
10840	/* Enable Flash Read/Write. */
10841	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10842		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10843		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10844		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
10845		ddi_put16(ha->sbus_fpga_dev_handle,
10846		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10847		/* Read reset command sequence */
10848		ql_write_flash_byte(ha, 0xaaa, 0xaa);
10849		ql_write_flash_byte(ha, 0x555, 0x55);
10850		ql_write_flash_byte(ha, 0xaaa, 0x20);
10851		ql_write_flash_byte(ha, 0x555, 0xf0);
10852	} else {
10853		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
10854		    ISP_FLASH_ENABLE);
10855		WRT16_IO_REG(ha, ctrl_status, data);
10856
10857		/* Read/Reset Command Sequence */
10858		ql_write_flash_byte(ha, 0x5555, 0xaa);
10859		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10860		ql_write_flash_byte(ha, 0x5555, 0xf0);
10861	}
10862	(void) ql_read_flash_byte(ha, 0);
10863
10864	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10865}
10866
10867/*
10868 * ql_flash_disable
10869 *	Disable flash and allow RISC to run.
10870 *
10871 * Input:
10872 *	ha = adapter state pointer.
10873 *
10874 * Context:
10875 *	Kernel context.
10876 */
10877void
10878ql_flash_disable(ql_adapter_state_t *ha)
10879{
10880	uint16_t	data;
10881
10882	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10883
10884	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10885		/*
10886		 * Lock the flash back up.
10887		 */
10888		ql_write_flash_byte(ha, 0x555, 0x90);
10889		ql_write_flash_byte(ha, 0x555, 0x0);
10890
10891		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10892		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10893		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
10894		ddi_put16(ha->sbus_fpga_dev_handle,
10895		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10896	} else {
10897		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
10898		    ~ISP_FLASH_ENABLE);
10899		WRT16_IO_REG(ha, ctrl_status, data);
10900	}
10901
10902	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10903}
10904
10905/*
10906 * ql_write_flash_byte
10907 *	Write byte to flash.
10908 *
10909 * Input:
10910 *	ha = adapter state pointer.
10911 *	addr = flash byte address.
10912 *	data = data to be written.
10913 *
10914 * Context:
10915 *	Kernel context.
10916 */
10917void
10918ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10919{
10920	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10921		ddi_put16(ha->sbus_fpga_dev_handle,
10922		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10923		    LSW(addr));
10924		ddi_put16(ha->sbus_fpga_dev_handle,
10925		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10926		    MSW(addr));
10927		ddi_put16(ha->sbus_fpga_dev_handle,
10928		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
10929		    (uint16_t)data);
10930	} else {
10931		uint16_t bank_select;
10932
10933		/* Setup bit 16 of flash address. */
10934		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
10935
10936		if (CFG_IST(ha, CFG_CTRL_6322)) {
10937			bank_select = (uint16_t)(bank_select & ~0xf0);
10938			bank_select = (uint16_t)(bank_select |
10939			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10940			WRT16_IO_REG(ha, ctrl_status, bank_select);
10941		} else {
10942			if (addr & BIT_16 && !(bank_select &
10943			    ISP_FLASH_64K_BANK)) {
10944				bank_select = (uint16_t)(bank_select |
10945				    ISP_FLASH_64K_BANK);
10946				WRT16_IO_REG(ha, ctrl_status, bank_select);
10947			} else if (!(addr & BIT_16) && bank_select &
10948			    ISP_FLASH_64K_BANK) {
10949				bank_select = (uint16_t)(bank_select &
10950				    ~ISP_FLASH_64K_BANK);
10951				WRT16_IO_REG(ha, ctrl_status, bank_select);
10952			}
10953		}
10954
10955		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10956			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
10957			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
10958		} else {
10959			WRT16_IOMAP_REG(ha, flash_address, addr);
10960			WRT16_IOMAP_REG(ha, flash_data, data);
10961		}
10962	}
10963}
10964
10965/*
10966 * ql_read_flash_byte
10967 *	Reads byte from flash, but must read a word from chip.
10968 *
10969 * Input:
10970 *	ha = adapter state pointer.
10971 *	addr = flash byte address.
10972 *
10973 * Returns:
10974 *	byte from flash.
10975 *
10976 * Context:
10977 *	Kernel context.
10978 */
10979uint8_t
10980ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
10981{
10982	uint8_t	data;
10983
10984	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10985		ddi_put16(ha->sbus_fpga_dev_handle,
10986		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10987		    LSW(addr));
10988		ddi_put16(ha->sbus_fpga_dev_handle,
10989		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10990		    MSW(addr));
10991		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
10992		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
10993	} else {
10994		uint16_t	bank_select;
10995
10996		/* Setup bit 16 of flash address. */
10997		bank_select = RD16_IO_REG(ha, ctrl_status);
10998		if (CFG_IST(ha, CFG_CTRL_6322)) {
10999			bank_select = (uint16_t)(bank_select & ~0xf0);
11000			bank_select = (uint16_t)(bank_select |
11001			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11002			WRT16_IO_REG(ha, ctrl_status, bank_select);
11003		} else {
11004			if (addr & BIT_16 &&
11005			    !(bank_select & ISP_FLASH_64K_BANK)) {
11006				bank_select = (uint16_t)(bank_select |
11007				    ISP_FLASH_64K_BANK);
11008				WRT16_IO_REG(ha, ctrl_status, bank_select);
11009			} else if (!(addr & BIT_16) &&
11010			    bank_select & ISP_FLASH_64K_BANK) {
11011				bank_select = (uint16_t)(bank_select &
11012				    ~ISP_FLASH_64K_BANK);
11013				WRT16_IO_REG(ha, ctrl_status, bank_select);
11014			}
11015		}
11016
11017		if (CFG_IST(ha, CFG_SBUS_CARD)) {
11018			WRT16_IO_REG(ha, flash_address, addr);
11019			data = (uint8_t)RD16_IO_REG(ha, flash_data);
11020		} else {
11021			WRT16_IOMAP_REG(ha, flash_address, addr);
11022			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11023		}
11024	}
11025
11026	return (data);
11027}
11028
11029/*
11030 * ql_24xx_flash_id
11031 *	Get flash IDs.
11032 *
11033 * Input:
11034 *	ha:		adapter state pointer.
11035 *
11036 * Returns:
11037 *	ql local function return status code.
11038 *
11039 * Context:
11040 *	Kernel context.
11041 */
11042int
11043ql_24xx_flash_id(ql_adapter_state_t *vha)
11044{
11045	int			rval;
11046	uint32_t		fdata = 0;
11047	ql_adapter_state_t	*ha = vha->pha;
11048	ql_xioctl_t		*xp = ha->xioctl;
11049
11050	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11051
11052	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11053
11054	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11055		fdata = 0;
11056		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11057		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11058	}
11059
11060	if (rval != QL_SUCCESS) {
11061		EL(ha, "24xx read_flash failed=%xh\n", rval);
11062	} else if (fdata != 0) {
11063		xp->fdesc.flash_manuf = LSB(LSW(fdata));
11064		xp->fdesc.flash_id = MSB(LSW(fdata));
11065		xp->fdesc.flash_len = LSB(MSW(fdata));
11066	} else {
11067		xp->fdesc.flash_manuf = ATMEL_FLASH;
11068		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11069		xp->fdesc.flash_len = 0;
11070	}
11071
11072	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11073
11074	return (rval);
11075}
11076
11077/*
11078 * ql_24xx_load_flash
11079 *	Loads flash.
11080 *
11081 * Input:
11082 *	ha = adapter state pointer.
11083 *	dp = data pointer.
11084 *	size = data length in bytes.
11085 *	faddr = 32bit word flash byte address.
11086 *
11087 * Returns:
11088 *	ql local function return status code.
11089 *
11090 * Context:
11091 *	Kernel context.
11092 */
11093int
11094ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11095    uint32_t faddr)
11096{
11097	int			rval;
11098	uint32_t		cnt, rest_addr, fdata, wc;
11099	dma_mem_t		dmabuf = {0};
11100	ql_adapter_state_t	*ha = vha->pha;
11101	ql_xioctl_t		*xp = ha->xioctl;
11102
11103	QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11104	    ha->instance, faddr, size);
11105
11106	/* start address must be 32 bit word aligned */
11107	if ((faddr & 0x3) != 0) {
11108		EL(ha, "incorrect buffer size alignment\n");
11109		return (QL_FUNCTION_PARAMETER_ERROR);
11110	}
11111
11112	/* Allocate DMA buffer */
11113	if (CFG_IST(ha, CFG_CTRL_2581)) {
11114		if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11115		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11116		    QL_SUCCESS) {
11117			EL(ha, "dma alloc failed, rval=%xh\n", rval);
11118			return (rval);
11119		}
11120	}
11121
11122	GLOBAL_HW_LOCK();
11123
11124	/* Enable flash write */
11125	if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11126		GLOBAL_HW_UNLOCK();
11127		EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11128		ql_free_phys(ha, &dmabuf);
11129		return (rval);
11130	}
11131
11132	/* setup mask of address range within a sector */
11133	rest_addr = (xp->fdesc.block_size - 1) >> 2;
11134
11135	faddr = faddr >> 2;	/* flash gets 32 bit words */
11136
11137	/*
11138	 * Write data to flash.
11139	 */
11140	cnt = 0;
11141	size = (size + 3) >> 2;	/* Round up & convert to dwords */
11142
11143	while (cnt < size) {
11144		/* Beginning of a sector? */
11145		if ((faddr & rest_addr) == 0) {
11146			if (CFG_IST(ha, CFG_CTRL_81XX)) {
11147				fdata = ha->flash_data_addr | faddr;
11148				rval = ql_flash_access(ha,
11149				    FAC_ERASE_SECTOR, fdata, fdata +
11150				    rest_addr, 0);
11151				if (rval != QL_SUCCESS) {
11152					EL(ha, "erase sector status="
11153					    "%xh, start=%xh, end=%xh"
11154					    "\n", rval, fdata,
11155					    fdata + rest_addr);
11156					break;
11157				}
11158			} else {
11159				fdata = (faddr & ~rest_addr) << 2;
11160				fdata = (fdata & 0xff00) |
11161				    (fdata << 16 & 0xff0000) |
11162				    (fdata >> 16 & 0xff);
11163
11164				if (rest_addr == 0x1fff) {
11165					/* 32kb sector block erase */
11166					rval = ql_24xx_write_flash(ha,
11167					    FLASH_CONF_ADDR | 0x0352,
11168					    fdata);
11169				} else {
11170					/* 64kb sector block erase */
11171					rval = ql_24xx_write_flash(ha,
11172					    FLASH_CONF_ADDR | 0x03d8,
11173					    fdata);
11174				}
11175				if (rval != QL_SUCCESS) {
11176					EL(ha, "Unable to flash sector"
11177					    ": address=%xh\n", faddr);
11178					break;
11179				}
11180			}
11181		}
11182
11183		/* Write data */
11184		if (CFG_IST(ha, CFG_CTRL_2581) &&
11185		    ((faddr & 0x3f) == 0)) {
11186			/*
11187			 * Limit write up to sector boundary.
11188			 */
11189			wc = ((~faddr & (rest_addr>>1)) + 1);
11190
11191			if (size - cnt < wc) {
11192				wc = size - cnt;
11193			}
11194
11195			ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11196			    (uint8_t *)dmabuf.bp, wc<<2,
11197			    DDI_DEV_AUTOINCR);
11198
11199			rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11200			    faddr, dmabuf.cookie.dmac_laddress, wc);
11201			if (rval != QL_SUCCESS) {
11202				EL(ha, "unable to dma to flash "
11203				    "address=%xh\n", faddr << 2);
11204				break;
11205			}
11206
11207			cnt += wc;
11208			faddr += wc;
11209			dp += wc << 2;
11210		} else {
11211			fdata = *dp++;
11212			fdata |= *dp++ << 8;
11213			fdata |= *dp++ << 16;
11214			fdata |= *dp++ << 24;
11215			rval = ql_24xx_write_flash(ha,
11216			    ha->flash_data_addr | faddr, fdata);
11217			if (rval != QL_SUCCESS) {
11218				EL(ha, "Unable to program flash "
11219				    "address=%xh data=%xh\n", faddr,
11220				    *dp);
11221				break;
11222			}
11223			cnt++;
11224			faddr++;
11225
11226			/* Allow other system activity. */
11227			if (cnt % 0x1000 == 0) {
11228				ql_delay(ha, 10000);
11229			}
11230		}
11231	}
11232
11233	ql_24xx_protect_flash(ha);
11234
11235	ql_free_phys(ha, &dmabuf);
11236
11237	GLOBAL_HW_UNLOCK();
11238
11239	if (rval != QL_SUCCESS) {
11240		EL(ha, "failed=%xh\n", rval);
11241	} else {
11242		/*EMPTY*/
11243		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11244	}
11245	return (rval);
11246}
11247
11248/*
11249 * ql_24xx_read_flash
11250 *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11251 *
11252 * Input:
11253 *	ha:	adapter state pointer.
11254 *	faddr:	NVRAM/FLASH address.
11255 *	bp:	data pointer.
11256 *
11257 * Returns:
11258 *	ql local function return status code.
11259 *
11260 * Context:
11261 *	Kernel context.
11262 */
11263int
11264ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11265{
11266	uint32_t		timer;
11267	int			rval = QL_SUCCESS;
11268	ql_adapter_state_t	*ha = vha->pha;
11269
11270	/* Clear access error flag */
11271	WRT32_IO_REG(ha, ctrl_status,
11272	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11273
11274	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11275
11276	/* Wait for READ cycle to complete. */
11277	for (timer = 300000; timer; timer--) {
11278		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11279			break;
11280		}
11281		drv_usecwait(10);
11282	}
11283
11284	if (timer == 0) {
11285		EL(ha, "failed, timeout\n");
11286		rval = QL_FUNCTION_TIMEOUT;
11287	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11288		EL(ha, "failed, access error\n");
11289		rval = QL_FUNCTION_FAILED;
11290	}
11291
11292	*bp = RD32_IO_REG(ha, flash_data);
11293
11294	return (rval);
11295}
11296
11297/*
11298 * ql_24xx_write_flash
11299 *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11300 *
11301 * Input:
11302 *	ha:	adapter state pointer.
11303 *	addr:	NVRAM/FLASH address.
11304 *	value:	data.
11305 *
11306 * Returns:
11307 *	ql local function return status code.
11308 *
11309 * Context:
11310 *	Kernel context.
11311 */
11312int
11313ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11314{
11315	uint32_t		timer, fdata;
11316	int			rval = QL_SUCCESS;
11317	ql_adapter_state_t	*ha = vha->pha;
11318
11319	/* Clear access error flag */
11320	WRT32_IO_REG(ha, ctrl_status,
11321	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11322
11323	WRT32_IO_REG(ha, flash_data, data);
11324	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
11325	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11326
11327	/* Wait for Write cycle to complete. */
11328	for (timer = 3000000; timer; timer--) {
11329		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11330			/* Check flash write in progress. */
11331			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11332				(void) ql_24xx_read_flash(ha,
11333				    FLASH_CONF_ADDR | 0x005, &fdata);
11334				if (!(fdata & BIT_0)) {
11335					break;
11336				}
11337			} else {
11338				break;
11339			}
11340		}
11341		drv_usecwait(10);
11342	}
11343	if (timer == 0) {
11344		EL(ha, "failed, timeout\n");
11345		rval = QL_FUNCTION_TIMEOUT;
11346	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11347		EL(ha, "access error\n");
11348		rval = QL_FUNCTION_FAILED;
11349	}
11350
11351	return (rval);
11352}
11353/*
11354 * ql_24xx_unprotect_flash
11355 *	Enable writes
11356 *
11357 * Input:
11358 *	ha:	adapter state pointer.
11359 *
11360 * Returns:
11361 *	ql local function return status code.
11362 *
11363 * Context:
11364 *	Kernel context.
11365 */
11366int
11367ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11368{
11369	int			rval;
11370	uint32_t		fdata;
11371	ql_adapter_state_t	*ha = vha->pha;
11372	ql_xioctl_t		*xp = ha->xioctl;
11373
11374	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11375
11376	if (CFG_IST(ha, CFG_CTRL_81XX)) {
11377		if (ha->task_daemon_flags & FIRMWARE_UP) {
11378			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11379			    0)) != QL_SUCCESS) {
11380				EL(ha, "status=%xh\n", rval);
11381			}
11382			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11383			    ha->instance);
11384			return (rval);
11385		}
11386	} else {
11387		/* Enable flash write. */
11388		WRT32_IO_REG(ha, ctrl_status,
11389		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11390		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11391	}
11392
11393	/*
11394	 * Remove block write protection (SST and ST) and
11395	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11396	 * Unprotect sectors.
11397	 */
11398	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11399	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11400
11401	if (xp->fdesc.unprotect_sector_cmd != 0) {
11402		for (fdata = 0; fdata < 0x10; fdata++) {
11403			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11404			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11405		}
11406
11407		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11408		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
11409		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11410		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
11411		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11412		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
11413	}
11414
11415	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11416
11417	return (QL_SUCCESS);
11418}
11419
11420/*
11421 * ql_24xx_protect_flash
11422 *	Disable writes
11423 *
11424 * Input:
11425 *	ha:	adapter state pointer.
11426 *
11427 * Context:
11428 *	Kernel context.
11429 */
11430void
11431ql_24xx_protect_flash(ql_adapter_state_t *vha)
11432{
11433	int			rval;
11434	uint32_t		fdata;
11435	ql_adapter_state_t	*ha = vha->pha;
11436	ql_xioctl_t		*xp = ha->xioctl;
11437
11438	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11439
11440	if (CFG_IST(ha, CFG_CTRL_81XX)) {
11441		if (ha->task_daemon_flags & FIRMWARE_UP) {
11442			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
11443			    0)) != QL_SUCCESS) {
11444				EL(ha, "status=%xh\n", rval);
11445			}
11446			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11447			    ha->instance);
11448			return;
11449		}
11450	} else {
11451		/* Enable flash write. */
11452		WRT32_IO_REG(ha, ctrl_status,
11453		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11454		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11455	}
11456
11457	/*
11458	 * Protect sectors.
11459	 * Set block write protection (SST and ST) and
11460	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11461	 */
11462	if (xp->fdesc.protect_sector_cmd != 0) {
11463		for (fdata = 0; fdata < 0x10; fdata++) {
11464			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11465			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
11466		}
11467		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11468		    xp->fdesc.protect_sector_cmd, 0x00400f);
11469		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11470		    xp->fdesc.protect_sector_cmd, 0x00600f);
11471		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
11472		    xp->fdesc.protect_sector_cmd, 0x00800f);
11473
11474		/* TODO: ??? */
11475		(void) ql_24xx_write_flash(ha,
11476		    FLASH_CONF_ADDR | 0x101, 0x80);
11477	} else {
11478		(void) ql_24xx_write_flash(ha,
11479		    FLASH_CONF_ADDR | 0x101, 0x9c);
11480	}
11481
11482	/* Disable flash write. */
11483	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
11484		WRT32_IO_REG(ha, ctrl_status,
11485		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
11486		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
11487	}
11488
11489	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11490}
11491
11492/*
11493 * ql_dump_firmware
11494 *	Save RISC code state information.
11495 *
11496 * Input:
11497 *	ha = adapter state pointer.
11498 *
11499 * Returns:
11500 *	QL local function return status code.
11501 *
11502 * Context:
11503 *	Kernel context.
11504 */
11505static int
11506ql_dump_firmware(ql_adapter_state_t *vha)
11507{
11508	int			rval;
11509	clock_t			timer;
11510	ql_adapter_state_t	*ha = vha->pha;
11511
11512	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11513
11514	QL_DUMP_LOCK(ha);
11515
11516	if (ha->ql_dump_state & QL_DUMPING ||
11517	    (ha->ql_dump_state & QL_DUMP_VALID &&
11518	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11519		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11520		QL_DUMP_UNLOCK(ha);
11521		return (QL_SUCCESS);
11522	}
11523
11524	QL_DUMP_UNLOCK(ha);
11525
11526	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11527
11528	/*
11529	 * Wait for all outstanding commands to complete
11530	 */
11531	(void) ql_wait_outstanding(ha);
11532
11533	/* Dump firmware. */
11534	rval = ql_binary_fw_dump(ha, TRUE);
11535
11536	/* Do abort to force restart. */
11537	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11538	EL(ha, "restarting, isp_abort_needed\n");
11539
11540	/* Acquire task daemon lock. */
11541	TASK_DAEMON_LOCK(ha);
11542
11543	/* Wait for suspension to end. */
11544	while (ha->task_daemon_flags & QL_SUSPENDED) {
11545		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11546
11547		/* 30 seconds from now */
11548		timer = ddi_get_lbolt();
11549		timer += drv_usectohz(30000000);
11550
11551		if (cv_timedwait(&ha->cv_dr_suspended,
11552		    &ha->task_daemon_mutex, timer) == -1) {
11553			/*
11554			 * The timeout time 'timer' was
11555			 * reached without the condition
11556			 * being signaled.
11557			 */
11558			break;
11559		}
11560	}
11561
11562	/* Release task daemon lock. */
11563	TASK_DAEMON_UNLOCK(ha);
11564
11565	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11566		/*EMPTY*/
11567		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11568	} else {
11569		EL(ha, "failed, rval = %xh\n", rval);
11570	}
11571	return (rval);
11572}
11573
11574/*
11575 * ql_binary_fw_dump
11576 *	Dumps binary data from firmware.
11577 *
11578 * Input:
11579 *	ha = adapter state pointer.
11580 *	lock_needed = mailbox lock needed.
11581 *
11582 * Returns:
11583 *	ql local function return status code.
11584 *
11585 * Context:
11586 *	Interrupt or Kernel context, no mailbox commands allowed.
11587 */
11588int
11589ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11590{
11591	clock_t			timer;
11592	mbx_cmd_t		mc;
11593	mbx_cmd_t		*mcp = &mc;
11594	int			rval = QL_SUCCESS;
11595	ql_adapter_state_t	*ha = vha->pha;
11596
11597	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11598
11599	QL_DUMP_LOCK(ha);
11600
11601	if (ha->ql_dump_state & QL_DUMPING ||
11602	    (ha->ql_dump_state & QL_DUMP_VALID &&
11603	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11604		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11605		QL_DUMP_UNLOCK(ha);
11606		return (QL_DATA_EXISTS);
11607	}
11608
11609	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11610	ha->ql_dump_state |= QL_DUMPING;
11611
11612	QL_DUMP_UNLOCK(ha);
11613
11614	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {
11615
11616		/* Insert Time Stamp */
11617		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11618		    FTO_INSERT_TIME_STAMP);
11619		if (rval != QL_SUCCESS) {
11620			EL(ha, "f/w extended trace insert"
11621			    "time stamp failed: %xh\n", rval);
11622		}
11623	}
11624
11625	if (lock_needed == TRUE) {
11626		/* Acquire mailbox register lock. */
11627		MBX_REGISTER_LOCK(ha);
11628
11629		/* Check for mailbox available, if not wait for signal. */
11630		while (ha->mailbox_flags & MBX_BUSY_FLG) {
11631			ha->mailbox_flags = (uint8_t)
11632			    (ha->mailbox_flags | MBX_WANT_FLG);
11633
11634			/* 30 seconds from now */
11635			timer = ddi_get_lbolt();
11636			timer += (ha->mcp->timeout + 2) *
11637			    drv_usectohz(1000000);
11638			if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11639			    timer) == -1) {
11640				/*
11641				 * The timeout time 'timer' was
11642				 * reached without the condition
11643				 * being signaled.
11644				 */
11645
11646				/* Release mailbox register lock. */
11647				MBX_REGISTER_UNLOCK(ha);
11648
11649				EL(ha, "failed, rval = %xh\n",
11650				    QL_FUNCTION_TIMEOUT);
11651				return (QL_FUNCTION_TIMEOUT);
11652			}
11653		}
11654
11655		/* Set busy flag. */
11656		ha->mailbox_flags = (uint8_t)
11657		    (ha->mailbox_flags | MBX_BUSY_FLG);
11658		mcp->timeout = 120;
11659		ha->mcp = mcp;
11660
11661		/* Release mailbox register lock. */
11662		MBX_REGISTER_UNLOCK(ha);
11663	}
11664
11665	/* Free previous dump buffer. */
11666	if (ha->ql_dump_ptr != NULL) {
11667		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11668		ha->ql_dump_ptr = NULL;
11669	}
11670
11671	if (CFG_IST(ha, CFG_CTRL_2422)) {
11672		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11673		    ha->fw_ext_memory_size);
11674	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11675		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11676		    ha->fw_ext_memory_size);
11677	} else {
11678		ha->ql_dump_size = sizeof (ql_fw_dump_t);
11679	}
11680
11681	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11682	    NULL) {
11683		rval = QL_MEMORY_ALLOC_FAILED;
11684	} else {
11685		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11686			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11687		} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11688			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11689		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
11690			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11691		} else {
11692			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11693		}
11694	}
11695
11696	/* Reset ISP chip. */
11697	ql_reset_chip(ha);
11698
11699	QL_DUMP_LOCK(ha);
11700
11701	if (rval != QL_SUCCESS) {
11702		if (ha->ql_dump_ptr != NULL) {
11703			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11704			ha->ql_dump_ptr = NULL;
11705		}
11706		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11707		    QL_DUMP_UPLOADED);
11708		EL(ha, "failed, rval = %xh\n", rval);
11709	} else {
11710		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11711		ha->ql_dump_state |= QL_DUMP_VALID;
11712		EL(ha, "done\n");
11713	}
11714
11715	QL_DUMP_UNLOCK(ha);
11716
11717	return (rval);
11718}
11719
11720/*
11721 * ql_ascii_fw_dump
11722 *	Converts firmware binary dump to ascii.
11723 *
11724 * Input:
11725 *	ha = adapter state pointer.
11726 *	bptr = buffer pointer.
11727 *
11728 * Returns:
11729 *	Amount of data buffer used.
11730 *
11731 * Context:
11732 *	Kernel context.
11733 */
11734size_t
11735ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11736{
11737	uint32_t		cnt;
11738	caddr_t			bp;
11739	int			mbox_cnt;
11740	ql_adapter_state_t	*ha = vha->pha;
11741	ql_fw_dump_t		*fw = ha->ql_dump_ptr;
11742
11743	if (CFG_IST(ha, CFG_CTRL_2422)) {
11744		return (ql_24xx_ascii_fw_dump(ha, bufp));
11745	} else if (CFG_IST(ha, CFG_CTRL_2581)) {
11746		return (ql_25xx_ascii_fw_dump(ha, bufp));
11747	}
11748
11749	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11750
11751	if (CFG_IST(ha, CFG_CTRL_2300)) {
11752		(void) sprintf(bufp, "\nISP 2300IP ");
11753	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
11754		(void) sprintf(bufp, "\nISP 6322FLX ");
11755	} else {
11756		(void) sprintf(bufp, "\nISP 2200IP ");
11757	}
11758
11759	bp = bufp + strlen(bufp);
11760	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
11761	    ha->fw_major_version, ha->fw_minor_version,
11762	    ha->fw_subminor_version);
11763
11764	(void) strcat(bufp, "\nPBIU Registers:");
11765	bp = bufp + strlen(bufp);
11766	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
11767		if (cnt % 8 == 0) {
11768			*bp++ = '\n';
11769		}
11770		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
11771		bp = bp + 6;
11772	}
11773
11774	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11775		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
11776		    "registers:");
11777		bp = bufp + strlen(bufp);
11778		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
11779			if (cnt % 8 == 0) {
11780				*bp++ = '\n';
11781			}
11782			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
11783			bp = bp + 6;
11784		}
11785	}
11786
11787	(void) strcat(bp, "\n\nMailbox Registers:");
11788	bp = bufp + strlen(bufp);
11789	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
11790	for (cnt = 0; cnt < mbox_cnt; cnt++) {
11791		if (cnt % 8 == 0) {
11792			*bp++ = '\n';
11793		}
11794		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
11795		bp = bp + 6;
11796	}
11797
11798	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11799		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
11800		bp = bufp + strlen(bufp);
11801		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
11802			if (cnt % 8 == 0) {
11803				*bp++ = '\n';
11804			}
11805			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
11806			bp = bp + 6;
11807		}
11808	}
11809
11810	(void) strcat(bp, "\n\nDMA Registers:");
11811	bp = bufp + strlen(bufp);
11812	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
11813		if (cnt % 8 == 0) {
11814			*bp++ = '\n';
11815		}
11816		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
11817		bp = bp + 6;
11818	}
11819
11820	(void) strcat(bp, "\n\nRISC Hardware Registers:");
11821	bp = bufp + strlen(bufp);
11822	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
11823		if (cnt % 8 == 0) {
11824			*bp++ = '\n';
11825		}
11826		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
11827		bp = bp + 6;
11828	}
11829
11830	(void) strcat(bp, "\n\nRISC GP0 Registers:");
11831	bp = bufp + strlen(bufp);
11832	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
11833		if (cnt % 8 == 0) {
11834			*bp++ = '\n';
11835		}
11836		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
11837		bp = bp + 6;
11838	}
11839
11840	(void) strcat(bp, "\n\nRISC GP1 Registers:");
11841	bp = bufp + strlen(bufp);
11842	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
11843		if (cnt % 8 == 0) {
11844			*bp++ = '\n';
11845		}
11846		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
11847		bp = bp + 6;
11848	}
11849
11850	(void) strcat(bp, "\n\nRISC GP2 Registers:");
11851	bp = bufp + strlen(bufp);
11852	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
11853		if (cnt % 8 == 0) {
11854			*bp++ = '\n';
11855		}
11856		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
11857		bp = bp + 6;
11858	}
11859
11860	(void) strcat(bp, "\n\nRISC GP3 Registers:");
11861	bp = bufp + strlen(bufp);
11862	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
11863		if (cnt % 8 == 0) {
11864			*bp++ = '\n';
11865		}
11866		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
11867		bp = bp + 6;
11868	}
11869
11870	(void) strcat(bp, "\n\nRISC GP4 Registers:");
11871	bp = bufp + strlen(bufp);
11872	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
11873		if (cnt % 8 == 0) {
11874			*bp++ = '\n';
11875		}
11876		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
11877		bp = bp + 6;
11878	}
11879
11880	(void) strcat(bp, "\n\nRISC GP5 Registers:");
11881	bp = bufp + strlen(bufp);
11882	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
11883		if (cnt % 8 == 0) {
11884			*bp++ = '\n';
11885		}
11886		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
11887		bp = bp + 6;
11888	}
11889
11890	(void) strcat(bp, "\n\nRISC GP6 Registers:");
11891	bp = bufp + strlen(bufp);
11892	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
11893		if (cnt % 8 == 0) {
11894			*bp++ = '\n';
11895		}
11896		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
11897		bp = bp + 6;
11898	}
11899
11900	(void) strcat(bp, "\n\nRISC GP7 Registers:");
11901	bp = bufp + strlen(bufp);
11902	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
11903		if (cnt % 8 == 0) {
11904			*bp++ = '\n';
11905		}
11906		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
11907		bp = bp + 6;
11908	}
11909
11910	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
11911	bp = bufp + strlen(bufp);
11912	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
11913		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
11914		    CFG_CTRL_6322)) == 0))) {
11915			break;
11916		}
11917		if (cnt % 8 == 0) {
11918			*bp++ = '\n';
11919		}
11920		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
11921		bp = bp + 6;
11922	}
11923
11924	(void) strcat(bp, "\n\nFPM B0 Registers:");
11925	bp = bufp + strlen(bufp);
11926	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
11927		if (cnt % 8 == 0) {
11928			*bp++ = '\n';
11929		}
11930		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
11931		bp = bp + 6;
11932	}
11933
11934	(void) strcat(bp, "\n\nFPM B1 Registers:");
11935	bp = bufp + strlen(bufp);
11936	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
11937		if (cnt % 8 == 0) {
11938			*bp++ = '\n';
11939		}
11940		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
11941		bp = bp + 6;
11942	}
11943
11944	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11945		(void) strcat(bp, "\n\nCode RAM Dump:");
11946		bp = bufp + strlen(bufp);
11947		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
11948			if (cnt % 8 == 0) {
11949				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
11950				bp = bp + 8;
11951			}
11952			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11953			bp = bp + 6;
11954		}
11955
11956		(void) strcat(bp, "\n\nStack RAM Dump:");
11957		bp = bufp + strlen(bufp);
11958		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
11959			if (cnt % 8 == 0) {
11960				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
11961				bp = bp + 8;
11962			}
11963			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
11964			bp = bp + 6;
11965		}
11966
11967		(void) strcat(bp, "\n\nData RAM Dump:");
11968		bp = bufp + strlen(bufp);
11969		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
11970			if (cnt % 8 == 0) {
11971				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
11972				bp = bp + 8;
11973			}
11974			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
11975			bp = bp + 6;
11976		}
11977	} else {
11978		(void) strcat(bp, "\n\nRISC SRAM:");
11979		bp = bufp + strlen(bufp);
11980		for (cnt = 0; cnt < 0xf000; cnt++) {
11981			if (cnt % 8 == 0) {
11982				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
11983				bp = bp + 7;
11984			}
11985			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11986			bp = bp + 6;
11987		}
11988	}
11989
11990	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
11991	bp += strlen(bp);
11992
11993	(void) sprintf(bp, "\n\nRequest Queue");
11994	bp += strlen(bp);
11995	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
11996		if (cnt % 8 == 0) {
11997			(void) sprintf(bp, "\n%08x: ", cnt);
11998			bp += strlen(bp);
11999		}
12000		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12001		bp += strlen(bp);
12002	}
12003
12004	(void) sprintf(bp, "\n\nResponse Queue");
12005	bp += strlen(bp);
12006	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12007		if (cnt % 8 == 0) {
12008			(void) sprintf(bp, "\n%08x: ", cnt);
12009			bp += strlen(bp);
12010		}
12011		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12012		bp += strlen(bp);
12013	}
12014
12015	(void) sprintf(bp, "\n");
12016
12017	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12018
12019	return (strlen(bufp));
12020}
12021
12022/*
12023 * ql_24xx_ascii_fw_dump
12024 *	Converts ISP24xx firmware binary dump to ascii.
12025 *
12026 * Input:
12027 *	ha = adapter state pointer.
12028 *	bptr = buffer pointer.
12029 *
12030 * Returns:
12031 *	Amount of data buffer used.
12032 *
12033 * Context:
12034 *	Kernel context.
12035 */
12036static size_t
12037ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12038{
12039	uint32_t		cnt;
12040	caddr_t			bp = bufp;
12041	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12042
12043	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12044
12045	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12046	    ha->fw_major_version, ha->fw_minor_version,
12047	    ha->fw_subminor_version, ha->fw_attributes);
12048	bp += strlen(bp);
12049
12050	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12051
12052	(void) strcat(bp, "\nHost Interface Registers");
12053	bp += strlen(bp);
12054	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12055		if (cnt % 8 == 0) {
12056			(void) sprintf(bp++, "\n");
12057		}
12058
12059		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12060		bp += 9;
12061	}
12062
12063	(void) sprintf(bp, "\n\nMailbox Registers");
12064	bp += strlen(bp);
12065	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12066		if (cnt % 16 == 0) {
12067			(void) sprintf(bp++, "\n");
12068		}
12069
12070		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12071		bp += 5;
12072	}
12073
12074	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12075	bp += strlen(bp);
12076	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12077		if (cnt % 8 == 0) {
12078			(void) sprintf(bp++, "\n");
12079		}
12080
12081		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12082		bp += 9;
12083	}
12084
12085	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12086	bp += strlen(bp);
12087	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12088		if (cnt % 8 == 0) {
12089			(void) sprintf(bp++, "\n");
12090		}
12091
12092		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12093		bp += 9;
12094	}
12095
12096	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12097	bp += strlen(bp);
12098	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12099		if (cnt % 8 == 0) {
12100			(void) sprintf(bp++, "\n");
12101		}
12102
12103		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12104		bp += 9;
12105	}
12106
12107	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12108	bp += strlen(bp);
12109	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12110		if (cnt % 8 == 0) {
12111			(void) sprintf(bp++, "\n");
12112		}
12113
12114		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12115		bp += 9;
12116	}
12117
12118	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12119	bp += strlen(bp);
12120	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12121		if (cnt % 8 == 0) {
12122			(void) sprintf(bp++, "\n");
12123		}
12124
12125		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12126		bp += 9;
12127	}
12128
12129	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12130	bp += strlen(bp);
12131	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12132		if (cnt % 8 == 0) {
12133			(void) sprintf(bp++, "\n");
12134		}
12135
12136		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12137		bp += 9;
12138	}
12139
12140	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12141	bp += strlen(bp);
12142	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12143		if (cnt % 8 == 0) {
12144			(void) sprintf(bp++, "\n");
12145		}
12146
12147		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12148		bp += 9;
12149	}
12150
12151	(void) sprintf(bp, "\n\nCommand DMA Registers");
12152	bp += strlen(bp);
12153	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12154		if (cnt % 8 == 0) {
12155			(void) sprintf(bp++, "\n");
12156		}
12157
12158		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12159		bp += 9;
12160	}
12161
12162	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12163	bp += strlen(bp);
12164	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12165		if (cnt % 8 == 0) {
12166			(void) sprintf(bp++, "\n");
12167		}
12168
12169		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12170		bp += 9;
12171	}
12172
12173	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12174	bp += strlen(bp);
12175	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12176		if (cnt % 8 == 0) {
12177			(void) sprintf(bp++, "\n");
12178		}
12179
12180		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12181		bp += 9;
12182	}
12183
12184	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12185	bp += strlen(bp);
12186	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12187		if (cnt % 8 == 0) {
12188			(void) sprintf(bp++, "\n");
12189		}
12190
12191		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12192		bp += 9;
12193	}
12194
12195	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12196	bp += strlen(bp);
12197	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12198		if (cnt % 8 == 0) {
12199			(void) sprintf(bp++, "\n");
12200		}
12201
12202		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12203		bp += 9;
12204	}
12205
12206	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12207	bp += strlen(bp);
12208	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12209		if (cnt % 8 == 0) {
12210			(void) sprintf(bp++, "\n");
12211		}
12212
12213		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12214		bp += 9;
12215	}
12216
12217	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12218	bp += strlen(bp);
12219	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12220		if (cnt % 8 == 0) {
12221			(void) sprintf(bp++, "\n");
12222		}
12223
12224		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12225		bp += 9;
12226	}
12227
12228	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12229	bp += strlen(bp);
12230	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12231		if (cnt % 8 == 0) {
12232			(void) sprintf(bp++, "\n");
12233		}
12234
12235		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12236		bp += 9;
12237	}
12238
12239	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12240	bp += strlen(bp);
12241	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12242		if (cnt % 8 == 0) {
12243			(void) sprintf(bp++, "\n");
12244		}
12245
12246		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12247		bp += 9;
12248	}
12249
12250	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12251	bp += strlen(bp);
12252	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12253		if (cnt % 8 == 0) {
12254			(void) sprintf(bp++, "\n");
12255		}
12256
12257		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12258		bp += 9;
12259	}
12260
12261	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12262	bp += strlen(bp);
12263	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12264		if (cnt % 8 == 0) {
12265			(void) sprintf(bp++, "\n");
12266		}
12267
12268		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12269		bp += 9;
12270	}
12271
12272	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12273	bp += strlen(bp);
12274	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12275		if (cnt % 8 == 0) {
12276			(void) sprintf(bp++, "\n");
12277		}
12278
12279		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12280		bp += 9;
12281	}
12282
12283	(void) sprintf(bp, "\n\nRISC GP Registers");
12284	bp += strlen(bp);
12285	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12286		if (cnt % 8 == 0) {
12287			(void) sprintf(bp++, "\n");
12288		}
12289
12290		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12291		bp += 9;
12292	}
12293
12294	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12295	bp += strlen(bp);
12296	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12297		if (cnt % 8 == 0) {
12298			(void) sprintf(bp++, "\n");
12299		}
12300
12301		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12302		bp += 9;
12303	}
12304
12305	(void) sprintf(bp, "\n\nLMC Registers");
12306	bp += strlen(bp);
12307	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12308		if (cnt % 8 == 0) {
12309			(void) sprintf(bp++, "\n");
12310		}
12311
12312		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12313		bp += 9;
12314	}
12315
12316	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12317	bp += strlen(bp);
12318	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12319		if (cnt % 8 == 0) {
12320			(void) sprintf(bp++, "\n");
12321		}
12322
12323		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12324		bp += 9;
12325	}
12326
12327	(void) sprintf(bp, "\n\nFB Hardware Registers");
12328	bp += strlen(bp);
12329	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12330		if (cnt % 8 == 0) {
12331			(void) sprintf(bp++, "\n");
12332		}
12333
12334		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12335		bp += 9;
12336	}
12337
12338	(void) sprintf(bp, "\n\nCode RAM");
12339	bp += strlen(bp);
12340	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12341		if (cnt % 8 == 0) {
12342			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12343			bp += 11;
12344		}
12345
12346		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12347		bp += 9;
12348	}
12349
12350	(void) sprintf(bp, "\n\nExternal Memory");
12351	bp += strlen(bp);
12352	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12353		if (cnt % 8 == 0) {
12354			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12355			bp += 11;
12356		}
12357		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12358		bp += 9;
12359	}
12360
12361	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12362	bp += strlen(bp);
12363
12364	(void) sprintf(bp, "\n\nRequest Queue");
12365	bp += strlen(bp);
12366	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12367		if (cnt % 8 == 0) {
12368			(void) sprintf(bp, "\n%08x: ", cnt);
12369			bp += strlen(bp);
12370		}
12371		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12372		bp += strlen(bp);
12373	}
12374
12375	(void) sprintf(bp, "\n\nResponse Queue");
12376	bp += strlen(bp);
12377	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12378		if (cnt % 8 == 0) {
12379			(void) sprintf(bp, "\n%08x: ", cnt);
12380			bp += strlen(bp);
12381		}
12382		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12383		bp += strlen(bp);
12384	}
12385
12386	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12387	    (ha->fwexttracebuf.bp != NULL)) {
12388		uint32_t cnt_b = 0;
12389		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12390
12391		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12392		bp += strlen(bp);
12393		/* show data address as a byte address, data as long words */
12394		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12395			cnt_b = cnt * 4;
12396			if (cnt_b % 32 == 0) {
12397				(void) sprintf(bp, "\n%08x: ",
12398				    (int)(w64 + cnt_b));
12399				bp += 11;
12400			}
12401			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12402			bp += 9;
12403		}
12404	}
12405
12406	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12407	    (ha->fwfcetracebuf.bp != NULL)) {
12408		uint32_t cnt_b = 0;
12409		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12410
12411		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12412		bp += strlen(bp);
12413		/* show data address as a byte address, data as long words */
12414		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12415			cnt_b = cnt * 4;
12416			if (cnt_b % 32 == 0) {
12417				(void) sprintf(bp, "\n%08x: ",
12418				    (int)(w64 + cnt_b));
12419				bp += 11;
12420			}
12421			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12422			bp += 9;
12423		}
12424	}
12425
12426	(void) sprintf(bp, "\n\n");
12427	bp += strlen(bp);
12428
12429	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12430
12431	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12432
12433	return (cnt);
12434}
12435
12436/*
12437 * ql_25xx_ascii_fw_dump
12438 *	Converts ISP25xx firmware binary dump to ascii.
12439 *
12440 * Input:
12441 *	ha = adapter state pointer.
12442 *	bptr = buffer pointer.
12443 *
12444 * Returns:
12445 *	Amount of data buffer used.
12446 *
12447 * Context:
12448 *	Kernel context.
12449 */
12450static size_t
12451ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12452{
12453	uint32_t		cnt;
12454	caddr_t			bp = bufp;
12455	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12456
12457	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12458
12459	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12460	    ha->fw_major_version, ha->fw_minor_version,
12461	    ha->fw_subminor_version, ha->fw_attributes);
12462	bp += strlen(bp);
12463
12464	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12465	bp += strlen(bp);
12466
12467	(void) sprintf(bp, "\nHostRisc Registers");
12468	bp += strlen(bp);
12469	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12470		if (cnt % 8 == 0) {
12471			(void) sprintf(bp++, "\n");
12472		}
12473		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12474		bp += 9;
12475	}
12476
12477	(void) sprintf(bp, "\n\nPCIe Registers");
12478	bp += strlen(bp);
12479	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12480		if (cnt % 8 == 0) {
12481			(void) sprintf(bp++, "\n");
12482		}
12483		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12484		bp += 9;
12485	}
12486
12487	(void) strcat(bp, "\n\nHost Interface Registers");
12488	bp += strlen(bp);
12489	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12490		if (cnt % 8 == 0) {
12491			(void) sprintf(bp++, "\n");
12492		}
12493		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12494		bp += 9;
12495	}
12496
12497	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12498	bp += strlen(bp);
12499	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12500		if (cnt % 8 == 0) {
12501			(void) sprintf(bp++, "\n");
12502		}
12503		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12504		bp += 9;
12505	}
12506
12507	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12508	    fw->risc_io);
12509	bp += strlen(bp);
12510
12511	(void) sprintf(bp, "\n\nMailbox Registers");
12512	bp += strlen(bp);
12513	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12514		if (cnt % 16 == 0) {
12515			(void) sprintf(bp++, "\n");
12516		}
12517		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12518		bp += 5;
12519	}
12520
12521	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12522	bp += strlen(bp);
12523	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12524		if (cnt % 8 == 0) {
12525			(void) sprintf(bp++, "\n");
12526		}
12527		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12528		bp += 9;
12529	}
12530
12531	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12532	bp += strlen(bp);
12533	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12534		if (cnt % 8 == 0) {
12535			(void) sprintf(bp++, "\n");
12536		}
12537		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12538		bp += 9;
12539	}
12540
12541	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12542	bp += strlen(bp);
12543	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12544		if (cnt % 8 == 0) {
12545			(void) sprintf(bp++, "\n");
12546		}
12547		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12548		bp += 9;
12549	}
12550
12551	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12552	bp += strlen(bp);
12553	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12554		if (cnt % 8 == 0) {
12555			(void) sprintf(bp++, "\n");
12556		}
12557		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12558		bp += 9;
12559	}
12560
12561	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12562	bp += strlen(bp);
12563	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12564		if (cnt % 8 == 0) {
12565			(void) sprintf(bp++, "\n");
12566		}
12567		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12568		bp += 9;
12569	}
12570
12571	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12572	bp += strlen(bp);
12573	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12574		if (cnt % 8 == 0) {
12575			(void) sprintf(bp++, "\n");
12576		}
12577		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12578		bp += 9;
12579	}
12580
12581	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12582	bp += strlen(bp);
12583	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12584		if (cnt % 8 == 0) {
12585			(void) sprintf(bp++, "\n");
12586		}
12587		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12588		bp += 9;
12589	}
12590
12591	(void) sprintf(bp, "\n\nASEQ GP Registers");
12592	bp += strlen(bp);
12593	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12594		if (cnt % 8 == 0) {
12595			(void) sprintf(bp++, "\n");
12596		}
12597		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12598		bp += 9;
12599	}
12600
12601	(void) sprintf(bp, "\n\nASEQ-0 GP Registers");
12602	bp += strlen(bp);
12603	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12604		if (cnt % 8 == 0) {
12605			(void) sprintf(bp++, "\n");
12606		}
12607		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12608		bp += 9;
12609	}
12610
12611	(void) sprintf(bp, "\n\nASEQ-1 GP Registers");
12612	bp += strlen(bp);
12613	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12614		if (cnt % 8 == 0) {
12615			(void) sprintf(bp++, "\n");
12616		}
12617		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12618		bp += 9;
12619	}
12620
12621	(void) sprintf(bp, "\n\nASEQ-2 GP Registers");
12622	bp += strlen(bp);
12623	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12624		if (cnt % 8 == 0) {
12625			(void) sprintf(bp++, "\n");
12626		}
12627		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12628		bp += 9;
12629	}
12630
12631	(void) sprintf(bp, "\n\nCommand DMA Registers");
12632	bp += strlen(bp);
12633	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12634		if (cnt % 8 == 0) {
12635			(void) sprintf(bp++, "\n");
12636		}
12637		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12638		bp += 9;
12639	}
12640
12641	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12642	bp += strlen(bp);
12643	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12644		if (cnt % 8 == 0) {
12645			(void) sprintf(bp++, "\n");
12646		}
12647		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12648		bp += 9;
12649	}
12650
12651	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12652	bp += strlen(bp);
12653	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12654		if (cnt % 8 == 0) {
12655			(void) sprintf(bp++, "\n");
12656		}
12657		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12658		bp += 9;
12659	}
12660
12661	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12662	bp += strlen(bp);
12663	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12664		if (cnt % 8 == 0) {
12665			(void) sprintf(bp++, "\n");
12666		}
12667		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12668		bp += 9;
12669	}
12670
12671	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12672	bp += strlen(bp);
12673	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12674		if (cnt % 8 == 0) {
12675			(void) sprintf(bp++, "\n");
12676		}
12677		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12678		bp += 9;
12679	}
12680
12681	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12682	bp += strlen(bp);
12683	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12684		if (cnt % 8 == 0) {
12685			(void) sprintf(bp++, "\n");
12686		}
12687		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12688		bp += 9;
12689	}
12690
12691	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12692	bp += strlen(bp);
12693	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12694		if (cnt % 8 == 0) {
12695			(void) sprintf(bp++, "\n");
12696		}
12697		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12698		bp += 9;
12699	}
12700
12701	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12702	bp += strlen(bp);
12703	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12704		if (cnt % 8 == 0) {
12705			(void) sprintf(bp++, "\n");
12706		}
12707		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12708		bp += 9;
12709	}
12710
12711	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12712	bp += strlen(bp);
12713	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12714		if (cnt % 8 == 0) {
12715			(void) sprintf(bp++, "\n");
12716		}
12717		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12718		bp += 9;
12719	}
12720
12721	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12722	bp += strlen(bp);
12723	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12724		if (cnt % 8 == 0) {
12725			(void) sprintf(bp++, "\n");
12726		}
12727		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12728		bp += 9;
12729	}
12730
12731	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12732	bp += strlen(bp);
12733	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12734		if (cnt % 8 == 0) {
12735			(void) sprintf(bp++, "\n");
12736		}
12737		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12738		bp += 9;
12739	}
12740
12741	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12742	bp += strlen(bp);
12743	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12744		if (cnt % 8 == 0) {
12745			(void) sprintf(bp++, "\n");
12746		}
12747		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12748		bp += 9;
12749	}
12750
12751	(void) sprintf(bp, "\n\nRISC GP Registers");
12752	bp += strlen(bp);
12753	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12754		if (cnt % 8 == 0) {
12755			(void) sprintf(bp++, "\n");
12756		}
12757		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12758		bp += 9;
12759	}
12760
12761	(void) sprintf(bp, "\n\nLMC Registers");
12762	bp += strlen(bp);
12763	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12764		if (cnt % 8 == 0) {
12765			(void) sprintf(bp++, "\n");
12766		}
12767		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12768		bp += 9;
12769	}
12770
12771	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12772	bp += strlen(bp);
12773	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12774		if (cnt % 8 == 0) {
12775			(void) sprintf(bp++, "\n");
12776		}
12777		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12778		bp += 9;
12779	}
12780
12781	(void) sprintf(bp, "\n\nFB Hardware Registers");
12782	bp += strlen(bp);
12783	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12784		if (cnt % 8 == 0) {
12785			(void) sprintf(bp++, "\n");
12786		}
12787		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12788		bp += 9;
12789	}
12790
12791	(void) sprintf(bp, "\n\nCode RAM");
12792	bp += strlen(bp);
12793	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12794		if (cnt % 8 == 0) {
12795			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12796			bp += 11;
12797		}
12798		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12799		bp += 9;
12800	}
12801
12802	(void) sprintf(bp, "\n\nExternal Memory");
12803	bp += strlen(bp);
12804	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12805		if (cnt % 8 == 0) {
12806			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12807			bp += 11;
12808		}
12809		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12810		bp += 9;
12811	}
12812
12813	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12814	bp += strlen(bp);
12815
12816	(void) sprintf(bp, "\n\nRequest Queue");
12817	bp += strlen(bp);
12818	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12819		if (cnt % 8 == 0) {
12820			(void) sprintf(bp, "\n%08x: ", cnt);
12821			bp += strlen(bp);
12822		}
12823		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12824		bp += strlen(bp);
12825	}
12826
12827	(void) sprintf(bp, "\n\nResponse Queue");
12828	bp += strlen(bp);
12829	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12830		if (cnt % 8 == 0) {
12831			(void) sprintf(bp, "\n%08x: ", cnt);
12832			bp += strlen(bp);
12833		}
12834		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12835		bp += strlen(bp);
12836	}
12837
12838	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12839	    (ha->fwexttracebuf.bp != NULL)) {
12840		uint32_t cnt_b = 0;
12841		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12842
12843		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12844		bp += strlen(bp);
12845		/* show data address as a byte address, data as long words */
12846		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12847			cnt_b = cnt * 4;
12848			if (cnt_b % 32 == 0) {
12849				(void) sprintf(bp, "\n%08x: ",
12850				    (int)(w64 + cnt_b));
12851				bp += 11;
12852			}
12853			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12854			bp += 9;
12855		}
12856	}
12857
12858	if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12859	    (ha->fwfcetracebuf.bp != NULL)) {
12860		uint32_t cnt_b = 0;
12861		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12862
12863		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12864		bp += strlen(bp);
12865		/* show data address as a byte address, data as long words */
12866		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12867			cnt_b = cnt * 4;
12868			if (cnt_b % 32 == 0) {
12869				(void) sprintf(bp, "\n%08x: ",
12870				    (int)(w64 + cnt_b));
12871				bp += 11;
12872			}
12873			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12874			bp += 9;
12875		}
12876	}
12877
12878	(void) sprintf(bp, "\n\n");
12879	bp += strlen(bp);
12880
12881	cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12882
12883	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12884
12885	return (cnt);
12886}
12887
12888/*
 * ql_2200_binary_fw_dump
 *	Captures an ISP2200 firmware register and RISC RAM dump into the
 *	supplied binary dump buffer.
 *
12891 * Input:
12892 *	ha:	adapter state pointer.
12893 *	fw:	firmware dump context pointer.
12894 *
12895 * Returns:
12896 *	ql local function return status code.
12897 *
12898 * Context:
12899 *	Interrupt or Kernel context, no mailbox commands allowed.
12900 */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	mbx_cmd_t	mc;		/* scratch; only mb[0] status is used */
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause ack, up to 30000 waits of MILLISEC usec each. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Register capture.  Writes to ctrl_status and pcr select
		 * which register set appears in the window at iobase + 0x80,
		 * so the write/read ordering below must not be rearranged.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* Step the pcr through each RISC GP register bank. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/*
		 * Get RISC SRAM: read 0xf000 words starting at word
		 * address 0x1000 using one READ_RAM_WORD mailbox command
		 * per word; only mailbox[1] (the address) is rewritten for
		 * each word, mailbox[0] keeps the command code.
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Poll for completion, up to 6000000 * 5 usec. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
					/*
					 * semaphore BIT_0 appears to flag a
					 * completed mailbox command — other
					 * interrupt sources are just cleared.
					 */
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						/* mb[0] = command status. */
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox[0]);
						/* mailbox[2] = RAM word. */
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				rval = mcp->mb[0];
			}

			/* Abort the dump on the first failed word. */
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
13098
13099/*
 * ql_2300_binary_fw_dump
 *	Captures an ISP2300 firmware register and RISC/stack/data RAM dump
 *	into the supplied binary dump buffer.
 *
13102 * Input:
13103 *	ha:	adapter state pointer.
13104 *	fw:	firmware dump context pointer.
13105 *
13106 * Returns:
13107 *	ql local function return status code.
13108 *
13109 * Context:
13110 *	Interrupt or Kernel context, no mailbox commands allowed.
13111 */
13112static int
13113ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13114{
13115	clock_t	timer;
13116	int	rval = QL_SUCCESS;
13117
13118	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13119
13120	/* Disable ISP interrupts. */
13121	WRT16_IO_REG(ha, ictrl, 0);
13122	ADAPTER_STATE_LOCK(ha);
13123	ha->flags &= ~INTERRUPTS_ENABLED;
13124	ADAPTER_STATE_UNLOCK(ha);
13125
13126	/* Release mailbox registers. */
13127	WRT16_IO_REG(ha, semaphore, 0);
13128
13129	/* Pause RISC. */
13130	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13131	timer = 30000;
13132	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13133		if (timer-- != 0) {
13134			drv_usecwait(MILLISEC);
13135		} else {
13136			rval = QL_FUNCTION_TIMEOUT;
13137			break;
13138		}
13139	}
13140
13141	if (rval == QL_SUCCESS) {
13142		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13143		    sizeof (fw->pbiu_reg) / 2, 16);
13144
13145		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13146		    sizeof (fw->risc_host_reg) / 2, 16);
13147
13148		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13149		    sizeof (fw->mailbox_reg) / 2, 16);
13150
13151		WRT16_IO_REG(ha, ctrl_status, 0x40);
13152		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13153		    sizeof (fw->resp_dma_reg) / 2, 16);
13154
13155		WRT16_IO_REG(ha, ctrl_status, 0x50);
13156		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13157		    sizeof (fw->dma_reg) / 2, 16);
13158
13159		WRT16_IO_REG(ha, ctrl_status, 0);
13160		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13161		    sizeof (fw->risc_hdw_reg) / 2, 16);
13162
13163		WRT16_IO_REG(ha, pcr, 0x2000);
13164		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13165		    sizeof (fw->risc_gp0_reg) / 2, 16);
13166
13167		WRT16_IO_REG(ha, pcr, 0x2200);
13168		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13169		    sizeof (fw->risc_gp1_reg) / 2, 16);
13170
13171		WRT16_IO_REG(ha, pcr, 0x2400);
13172		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13173		    sizeof (fw->risc_gp2_reg) / 2, 16);
13174
13175		WRT16_IO_REG(ha, pcr, 0x2600);
13176		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13177		    sizeof (fw->risc_gp3_reg) / 2, 16);
13178
13179		WRT16_IO_REG(ha, pcr, 0x2800);
13180		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13181		    sizeof (fw->risc_gp4_reg) / 2, 16);
13182
13183		WRT16_IO_REG(ha, pcr, 0x2A00);
13184		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13185		    sizeof (fw->risc_gp5_reg) / 2, 16);
13186
13187		WRT16_IO_REG(ha, pcr, 0x2C00);
13188		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13189		    sizeof (fw->risc_gp6_reg) / 2, 16);
13190
13191		WRT16_IO_REG(ha, pcr, 0x2E00);
13192		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13193		    sizeof (fw->risc_gp7_reg) / 2, 16);
13194
13195		WRT16_IO_REG(ha, ctrl_status, 0x10);
13196		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13197		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13198
13199		WRT16_IO_REG(ha, ctrl_status, 0x20);
13200		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13201		    sizeof (fw->fpm_b0_reg) / 2, 16);
13202
13203		WRT16_IO_REG(ha, ctrl_status, 0x30);
13204		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13205		    sizeof (fw->fpm_b1_reg) / 2, 16);
13206
13207		/* Select FPM registers. */
13208		WRT16_IO_REG(ha, ctrl_status, 0x20);
13209
13210		/* FPM Soft Reset. */
13211		WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13212
13213		/* Select frame buffer registers. */
13214		WRT16_IO_REG(ha, ctrl_status, 0x10);
13215
13216		/* Reset frame buffer FIFOs. */
13217		WRT16_IO_REG(ha, fb_cmd, 0xa000);
13218
13219		/* Select RISC module registers. */
13220		WRT16_IO_REG(ha, ctrl_status, 0);
13221
13222		/* Reset RISC module. */
13223		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13224
13225		/* Reset ISP semaphore. */
13226		WRT16_IO_REG(ha, semaphore, 0);
13227
13228		/* Release RISC module. */
13229		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13230
13231		/* Wait for RISC to recover from reset. */
13232		timer = 30000;
13233		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
13234			if (timer-- != 0) {
13235				drv_usecwait(MILLISEC);
13236			} else {
13237				rval = QL_FUNCTION_TIMEOUT;
13238				break;
13239			}
13240		}
13241
13242		/* Disable RISC pause on FPM parity error. */
13243		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13244	}
13245
13246	/* Get RISC SRAM. */
13247	if (rval == QL_SUCCESS) {
13248		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13249	}
13250	/* Get STACK SRAM. */
13251	if (rval == QL_SUCCESS) {
13252		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13253	}
13254	/* Get DATA SRAM. */
13255	if (rval == QL_SUCCESS) {
13256		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13257	}
13258
13259	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13260
13261	return (rval);
13262}
13263
13264/*
13265 * ql_24xx_binary_fw_dump
13266 *
13267 * Input:
13268 *	ha:	adapter state pointer.
13269 *	fw:	firmware dump context pointer.
13270 *
13271 * Returns:
13272 *	ql local function return status code.
13273 *
13274 * Context:
13275 *	Interrupt or Kernel context, no mailbox commands allowed.
13276 */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;	/* cursor into fw dump buffer, advanced by ql_read_regs */
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture host command/control state before disturbing the chip. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/*
	 * Pause RISC so register windows read a stable snapshot.
	 * Skipped if the RISC already reports paused (e.g. after a
	 * firmware crash, which is the usual reason we are here).
	 */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll for pause: up to 30000 * 100us = 3 seconds. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				/* Setting rval also terminates the loop. */
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);	/* flush the write */
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers.
		 *
		 * Protocol: select the register bank (io_base_addr 0x0F70),
		 * then for each shadow register write a selector value to
		 * iobase+0xF0 and read the result back from iobase+0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);	/* flush the select */

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Transfer sequence registers.
		 *
		 * From here down, each WRT32_IO_REG(io_base_addr, ...)
		 * selects a 16-register window that is then read at
		 * iobase+0xC0; ql_read_regs returns a pointer just past
		 * the data it stored, which is chained through 'bp' to
		 * fill each fw array across multiple windows.
		 */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0: 8 regs at 0xC0 plus 7 more at 0xE4. */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/*
	 * Get the request queue: snapshot the host-side DMA ring,
	 * byte-swapping each word to little-endian for the dump format.
	 */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer so the CPU sees the device's last writes. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue (same treatment as the request queue). */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/*
	 * Reset RISC. Done unconditionally (even after a register-dump
	 * failure) so the chip is left in a known state; required before
	 * the RISC RAM reads below.
	 */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the extended trace buffer, if that feature is enabled. */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			/* NOTE(review): copied raw, no LITTLE_ENDIAN_32 --
			 * presumably the trace format is already LE. */
			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer, if that feature is enabled. */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13700
13701/*
13702 * ql_25xx_binary_fw_dump
13703 *
13704 * Input:
13705 *	ha:	adapter state pointer.
13706 *	fw:	firmware dump context pointer.
13707 *
13708 * Returns:
13709 *	ql local function return status code.
13710 *
13711 * Context:
13712 *	Interrupt or Kernel context, no mailbox commands allowed.
13713 */
13714static int
13715ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13716{
13717	uint32_t	*reg32;
13718	void		*bp;
13719	clock_t		timer;
13720	int		rval = QL_SUCCESS;
13721
13722	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13723
13724	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
13725
13726	/* Pause RISC. */
13727	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13728		/* Disable ISP interrupts. */
13729		WRT16_IO_REG(ha, ictrl, 0);
13730
13731		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13732		for (timer = 30000;
13733		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13734		    rval == QL_SUCCESS; timer--) {
13735			if (timer) {
13736				drv_usecwait(100);
13737				if (timer % 10000 == 0) {
13738					EL(ha, "risc pause %d\n", timer);
13739				}
13740			} else {
13741				EL(ha, "risc pause timeout\n");
13742				rval = QL_FUNCTION_TIMEOUT;
13743			}
13744		}
13745	}
13746
13747	if (rval == QL_SUCCESS) {
13748
13749		/* Host Interface registers */
13750
13751		/* HostRisc registers. */
13752		WRT32_IO_REG(ha, io_base_addr, 0x7000);
13753		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13754		    16, 32);
13755		WRT32_IO_REG(ha, io_base_addr, 0x7010);
13756		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13757
13758		/* PCIe registers. */
13759		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
13760		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
13761		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
13762		    3, 32);
13763		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
13764		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
13765
13766		/* Host interface registers. */
13767		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13768		    sizeof (fw->host_reg) / 4, 32);
13769
13770		/* Disable ISP interrupts. */
13771
13772		WRT32_IO_REG(ha, ictrl, 0);
13773		RD32_IO_REG(ha, ictrl);
13774		ADAPTER_STATE_LOCK(ha);
13775		ha->flags &= ~INTERRUPTS_ENABLED;
13776		ADAPTER_STATE_UNLOCK(ha);
13777
13778		/* Shadow registers. */
13779
13780		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13781		RD32_IO_REG(ha, io_base_addr);
13782
13783		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13784		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13785		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13786		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13787
13788		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13789		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13790		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13791		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13792
13793		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13794		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13795		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13796		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13797
13798		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13799		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13800		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13801		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13802
13803		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13804		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13805		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13806		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13807
13808		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13809		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13810		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13811		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13812
13813		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13814		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13815		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13816		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13817
13818		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13819		WRT_REG_DWORD(ha, reg32, 0xB0700000);
13820		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13821		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
13822
13823		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13824		WRT_REG_DWORD(ha, reg32, 0xB0800000);
13825		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13826		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
13827
13828		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13829		WRT_REG_DWORD(ha, reg32, 0xB0900000);
13830		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13831		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
13832
13833		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13834		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
13835		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13836		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
13837
13838		/* RISC I/O register. */
13839
13840		WRT32_IO_REG(ha, io_base_addr, 0x0010);
13841		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
13842		    1, 32);
13843
13844		/* Mailbox registers. */
13845
13846		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13847		    sizeof (fw->mailbox_reg) / 2, 16);
13848
13849		/* Transfer sequence registers. */
13850
13851		/* XSEQ GP */
13852		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13853		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13854		    16, 32);
13855		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13856		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13857		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13858		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13859		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13860		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13861		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13862		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13863		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13864		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13865		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13866		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13867		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13868		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13869
13870		/* XSEQ-0 */
13871		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
13872		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13873		    16, 32);
13874		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
13875		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13876		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13877		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13878
13879		/* XSEQ-1 */
13880		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13881		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13882		    16, 32);
13883
13884		/* Receive sequence registers. */
13885
13886		/* RSEQ GP */
13887		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13888		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13889		    16, 32);
13890		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13891		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13892		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13893		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13894		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13895		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13896		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13897		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13898		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13899		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13900		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13901		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13902		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13903		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13904
13905		/* RSEQ-0 */
13906		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
13907		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13908		    16, 32);
13909		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13910		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13911
13912		/* RSEQ-1 */
13913		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13914		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13915		    sizeof (fw->rseq_1_reg) / 4, 32);
13916
13917		/* RSEQ-2 */
13918		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13919		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13920		    sizeof (fw->rseq_2_reg) / 4, 32);
13921
13922		/* Auxiliary sequencer registers. */
13923
13924		/* ASEQ GP */
13925		WRT32_IO_REG(ha, io_base_addr, 0xB000);
13926		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
13927		    16, 32);
13928		WRT32_IO_REG(ha, io_base_addr, 0xB010);
13929		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13930		WRT32_IO_REG(ha, io_base_addr, 0xB020);
13931		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13932		WRT32_IO_REG(ha, io_base_addr, 0xB030);
13933		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13934		WRT32_IO_REG(ha, io_base_addr, 0xB040);
13935		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13936		WRT32_IO_REG(ha, io_base_addr, 0xB050);
13937		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13938		WRT32_IO_REG(ha, io_base_addr, 0xB060);
13939		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13940		WRT32_IO_REG(ha, io_base_addr, 0xB070);
13941		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13942
13943		/* ASEQ-0 */
13944		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
13945		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
13946		    16, 32);
13947		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
13948		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13949
13950		/* ASEQ-1 */
13951		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
13952		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
13953		    16, 32);
13954
13955		/* ASEQ-2 */
13956		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
13957		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
13958		    16, 32);
13959
13960		/* Command DMA registers. */
13961
13962		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13963		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13964		    sizeof (fw->cmd_dma_reg) / 4, 32);
13965
13966		/* Queues. */
13967
13968		/* RequestQ0 */
13969		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13970		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13971		    8, 32);
13972		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13973
13974		/* ResponseQ0 */
13975		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13976		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13977		    8, 32);
13978		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13979
13980		/* RequestQ1 */
13981		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13982		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13983		    8, 32);
13984		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13985
13986		/* Transmit DMA registers. */
13987
13988		/* XMT0 */
13989		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13990		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13991		    16, 32);
13992		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13993		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13994
13995		/* XMT1 */
13996		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13997		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13998		    16, 32);
13999		WRT32_IO_REG(ha, io_base_addr, 0x7630);
14000		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14001
14002		/* XMT2 */
14003		WRT32_IO_REG(ha, io_base_addr, 0x7640);
14004		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14005		    16, 32);
14006		WRT32_IO_REG(ha, io_base_addr, 0x7650);
14007		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14008
14009		/* XMT3 */
14010		WRT32_IO_REG(ha, io_base_addr, 0x7660);
14011		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14012		    16, 32);
14013		WRT32_IO_REG(ha, io_base_addr, 0x7670);
14014		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14015
14016		/* XMT4 */
14017		WRT32_IO_REG(ha, io_base_addr, 0x7680);
14018		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14019		    16, 32);
14020		WRT32_IO_REG(ha, io_base_addr, 0x7690);
14021		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14022
14023		/* XMT Common */
14024		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14025		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14026		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14027
14028		/* Receive DMA registers. */
14029
14030		/* RCVThread0 */
14031		WRT32_IO_REG(ha, io_base_addr, 0x7700);
14032		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14033		    ha->iobase + 0xC0, 16, 32);
14034		WRT32_IO_REG(ha, io_base_addr, 0x7710);
14035		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14036
14037		/* RCVThread1 */
14038		WRT32_IO_REG(ha, io_base_addr, 0x7720);
14039		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14040		    ha->iobase + 0xC0, 16, 32);
14041		WRT32_IO_REG(ha, io_base_addr, 0x7730);
14042		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14043
14044		/* RISC registers. */
14045
14046		/* RISC GP */
14047		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14048		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14049		    16, 32);
14050		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14051		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14052		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14053		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14054		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14055		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14056		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14057		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14058		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14059		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14060		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14061		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14062		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14063		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14064
14065		/* Local memory controller (LMC) registers. */
14066
14067		/* LMC */
14068		WRT32_IO_REG(ha, io_base_addr, 0x3000);
14069		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14070		    16, 32);
14071		WRT32_IO_REG(ha, io_base_addr, 0x3010);
14072		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14073		WRT32_IO_REG(ha, io_base_addr, 0x3020);
14074		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14075		WRT32_IO_REG(ha, io_base_addr, 0x3030);
14076		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14077		WRT32_IO_REG(ha, io_base_addr, 0x3040);
14078		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14079		WRT32_IO_REG(ha, io_base_addr, 0x3050);
14080		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14081		WRT32_IO_REG(ha, io_base_addr, 0x3060);
14082		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14083		WRT32_IO_REG(ha, io_base_addr, 0x3070);
14084		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14085
14086		/* Fibre Protocol Module registers. */
14087
14088		/* FPM hardware */
14089		WRT32_IO_REG(ha, io_base_addr, 0x4000);
14090		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14091		    16, 32);
14092		WRT32_IO_REG(ha, io_base_addr, 0x4010);
14093		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14094		WRT32_IO_REG(ha, io_base_addr, 0x4020);
14095		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14096		WRT32_IO_REG(ha, io_base_addr, 0x4030);
14097		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14098		WRT32_IO_REG(ha, io_base_addr, 0x4040);
14099		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14100		WRT32_IO_REG(ha, io_base_addr, 0x4050);
14101		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14102		WRT32_IO_REG(ha, io_base_addr, 0x4060);
14103		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14104		WRT32_IO_REG(ha, io_base_addr, 0x4070);
14105		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14106		WRT32_IO_REG(ha, io_base_addr, 0x4080);
14107		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14108		WRT32_IO_REG(ha, io_base_addr, 0x4090);
14109		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14110		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14111		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14112		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14113		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14114
14115		/* Frame Buffer registers. */
14116
14117		/* FB hardware */
14118		if (CFG_IST(ha, CFG_CTRL_81XX)) {
14119			WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14120		} else {
14121			WRT32_IO_REG(ha, io_base_addr, 0x6000);
14122		}
14123		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14124		    16, 32);
14125
14126		if (CFG_IST(ha, CFG_CTRL_81XX)) {
14127			WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14128		} else {
14129			WRT32_IO_REG(ha, io_base_addr, 0x6010);
14130		}
14131		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14132
14133		WRT32_IO_REG(ha, io_base_addr, 0x6020);
14134		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14135		WRT32_IO_REG(ha, io_base_addr, 0x6030);
14136		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14137		WRT32_IO_REG(ha, io_base_addr, 0x6040);
14138		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14139		WRT32_IO_REG(ha, io_base_addr, 0x6100);
14140		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14141		WRT32_IO_REG(ha, io_base_addr, 0x6130);
14142		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14143		WRT32_IO_REG(ha, io_base_addr, 0x6150);
14144		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14145		WRT32_IO_REG(ha, io_base_addr, 0x6170);
14146		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14147		WRT32_IO_REG(ha, io_base_addr, 0x6190);
14148		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14149		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14150		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14151
14152		if (CFG_IST(ha, CFG_CTRL_81XX)) {
14153			WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14154		} else {
14155			WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14156		}
14157		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14158	}
14159
14160	/* Get the request queue */
14161	if (rval == QL_SUCCESS) {
14162		uint32_t	cnt;
14163		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
14164
14165		/* Sync DMA buffer. */
14166		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14167		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14168		    DDI_DMA_SYNC_FORKERNEL);
14169
14170		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14171			fw->req_q[cnt] = *w32++;
14172			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14173		}
14174	}
14175
	/* Get the response queue */
14177	if (rval == QL_SUCCESS) {
14178		uint32_t	cnt;
14179		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
14180
14181		/* Sync DMA buffer. */
14182		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
14183		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14184		    DDI_DMA_SYNC_FORKERNEL);
14185
14186		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14187			fw->rsp_q[cnt] = *w32++;
14188			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14189		}
14190	}
14191
14192	/* Reset RISC. */
14193
14194	ql_reset_chip(ha);
14195
14196	/* Memory. */
14197
14198	if (rval == QL_SUCCESS) {
14199		/* Code RAM. */
14200		rval = ql_read_risc_ram(ha, 0x20000,
14201		    sizeof (fw->code_ram) / 4, fw->code_ram);
14202	}
14203	if (rval == QL_SUCCESS) {
14204		/* External Memory. */
14205		rval = ql_read_risc_ram(ha, 0x100000,
14206		    ha->fw_ext_memory_size / 4, fw->ext_mem);
14207	}
14208
14209	/* Get the FC event trace buffer */
14210	if (rval == QL_SUCCESS) {
14211		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14212		    (ha->fwfcetracebuf.bp != NULL)) {
14213			uint32_t	cnt;
14214			uint32_t	*w32 = ha->fwfcetracebuf.bp;
14215
14216			/* Sync DMA buffer. */
14217			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14218			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14219
14220			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14221				fw->fce_trace_buf[cnt] = *w32++;
14222			}
14223		}
14224	}
14225
14226	/* Get the extended trace buffer */
14227	if (rval == QL_SUCCESS) {
14228		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14229		    (ha->fwexttracebuf.bp != NULL)) {
14230			uint32_t	cnt;
14231			uint32_t	*w32 = ha->fwexttracebuf.bp;
14232
14233			/* Sync DMA buffer. */
14234			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14235			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14236
14237			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14238				fw->ext_trace_buf[cnt] = *w32++;
14239			}
14240		}
14241	}
14242
14243	if (rval != QL_SUCCESS) {
14244		EL(ha, "failed=%xh\n", rval);
14245	} else {
14246		/*EMPTY*/
14247		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14248	}
14249
14250	return (rval);
14251}
14252
14253/*
14254 * ql_read_risc_ram
14255 *	Reads RISC RAM one word at a time.
14256 *	Risc interrupts must be disabled when this routine is called.
14257 *
14258 * Input:
14259 *	ha:	adapter state pointer.
14260 *	risc_address:	RISC code start address.
14261 *	len:		Number of words.
14262 *	buf:		buffer pointer.
14263 *
14264 * Returns:
14265 *	ql local function return status code.
14266 *
14267 * Context:
14268 *	Interrupt or Kernel context, no mailbox commands allowed.
14269 */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		CFG_IST(ha, CFG_CTRL_242581) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					if (CFG_IST(ha, CFG_CTRL_242581)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
14335
14336/*
14337 * ql_read_regs
14338 *	Reads adapter registers to buffer.
14339 *
14340 * Input:
14341 *	ha:	adapter state pointer.
14342 *	buf:	buffer pointer.
14343 *	reg:	start address.
14344 *	count:	number of registers.
14345 *	wds:	register size.
14346 *
14347 * Context:
14348 *	Interrupt or Kernel context, no mailbox commands allowed.
14349 */
14350static void *
14351ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14352    uint8_t wds)
14353{
14354	uint32_t	*bp32, *reg32;
14355	uint16_t	*bp16, *reg16;
14356	uint8_t		*bp8, *reg8;
14357
14358	switch (wds) {
14359	case 32:
14360		bp32 = buf;
14361		reg32 = reg;
14362		while (count--) {
14363			*bp32++ = RD_REG_DWORD(ha, reg32++);
14364		}
14365		return (bp32);
14366	case 16:
14367		bp16 = buf;
14368		reg16 = reg;
14369		while (count--) {
14370			*bp16++ = RD_REG_WORD(ha, reg16++);
14371		}
14372		return (bp16);
14373	case 8:
14374		bp8 = buf;
14375		reg8 = reg;
14376		while (count--) {
14377			*bp8++ = RD_REG_BYTE(ha, reg8++);
14378		}
14379		return (bp8);
14380	default:
14381		EL(ha, "Unknown word size=%d\n", wds);
14382		return (buf);
14383	}
14384}
14385
/*
 * ql_save_config_regs
 *	Saves interesting PCI configuration space registers as the
 *	"ql-config-space" property on the devinfo node so that
 *	ql_restore_config_regs() can put them back after a power cycle.
 *
 * Input:
 *	dip:	dev_info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_save_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	int			ret;
	ql_config_space_t	chs;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	ASSERT(ha != NULL);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* If the property already exists, config space was saved earlier. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
	    1) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_SUCCESS);
	}

	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_HEADER);
	/* Bridge control exists only in the header type 1 layout. */
	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_BCNTRL);
	}

	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_CACHE_LINESZ);

	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
	    PCI_CONF_LATENCY_TIMER);

	/* Secondary latency timer is also a header type 1 field. */
	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
		    PCI_BCNF_LATENCY_TIMER);
	}

	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);

	/* Stash the snapshot on the devinfo node as a byte array. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
	    (uchar_t *)&chs, sizeof (ql_config_space_t));

	if (ret != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
14451
/*
 * ql_restore_config_regs
 *	Restores the PCI configuration registers previously saved by
 *	ql_save_config_regs() from the "ql-config-space" devinfo
 *	property, then removes the property.
 *
 * Input:
 *	dip:	dev_info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	ASSERT(ha != NULL);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to restore if the saved snapshot property is absent. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	/* Bridge control is only valid for the header type 1 layout. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* Snapshot consumed; drop the property (failure is non-fatal). */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
14515
14516uint8_t
14517ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
14518{
14519	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14520		return (ddi_get8(ha->sbus_config_handle,
14521		    (uint8_t *)(ha->sbus_config_base + off)));
14522	}
14523
14524#ifdef KERNEL_32
14525	return (pci_config_getb(ha->pci_handle, off));
14526#else
14527	return (pci_config_get8(ha->pci_handle, off));
14528#endif
14529}
14530
14531uint16_t
14532ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
14533{
14534	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14535		return (ddi_get16(ha->sbus_config_handle,
14536		    (uint16_t *)(ha->sbus_config_base + off)));
14537	}
14538
14539#ifdef KERNEL_32
14540	return (pci_config_getw(ha->pci_handle, off));
14541#else
14542	return (pci_config_get16(ha->pci_handle, off));
14543#endif
14544}
14545
14546uint32_t
14547ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
14548{
14549	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14550		return (ddi_get32(ha->sbus_config_handle,
14551		    (uint32_t *)(ha->sbus_config_base + off)));
14552	}
14553
14554#ifdef KERNEL_32
14555	return (pci_config_getl(ha->pci_handle, off));
14556#else
14557	return (pci_config_get32(ha->pci_handle, off));
14558#endif
14559}
14560
14561void
14562ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
14563{
14564	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14565		ddi_put8(ha->sbus_config_handle,
14566		    (uint8_t *)(ha->sbus_config_base + off), val);
14567	} else {
14568#ifdef KERNEL_32
14569		pci_config_putb(ha->pci_handle, off, val);
14570#else
14571		pci_config_put8(ha->pci_handle, off, val);
14572#endif
14573	}
14574}
14575
14576void
14577ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
14578{
14579	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14580		ddi_put16(ha->sbus_config_handle,
14581		    (uint16_t *)(ha->sbus_config_base + off), val);
14582	} else {
14583#ifdef KERNEL_32
14584		pci_config_putw(ha->pci_handle, off, val);
14585#else
14586		pci_config_put16(ha->pci_handle, off, val);
14587#endif
14588	}
14589}
14590
14591void
14592ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
14593{
14594	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14595		ddi_put32(ha->sbus_config_handle,
14596		    (uint32_t *)(ha->sbus_config_base + off), val);
14597	} else {
14598#ifdef KERNEL_32
14599		pci_config_putl(ha->pci_handle, off, val);
14600#else
14601		pci_config_put32(ha->pci_handle, off, val);
14602#endif
14603	}
14604}
14605
14606/*
14607 * ql_halt
14608 *	Waits for commands that are running to finish and
14609 *	if they do not, commands are aborted.
14610 *	Finally the adapter is reset.
14611 *
14612 * Input:
14613 *	ha:	adapter state pointer.
14614 *	pwr:	power state.
14615 *
14616 * Context:
14617 *	Kernel context.
14618 */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* 3000 polls x 10000us delay = 30s budget. */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			/* cnt == 0 here means the 30 second wait expired. */
			if (cnt == 0) {
				/*
				 * cnt is reused as the outstanding-command
				 * handle index (slot 0 is not used).
				 */
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * Restart queued IOCBs and rescan from
					 * the first slot whenever new work was
					 * pending.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Abort only commands for this target. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Powering all the way down: mark offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
14703
14704/*
14705 * ql_get_dma_mem
14706 *	Function used to allocate dma memory.
14707 *
14708 * Input:
14709 *	ha:			adapter state pointer.
14710 *	mem:			pointer to dma memory object.
14711 *	size:			size of the request in bytes
14712 *
14713 * Returns:
14714 *	qn local function return status code.
14715 *
14716 * Context:
14717 *	Kernel context.
14718 */
int
ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
    mem_alloc_type_t allocation_type, mem_alignment_t alignment)
{
	int	rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Fill in the request before handing it to ql_alloc_phys(). */
	mem->size = size;
	mem->type = allocation_type;
	mem->cookie_count = 1;		/* single DMA segment */

	/* Translate the symbolic alignment into a byte boundary. */
	switch (alignment) {
	case QL_DMA_DATA_ALIGN:
		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
		break;
	case QL_DMA_RING_ALIGN:
		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
		break;
	default:
		/*
		 * NOTE(review): mem->alignment is left unset here and the
		 * allocation still proceeds — callers are expected to pass
		 * a valid alignment type; confirm this is intentional.
		 */
		EL(ha, "failed, unknown alignment type %x\n", alignment);
		break;
	}

	/* ql_free_phys() tolerates the partially torn-down state on failure. */
	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
		ql_free_phys(ha, mem);
		EL(ha, "failed, alloc_phys=%xh\n", rval);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
14752
14753/*
14754 * ql_alloc_phys
14755 *	Function used to allocate memory and zero it.
14756 *	Memory is below 4 GB.
14757 *
14758 * Input:
14759 *	ha:			adapter state pointer.
14760 *	mem:			pointer to dma memory object.
14761 *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14762 *	mem->cookie_count	number of segments allowed.
14763 *	mem->type		memory allocation type.
14764 *	mem->size		memory size.
14765 *	mem->alignment		memory alignment.
14766 *
14767 * Returns:
 *	ql local function return status code.
14769 *
14770 * Context:
14771 *	Kernel context.
14772 */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Pick 64 or 32-bit DMA attributes per adapter configuration. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	/* Back the handle with memory of the requested flavor. */
	switch (mem->type) {
	case KERNEL_MEM:
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Default access attributes are overridden per type. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			/*
			 * NOTE(review): dma_attr_addr_hi and dmac_notused are
			 * integers compared against NULL (i.e. zero); the
			 * intent appears to be "32-bit attr selected, so the
			 * upper address bits must be zero" — confirm.
			 */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* A NULL bp covers both kmem and DDI allocation failures above. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
14868
14869/*
14870 * ql_free_phys
14871 *	Function used to free physical memory.
14872 *
14873 * Input:
14874 *	ha:	adapter state pointer.
14875 *	mem:	pointer to dma memory object.
14876 *
14877 * Context:
14878 *	Kernel context.
14879 */
14880void
14881ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
14882{
14883	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14884
14885	if (mem != NULL && mem->dma_handle != NULL) {
14886		ql_unbind_dma_buffer(ha, mem);
14887		switch (mem->type) {
14888		case KERNEL_MEM:
14889			if (mem->bp != NULL) {
14890				kmem_free(mem->bp, mem->size);
14891			}
14892			break;
14893		case LITTLE_ENDIAN_DMA:
14894		case BIG_ENDIAN_DMA:
14895		case NO_SWAP_DMA:
14896			if (mem->acc_handle != NULL) {
14897				ddi_dma_mem_free(&mem->acc_handle);
14898				mem->acc_handle = NULL;
14899			}
14900			break;
14901		default:
14902			break;
14903		}
14904		mem->bp = NULL;
14905		ddi_dma_free_handle(&mem->dma_handle);
14906		mem->dma_handle = NULL;
14907	}
14908
14909	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14910}
14911
14912/*
14913 * ql_alloc_dma_resouce.
14914 *	Allocates DMA resource for buffer.
14915 *
14916 * Input:
14917 *	ha:			adapter state pointer.
14918 *	mem:			pointer to dma memory object.
14919 *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14920 *	mem->cookie_count	number of segments allowed.
14921 *	mem->type		memory allocation type.
14922 *	mem->size		memory size.
14923 *	mem->bp			pointer to memory or struct buf
14924 *
14925 * Returns:
14926 *	qn local function return status code.
14927 *
14928 * Context:
14929 *	Kernel context.
14930 */
14931int
14932ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14933{
14934	ddi_dma_attr_t	dma_attr;
14935
14936	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14937
14938	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
14939	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
14940	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
14941
14942	/*
14943	 * Allocate DMA handle for command.
14944	 */
14945	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
14946	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
14947	    DDI_SUCCESS) {
14948		EL(ha, "failed, ddi_dma_alloc_handle\n");
14949		mem->dma_handle = NULL;
14950		return (QL_MEMORY_ALLOC_FAILED);
14951	}
14952
14953	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
14954
14955	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
14956		EL(ha, "failed, bind_dma_buffer\n");
14957		ddi_dma_free_handle(&mem->dma_handle);
14958		mem->dma_handle = NULL;
14959		return (QL_MEMORY_ALLOC_FAILED);
14960	}
14961
14962	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14963
14964	return (QL_SUCCESS);
14965}
14966
14967/*
14968 * ql_free_dma_resource
14969 *	Frees DMA resources.
14970 *
14971 * Input:
14972 *	ha:		adapter state pointer.
14973 *	mem:		pointer to dma memory object.
14974 *	mem->dma_handle	DMA memory handle.
14975 *
14976 * Context:
14977 *	Kernel context.
14978 */
void
ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* ql_free_phys() also unbinds the buffer and frees the handle. */
	ql_free_phys(ha, mem);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
14988
14989/*
14990 * ql_bind_dma_buffer
14991 *	Binds DMA buffer.
14992 *
14993 * Input:
14994 *	ha:			adapter state pointer.
14995 *	mem:			pointer to dma memory object.
14996 *	sleep:			KM_SLEEP or KM_NOSLEEP.
14997 *	mem->dma_handle		DMA memory handle.
14998 *	mem->cookie_count	number of segments allowed.
14999 *	mem->type		memory allocation type.
15000 *	mem->size		memory size.
15001 *	mem->bp			pointer to memory or struct buf
15002 *
15003 * Returns:
15004 *	mem->cookies		pointer to list of cookies.
15005 *	mem->cookie_count	number of cookies.
15006 *	status			success = DDI_DMA_MAPPED
15007 *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15008 *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15009 *				DDI_DMA_TOOBIG
15010 *
15011 * Context:
15012 *	Kernel context.
15013 */
15014static int
15015ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15016{
15017	int			rval;
15018	ddi_dma_cookie_t	*cookiep;
15019	uint32_t		cnt = mem->cookie_count;
15020
15021	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15022
15023	if (mem->type == STRUCT_BUF_MEMORY) {
15024		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15025		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15026		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15027	} else {
15028		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15029		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
15030		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15031		    &mem->cookie_count);
15032	}
15033
15034	if (rval == DDI_DMA_MAPPED) {
15035		if (mem->cookie_count > cnt) {
15036			(void) ddi_dma_unbind_handle(mem->dma_handle);
15037			EL(ha, "failed, cookie_count %d > %d\n",
15038			    mem->cookie_count, cnt);
15039			rval = DDI_DMA_TOOBIG;
15040		} else {
15041			if (mem->cookie_count > 1) {
15042				if (mem->cookies = kmem_zalloc(
15043				    sizeof (ddi_dma_cookie_t) *
15044				    mem->cookie_count, sleep)) {
15045					*mem->cookies = mem->cookie;
15046					cookiep = mem->cookies;
15047					for (cnt = 1; cnt < mem->cookie_count;
15048					    cnt++) {
15049						ddi_dma_nextcookie(
15050						    mem->dma_handle,
15051						    ++cookiep);
15052					}
15053				} else {
15054					(void) ddi_dma_unbind_handle(
15055					    mem->dma_handle);
15056					EL(ha, "failed, kmem_zalloc\n");
15057					rval = DDI_DMA_NORESOURCES;
15058				}
15059			} else {
15060				/*
15061				 * It has been reported that dmac_size at times
15062				 * may be incorrect on sparc machines so for
15063				 * sparc machines that only have one segment
15064				 * use the buffer size instead.
15065				 */
15066				mem->cookies = &mem->cookie;
15067				mem->cookies->dmac_size = mem->size;
15068			}
15069		}
15070	}
15071
15072	if (rval != DDI_DMA_MAPPED) {
15073		EL(ha, "failed=%xh\n", rval);
15074	} else {
15075		/*EMPTY*/
15076		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15077	}
15078
15079	return (rval);
15080}
15081
15082/*
15083 * ql_unbind_dma_buffer
15084 *	Unbinds DMA buffer.
15085 *
15086 * Input:
15087 *	ha:			adapter state pointer.
15088 *	mem:			pointer to dma memory object.
15089 *	mem->dma_handle		DMA memory handle.
15090 *	mem->cookies		pointer to cookie list.
15091 *	mem->cookie_count	number of cookies.
15092 *
15093 * Context:
15094 *	Kernel context.
15095 */
15096/* ARGSUSED */
15097static void
15098ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15099{
15100	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15101
15102	(void) ddi_dma_unbind_handle(mem->dma_handle);
15103	if (mem->cookie_count > 1) {
15104		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15105		    mem->cookie_count);
15106		mem->cookies = NULL;
15107	}
15108	mem->cookie_count = 0;
15109
15110	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15111}
15112
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend: claims mailbox ownership,
 *	drains/returns outstanding commands via ql_halt(), and disables
 *	ISP interrupts before releasing mailbox ownership again.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS, or QL_FUNCTION_TIMEOUT if the mailbox never became
 *	free within the wait window.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 30 seconds from now */
		/* NOTE(review): code waits 32s, not 30 — confirm intent. */
		timer = ddi_get_lbolt();
		timer += 32 * drv_usectohz(1000000);
		if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer) == -1) {
			/* cv_timedwait timed out; mailbox never freed. */

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Wait for commands the ISP still owns to complete. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15184
15185/*
15186 * ql_add_link_b
15187 *	Add link to the end of the chain.
15188 *
15189 * Input:
15190 *	head = Head of link list.
15191 *	link = link to be added.
15192 *	LOCK must be already obtained.
15193 *
15194 * Context:
15195 *	Interrupt or Kernel context, no mailbox commands allowed.
15196 */
15197void
15198ql_add_link_b(ql_head_t *head, ql_link_t *link)
15199{
15200	ASSERT(link->base_address != NULL);
15201
15202	/* at the end there isn't a next */
15203	link->next = NULL;
15204
15205	if ((link->prev = head->last) == NULL) {
15206		head->first = link;
15207	} else {
15208		head->last->next = link;
15209	}
15210
15211	head->last = link;
15212	link->head = head;	/* the queue we're on */
15213}
15214
15215/*
15216 * ql_add_link_t
15217 *	Add link to the beginning of the chain.
15218 *
15219 * Input:
15220 *	head = Head of link list.
15221 *	link = link to be added.
15222 *	LOCK must be already obtained.
15223 *
15224 * Context:
15225 *	Interrupt or Kernel context, no mailbox commands allowed.
15226 */
15227void
15228ql_add_link_t(ql_head_t *head, ql_link_t *link)
15229{
15230	ASSERT(link->base_address != NULL);
15231
15232	link->prev = NULL;
15233
15234	if ((link->next = head->first) == NULL)	{
15235		head->last = link;
15236	} else {
15237		head->first->prev = link;
15238	}
15239
15240	head->first = link;
15241	link->head = head;	/* the queue we're on */
15242}
15243
15244/*
15245 * ql_remove_link
15246 *	Remove a link from the chain.
15247 *
15248 * Input:
15249 *	head = Head of link list.
15250 *	link = link to be removed.
15251 *	LOCK must be already obtained.
15252 *
15253 * Context:
15254 *	Interrupt or Kernel context, no mailbox commands allowed.
15255 */
15256void
15257ql_remove_link(ql_head_t *head, ql_link_t *link)
15258{
15259	ASSERT(link->base_address != NULL);
15260
15261	if (link->prev != NULL) {
15262		if ((link->prev->next = link->next) == NULL) {
15263			head->last = link->prev;
15264		} else {
15265			link->next->prev = link->prev;
15266		}
15267	} else if ((head->first = link->next) == NULL) {
15268		head->last = NULL;
15269	} else {
15270		head->first->prev = NULL;
15271	}
15272
15273	/* not on a queue any more */
15274	link->prev = link->next = NULL;
15275	link->head = NULL;
15276}
15277
15278/*
15279 * ql_chg_endian
15280 *	Change endianess of byte array.
15281 *
15282 * Input:
15283 *	buf = array pointer.
15284 *	size = size of array in bytes.
15285 *
15286 * Context:
15287 *	Interrupt or Kernel context, no mailbox commands allowed.
15288 */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo, hi;
	uint8_t	tmp;

	/*
	 * Reverse the byte array in place by swapping symmetric
	 * pairs, working inward from both ends.  For size == 0 the
	 * loop body never runs (size / 2 == 0), so the wrapped
	 * initial value of 'hi' is never used.
	 */
	for (lo = 0, hi = size - 1; lo < size / 2; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
15304
15305/*
15306 * ql_bstr_to_dec
15307 *	Convert decimal byte string to number.
15308 *
15309 * Input:
15310 *	s:	byte string pointer.
 *	ans:	integer pointer for number.
15312 *	size:	number of ascii bytes.
15313 *
15314 * Returns:
15315 *	success = number of ascii bytes processed.
15316 *
15317 * Context:
15318 *	Kernel/Interrupt context.
15319 */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	uint32_t	digits = size;
	uint32_t	weight;
	uint32_t	pos;
	int		processed = 0;
	char		*p;

	/* When no length is given, count the leading decimal digits. */
	if (digits == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			digits++;
		}
	}

	/*
	 * Accumulate most-significant digit first: each digit is
	 * scaled by 10^(remaining digits - 1).
	 */
	*ans = 0;
	while (*s != '\0' && digits != 0) {
		if (*s < '0' || *s > '9') {
			/* Non-digit terminates the conversion. */
			break;
		}

		for (weight = 1, pos = 1; pos < digits; pos++) {
			weight *= 10;
		}
		*ans += (uint32_t)(*s++ - '0') * weight;

		digits--;
		processed++;
	}

	return (processed);
}
15349
15350/*
15351 * ql_delay
15352 *	Calls delay routine if threads are not suspended, otherwise, busy waits
15353 *	Minimum = 1 tick = 10ms
15354 *
15355 * Input:
15356 *	dly = delay time in microseconds.
15357 *
15358 * Context:
15359 *	Kernel or Interrupt context, no mailbox commands allowed.
15360 */
15361void
15362ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15363{
15364	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15365		drv_usecwait(usecs);
15366	} else {
15367		delay(drv_usectohz(usecs));
15368	}
15369}
15370
15371/*
 * ql_stall_driver
15373 *	Stalls one or all driver instances, waits for 30 seconds.
15374 *
15375 * Input:
15376 *	ha:		adapter state pointer or NULL for all.
15377 *	options:	BIT_0 --> leave driver stalled on exit if
15378 *				  failed.
15379 *
15380 * Returns:
15381 *	ql local function return status code.
15382 *
15383 * Context:
15384 *	Kernel context.
15385 */
15386int
15387ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15388{
15389	ql_link_t		*link;
15390	ql_adapter_state_t	*ha2;
15391	uint32_t		timer;
15392
15393	QL_PRINT_3(CE_CONT, "started\n");
15394
15395	/* Wait for 30 seconds for daemons unstall. */
15396	timer = 3000;
15397	link = ha == NULL ? ql_hba.first : &ha->hba;
15398	while (link != NULL && timer) {
15399		ha2 = link->base_address;
15400
15401		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15402
15403		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15404		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15405		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15406		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15407			link = ha == NULL ? link->next : NULL;
15408			continue;
15409		}
15410
15411		ql_delay(ha, 10000);
15412		timer--;
15413		link = ha == NULL ? ql_hba.first : &ha->hba;
15414	}
15415
15416	if (ha2 != NULL && timer == 0) {
15417		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15418		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15419		    "unstalled"));
15420		if (options & BIT_0) {
15421			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15422		}
15423		return (QL_FUNCTION_TIMEOUT);
15424	}
15425
15426	QL_PRINT_3(CE_CONT, "done\n");
15427
15428	return (QL_SUCCESS);
15429}
15430
15431/*
15432 * ql_restart_driver
15433 *	Restarts one or all driver instances.
15434 *
15435 * Input:
15436 *	ha:	adapter state pointer or NULL for all.
15437 *
15438 * Context:
15439 *	Kernel context.
15440 */
15441void
15442ql_restart_driver(ql_adapter_state_t *ha)
15443{
15444	ql_link_t		*link;
15445	ql_adapter_state_t	*ha2;
15446	uint32_t		timer;
15447
15448	QL_PRINT_3(CE_CONT, "started\n");
15449
15450	/* Tell all daemons to unstall. */
15451	link = ha == NULL ? ql_hba.first : &ha->hba;
15452	while (link != NULL) {
15453		ha2 = link->base_address;
15454
15455		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15456
15457		link = ha == NULL ? link->next : NULL;
15458	}
15459
15460	/* Wait for 30 seconds for all daemons unstall. */
15461	timer = 3000;
15462	link = ha == NULL ? ql_hba.first : &ha->hba;
15463	while (link != NULL && timer) {
15464		ha2 = link->base_address;
15465
15466		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15467		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15468		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
15469			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
15470			    ha2->instance, ha2->vp_index);
15471			ql_restart_queues(ha2);
15472			link = ha == NULL ? link->next : NULL;
15473			continue;
15474		}
15475
15476		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
15477		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
15478
15479		ql_delay(ha, 10000);
15480		timer--;
15481		link = ha == NULL ? ql_hba.first : &ha->hba;
15482	}
15483
15484	QL_PRINT_3(CE_CONT, "done\n");
15485}
15486
15487/*
15488 * ql_setup_interrupts
15489 *	Sets up interrupts based on the HBA's and platform's
15490 *	capabilities (e.g., legacy / MSI / FIXED).
15491 *
15492 * Input:
15493 *	ha = adapter state pointer.
15494 *
15495 * Returns:
15496 *	DDI_SUCCESS or DDI_FAILURE.
15497 *
15498 * Context:
15499 *	Kernel context.
15500 */
15501static int
15502ql_setup_interrupts(ql_adapter_state_t *ha)
15503{
15504	int32_t		rval = DDI_FAILURE;
15505	int32_t		i;
15506	int32_t		itypes = 0;
15507
15508	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15509
15510	/*
15511	 * The Solaris Advanced Interrupt Functions (aif) are only
15512	 * supported on s10U1 or greater.
15513	 */
15514	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
15515		EL(ha, "interrupt framework is not supported or is "
15516		    "disabled, using legacy\n");
15517		return (ql_legacy_intr(ha));
15518	} else if (ql_os_release_level == 10) {
15519		/*
15520		 * See if the advanced interrupt functions (aif) are
15521		 * in the kernel
15522		 */
15523		void	*fptr = (void *)&ddi_intr_get_supported_types;
15524
15525		if (fptr == NULL) {
15526			EL(ha, "aif is not supported, using legacy "
15527			    "interrupts (rev)\n");
15528			return (ql_legacy_intr(ha));
15529		}
15530	}
15531
15532	/* See what types of interrupts this HBA and platform support */
15533	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
15534	    DDI_SUCCESS) {
15535		EL(ha, "get supported types failed, rval=%xh, "
15536		    "assuming FIXED\n", i);
15537		itypes = DDI_INTR_TYPE_FIXED;
15538	}
15539
15540	EL(ha, "supported types are: %xh\n", itypes);
15541
15542	if ((itypes & DDI_INTR_TYPE_MSIX) &&
15543	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
15544		EL(ha, "successful MSI-X setup\n");
15545	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
15546	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
15547		EL(ha, "successful MSI setup\n");
15548	} else {
15549		rval = ql_setup_fixed(ha);
15550	}
15551
15552	if (rval != DDI_SUCCESS) {
15553		EL(ha, "failed, aif, rval=%xh\n", rval);
15554	} else {
15555		/*EMPTY*/
15556		QL_PRINT_3(CE_CONT, "(%d): done\n");
15557	}
15558
15559	return (rval);
15560}
15561
15562/*
15563 * ql_setup_msi
15564 *	Set up aif MSI interrupts
15565 *
15566 * Input:
15567 *	ha = adapter state pointer.
15568 *
15569 * Returns:
15570 *	DDI_SUCCESS or DDI_FAILURE.
15571 *
15572 * Context:
15573 *	Kernel context.
15574 */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};	/* only slot [0] is used */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	/* KM_SLEEP allocation cannot fail; no NULL check needed. */
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Flag set before alloc so ql_release_intr cleans up on failure. */
	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes (must exist before interrupts are enabled). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15681
15682/*
15683 * ql_setup_msix
15684 *	Set up aif MSI-X interrupts
15685 *
15686 * Input:
15687 *	ha = adapter state pointer.
15688 *
15689 * Returns:
15690 *	DDI_SUCCESS or DDI_FAILURE.
15691 *
15692 * Context:
15693 *	Kernel context.
15694 */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors
	 *
	 * NOTE(review): config-space offset differs per chip family
	 * (0x7e for 2422, 0xa2 otherwise); low 10 bits presumably
	 * encode "table size - 1" per the PCI MSI-X capability —
	 * confirm against the PCI spec / chip manual.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/*
	 * Allocate space for interrupt handles; sized by hwvect (not
	 * count) so the sparc dup path below has room for every h/w
	 * vector.
	 */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	/* NOTE(review): KM_SLEEP never returns NULL; check is dead code. */
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Flag set before alloc so ql_release_intr cleans up on failure. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector index is the handler arg. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes (must exist before interrupts are enabled). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15860
15861/*
15862 * ql_setup_fixed
15863 *	Sets up aif FIXED interrupts
15864 *
15865 * Input:
15866 *	ha = adapter state pointer.
15867 *
15868 * Returns:
15869 *	DDI_SUCCESS or DDI_FAILURE.
15870 *
15871 * Context:
15872 *	Kernel context.
15873 */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Flag set before alloc so ql_release_intr cleans up on failure. */
	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	/* KM_SLEEP allocation cannot fail; no NULL check needed. */
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts; STRICT requires all of them or none. */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector index is the handler arg. */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes (must exist before interrupts are enabled). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts (fixed interrupts have no block capability). */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15950
15951/*
15952 * ql_disable_intr
15953 *	Disables interrupts
15954 *
15955 * Input:
15956 *	ha = adapter state pointer.
15957 *
15958 * Returns:
15959 *
15960 * Context:
15961 *	Kernel context.
15962 */
15963static void
15964ql_disable_intr(ql_adapter_state_t *ha)
15965{
15966	uint32_t	i, rval;
15967
15968	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15969
15970	if (!(ha->iflags & IFLG_INTR_AIF)) {
15971
15972		/* Disable legacy interrupts */
15973		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
15974
15975	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
15976	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
15977
15978		/* Remove AIF block interrupts (MSI) */
15979		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
15980		    != DDI_SUCCESS) {
15981			EL(ha, "failed intr block disable, rval=%x\n", rval);
15982		}
15983
15984	} else {
15985
15986		/* Remove AIF non-block interrupts (fixed).  */
15987		for (i = 0; i < ha->intr_cnt; i++) {
15988			if ((rval = ddi_intr_disable(ha->htable[i])) !=
15989			    DDI_SUCCESS) {
15990				EL(ha, "failed intr disable, intr#=%xh, "
15991				    "rval=%xh\n", i, rval);
15992			}
15993		}
15994	}
15995
15996	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15997}
15998
15999/*
16000 * ql_release_intr
16001 *	Releases aif legacy interrupt resources
16002 *
16003 * Input:
16004 *	ha = adapter state pointer.
16005 *
16006 * Returns:
16007 *
16008 * Context:
16009 *	Kernel context.
16010 */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to do for legacy interrupts. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/*
		 * Walk every allocated handle slot.  hsize may cover more
		 * slots than intr_cnt (MSI-X duplicates on sparc), so the
		 * slot count is derived from hsize, not intr_cnt.
		 */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				/* Slot was never populated (early failure). */
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/* Only the first intr_cnt slots had handlers added. */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all interrupt bookkeeping. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16052
16053/*
16054 * ql_legacy_intr
16055 *	Sets up legacy interrupts.
16056 *
 *	NB: Only to be used if AIF (Advanced Interrupt Framework)
 *	    is NOT in the kernel.
16059 *
16060 * Input:
16061 *	ha = adapter state pointer.
16062 *
16063 * Returns:
16064 *	DDI_SUCCESS or DDI_FAILURE.
16065 *
16066 * Context:
16067 *	Kernel context.
16068 */
16069static int
16070ql_legacy_intr(ql_adapter_state_t *ha)
16071{
16072	int	rval = DDI_SUCCESS;
16073
16074	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16075
16076	/* Setup mutexes */
16077	if (ql_init_mutex(ha) != DDI_SUCCESS) {
16078		EL(ha, "failed, mutex init\n");
16079		return (DDI_FAILURE);
16080	}
16081
16082	/* Setup standard/legacy interrupt handler */
16083	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16084	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16085		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16086		    QL_NAME, ha->instance);
16087		ql_destroy_mutex(ha);
16088		rval = DDI_FAILURE;
16089	}
16090
16091	if (rval == DDI_SUCCESS) {
16092		ha->iflags |= IFLG_INTR_LEGACY;
16093		EL(ha, "using legacy interrupts\n");
16094	}
16095
16096	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16097
16098	return (rval);
16099}
16100
16101/*
16102 * ql_init_mutex
16103 *	Initializes mutex's
16104 *
16105 * Input:
16106 *	ha = adapter state pointer.
16107 *
16108 * Returns:
16109 *	DDI_SUCCESS or DDI_FAILURE.
16110 *
16111 * Context:
16112 *	Kernel context.
16113 */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Pick the interrupt cookie passed to every mutex_init(): the
	 * interrupt priority for AIF, otherwise the iblock cookie from
	 * the legacy framework.
	 */
	if (ha->iflags & IFLG_INTR_AIF) {
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16178
16179/*
16180 * ql_destroy_mutex
16181 *	Destroys mutex's
16182 *
16183 * Input:
16184 *	ha = adapter state pointer.
16185 *
16186 * Returns:
16187 *
16188 * Context:
16189 *	Kernel context.
16190 */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Tear down in the reverse order of ql_init_mutex(). */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16214
16215/*
16216 * ql_fwmodule_resolve
16217 *	Loads and resolves external firmware module and symbols
16218 *
16219 * Input:
16220 *	ha:		adapter state pointer.
16221 *
16222 * Returns:
16223 *	ql local function return status code:
16224 *		QL_SUCCESS - external f/w module module and symbols resolved
16225 *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16226 *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16227 *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16228 * Context:
16229 *	Kernel context.
16230 *
16231 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
16232 * could switch to a tighter scope around acutal download (and add an extra
16233 * ddi_modopen for module opens that occur before root is mounted).
16234 *
16235 */
16236uint32_t
16237ql_fwmodule_resolve(ql_adapter_state_t *ha)
16238{
16239	int8_t			module[128];
16240	int8_t			fw_version[128];
16241	uint32_t		rval = QL_SUCCESS;
16242	caddr_t			code, code02;
16243	uint8_t			*p_ucfw;
16244	uint16_t		*p_usaddr, *p_uslen;
16245	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
16246	uint32_t		*p_uiaddr02, *p_uilen02;
16247	struct fw_table		*fwt;
16248	extern struct fw_table	fw_table[];
16249
16250	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16251
16252	if (ha->fw_module != NULL) {
16253		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16254		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16255		    ha->fw_subminor_version);
16256		return (rval);
16257	}
16258
16259	/* make sure the fw_class is in the fw_table of supported classes */
16260	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16261		if (fwt->fw_class == ha->fw_class)
16262			break;			/* match */
16263	}
16264	if (fwt->fw_version == NULL) {
16265		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16266		    "in driver's fw_table", QL_NAME, ha->instance,
16267		    ha->fw_class);
16268		return (QL_FW_NOT_SUPPORTED);
16269	}
16270
16271	/*
16272	 * open the module related to the fw_class
16273	 */
16274	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
16275	    ha->fw_class);
16276
16277	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
16278	if (ha->fw_module == NULL) {
16279		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
16280		    QL_NAME, ha->instance, module);
16281		return (QL_FWMODLOAD_FAILED);
16282	}
16283
16284	/*
16285	 * resolve the fw module symbols, data types depend on fw_class
16286	 */
16287
16288	switch (ha->fw_class) {
16289	case 0x2200:
16290	case 0x2300:
16291	case 0x6322:
16292
16293		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16294		    NULL)) == NULL) {
16295			rval = QL_FWSYM_NOT_FOUND;
16296			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16297		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16298		    "risc_code_addr01", NULL)) == NULL) {
16299			rval = QL_FWSYM_NOT_FOUND;
16300			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16301		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16302		    "risc_code_length01", NULL)) == NULL) {
16303			rval = QL_FWSYM_NOT_FOUND;
16304			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16305		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16306		    "firmware_version", NULL)) == NULL) {
16307			rval = QL_FWSYM_NOT_FOUND;
16308			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16309		}
16310
16311		if (rval == QL_SUCCESS) {
16312			ha->risc_fw[0].code = code;
16313			ha->risc_fw[0].addr = *p_usaddr;
16314			ha->risc_fw[0].length = *p_uslen;
16315
16316			(void) snprintf(fw_version, sizeof (fw_version),
16317			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16318		}
16319		break;
16320
16321	case 0x2400:
16322	case 0x2500:
16323	case 0x8100:
16324
16325		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16326		    NULL)) == NULL) {
16327			rval = QL_FWSYM_NOT_FOUND;
16328			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16329		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16330		    "risc_code_addr01", NULL)) == NULL) {
16331			rval = QL_FWSYM_NOT_FOUND;
16332			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16333		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16334		    "risc_code_length01", NULL)) == NULL) {
16335			rval = QL_FWSYM_NOT_FOUND;
16336			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16337		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16338		    "firmware_version", NULL)) == NULL) {
16339			rval = QL_FWSYM_NOT_FOUND;
16340			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16341		}
16342
16343		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16344		    NULL)) == NULL) {
16345			rval = QL_FWSYM_NOT_FOUND;
16346			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16347		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16348		    "risc_code_addr02", NULL)) == NULL) {
16349			rval = QL_FWSYM_NOT_FOUND;
16350			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16351		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16352		    "risc_code_length02", NULL)) == NULL) {
16353			rval = QL_FWSYM_NOT_FOUND;
16354			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16355		}
16356
16357		if (rval == QL_SUCCESS) {
16358			ha->risc_fw[0].code = code;
16359			ha->risc_fw[0].addr = *p_uiaddr;
16360			ha->risc_fw[0].length = *p_uilen;
16361			ha->risc_fw[1].code = code02;
16362			ha->risc_fw[1].addr = *p_uiaddr02;
16363			ha->risc_fw[1].length = *p_uilen02;
16364
16365			(void) snprintf(fw_version, sizeof (fw_version),
16366			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16367		}
16368		break;
16369
16370	default:
16371		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16372		rval = QL_FW_NOT_SUPPORTED;
16373	}
16374
16375	if (rval != QL_SUCCESS) {
16376		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16377		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16378		if (ha->fw_module != NULL) {
16379			(void) ddi_modclose(ha->fw_module);
16380			ha->fw_module = NULL;
16381		}
16382	} else {
16383		/*
16384		 * check for firmware version mismatch between module and
16385		 * compiled in fw_table version.
16386		 */
16387
16388		if (strcmp(fwt->fw_version, fw_version) != 0) {
16389
16390			/*
16391			 * If f/w / driver version mismatches then
16392			 * return a successful status -- however warn
16393			 * the user that this is NOT recommended.
16394			 */
16395
16396			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16397			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16398			    ha->instance, ha->fw_class, fwt->fw_version,
16399			    fw_version);
16400
16401			ha->cfg_flags |= CFG_FW_MISMATCH;
16402		} else {
16403			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16404		}
16405	}
16406
16407	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16408
16409	return (rval);
16410}
16411
16412/*
16413 * ql_port_state
16414 *	Set the state on all adapter ports.
16415 *
16416 * Input:
16417 *	ha:	parent adapter state pointer.
16418 *	state:	port state.
16419 *	flags:	task daemon flags to set.
16420 *
16421 * Context:
16422 *	Interrupt or Kernel context, no mailbox commands allowed.
16423 */
16424void
16425ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
16426{
16427	ql_adapter_state_t	*vha;
16428
16429	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16430
16431	TASK_DAEMON_LOCK(ha);
16432	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
16433		if (FC_PORT_STATE_MASK(vha->state) != state) {
16434			vha->state = state != FC_STATE_OFFLINE ?
16435			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
16436			vha->task_daemon_flags |= flags;
16437		}
16438	}
16439	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
16440	TASK_DAEMON_UNLOCK(ha);
16441
16442	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16443}
16444
16445/*
16446 * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16447 *
16448 * Input:	Pointer to the adapter state structure.
16449 * Returns:	Success or Failure.
16450 * Context:	Kernel context.
16451 */
16452int
16453ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16454{
16455	int	rval = DDI_SUCCESS;
16456
16457	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16458
16459	ha->el_trace_desc =
16460	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16461
16462	if (ha->el_trace_desc == NULL) {
16463		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16464		    QL_NAME, ha->instance);
16465		rval = DDI_FAILURE;
16466	} else {
16467		ha->el_trace_desc->next		= 0;
16468		ha->el_trace_desc->trace_buffer =
16469		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16470
16471		if (ha->el_trace_desc->trace_buffer == NULL) {
16472			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16473			    QL_NAME, ha->instance);
16474			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16475			rval = DDI_FAILURE;
16476		} else {
16477			ha->el_trace_desc->trace_buffer_size =
16478			    EL_TRACE_BUF_SIZE;
16479			mutex_init(&ha->el_trace_desc->mutex, NULL,
16480			    MUTEX_DRIVER, NULL);
16481		}
16482	}
16483
16484	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16485
16486	return (rval);
16487}
16488
16489/*
16490 * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
16491 *
16492 * Input:	Pointer to the adapter state structure.
16493 * Returns:	Success or Failure.
16494 * Context:	Kernel context.
16495 */
16496int
16497ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
16498{
16499	int	rval = DDI_SUCCESS;
16500
16501	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16502
16503	if (ha->el_trace_desc == NULL) {
16504		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
16505		    QL_NAME, ha->instance);
16506		rval = DDI_FAILURE;
16507	} else {
16508		if (ha->el_trace_desc->trace_buffer != NULL) {
16509			kmem_free(ha->el_trace_desc->trace_buffer,
16510			    ha->el_trace_desc->trace_buffer_size);
16511		}
16512		mutex_destroy(&ha->el_trace_desc->mutex);
16513		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16514	}
16515
16516	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16517
16518	return (rval);
16519}
16520
16521/*
16522 * els_cmd_text	- Return a pointer to a string describing the command
16523 *
16524 * Input:	els_cmd = the els command opcode.
16525 * Returns:	pointer to a string.
16526 * Context:	Kernel context.
16527 */
16528char *
16529els_cmd_text(int els_cmd)
16530{
16531	cmd_table_t *entry = &els_cmd_tbl[0];
16532
16533	return (cmd_text(entry, els_cmd));
16534}
16535
16536/*
16537 * mbx_cmd_text - Return a pointer to a string describing the command
16538 *
16539 * Input:	mbx_cmd = the mailbox command opcode.
16540 * Returns:	pointer to a string.
16541 * Context:	Kernel context.
16542 */
16543char *
16544mbx_cmd_text(int mbx_cmd)
16545{
16546	cmd_table_t *entry = &mbox_cmd_tbl[0];
16547
16548	return (cmd_text(entry, mbx_cmd));
16549}
16550
16551/*
16552 * cmd_text	Return a pointer to a string describing the command
16553 *
16554 * Input:	entry = the command table
16555 *		cmd = the command.
16556 * Returns:	pointer to a string.
16557 * Context:	Kernel context.
16558 */
16559char *
16560cmd_text(cmd_table_t *entry, int cmd)
16561{
16562	for (; entry->cmd != 0; entry++) {
16563		if (entry->cmd == cmd) {
16564			break;
16565		}
16566	}
16567	return (entry->string);
16568}
16569
16570/*
16571 * ql_els_24xx_mbox_cmd_iocb - els request indication.
16572 *
16573 * Input:	ha = adapter state pointer.
16574 *		srb = scsi request block pointer.
16575 *		arg = els passthru entry iocb pointer.
16576 * Returns:
16577 * Context:	Kernel context.
16578 */
16579void
16580ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
16581{
16582	els_descriptor_t	els_desc;
16583
16584	/* Extract the ELS information */
16585	ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
16586
16587	/* Construct the passthru entry */
16588	ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
16589
16590	/* Ensure correct endianness */
16591	ql_isp_els_handle_cmd_endian(ha, srb);
16592}
16593
16594/*
16595 * ql_isp_els_request_map - Extract into an els descriptor the info required
16596 *			    to build an els_passthru iocb from an fc packet.
16597 *
16598 * Input:	ha = adapter state pointer.
16599 *		pkt = fc packet pointer
16600 *		els_desc = els descriptor pointer
16601 * Returns:
16602 * Context:	Kernel context.
16603 */
16604static void
16605ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
16606    els_descriptor_t *els_desc)
16607{
16608	ls_code_t	els;
16609
16610	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16611	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16612
16613	els_desc->els = els.ls_code;
16614
16615	els_desc->els_handle = ha->hba_buf.acc_handle;
16616	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
16617	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
16618	/* if n_port_handle is not < 0x7d use 0 */
16619	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
16620		els_desc->n_port_handle = ha->n_port->n_port_handle;
16621	} else {
16622		els_desc->n_port_handle = 0;
16623	}
16624	els_desc->control_flags = 0;
16625	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
16626	/*
16627	 * Transmit DSD. This field defines the Fibre Channel Frame payload
16628	 * (without the frame header) in system memory.
16629	 */
16630	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
16631	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
16632	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;
16633
16634	els_desc->rsp_byte_count = pkt->pkt_rsplen;
16635	/*
16636	 * Receive DSD. This field defines the ELS response payload buffer
16637	 * for the ISP24xx firmware transferring the received ELS
16638	 * response frame to a location in host memory.
16639	 */
16640	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
16641	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
16642	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
16643}
16644
16645/*
16646 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
16647 * using the els descriptor.
16648 *
16649 * Input:	ha = adapter state pointer.
16650 *		els_desc = els descriptor pointer.
16651 *		els_entry = els passthru entry iocb pointer.
16652 * Returns:
16653 * Context:	Kernel context.
16654 */
16655static void
16656ql_isp_els_request_ctor(els_descriptor_t *els_desc,
16657    els_passthru_entry_t *els_entry)
16658{
16659	uint32_t	*ptr32;
16660
16661	/*
16662	 * Construct command packet.
16663	 */
16664	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
16665	    (uint8_t)ELS_PASSTHRU_TYPE);
16666	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
16667	    els_desc->n_port_handle);
16668	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
16669	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
16670	    (uint32_t)0);
16671	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
16672	    els_desc->els);
16673	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
16674	    els_desc->d_id.b.al_pa);
16675	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
16676	    els_desc->d_id.b.area);
16677	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
16678	    els_desc->d_id.b.domain);
16679	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
16680	    els_desc->s_id.b.al_pa);
16681	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
16682	    els_desc->s_id.b.area);
16683	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
16684	    els_desc->s_id.b.domain);
16685	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
16686	    els_desc->control_flags);
16687	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
16688	    els_desc->rsp_byte_count);
16689	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
16690	    els_desc->cmd_byte_count);
16691	/* Load transmit data segments and count. */
16692	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
16693	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
16694	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
16695	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
16696	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
16697	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
16698	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
16699	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
16700	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
16701}
16702
16703/*
16704 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
16705 *				  in host memory.
16706 *
16707 * Input:	ha = adapter state pointer.
16708 *		srb = scsi request block
16709 * Returns:
16710 * Context:	Kernel context.
16711 */
16712void
16713ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16714{
16715	ls_code_t	els;
16716	fc_packet_t	*pkt;
16717	uint8_t		*ptr;
16718
16719	pkt = srb->pkt;
16720
16721	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16722	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16723
16724	ptr = (uint8_t *)pkt->pkt_cmd;
16725
16726	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16727}
16728
16729/*
16730 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
16731 *				  in host memory.
16732 * Input:	ha = adapter state pointer.
16733 *		srb = scsi request block
16734 * Returns:
16735 * Context:	Kernel context.
16736 */
16737void
16738ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
16739{
16740	ls_code_t	els;
16741	fc_packet_t	*pkt;
16742	uint8_t		*ptr;
16743
16744	pkt = srb->pkt;
16745
16746	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
16747	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
16748
16749	ptr = (uint8_t *)pkt->pkt_resp;
16750	BIG_ENDIAN_32(&els);
16751	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
16752}
16753
16754/*
16755 * ql_isp_els_handle_endian - els requests/responses must be in big endian
16756 *			      in host memory.
16757 * Input:	ha = adapter state pointer.
16758 *		ptr = els request/response buffer pointer.
16759 *		ls_code = els command code.
16760 * Returns:
16761 * Context:	Kernel context.
16762 */
16763void
16764ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
16765{
16766	switch (ls_code) {
16767	case LA_ELS_PLOGI: {
16768		BIG_ENDIAN_32(ptr);	/* Command Code */
16769		ptr += 4;
16770		BIG_ENDIAN_16(ptr);	/* FC-PH version */
16771		ptr += 2;
16772		BIG_ENDIAN_16(ptr);	/* b2b credit */
16773		ptr += 2;
16774		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
16775		ptr += 2;
16776		BIG_ENDIAN_16(ptr);	/* Rcv data size */
16777		ptr += 2;
16778		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16779		ptr += 2;
16780		BIG_ENDIAN_16(ptr);	/* Rel offset */
16781		ptr += 2;
16782		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
16783		ptr += 4;		/* Port Name */
16784		ptr += 8;		/* Node Name */
16785		ptr += 8;		/* Class 1 */
16786		ptr += 16;		/* Class 2 */
16787		ptr += 16;		/* Class 3 */
16788		BIG_ENDIAN_16(ptr);	/* Service options */
16789		ptr += 2;
16790		BIG_ENDIAN_16(ptr);	/* Initiator control */
16791		ptr += 2;
16792		BIG_ENDIAN_16(ptr);	/* Recipient Control */
16793		ptr += 2;
16794		BIG_ENDIAN_16(ptr);	/* Rcv size */
16795		ptr += 2;
16796		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
16797		ptr += 2;
16798		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
16799		ptr += 2;
16800		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
16801		break;
16802	}
16803	case LA_ELS_PRLI: {
16804		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
16805		ptr += 4;		/* Type */
16806		ptr += 2;
16807		BIG_ENDIAN_16(ptr);	/* Flags */
16808		ptr += 2;
16809		BIG_ENDIAN_32(ptr);	/* Originator Process associator  */
16810		ptr += 4;
16811		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
16812		ptr += 4;
16813		BIG_ENDIAN_32(ptr);	/* Flags */
16814		break;
16815	}
16816	default:
16817		EL(ha, "can't handle els code %x\n", ls_code);
16818		break;
16819	}
16820}
16821
16822/*
16823 * ql_n_port_plogi
16824 *	In N port 2 N port topology where an N Port has logged in with the
16825 *	firmware because it has the N_Port login initiative, we send up
16826 *	a plogi by proxy which stimulates the login procedure to continue.
16827 *
16828 * Input:
16829 *	ha = adapter state pointer.
16830 * Returns:
16831 *
16832 * Context:
16833 *	Kernel context.
16834 */
16835static int
16836ql_n_port_plogi(ql_adapter_state_t *ha)
16837{
16838	int		rval;
16839	ql_tgt_t	*tq;
16840	ql_head_t done_q = { NULL, NULL };
16841
16842	rval = QL_SUCCESS;
16843
16844	if (ha->topology & QL_N_PORT) {
16845		/* if we're doing this the n_port_handle must be good */
16846		if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
16847			tq = ql_loop_id_to_queue(ha,
16848			    ha->n_port->n_port_handle);
16849			if (tq != NULL) {
16850				(void) ql_send_plogi(ha, tq, &done_q);
16851			} else {
16852				EL(ha, "n_port_handle = %x, tq = %x\n",
16853				    ha->n_port->n_port_handle, tq);
16854			}
16855		} else {
16856			EL(ha, "n_port_handle = %x, tq = %x\n",
16857			    ha->n_port->n_port_handle, tq);
16858		}
16859		if (done_q.first != NULL) {
16860			ql_done(done_q.first);
16861		}
16862	}
16863	return (rval);
16864}
16865
16866/*
16867 * Compare two WWNs. The NAA is omitted for comparison.
16868 *
16869 * Note particularly that the indentation used in this
16870 * function  isn't according to Sun recommendations. It
16871 * is indented to make reading a bit easy.
16872 *
16873 * Return Values:
16874 *   if first == second return  0
16875 *   if first > second  return  1
16876 *   if first < second  return -1
16877 */
16878int
16879ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
16880{
16881	la_wwn_t t1, t2;
16882	int rval;
16883
16884	EL(ha, "WWPN=%08x%08x\n",
16885	    BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
16886	EL(ha, "WWPN=%08x%08x\n",
16887	    BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
16888	/*
16889	 * Fibre Channel protocol is big endian, so compare
16890	 * as big endian values
16891	 */
16892	t1.i_wwn[0] = BE_32(first->i_wwn[0]);
16893	t1.i_wwn[1] = BE_32(first->i_wwn[1]);
16894
16895	t2.i_wwn[0] = BE_32(second->i_wwn[0]);
16896	t2.i_wwn[1] = BE_32(second->i_wwn[1]);
16897
16898	if (t1.i_wwn[0] == t2.i_wwn[0]) {
16899		if (t1.i_wwn[1] == t2.i_wwn[1]) {
16900			rval = 0;
16901		} else if (t1.i_wwn[1] > t2.i_wwn[1]) {
16902			rval = 1;
16903		} else {
16904			rval = -1;
16905		}
16906	} else {
16907		if (t1.i_wwn[0] > t2.i_wwn[0]) {
16908			rval = 1;
16909		} else {
16910			rval = -1;
16911		}
16912	}
16913	return (rval);
16914}
16915