/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_hw.c 305487 2016-09-06 19:15:44Z davidcs $");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
	uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
		uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static void qla_get_quick_stats(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

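/*
 * Sysctl handler for the "drvr_stats" node. Writing 1 dumps the
 * driver-maintained counters to the console via device_printf():
 * per-SDS-ring interrupt counts, per-Tx-ring packet counts,
 * per-RDS-ring receive counts, the LRO packet/byte totals and, when
 * built with QL_ENABLE_ISCSI_TLV, the iSCSI packet count.
 * Typical usage (the dev.ql.<unit> sysctl prefix is assumed here):
 *	sysctl dev.ql.0.drvr_stats=1
 */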
static int
qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
	uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

		for (i = 0; i < ha->hw.num_sds_rings; i++)
			device_printf(ha->pci_dev,
				"%s: sds_ring[%d] = %p\n", __func__,i,
				(void *)ha->hw.sds[i].intr_count);

		for (i = 0; i < ha->hw.num_tx_rings; i++)
			device_printf(ha->pci_dev,
				"%s: tx[%d] = %p\n", __func__,i,
				(void *)ha->tx_ring[i].count);

		for (i = 0; i < ha->hw.num_rds_rings; i++)
			device_printf(ha->pci_dev,
				"%s: rds_ring[%d] = %p\n", __func__,i,
				(void *)ha->hw.rds[i].count);

		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
			(void *)ha->lro_pkt_count);

		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
			(void *)ha->lro_bytes);

#ifdef QL_ENABLE_ISCSI_TLV
		device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
			(void *)ha->hw.iscsi_pkt_count);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	}
	return (err);
}

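/*
 * Sysctl handler for the "quick_stats" node. Writing 1 invokes
 * qla_get_quick_stats() to collect and print the adapter statistics.
 */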
static int
qla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		qla_get_quick_stats(ha);
	}
	return (err);
}

#ifdef QL_DBG

static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);


	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		(void)QLA_LOCK(ha, __func__, 0);
		qla_stop_pegs(ha);
		QLA_UNLOCK(ha, __func__);
	}

	return err;
}
#endif /* #ifdef QL_DBG */

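/*
 * The "port_cfg" sysctl encodes the requested port configuration as
 * three nibbles (see the help string in ql_hw_add_sysctls()):
 *	bits  0-3 : DCBX	0 = disable, 1 = enable
 *	bits  4-7 : pause mode	0 = none, 1 = standard, 2 = ppm
 *	bits 8-11 : standard-pause direction
 *			0 = xmt and rcv, 1 = xmt only, 2 = rcv only
 * qla_validate_set_port_cfg_bit() returns 0 only when every field is
 * within its legal range; anything else returns -1.
 */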
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

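/*
 * Sysctl handler for "port_cfg". A value that passes
 * qla_validate_set_port_cfg_bit() is merged into the current port
 * configuration (DCBX, pause mode and standard-pause direction) and
 * written back via qla_set_port_config(); any other value simply
 * refreshes the configuration with qla_get_port_config().
 * For example, writing 0x011 (the dev.ql.<unit>.port_cfg sysctl path
 * is assumed here) requests DCBX enabled with standard pause in both
 * directions.
 */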
static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {

                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1){
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1){
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

                err = qla_set_port_config(ha, cfg_bits);
        } else {
                ha = (qla_host_t *)arg1;

                err = qla_get_port_config(ha, &cfg_bits);
        }

qla_sysctl_set_port_cfg_exit:
        return err;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t	dev;

        dev = ha->pci_dev;

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
		ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
		ha->txr_idx, "Tx Ring Used");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_quick_stats, "I", "Quick Statistics");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

	ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
		ha->hw.sds_cidx_thres,
		"Number of SDS entries to process before updating"
		" SDS Ring Consumer Index");

	ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
		ha->hw.rds_pidx_thres,
		"Number of Rcv Rings Entries to post before updating"
		" RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect \n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect \n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if values below "
                        "otherwise Get Port Configuration\n"
                        "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only;\n"
                );

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

	ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
		ha->hw.mdump_active,
		"Minidump retrieval is Active");

	ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
		&ha->hw.mdump_done, ha->hw.mdump_done,
		"Minidump has been done and available for retrieval");

	ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
		"Minidump capture mask");
#ifdef QL_DBG

	ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n" );

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

}

void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "link Up\n");
	} else {
		device_printf(ha->pci_dev, "link Down\n");
	}

	if (ha->hw.flags.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.flags.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	switch (ha->hw.link_speed) {
	case 0x710:
		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
		break;

	case 0x3E8:
		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
		break;

	case 0x64:
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	switch (ha->hw.module_type) {

	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			"(Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
        	ha->hw.dma_buf.flags.sds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
        	ha->hw.dma_buf.flags.rds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
        	ha->hw.dma_buf.flags.tx_ring = 0;
	}
	ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
	uint32_t		i, j, size, tx_ring_size;
	qla_hw_t		*hw;
	qla_hw_tx_cntxt_t	*tx_cntxt;
	uint8_t			*vaddr;
	bus_addr_t		paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

        ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < hw->num_rds_rings; i++) {

		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return 0;

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return -1;
}

#define Q8_MBX_MSEC_DELAY	5000

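/*
 * Name: qla_mbx_cmd
 * Function: Issues a mailbox command to the firmware and collects the
 *	response. The handshake implemented below is:
 *	1. Wait for Q8_HOST_MBOX_CNTRL to read 0 (previous command done);
 *	   up to Q8_MBX_MSEC_DELAY ms, or 1000 polls of 1ms when no_pause.
 *	2. Write the n_hmbox request words into Q8_HOST_MBOX0.. and set
 *	   Q8_HOST_MBOX_CNTRL to 1 to hand the command to the firmware.
 *	3. Poll Q8_FW_MBOX_CNTRL until the low two bits read 1, skipping
 *	   firmware mailbox values in the 0x8000 range (presumably
 *	   asynchronous event notifications rather than the completion).
 *	4. Copy n_fwmbox response words from Q8_FW_MBOX0.., then ack by
 *	   clearing Q8_FW_MBOX_CNTRL and the mailbox interrupt mask.
 *	A timeout in step 1 or 3 flags qla_initiate_recovery and returns
 *	a negative value.
 */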
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	int ret = 0;

	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
		ret = -3;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	while (i) {
		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -1;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);


	i = Q8_MBX_MSEC_DELAY;
	while (i) {
		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -2;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_fwmbox; i++) {
		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
	return (ret);
}

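/*
 * Name: qla_get_nic_partition
 * Function: Issues the Q8_MBX_GET_NIC_PARTITION mailbox command.
 *	On success, *supports_9kb is set from bit 7 of response
 *	mailbox 16 (9K receive buffer support) and *num_rcvq from
 *	bits 31:16 of response mailbox 6; either pointer may be NULL
 *	if the caller does not need that value.
 */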
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
	uint32_t *num_rcvq)
{
	uint32_t *mbox, err;
	device_t dev = ha->pci_dev;

	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

	mbox = ha->hw.mbox;

	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	err = mbox[0] >> 25;

	if (supports_9kb != NULL) {
		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
			*supports_9kb = 1;
		else
			*supports_9kb = 0;
	}

	if (num_rcvq != NULL)
		*num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);

	if ((err != 1) && (err != 0)) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

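/*
 * Name: qla_config_intr_cntxt
 * Function: Creates or deletes 'num_intrs' interrupt contexts in the
 *	firmware, starting at 'start_idx'. On create, the MSI-X vector
 *	index programmed is (start_idx + 1 + i) and the firmware-assigned
 *	interrupt id/source returned for each entry are saved in
 *	ha->hw.intr_id[]/intr_src[]; on delete, the previously saved
 *	interrupt ids are handed back to the firmware.
 */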
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
	uint32_t create)
{
	uint32_t i, err;
	device_t dev = ha->pci_dev;
	q80_config_intr_t *c_intr;
	q80_config_intr_rsp_t *c_intr_rsp;

	c_intr = (q80_config_intr_t *)ha->hw.mbox;
	bzero(c_intr, (sizeof (q80_config_intr_t)));

	c_intr->opcode = Q8_MBX_CONFIG_INTR;

	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
	c_intr->count_version |= Q8_MBX_CMD_VERSION;

	c_intr->nentries = num_intrs;

	for (i = 0; i < num_intrs; i++) {
		if (create) {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
			c_intr->intr[i].msix_index = start_idx + 1 + i;
		} else {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
			c_intr->intr[i].msix_index =
				ha->hw.intr_id[(start_idx + i)];
		}

		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
		(sizeof (q80_config_intr_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
			c_intr_rsp->nentries);

		for (i = 0; i < c_intr_rsp->nentries; i++) {
			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
				__func__, i,
				c_intr_rsp->intr[i].status,
				c_intr_rsp->intr[i].intr_id,
				c_intr_rsp->intr[i].intr_src);
		}

		return (-1);
	}

	for (i = 0; ((i < num_intrs) && create); i++) {
		if (!c_intr_rsp->intr[i].status) {
			ha->hw.intr_id[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_id;
			ha->hw.intr_src[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_src;
		}
	}

	return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
			0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	q80_config_rss_t	*c_rss;
	q80_config_rss_rsp_t	*c_rss_rsp;
	uint32_t		err, i;
	device_t		dev = ha->pci_dev;

	c_rss = (q80_config_rss_t *)ha->hw.mbox;
	bzero(c_rss, (sizeof (q80_config_rss_t)));

	c_rss->opcode = Q8_MBX_CONFIG_RSS;

	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
	c_rss->count_version |= Q8_MBX_CMD_VERSION;

	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

	c_rss->cntxt_id = cntxt_id;

	for (i = 0; i < 5; i++) {
		c_rss->rss_key[i] = rss_key[i];
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
		(sizeof (q80_config_rss_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

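/*
 * Name: qla_set_rss_ind_table
 * Function: Programs 'count' entries of the RSS indirection table for
 *	the given receive context, starting at 'start_idx'. The request
 *	is rejected if it exceeds Q8_RSS_IND_TBL_SIZE entries or runs
 *	past Q8_RSS_IND_TBL_MAX_IDX.
 */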
static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

	if ((count > Q8_RSS_IND_TBL_SIZE) ||
		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
			start_idx, count);
		return (-1);
	}

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

	c_rss_ind->start_idx = start_idx;
	c_rss_ind->end_idx = start_idx + count - 1;
	c_rss_ind->cntxt_id = cntxt_id;
	bcopy(ind_table, c_rss_ind->ind_table, count);

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
	int rcv)
{
	q80_config_intr_coalesc_t	*intrc;
	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
	uint32_t			err, i;
	device_t			dev = ha->pci_dev;

	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
	intrc->count_version |= Q8_MBX_CMD_VERSION;

	if (rcv) {
		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
	} else {
		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
	}

	intrc->cntxt_id = cntxt_id;

	if (tenable) {
		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			intrc->sds_ring_mask |= (1 << i);
		}
		intrc->ms_timeout = 1000;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
		(sizeof (q80_config_intr_coalesc_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return 0;
}


/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
	q80_config_mac_addr_t		*cmac;
	q80_config_mac_addr_rsp_t	*cmac_rsp;
	uint32_t			err;
	device_t			dev = ha->pci_dev;

	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
	bzero(cmac, (sizeof (q80_config_mac_addr_t)));

	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
	cmac->count_version |= Q8_MBX_CMD_VERSION;

	if (add_mac)
		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
	else
		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

	cmac->nmac_entries = 1;
	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
	bcopy(mac_addr, cmac->mac_addr[0].addr, 6);

	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
		(sizeof (q80_config_mac_addr_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: %s failed0\n", __func__,
			(add_mac ? "Add" : "Del"));
		return (-1);
	}
	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: %s "
			"%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
			__func__, (add_mac ? "Add" : "Del"),
			mac_addr[0], mac_addr[1], mac_addr[2],
			mac_addr[3], mac_addr[4], mac_addr[5], err);
		return (-1);
	}

	return 0;
}


/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
	q80_config_mac_rcv_mode_t	*rcv_mode;
	uint32_t			err;
	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
	device_t			dev = ha->pci_dev;

	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

	rcv_mode->mode = mode;

	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return 0;
}

int
ql_set_promisc(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

void
qla_reset_promisc(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

int
ql_set_allmulti(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

void
qla_reset_allmulti(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	device_t		dev;
	q80_set_max_mtu_t	*max_mtu;
	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
	max_mtu->count_version |= Q8_MBX_CMD_VERSION;

	max_mtu->cntxt_id = cntxt_id;
	max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
		(sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

	return 0;
}

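/*
 * Name: qla_link_event_req
 * Function: Asks the firmware (Q8_MBX_LINK_EVENT_REQ with
 *	Q8_LINK_EVENT_CMD_ENABLE_ASYNC) to post asynchronous link state
 *	change notifications for the given receive context.
 */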
static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t		dev;
	q80_link_event_t	*lnk;
	q80_link_event_rsp_t	*lnk_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	lnk = (q80_link_event_t *)ha->hw.mbox;
	bzero(lnk, (sizeof (q80_link_event_t)));

	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
	lnk->count_version |= Q8_MBX_CMD_VERSION;

	lnk->cntxt_id = cntxt_id;
	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

	return 0;
}

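/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware-assisted LRO on the given receive context
 *	for both IPv4 and IPv6 flows, without the destination IP check
 *	(Q8_MBX_FW_LRO_IPV[46]_WO_DST_IP_CHK).
 */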
static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t		dev;
	q80_config_fw_lro_t	*fw_lro;
	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
	bzero(fw_lro, sizeof(q80_config_fw_lro_t));

	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
	fw_lro->count_version |= Q8_MBX_CMD_VERSION;

	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

	fw_lro->cntxt_id = cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
		(sizeof (q80_config_fw_lro_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}

	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return 0;
}

1302250661Sdavidcsstatic void
1303284741Sdavidcsqla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
1304250661Sdavidcs{
1305250661Sdavidcs	device_t dev = ha->pci_dev;
1306250661Sdavidcs
1307284741Sdavidcs	if (i < ha->hw.num_tx_rings) {
1308284741Sdavidcs		device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
1309284741Sdavidcs			__func__, i, xstat->total_bytes);
1310284741Sdavidcs		device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
1311284741Sdavidcs			__func__, i, xstat->total_pkts);
1312284741Sdavidcs		device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
1313284741Sdavidcs			__func__, i, xstat->errors);
1314284741Sdavidcs		device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
1315284741Sdavidcs			__func__, i, xstat->pkts_dropped);
1316284741Sdavidcs		device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
1317284741Sdavidcs			__func__, i, xstat->switch_pkts);
1318284741Sdavidcs		device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
1319284741Sdavidcs			__func__, i, xstat->num_buffers);
1320284741Sdavidcs	} else {
1321284741Sdavidcs		device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
1322284741Sdavidcs			__func__, xstat->total_bytes);
1323284741Sdavidcs		device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
1324284741Sdavidcs			__func__, xstat->total_pkts);
1325284741Sdavidcs		device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
1326284741Sdavidcs			__func__, xstat->errors);
1327284741Sdavidcs		device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
1328284741Sdavidcs			__func__, xstat->pkts_dropped);
1329284741Sdavidcs		device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
1330284741Sdavidcs			__func__, xstat->switch_pkts);
1331284741Sdavidcs		device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
1332284741Sdavidcs			__func__, xstat->num_buffers);
1333284741Sdavidcs	}
1334250661Sdavidcs}
1335250661Sdavidcs
1336250661Sdavidcsstatic void
1337250661Sdavidcsqla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1338250661Sdavidcs{
1339250661Sdavidcs	device_t dev = ha->pci_dev;
1340250661Sdavidcs
1341250661Sdavidcs	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1342250661Sdavidcs		rstat->total_bytes);
1343250661Sdavidcs	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1344250661Sdavidcs		rstat->total_pkts);
1345250661Sdavidcs	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1346250661Sdavidcs		rstat->lro_pkt_count);
1347284741Sdavidcs	device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
1348250661Sdavidcs		rstat->sw_pkt_count);
1349250661Sdavidcs	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1350250661Sdavidcs		rstat->ip_chksum_err);
1351250661Sdavidcs	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1352250661Sdavidcs		rstat->pkts_wo_acntxts);
1353250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1354250661Sdavidcs		__func__, rstat->pkts_dropped_no_sds_card);
1355250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1356250661Sdavidcs		__func__, rstat->pkts_dropped_no_sds_host);
1357250661Sdavidcs	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1358250661Sdavidcs		rstat->oversized_pkts);
1359250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1360250661Sdavidcs		__func__, rstat->pkts_dropped_no_rds);
1361250661Sdavidcs	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1362250661Sdavidcs		__func__, rstat->unxpctd_mcast_pkts);
1363250661Sdavidcs	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1364250661Sdavidcs		rstat->re1_fbq_error);
1365250661Sdavidcs	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1366250661Sdavidcs		rstat->invalid_mac_addr);
1367250661Sdavidcs	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1368250661Sdavidcs		rstat->rds_prime_trys);
1369250661Sdavidcs	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1370250661Sdavidcs		rstat->rds_prime_success);
1371250661Sdavidcs	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1372250661Sdavidcs		rstat->lro_flows_added);
1373250661Sdavidcs	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1374250661Sdavidcs		rstat->lro_flows_deleted);
1375250661Sdavidcs	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1376250661Sdavidcs		rstat->lro_flows_active);
1377250661Sdavidcs	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1378250661Sdavidcs		__func__, rstat->pkts_droped_unknown);
1379250661Sdavidcs}
1380250661Sdavidcs
1381250661Sdavidcsstatic void
1382250661Sdavidcsqla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1383250661Sdavidcs{
1384250661Sdavidcs	device_t dev = ha->pci_dev;
1385250661Sdavidcs
1386250661Sdavidcs	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1387250661Sdavidcs		mstat->xmt_frames);
1388250661Sdavidcs	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1389250661Sdavidcs		mstat->xmt_bytes);
1390250661Sdavidcs	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1391250661Sdavidcs		mstat->xmt_mcast_pkts);
1392250661Sdavidcs	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1393250661Sdavidcs		mstat->xmt_bcast_pkts);
1394250661Sdavidcs	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1395250661Sdavidcs		mstat->xmt_pause_frames);
1396250661Sdavidcs	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1397250661Sdavidcs		mstat->xmt_cntrl_pkts);
1398250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1399250661Sdavidcs		__func__, mstat->xmt_pkt_lt_64bytes);
1400250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1401250661Sdavidcs		__func__, mstat->xmt_pkt_lt_127bytes);
1402250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1403250661Sdavidcs		__func__, mstat->xmt_pkt_lt_255bytes);
1404250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1405250661Sdavidcs		__func__, mstat->xmt_pkt_lt_511bytes);
1406284741Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1407250661Sdavidcs		__func__, mstat->xmt_pkt_lt_1023bytes);
1408284741Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1409250661Sdavidcs		__func__, mstat->xmt_pkt_lt_1518bytes);
1410284741Sdavidcs	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1411250661Sdavidcs		__func__, mstat->xmt_pkt_gt_1518bytes);
1412250661Sdavidcs
1413250661Sdavidcs	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1414250661Sdavidcs		mstat->rcv_frames);
1415250661Sdavidcs	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1416250661Sdavidcs		mstat->rcv_bytes);
1417250661Sdavidcs	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1418250661Sdavidcs		mstat->rcv_mcast_pkts);
1419250661Sdavidcs	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1420250661Sdavidcs		mstat->rcv_bcast_pkts);
1421250661Sdavidcs	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1422250661Sdavidcs		mstat->rcv_pause_frames);
1423250661Sdavidcs	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1424250661Sdavidcs		mstat->rcv_cntrl_pkts);
1425250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1426250661Sdavidcs		__func__, mstat->rcv_pkt_lt_64bytes);
1427250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1428250661Sdavidcs		__func__, mstat->rcv_pkt_lt_127bytes);
1429250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1430250661Sdavidcs		__func__, mstat->rcv_pkt_lt_255bytes);
1431250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1432250661Sdavidcs		__func__, mstat->rcv_pkt_lt_511bytes);
1433284741Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1434250661Sdavidcs		__func__, mstat->rcv_pkt_lt_1023bytes);
1435284741Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1436250661Sdavidcs		__func__, mstat->rcv_pkt_lt_1518bytes);
1437284741Sdavidcs	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1438250661Sdavidcs		__func__, mstat->rcv_pkt_gt_1518bytes);
1439250661Sdavidcs
1440250661Sdavidcs	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1441250661Sdavidcs		mstat->rcv_len_error);
1442250661Sdavidcs	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1443250661Sdavidcs		mstat->rcv_len_small);
1444250661Sdavidcs	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1445250661Sdavidcs		mstat->rcv_len_large);
1446250661Sdavidcs	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1447250661Sdavidcs		mstat->rcv_jabber);
1448250661Sdavidcs	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1449250661Sdavidcs		mstat->rcv_dropped);
1450250661Sdavidcs	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1451250661Sdavidcs		mstat->fcs_error);
1452250661Sdavidcs	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1453250661Sdavidcs		mstat->align_error);
1454250661Sdavidcs}
1455250661Sdavidcs
1456250661Sdavidcs
1457250661Sdavidcsstatic int
1458284741Sdavidcsqla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1459250661Sdavidcs{
1460250661Sdavidcs	device_t		dev;
1461250661Sdavidcs	q80_get_stats_t		*stat;
1462250661Sdavidcs	q80_get_stats_rsp_t	*stat_rsp;
1463250661Sdavidcs	uint32_t		err;
1464250661Sdavidcs
1465250661Sdavidcs	dev = ha->pci_dev;
1466250661Sdavidcs
1467250661Sdavidcs	stat = (q80_get_stats_t *)ha->hw.mbox;
1468250661Sdavidcs	bzero(stat, (sizeof (q80_get_stats_t)));
1469250661Sdavidcs
1470250661Sdavidcs	stat->opcode = Q8_MBX_GET_STATS;
1471250661Sdavidcs	stat->count_version = 2;
1472250661Sdavidcs	stat->count_version |= Q8_MBX_CMD_VERSION;
1473250661Sdavidcs
1474250661Sdavidcs	stat->cmd = cmd;
1475250661Sdavidcs
1476250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1477284741Sdavidcs                ha->hw.mbox, (rsp_size >> 2), 0)) {
1478250661Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
1479250661Sdavidcs                return -1;
1480250661Sdavidcs        }
1481250661Sdavidcs
1482250661Sdavidcs	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1483250661Sdavidcs
1484250661Sdavidcs        err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1485250661Sdavidcs
1486250661Sdavidcs        if (err) {
1487250661Sdavidcs                return -1;
1488250661Sdavidcs        }
1489250661Sdavidcs
1490250661Sdavidcs	return 0;
1491250661Sdavidcs}
1492250661Sdavidcs
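/*
 * Name: ql_get_stats
 * Function: Retrieves and prints the MAC, Receive and per-Tx-ring Transmit
 *	statistics. The low 16 bits of cmd select the statistics type; the
 *	upper 16 bits carry the PCI function or context id the firmware
 *	should report on.
 */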
1493250661Sdavidcsvoid
1494250661Sdavidcsql_get_stats(qla_host_t *ha)
1495250661Sdavidcs{
1496250661Sdavidcs	q80_get_stats_rsp_t	*stat_rsp;
1497250661Sdavidcs	q80_mac_stats_t		*mstat;
1498250661Sdavidcs	q80_xmt_stats_t		*xstat;
1499250661Sdavidcs	q80_rcv_stats_t		*rstat;
1500250661Sdavidcs	uint32_t		cmd;
1501284741Sdavidcs	int			i;
1502250661Sdavidcs
1503250661Sdavidcs	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1504250661Sdavidcs	/*
1505250661Sdavidcs	 * Get MAC Statistics
1506250661Sdavidcs	 */
1507250661Sdavidcs	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1508284741Sdavidcs//	cmd |= Q8_GET_STATS_CMD_CLEAR;
1509250661Sdavidcs
1510250661Sdavidcs	cmd |= ((ha->pci_func & 0x1) << 16);
1511250661Sdavidcs
1512284741Sdavidcs	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1513250661Sdavidcs		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1514250661Sdavidcs		qla_mac_stats(ha, mstat);
1515250661Sdavidcs	} else {
1516250661Sdavidcs                device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1517250661Sdavidcs			__func__, ha->hw.mbox[0]);
1518250661Sdavidcs	}
1519250661Sdavidcs	/*
1520250661Sdavidcs	 * Get RCV Statistics
1521250661Sdavidcs	 */
1522250661Sdavidcs	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1523284741Sdavidcs//	cmd |= Q8_GET_STATS_CMD_CLEAR;
1524250661Sdavidcs	cmd |= (ha->hw.rcv_cntxt_id << 16);
1525250661Sdavidcs
1526284741Sdavidcs	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1527250661Sdavidcs		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1528250661Sdavidcs		qla_rcv_stats(ha, rstat);
1529250661Sdavidcs	} else {
1530250661Sdavidcs                device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1531250661Sdavidcs			__func__, ha->hw.mbox[0]);
1532250661Sdavidcs	}
1533250661Sdavidcs	/*
1534250661Sdavidcs	 * Get XMT Statistics
1535250661Sdavidcs	 */
1536284741Sdavidcs	for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1537284741Sdavidcs		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1538284741Sdavidcs//		cmd |= Q8_GET_STATS_CMD_CLEAR;
1539284741Sdavidcs		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1540250661Sdavidcs
1541284741Sdavidcs		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1542284741Sdavidcs			== 0) {
1543284741Sdavidcs			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1544284741Sdavidcs			qla_xmt_stats(ha, xstat, i);
1545284741Sdavidcs		} else {
1546284741Sdavidcs			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1547284741Sdavidcs				__func__, ha->hw.mbox[0]);
1548284741Sdavidcs		}
1549284741Sdavidcs	}
1550284741Sdavidcs	return;
1551284741Sdavidcs}
1552250661Sdavidcs
1553284741Sdavidcsstatic void
1554284741Sdavidcsqla_get_quick_stats(qla_host_t *ha)
1555284741Sdavidcs{
1556284741Sdavidcs	q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1557284741Sdavidcs	q80_mac_stats_t         *mstat;
1558284741Sdavidcs	q80_xmt_stats_t         *xstat;
1559284741Sdavidcs	q80_rcv_stats_t         *rstat;
1560284741Sdavidcs	uint32_t                cmd;
1561284741Sdavidcs
1562284741Sdavidcs	stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1563284741Sdavidcs
1564284741Sdavidcs	cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1565284741Sdavidcs//      cmd |= Q8_GET_STATS_CMD_CLEAR;
1566284741Sdavidcs
1567284741Sdavidcs//      cmd |= ((ha->pci_func & 0x3) << 16);
1568284741Sdavidcs	cmd |= (0xFFFF << 16);
1569284741Sdavidcs
1570284741Sdavidcs	if (qla_get_hw_stats(ha, cmd,
1571284741Sdavidcs			sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1572284741Sdavidcs
1573284741Sdavidcs		mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1574284741Sdavidcs		rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1575284741Sdavidcs		xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1576284741Sdavidcs		qla_mac_stats(ha, mstat);
1577284741Sdavidcs		qla_rcv_stats(ha, rstat);
1578284741Sdavidcs		qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1579250661Sdavidcs	} else {
1580284741Sdavidcs		device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1581250661Sdavidcs			__func__, ha->hw.mbox[0]);
1582250661Sdavidcs	}
1583284741Sdavidcs	return;
1584250661Sdavidcs}
1585250661Sdavidcs
1586250661Sdavidcs/*
1587250661Sdavidcs * Name: qla_tx_tso
1588250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for
1589250661Sdavidcs *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1590250661Sdavidcs *	Ring Structure are plugged in.
1591250661Sdavidcs */
1592250661Sdavidcsstatic int
1593250661Sdavidcsqla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1594250661Sdavidcs{
1595250661Sdavidcs	struct ether_vlan_header *eh;
1596250661Sdavidcs	struct ip *ip = NULL;
1597250661Sdavidcs	struct ip6_hdr *ip6 = NULL;
1598250661Sdavidcs	struct tcphdr *th = NULL;
1599250661Sdavidcs	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1600250661Sdavidcs	uint16_t etype, opcode, offload = 1;
1601250661Sdavidcs	device_t dev;
1602250661Sdavidcs
1603250661Sdavidcs	dev = ha->pci_dev;
1604250661Sdavidcs
1605250661Sdavidcs
1606250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1607250661Sdavidcs
1608250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1609250661Sdavidcs		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1610250661Sdavidcs		etype = ntohs(eh->evl_proto);
1611250661Sdavidcs	} else {
1612250661Sdavidcs		ehdrlen = ETHER_HDR_LEN;
1613250661Sdavidcs		etype = ntohs(eh->evl_encap_proto);
1614250661Sdavidcs	}
1615250661Sdavidcs
1616250661Sdavidcs	hdrlen = 0;
1617250661Sdavidcs
1618250661Sdavidcs	switch (etype) {
1619250661Sdavidcs		case ETHERTYPE_IP:
1620250661Sdavidcs
1621250661Sdavidcs			tcp_opt_off = ehdrlen + sizeof(struct ip) +
1622250661Sdavidcs					sizeof(struct tcphdr);
1623250661Sdavidcs
1624250661Sdavidcs			if (mp->m_len < tcp_opt_off) {
1625250661Sdavidcs				m_copydata(mp, 0, tcp_opt_off, hdr);
1626250661Sdavidcs				ip = (struct ip *)(hdr + ehdrlen);
1627250661Sdavidcs			} else {
1628250661Sdavidcs				ip = (struct ip *)(mp->m_data + ehdrlen);
1629250661Sdavidcs			}
1630250661Sdavidcs
1631250661Sdavidcs			ip_hlen = ip->ip_hl << 2;
1632250661Sdavidcs			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1633250661Sdavidcs
1634250661Sdavidcs
1635250661Sdavidcs			if ((ip->ip_p != IPPROTO_TCP) ||
1636250661Sdavidcs				(ip_hlen != sizeof (struct ip))){
1637250661Sdavidcs				/* IP Options are not supported */
1638250661Sdavidcs
1639250661Sdavidcs				offload = 0;
1640250661Sdavidcs			} else
1641250661Sdavidcs				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1642250661Sdavidcs
1643250661Sdavidcs		break;
1644250661Sdavidcs
1645250661Sdavidcs		case ETHERTYPE_IPV6:
1646250661Sdavidcs
1647250661Sdavidcs			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1648250661Sdavidcs					sizeof (struct tcphdr);
1649250661Sdavidcs
1650250661Sdavidcs			if (mp->m_len < tcp_opt_off) {
1651250661Sdavidcs				m_copydata(mp, 0, tcp_opt_off, hdr);
1652250661Sdavidcs				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1653250661Sdavidcs			} else {
1654250661Sdavidcs				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1655250661Sdavidcs			}
1656250661Sdavidcs
1657250661Sdavidcs			ip_hlen = sizeof(struct ip6_hdr);
1658250661Sdavidcs			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1659250661Sdavidcs
1660250661Sdavidcs			if (ip6->ip6_nxt != IPPROTO_TCP) {
1661250661Sdavidcs				//device_printf(dev, "%s: ipv6\n", __func__);
1662250661Sdavidcs				offload = 0;
1663250661Sdavidcs			} else
1664250661Sdavidcs				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1665250661Sdavidcs		break;
1666250661Sdavidcs
1667250661Sdavidcs		default:
1668250661Sdavidcs			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1669250661Sdavidcs			offload = 0;
1670250661Sdavidcs		break;
1671250661Sdavidcs	}
1672250661Sdavidcs
1673250661Sdavidcs	if (!offload)
1674250661Sdavidcs		return (-1);
1675250661Sdavidcs
1676250661Sdavidcs	tcp_hlen = th->th_off << 2;
1677250661Sdavidcs	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1678250661Sdavidcs
1679250661Sdavidcs        if (mp->m_len < hdrlen) {
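	/*
	 * If the first mbuf does not hold the complete ethernet + IP + TCP
	 * header, copy the remaining header bytes (typically TCP options)
	 * into the caller supplied hdr[] buffer so that ql_hw_send() can
	 * later replicate the full header into the Tx descriptors.
	 */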
1680250661Sdavidcs                if (mp->m_len < tcp_opt_off) {
1681250661Sdavidcs                        if (tcp_hlen > sizeof(struct tcphdr)) {
1682250661Sdavidcs                                m_copydata(mp, tcp_opt_off,
1683250661Sdavidcs                                        (tcp_hlen - sizeof(struct tcphdr)),
1684250661Sdavidcs                                        &hdr[tcp_opt_off]);
1685250661Sdavidcs                        }
1686250661Sdavidcs                } else {
1687250661Sdavidcs                        m_copydata(mp, 0, hdrlen, hdr);
1688250661Sdavidcs                }
1689250661Sdavidcs        }
1690250661Sdavidcs
1691250661Sdavidcs	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1692250661Sdavidcs
1693250661Sdavidcs	tx_cmd->flags_opcode = opcode ;
1694250661Sdavidcs	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1695250661Sdavidcs	tx_cmd->total_hdr_len = hdrlen;
1696250661Sdavidcs
1697250661Sdavidcs	/* Multicast: least significant bit of the most significant (first) address octet is 1 */
1698250661Sdavidcs	if (eh->evl_dhost[0] & 0x01) {
1699250661Sdavidcs		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1700250661Sdavidcs	}
1701250661Sdavidcs
1702250661Sdavidcs	if (mp->m_len < hdrlen) {
1703250661Sdavidcs		device_printf(dev, "%s: mbuf len %d < hdrlen %u\n", __func__, mp->m_len, hdrlen);
1704250661Sdavidcs		return (1);
1705250661Sdavidcs	}
1706250661Sdavidcs
1707250661Sdavidcs	return (0);
1708250661Sdavidcs}
1709250661Sdavidcs
1710250661Sdavidcs/*
1711250661Sdavidcs * Name: qla_tx_chksum
1712250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for
1713250661Sdavidcs *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1714250661Sdavidcs *	Ring Structure are plugged in.
1715250661Sdavidcs */
1716250661Sdavidcsstatic int
1717250661Sdavidcsqla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1718250661Sdavidcs	uint32_t *tcp_hdr_off)
1719250661Sdavidcs{
1720250661Sdavidcs	struct ether_vlan_header *eh;
1721250661Sdavidcs	struct ip *ip;
1722250661Sdavidcs	struct ip6_hdr *ip6;
1723250661Sdavidcs	uint32_t ehdrlen, ip_hlen;
1724250661Sdavidcs	uint16_t etype, opcode, offload = 1;
1725250661Sdavidcs	device_t dev;
1726250661Sdavidcs	uint8_t buf[sizeof(struct ip6_hdr)];
1727250661Sdavidcs
1728250661Sdavidcs	dev = ha->pci_dev;
1729250661Sdavidcs
1730250661Sdavidcs	*op_code = 0;
1731250661Sdavidcs
1732250661Sdavidcs	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1733250661Sdavidcs		return (-1);
1734250661Sdavidcs
1735250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1736250661Sdavidcs
1737250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1738250661Sdavidcs		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1739250661Sdavidcs		etype = ntohs(eh->evl_proto);
1740250661Sdavidcs	} else {
1741250661Sdavidcs		ehdrlen = ETHER_HDR_LEN;
1742250661Sdavidcs		etype = ntohs(eh->evl_encap_proto);
1743250661Sdavidcs	}
1744250661Sdavidcs
1745250661Sdavidcs
1746250661Sdavidcs	switch (etype) {
1747250661Sdavidcs		case ETHERTYPE_IP:
1748250661Sdavidcs			ip = (struct ip *)(mp->m_data + ehdrlen);
1749250661Sdavidcs
1750250661Sdavidcs			ip_hlen = sizeof (struct ip);
1751250661Sdavidcs
1752250661Sdavidcs			if (mp->m_len < (ehdrlen + ip_hlen)) {
1753250661Sdavidcs				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1754250661Sdavidcs				ip = (struct ip *)buf;
1755250661Sdavidcs			}
1756250661Sdavidcs
1757250661Sdavidcs			if (ip->ip_p == IPPROTO_TCP)
1758250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1759250661Sdavidcs			else if (ip->ip_p == IPPROTO_UDP)
1760250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1761250661Sdavidcs			else {
1762250661Sdavidcs				//device_printf(dev, "%s: ipv4\n", __func__);
1763250661Sdavidcs				offload = 0;
1764250661Sdavidcs			}
1765250661Sdavidcs		break;
1766250661Sdavidcs
1767250661Sdavidcs		case ETHERTYPE_IPV6:
1768250661Sdavidcs			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1769250661Sdavidcs
1770250661Sdavidcs			ip_hlen = sizeof(struct ip6_hdr);
1771250661Sdavidcs
1772250661Sdavidcs			if (mp->m_len < (ehdrlen + ip_hlen)) {
1773250661Sdavidcs				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1774250661Sdavidcs					buf);
1775250661Sdavidcs				ip6 = (struct ip6_hdr *)buf;
1776250661Sdavidcs			}
1777250661Sdavidcs
1778250661Sdavidcs			if (ip6->ip6_nxt == IPPROTO_TCP)
1779250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1780250661Sdavidcs			else if (ip6->ip6_nxt == IPPROTO_UDP)
1781250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1782250661Sdavidcs			else {
1783250661Sdavidcs				//device_printf(dev, "%s: ipv6\n", __func__);
1784250661Sdavidcs				offload = 0;
1785250661Sdavidcs			}
1786250661Sdavidcs		break;
1787250661Sdavidcs
1788250661Sdavidcs		default:
1789250661Sdavidcs			offload = 0;
1790250661Sdavidcs		break;
1791250661Sdavidcs	}
1792250661Sdavidcs	if (!offload)
1793250661Sdavidcs		return (-1);
1794250661Sdavidcs
1795250661Sdavidcs	*op_code = opcode;
1796250661Sdavidcs	*tcp_hdr_off = (ip_hlen + ehdrlen);
1797250661Sdavidcs
1798250661Sdavidcs	return (0);
1799250661Sdavidcs}
1800250661Sdavidcs
1801250661Sdavidcs#define QLA_TX_MIN_FREE 2
1802250661Sdavidcs/*
1803250661Sdavidcs * Name: ql_hw_send
1804250661Sdavidcs * Function: Transmits a packet. It first checks if the packet is a
1805250661Sdavidcs *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1806250661Sdavidcs *	offload. If neither of these criteria is met, it is transmitted
1807250661Sdavidcs *	as a regular Ethernet frame.
1808250661Sdavidcs */
1809250661Sdavidcsint
1810250661Sdavidcsql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1811284741Sdavidcs	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1812250661Sdavidcs{
1813250661Sdavidcs	struct ether_vlan_header *eh;
1814250661Sdavidcs	qla_hw_t *hw = &ha->hw;
1815250661Sdavidcs	q80_tx_cmd_t *tx_cmd, tso_cmd;
1816250661Sdavidcs	bus_dma_segment_t *c_seg;
1817250661Sdavidcs	uint32_t num_tx_cmds, hdr_len = 0;
1818250661Sdavidcs	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1819250661Sdavidcs	device_t dev;
1820250661Sdavidcs	int i, ret;
1821250661Sdavidcs	uint8_t *src = NULL, *dst = NULL;
1822250661Sdavidcs	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1823250661Sdavidcs	uint32_t op_code = 0;
1824250661Sdavidcs	uint32_t tcp_hdr_off = 0;
1825250661Sdavidcs
1826250661Sdavidcs	dev = ha->pci_dev;
1827250661Sdavidcs
1828250661Sdavidcs	/*
1829250661Sdavidcs	 * Always make sure there is at least one empty slot in the tx_ring;
1830250661Sdavidcs	 * the tx_ring is considered full when only one entry is available.
1831250661Sdavidcs	 */
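	/*
	 * Each Tx command descriptor carries up to Q8_TX_CMD_MAX_SEGMENTS
	 * DMA segments, so round the segment count up. The ">> 2" assumes
	 * Q8_TX_CMD_MAX_SEGMENTS is 4; for example, nsegs = 10 needs
	 * (10 + 3) >> 2 = 3 descriptors.
	 */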
1832250661Sdavidcs        num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
1833250661Sdavidcs
1834250661Sdavidcs	total_length = mp->m_pkthdr.len;
1835250661Sdavidcs	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1836250661Sdavidcs		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1837250661Sdavidcs			__func__, total_length);
1838250661Sdavidcs		return (-1);
1839250661Sdavidcs	}
1840250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1841250661Sdavidcs
1842250661Sdavidcs	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1843250661Sdavidcs
1844250661Sdavidcs		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1845250661Sdavidcs
1846250661Sdavidcs		src = frame_hdr;
1847250661Sdavidcs		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1848250661Sdavidcs
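		/*
		 * qla_tx_tso() returns 0 when the complete header is in the
		 * first mbuf, 1 when it had to be copied into frame_hdr, and
		 * -1 when the frame is not a TSO candidate; only 0 and 1
		 * proceed with the LSO descriptor built in tso_cmd.
		 */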
1849250661Sdavidcs		if (!(ret & ~1)) {
1850250661Sdavidcs			/* find the additional tx_cmd descriptors required */
1851250661Sdavidcs
1852250661Sdavidcs			if (mp->m_flags & M_VLANTAG)
1853250661Sdavidcs				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1854250661Sdavidcs
1855250661Sdavidcs			hdr_len = tso_cmd.total_hdr_len;
1856250661Sdavidcs
1857250661Sdavidcs			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
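			/*
			 * The replicated L2/L3/L4 header is copied into
			 * extra Tx descriptors that follow the TSO command:
			 * the first holds sizeof(q80_tx_cmd_t) -
			 * Q8_TX_CMD_TSO_ALIGN bytes, each subsequent one a
			 * full descriptor's worth. Count how many extra
			 * descriptors that takes.
			 */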
1858250661Sdavidcs			bytes = QL_MIN(bytes, hdr_len);
1859250661Sdavidcs
1860250661Sdavidcs			num_tx_cmds++;
1861250661Sdavidcs			hdr_len -= bytes;
1862250661Sdavidcs
1863250661Sdavidcs			while (hdr_len) {
1864250661Sdavidcs				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1865250661Sdavidcs				hdr_len -= bytes;
1866250661Sdavidcs				num_tx_cmds++;
1867250661Sdavidcs			}
1868250661Sdavidcs			hdr_len = tso_cmd.total_hdr_len;
1869250661Sdavidcs
1870250661Sdavidcs			if (ret == 0)
1871250661Sdavidcs				src = (uint8_t *)eh;
1872250661Sdavidcs		} else
1873250661Sdavidcs			return (EINVAL);
1874250661Sdavidcs	} else {
1875250661Sdavidcs		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1876250661Sdavidcs	}
1877250661Sdavidcs
1878284741Sdavidcs	if (iscsi_pdu)
1879284741Sdavidcs		ha->hw.iscsi_pkt_count++;
1880284741Sdavidcs
1881250661Sdavidcs	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1882250661Sdavidcs		qla_hw_tx_done_locked(ha, txr_idx);
1883250661Sdavidcs		if (hw->tx_cntxt[txr_idx].txr_free <=
1884250661Sdavidcs				(num_tx_cmds + QLA_TX_MIN_FREE)) {
1885250661Sdavidcs        		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1886250661Sdavidcs				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1887250661Sdavidcs				__func__));
1888250661Sdavidcs			return (-1);
1889250661Sdavidcs		}
1890250661Sdavidcs	}
1891250661Sdavidcs
1892250661Sdavidcs	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1893250661Sdavidcs
1894250661Sdavidcs        if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1895250661Sdavidcs
1896250661Sdavidcs                if (nsegs > ha->hw.max_tx_segs)
1897250661Sdavidcs                        ha->hw.max_tx_segs = nsegs;
1898250661Sdavidcs
1899250661Sdavidcs                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1900250661Sdavidcs
1901250661Sdavidcs                if (op_code) {
1902250661Sdavidcs                        tx_cmd->flags_opcode = op_code;
1903250661Sdavidcs                        tx_cmd->tcp_hdr_off = tcp_hdr_off;
1904250661Sdavidcs
1905250661Sdavidcs                } else {
1906250661Sdavidcs                        tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1907250661Sdavidcs                }
1908250661Sdavidcs	} else {
1909250661Sdavidcs		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1910250661Sdavidcs		ha->tx_tso_frames++;
1911250661Sdavidcs	}
1912250661Sdavidcs
1913250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1914250661Sdavidcs        	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1915284741Sdavidcs
1916284741Sdavidcs		if (iscsi_pdu)
1917284741Sdavidcs			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
1918284741Sdavidcs
1919250661Sdavidcs	} else if (mp->m_flags & M_VLANTAG) {
1920250661Sdavidcs
1921250661Sdavidcs		if (hdr_len) { /* TSO */
1922250661Sdavidcs			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1923250661Sdavidcs						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1924250661Sdavidcs			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1925250661Sdavidcs		} else
1926250661Sdavidcs			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1927250661Sdavidcs
1928250661Sdavidcs		ha->hw_vlan_tx_frames++;
1929250661Sdavidcs		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1930284741Sdavidcs
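		/*
		 * For iSCSI PDUs the configured 802.1p priority is folded
		 * into the VLAN TCI; the PCP field occupies bits 15:13.
		 */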
1931284741Sdavidcs		if (iscsi_pdu) {
1932284741Sdavidcs			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
1933284741Sdavidcs			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
1934284741Sdavidcs		}
1935250661Sdavidcs	}
1936250661Sdavidcs
1937250661Sdavidcs
1938250661Sdavidcs        tx_cmd->n_bufs = (uint8_t)nsegs;
1939250661Sdavidcs        tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1940250661Sdavidcs        tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
1941250661Sdavidcs	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1942250661Sdavidcs
1943250661Sdavidcs	c_seg = segs;
1944250661Sdavidcs
1945250661Sdavidcs	while (1) {
1946250661Sdavidcs		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1947250661Sdavidcs
1948250661Sdavidcs			switch (i) {
1949250661Sdavidcs			case 0:
1950250661Sdavidcs				tx_cmd->buf1_addr = c_seg->ds_addr;
1951250661Sdavidcs				tx_cmd->buf1_len = c_seg->ds_len;
1952250661Sdavidcs				break;
1953250661Sdavidcs
1954250661Sdavidcs			case 1:
1955250661Sdavidcs				tx_cmd->buf2_addr = c_seg->ds_addr;
1956250661Sdavidcs				tx_cmd->buf2_len = c_seg->ds_len;
1957250661Sdavidcs				break;
1958250661Sdavidcs
1959250661Sdavidcs			case 2:
1960250661Sdavidcs				tx_cmd->buf3_addr = c_seg->ds_addr;
1961250661Sdavidcs				tx_cmd->buf3_len = c_seg->ds_len;
1962250661Sdavidcs				break;
1963250661Sdavidcs
1964250661Sdavidcs			case 3:
1965250661Sdavidcs				tx_cmd->buf4_addr = c_seg->ds_addr;
1966250661Sdavidcs				tx_cmd->buf4_len = c_seg->ds_len;
1967250661Sdavidcs				break;
1968250661Sdavidcs			}
1969250661Sdavidcs
1970250661Sdavidcs			c_seg++;
1971250661Sdavidcs			nsegs--;
1972250661Sdavidcs		}
1973250661Sdavidcs
1974250661Sdavidcs		txr_next = hw->tx_cntxt[txr_idx].txr_next =
1975250661Sdavidcs			(hw->tx_cntxt[txr_idx].txr_next + 1) &
1976250661Sdavidcs				(NUM_TX_DESCRIPTORS - 1);
1977250661Sdavidcs		tx_cmd_count++;
1978250661Sdavidcs
1979250661Sdavidcs		if (!nsegs)
1980250661Sdavidcs			break;
1981250661Sdavidcs
1982250661Sdavidcs		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1983250661Sdavidcs		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1984250661Sdavidcs	}
1985250661Sdavidcs
1986250661Sdavidcs	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1987250661Sdavidcs
1988250661Sdavidcs		/* TSO : Copy the header in the following tx cmd descriptors */
1989250661Sdavidcs
1990250661Sdavidcs		txr_next = hw->tx_cntxt[txr_idx].txr_next;
1991250661Sdavidcs
1992250661Sdavidcs		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1993250661Sdavidcs		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1994250661Sdavidcs
1995250661Sdavidcs		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1996250661Sdavidcs		bytes = QL_MIN(bytes, hdr_len);
1997250661Sdavidcs
1998250661Sdavidcs		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1999250661Sdavidcs
2000250661Sdavidcs		if (mp->m_flags & M_VLANTAG) {
2001250661Sdavidcs			/* first copy the src/dst MAC addresses */
2002250661Sdavidcs			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
2003250661Sdavidcs			dst += (ETHER_ADDR_LEN * 2);
2004250661Sdavidcs			src += (ETHER_ADDR_LEN * 2);
2005250661Sdavidcs
2006250661Sdavidcs			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2007250661Sdavidcs			dst += 2;
2008250661Sdavidcs			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2009250661Sdavidcs			dst += 2;
2010250661Sdavidcs
2011250661Sdavidcs			/* bytes left in src header */
2012250661Sdavidcs			hdr_len -= ((ETHER_ADDR_LEN * 2) +
2013250661Sdavidcs					ETHER_VLAN_ENCAP_LEN);
2014250661Sdavidcs
2015250661Sdavidcs			/* bytes left in TxCmd Entry */
2016250661Sdavidcs			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2017250661Sdavidcs
2018250661Sdavidcs
2019250661Sdavidcs			bcopy(src, dst, bytes);
2020250661Sdavidcs			src += bytes;
2021250661Sdavidcs			hdr_len -= bytes;
2022250661Sdavidcs		} else {
2023250661Sdavidcs			bcopy(src, dst, bytes);
2024250661Sdavidcs			src += bytes;
2025250661Sdavidcs			hdr_len -= bytes;
2026250661Sdavidcs		}
2027250661Sdavidcs
2028250661Sdavidcs		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2029250661Sdavidcs				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2030250661Sdavidcs					(NUM_TX_DESCRIPTORS - 1);
2031250661Sdavidcs		tx_cmd_count++;
2032250661Sdavidcs
2033250661Sdavidcs		while (hdr_len) {
2034250661Sdavidcs			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2035250661Sdavidcs			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2036250661Sdavidcs
2037250661Sdavidcs			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2038250661Sdavidcs
2039250661Sdavidcs			bcopy(src, tx_cmd, bytes);
2040250661Sdavidcs			src += bytes;
2041250661Sdavidcs			hdr_len -= bytes;
2042250661Sdavidcs
2043250661Sdavidcs			txr_next = hw->tx_cntxt[txr_idx].txr_next =
2044250661Sdavidcs				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2045250661Sdavidcs					(NUM_TX_DESCRIPTORS - 1);
2046250661Sdavidcs			tx_cmd_count++;
2047250661Sdavidcs		}
2048250661Sdavidcs	}
2049250661Sdavidcs
2050250661Sdavidcs	hw->tx_cntxt[txr_idx].txr_free =
2051250661Sdavidcs		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2052250661Sdavidcs
2053250661Sdavidcs	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2054250661Sdavidcs		txr_idx);
2055250661Sdavidcs       	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2056250661Sdavidcs
2057250661Sdavidcs	return (0);
2058250661Sdavidcs}
2059250661Sdavidcs
2060250661Sdavidcs
2061284741Sdavidcs
2062284741Sdavidcs#define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
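/*
 * Name: qla_config_rss_ind_table
 * Function: Programs the RSS indirection table in chunks of
 *	Q8_CONFIG_IND_TBL_SIZE entries. Each entry maps an RSS hash bucket to
 *	an SDS ring; buckets are assigned round-robin across num_sds_rings.
 */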
2063250661Sdavidcsstatic int
2064250661Sdavidcsqla_config_rss_ind_table(qla_host_t *ha)
2065250661Sdavidcs{
2066250661Sdavidcs	uint32_t i, count;
2067284741Sdavidcs	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2068250661Sdavidcs
2069250661Sdavidcs
2070284741Sdavidcs	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2071250661Sdavidcs		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2072250661Sdavidcs	}
2073250661Sdavidcs
2074284741Sdavidcs	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2075284741Sdavidcs		i = i + Q8_CONFIG_IND_TBL_SIZE) {
2076250661Sdavidcs
2077284741Sdavidcs		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2078250661Sdavidcs			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2079250661Sdavidcs		} else {
2080284741Sdavidcs			count = Q8_CONFIG_IND_TBL_SIZE;
2081250661Sdavidcs		}
2082250661Sdavidcs
2083250661Sdavidcs		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2084250661Sdavidcs			rss_ind_tbl))
2085250661Sdavidcs			return (-1);
2086250661Sdavidcs	}
2087250661Sdavidcs
2088250661Sdavidcs	return (0);
2089250661Sdavidcs}
2090250661Sdavidcs
2091250661Sdavidcs/*
2092250661Sdavidcs * Name: ql_del_hw_if
2093250661Sdavidcs * Function: Destroys the hardware specific entities corresponding to an
2094250661Sdavidcs *	Ethernet Interface
2095250661Sdavidcs */
2096250661Sdavidcsvoid
2097250661Sdavidcsql_del_hw_if(qla_host_t *ha)
2098250661Sdavidcs{
2099284741Sdavidcs	uint32_t i;
2100284741Sdavidcs	uint32_t num_msix;
2101250661Sdavidcs
2102284741Sdavidcs	(void)qla_stop_nic_func(ha);
2103284741Sdavidcs
2104250661Sdavidcs	qla_del_rcv_cntxt(ha);
2105250661Sdavidcs	qla_del_xmt_cntxt(ha);
2106250661Sdavidcs
2107250661Sdavidcs	if (ha->hw.flags.init_intr_cnxt) {
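		/*
		 * Interrupt contexts were created in batches of up to
		 * Q8_MAX_INTR_VECTORS vectors (see ql_init_hw_if()), so they
		 * are destroyed in the same batch sizes here.
		 */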
2108284741Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; ) {
2109284741Sdavidcs
2110284741Sdavidcs			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2111284741Sdavidcs				num_msix = Q8_MAX_INTR_VECTORS;
2112284741Sdavidcs			else
2113284741Sdavidcs				num_msix = ha->hw.num_sds_rings - i;
2114284741Sdavidcs			qla_config_intr_cntxt(ha, i, num_msix, 0);
2115284741Sdavidcs
2116284741Sdavidcs			i += num_msix;
2117284741Sdavidcs		}
2118284741Sdavidcs
2119250661Sdavidcs		ha->hw.flags.init_intr_cnxt = 0;
2120250661Sdavidcs	}
2121284741Sdavidcs	return;
2122250661Sdavidcs}
2123250661Sdavidcs
2124284741Sdavidcsvoid
2125284741Sdavidcsqla_confirm_9kb_enable(qla_host_t *ha)
2126284741Sdavidcs{
2127284741Sdavidcs	uint32_t supports_9kb = 0;
2128284741Sdavidcs
2129284741Sdavidcs	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2130284741Sdavidcs
2131284741Sdavidcs	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2132284741Sdavidcs	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2133284741Sdavidcs	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2134284741Sdavidcs
2135284741Sdavidcs	qla_get_nic_partition(ha, &supports_9kb, NULL);
2136284741Sdavidcs
2137284741Sdavidcs	if (!supports_9kb)
2138284741Sdavidcs		ha->hw.enable_9kb = 0;
2139284741Sdavidcs
2140284741Sdavidcs	return;
2141284741Sdavidcs}
2142284741Sdavidcs
2143284741Sdavidcs
2144250661Sdavidcs/*
2145250661Sdavidcs * Name: ql_init_hw_if
2146250661Sdavidcs * Function: Creates the hardware specific entities corresponding to an
2147250661Sdavidcs *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2148250661Sdavidcs *	corresponding to the interface. Enables LRO if allowed.
2149250661Sdavidcs */
2150250661Sdavidcsint
2151250661Sdavidcsql_init_hw_if(qla_host_t *ha)
2152250661Sdavidcs{
2153250661Sdavidcs	device_t	dev;
2154250661Sdavidcs	uint32_t	i;
2155250661Sdavidcs	uint8_t		bcast_mac[6];
2156250661Sdavidcs	qla_rdesc_t	*rdesc;
2157284741Sdavidcs	uint32_t	num_msix;
2158250661Sdavidcs
2159250661Sdavidcs	dev = ha->pci_dev;
2160250661Sdavidcs
2161250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
2162250661Sdavidcs		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2163250661Sdavidcs			ha->hw.dma_buf.sds_ring[i].size);
2164250661Sdavidcs	}
2165250661Sdavidcs
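	/*
	 * Create the interrupt contexts in batches of up to
	 * Q8_MAX_INTR_VECTORS vectors; on failure, tear down any batches
	 * already created before returning.
	 */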
2166284741Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; ) {
2167250661Sdavidcs
2168284741Sdavidcs		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2169284741Sdavidcs			num_msix = Q8_MAX_INTR_VECTORS;
2170284741Sdavidcs		else
2171284741Sdavidcs			num_msix = ha->hw.num_sds_rings - i;
2172250661Sdavidcs
2173284741Sdavidcs		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2174250661Sdavidcs
2175284741Sdavidcs			if (i > 0) {
2176284741Sdavidcs
2177284741Sdavidcs				num_msix = i;
2178284741Sdavidcs
2179284741Sdavidcs				for (i = 0; i < num_msix; ) {
2180284741Sdavidcs					qla_config_intr_cntxt(ha, i,
2181284741Sdavidcs						Q8_MAX_INTR_VECTORS, 0);
2182284741Sdavidcs					i += Q8_MAX_INTR_VECTORS;
2183284741Sdavidcs				}
2184284741Sdavidcs			}
2185284741Sdavidcs			return (-1);
2186284741Sdavidcs		}
2187284741Sdavidcs
2188284741Sdavidcs		i = i + num_msix;
2189284741Sdavidcs	}
2190284741Sdavidcs
2191284741Sdavidcs        ha->hw.flags.init_intr_cnxt = 1;
2192284741Sdavidcs
2193250661Sdavidcs	/*
2194250661Sdavidcs	 * Create Receive Context
2195250661Sdavidcs	 */
2196250661Sdavidcs	if (qla_init_rcv_cntxt(ha)) {
2197250661Sdavidcs		return (-1);
2198250661Sdavidcs	}
2199250661Sdavidcs
2200250661Sdavidcs	for (i = 0; i < ha->hw.num_rds_rings; i++) {
2201250661Sdavidcs		rdesc = &ha->hw.rds[i];
2202250661Sdavidcs		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2203250661Sdavidcs		rdesc->rx_in = 0;
2204250661Sdavidcs		/* Update the RDS Producer Indices */
2205250661Sdavidcs		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2206250661Sdavidcs			rdesc->rx_next);
2207250661Sdavidcs	}
2208250661Sdavidcs
2209250661Sdavidcs
2210250661Sdavidcs	/*
2211250661Sdavidcs	 * Create Transmit Context
2212250661Sdavidcs	 */
2213250661Sdavidcs	if (qla_init_xmt_cntxt(ha)) {
2214250661Sdavidcs		qla_del_rcv_cntxt(ha);
2215250661Sdavidcs		return (-1);
2216250661Sdavidcs	}
2217250661Sdavidcs	ha->hw.max_tx_segs = 0;
2218250661Sdavidcs
2219250661Sdavidcs	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
2220250661Sdavidcs		return(-1);
2221250661Sdavidcs
2222250661Sdavidcs	ha->hw.flags.unicast_mac = 1;
2223250661Sdavidcs
2224250661Sdavidcs	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2225250661Sdavidcs	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2226250661Sdavidcs
2227250661Sdavidcs	if (qla_config_mac_addr(ha, bcast_mac, 1))
2228250661Sdavidcs		return (-1);
2229250661Sdavidcs
2230250661Sdavidcs	ha->hw.flags.bcast_mac = 1;
2231250661Sdavidcs
2232250661Sdavidcs	/*
2233250661Sdavidcs	 * program any cached multicast addresses
2234250661Sdavidcs	 */
2235250661Sdavidcs	if (qla_hw_add_all_mcast(ha))
2236250661Sdavidcs		return (-1);
2237250661Sdavidcs
2238250661Sdavidcs	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2239250661Sdavidcs		return (-1);
2240250661Sdavidcs
2241250661Sdavidcs	if (qla_config_rss_ind_table(ha))
2242250661Sdavidcs		return (-1);
2243250661Sdavidcs
2244284741Sdavidcs	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2245250661Sdavidcs		return (-1);
2246250661Sdavidcs
2247250661Sdavidcs	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2248250661Sdavidcs		return (-1);
2249250661Sdavidcs
2250250661Sdavidcs	if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2251250661Sdavidcs		return (-1);
2252250661Sdavidcs
2253284741Sdavidcs        if (qla_init_nic_func(ha))
2254284741Sdavidcs                return (-1);
2255284741Sdavidcs
2256284741Sdavidcs        if (qla_query_fw_dcbx_caps(ha))
2257284741Sdavidcs                return (-1);
2258284741Sdavidcs
2259250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++)
2260250661Sdavidcs		QL_ENABLE_INTERRUPTS(ha, i);
2261250661Sdavidcs
2262250661Sdavidcs	return (0);
2263250661Sdavidcs}
2264250661Sdavidcs
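/*
 * Name: qla_map_sds_to_rds
 * Function: Tells the firmware which RDS ring feeds each SDS ring. A 1:1
 *	identity mapping is programmed, num_idx rings at a time starting at
 *	start_idx.
 */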
2265250661Sdavidcsstatic int
2266284741Sdavidcsqla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2267250661Sdavidcs{
2268250661Sdavidcs        device_t                dev = ha->pci_dev;
2269250661Sdavidcs        q80_rq_map_sds_to_rds_t *map_rings;
2270284741Sdavidcs	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2271250661Sdavidcs        uint32_t                i, err;
2272250661Sdavidcs        qla_hw_t                *hw = &ha->hw;
2273250661Sdavidcs
2274250661Sdavidcs        map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2275250661Sdavidcs        bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2276250661Sdavidcs
2277250661Sdavidcs        map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2278250661Sdavidcs        map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2279250661Sdavidcs        map_rings->count_version |= Q8_MBX_CMD_VERSION;
2280250661Sdavidcs
2281250661Sdavidcs        map_rings->cntxt_id = hw->rcv_cntxt_id;
2282284741Sdavidcs        map_rings->num_rings = num_idx;
2283250661Sdavidcs
2284284741Sdavidcs	for (i = 0; i < num_idx; i++) {
2285284741Sdavidcs		map_rings->sds_rds[i].sds_ring = i + start_idx;
2286284741Sdavidcs		map_rings->sds_rds[i].rds_ring = i + start_idx;
2287284741Sdavidcs	}
2288250661Sdavidcs
2289250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2290250661Sdavidcs                (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2291250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2292250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2293250661Sdavidcs                return (-1);
2294250661Sdavidcs        }
2295250661Sdavidcs
2296284741Sdavidcs        map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2297250661Sdavidcs
2298250661Sdavidcs        err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2299250661Sdavidcs
2300250661Sdavidcs        if (err) {
2301250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2302250661Sdavidcs                return (-1);
2303250661Sdavidcs        }
2304250661Sdavidcs
2305250661Sdavidcs        return (0);
2306250661Sdavidcs}
2307250661Sdavidcs
2308250661Sdavidcs/*
2309250661Sdavidcs * Name: qla_init_rcv_cntxt
2310250661Sdavidcs * Function: Creates the Receive Context.
2311250661Sdavidcs */
2312250661Sdavidcsstatic int
2313250661Sdavidcsqla_init_rcv_cntxt(qla_host_t *ha)
2314250661Sdavidcs{
2315250661Sdavidcs	q80_rq_rcv_cntxt_t	*rcntxt;
2316250661Sdavidcs	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
2317250661Sdavidcs	q80_stat_desc_t		*sdesc;
2318250661Sdavidcs	int			i, j;
2319250661Sdavidcs        qla_hw_t		*hw = &ha->hw;
2320250661Sdavidcs	device_t		dev;
2321250661Sdavidcs	uint32_t		err;
2322250661Sdavidcs	uint32_t		rcntxt_sds_rings;
2323250661Sdavidcs	uint32_t		rcntxt_rds_rings;
2324284741Sdavidcs	uint32_t		max_idx;
2325250661Sdavidcs
2326250661Sdavidcs	dev = ha->pci_dev;
2327250661Sdavidcs
2328250661Sdavidcs	/*
2329250661Sdavidcs	 * Create Receive Context
2330250661Sdavidcs	 */
2331250661Sdavidcs
2332250661Sdavidcs	for (i = 0; i < hw->num_sds_rings; i++) {
2333250661Sdavidcs		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2334250661Sdavidcs
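		/*
		 * Initialize every status descriptor in the ring to a known
		 * (1ULL) pattern before the ring is handed to the firmware.
		 */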
2335250661Sdavidcs		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++, sdesc++) {
2336250661Sdavidcs			sdesc->data[0] = 1ULL;
2337250661Sdavidcs			sdesc->data[1] = 1ULL;
2338250661Sdavidcs		}
2339250661Sdavidcs	}
2340250661Sdavidcs
2341250661Sdavidcs	rcntxt_sds_rings = hw->num_sds_rings;
2342250661Sdavidcs	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2343250661Sdavidcs		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2344250661Sdavidcs
2345250661Sdavidcs	rcntxt_rds_rings = hw->num_rds_rings;
2346250661Sdavidcs
2347250661Sdavidcs	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2348250661Sdavidcs		rcntxt_rds_rings = MAX_RDS_RING_SETS;
2349250661Sdavidcs
2350250661Sdavidcs	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2351250661Sdavidcs	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2352250661Sdavidcs
2353250661Sdavidcs	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2354250661Sdavidcs	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2355250661Sdavidcs	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2356250661Sdavidcs
2357250661Sdavidcs	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2358250661Sdavidcs			Q8_RCV_CNTXT_CAP0_LRO |
2359250661Sdavidcs			Q8_RCV_CNTXT_CAP0_HW_LRO |
2360250661Sdavidcs			Q8_RCV_CNTXT_CAP0_RSS |
2361250661Sdavidcs			Q8_RCV_CNTXT_CAP0_SGL_LRO;
2362250661Sdavidcs
2363284741Sdavidcs	if (ha->hw.enable_9kb)
2364284741Sdavidcs		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2365284741Sdavidcs	else
2366284741Sdavidcs		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2367284741Sdavidcs
2368250661Sdavidcs	if (ha->hw.num_rds_rings > 1) {
2369250661Sdavidcs		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2370250661Sdavidcs		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2371250661Sdavidcs	} else
2372250661Sdavidcs		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2373250661Sdavidcs
2374250661Sdavidcs	rcntxt->nsds_rings = rcntxt_sds_rings;
2375250661Sdavidcs
2376250661Sdavidcs	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2377250661Sdavidcs
2378250661Sdavidcs	rcntxt->rcv_vpid = 0;
2379250661Sdavidcs
2380250661Sdavidcs	for (i = 0; i <  rcntxt_sds_rings; i++) {
2381250661Sdavidcs		rcntxt->sds[i].paddr =
2382250661Sdavidcs			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2383250661Sdavidcs		rcntxt->sds[i].size =
2384250661Sdavidcs			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2385250661Sdavidcs		if (ha->msix_count == 2) {
2386250661Sdavidcs			rcntxt->sds[i].intr_id =
2387250661Sdavidcs				qla_host_to_le16(hw->intr_id[0]);
2388250661Sdavidcs			rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2389250661Sdavidcs		} else {
2390250661Sdavidcs			rcntxt->sds[i].intr_id =
2391250661Sdavidcs				qla_host_to_le16(hw->intr_id[i]);
2392250661Sdavidcs			rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2393250661Sdavidcs		}
2394250661Sdavidcs	}
2395250661Sdavidcs
2396250661Sdavidcs	for (i = 0; i <  rcntxt_rds_rings; i++) {
2397250661Sdavidcs		rcntxt->rds[i].paddr_std =
2398250661Sdavidcs			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2399284741Sdavidcs
2400284741Sdavidcs		if (ha->hw.enable_9kb)
2401284741Sdavidcs			rcntxt->rds[i].std_bsize =
2402284741Sdavidcs				qla_host_to_le64(MJUM9BYTES);
2403284741Sdavidcs		else
2404284741Sdavidcs			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2405284741Sdavidcs
2406250661Sdavidcs		rcntxt->rds[i].std_nentries =
2407250661Sdavidcs			qla_host_to_le32(NUM_RX_DESCRIPTORS);
2408250661Sdavidcs	}
2409250661Sdavidcs
2410250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2411250661Sdavidcs		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
2412250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2413250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2414250661Sdavidcs                return (-1);
2415250661Sdavidcs        }
2416250661Sdavidcs
2417250661Sdavidcs        rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2418250661Sdavidcs
2419250661Sdavidcs        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2420250661Sdavidcs
2421250661Sdavidcs        if (err) {
2422250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2423250661Sdavidcs                return (-1);
2424250661Sdavidcs        }
2425250661Sdavidcs
2426250661Sdavidcs	for (i = 0; i <  rcntxt_sds_rings; i++) {
2427250661Sdavidcs		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2428250661Sdavidcs	}
2429250661Sdavidcs
2430250661Sdavidcs	for (i = 0; i <  rcntxt_rds_rings; i++) {
2431250661Sdavidcs		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2432250661Sdavidcs	}
2433250661Sdavidcs
2434250661Sdavidcs	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2435250661Sdavidcs
2436250661Sdavidcs	ha->hw.flags.init_rx_cnxt = 1;
2437250661Sdavidcs
2438250661Sdavidcs	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2439284741Sdavidcs
2440284741Sdavidcs		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2441284741Sdavidcs
2442284741Sdavidcs			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2443284741Sdavidcs				max_idx = MAX_RCNTXT_SDS_RINGS;
2444284741Sdavidcs			else
2445284741Sdavidcs				max_idx = hw->num_sds_rings - i;
2446284741Sdavidcs
2447284741Sdavidcs			err = qla_add_rcv_rings(ha, i, max_idx);
2448284741Sdavidcs			if (err)
2449284741Sdavidcs				return -1;
2450284741Sdavidcs
2451284741Sdavidcs			i += max_idx;
2452284741Sdavidcs		}
2453250661Sdavidcs	}
2454250661Sdavidcs
2455284741Sdavidcs	if (hw->num_rds_rings > 1) {
2456284741Sdavidcs
2457284741Sdavidcs		for (i = 0; i < hw->num_rds_rings; ) {
2458284741Sdavidcs
2459284741Sdavidcs			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2460284741Sdavidcs				max_idx = MAX_SDS_TO_RDS_MAP;
2461284741Sdavidcs			else
2462284741Sdavidcs				max_idx = hw->num_rds_rings - i;
2463284741Sdavidcs
2464284741Sdavidcs			err = qla_map_sds_to_rds(ha, i, max_idx);
2465284741Sdavidcs			if (err)
2466284741Sdavidcs				return -1;
2467284741Sdavidcs
2468284741Sdavidcs			i += max_idx;
2469284741Sdavidcs		}
2470250661Sdavidcs	}
2471250661Sdavidcs
2472250661Sdavidcs	return (0);
2473250661Sdavidcs}
2474250661Sdavidcs
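/*
 * Name: qla_add_rcv_rings
 * Function: Adds nsds SDS/RDS ring pairs to the Receive Context, starting at
 *	index sds_idx, beyond those created by qla_init_rcv_cntxt().
 */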
2475250661Sdavidcsstatic int
2476284741Sdavidcsqla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2477250661Sdavidcs{
2478250661Sdavidcs	device_t		dev = ha->pci_dev;
2479250661Sdavidcs	q80_rq_add_rcv_rings_t	*add_rcv;
2480250661Sdavidcs	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
2481250661Sdavidcs	uint32_t		i,j, err;
2482250661Sdavidcs        qla_hw_t		*hw = &ha->hw;
2483250661Sdavidcs
2484250661Sdavidcs	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2485250661Sdavidcs	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2486250661Sdavidcs
2487250661Sdavidcs	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2488250661Sdavidcs	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2489250661Sdavidcs	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2490250661Sdavidcs
2491284741Sdavidcs	add_rcv->nrds_sets_rings = nsds | (1 << 5);
2492250661Sdavidcs	add_rcv->nsds_rings = nsds;
2493250661Sdavidcs	add_rcv->cntxt_id = hw->rcv_cntxt_id;
2494250661Sdavidcs
2495250661Sdavidcs        for (i = 0; i <  nsds; i++) {
2496250661Sdavidcs
2497250661Sdavidcs		j = i + sds_idx;
2498250661Sdavidcs
2499250661Sdavidcs                add_rcv->sds[i].paddr =
2500250661Sdavidcs                        qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2501250661Sdavidcs
2502250661Sdavidcs                add_rcv->sds[i].size =
2503250661Sdavidcs                        qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2504250661Sdavidcs
2505250661Sdavidcs                if (ha->msix_count == 2) {
2506250661Sdavidcs                        add_rcv->sds[i].intr_id =
2507250661Sdavidcs                                qla_host_to_le16(hw->intr_id[0]);
2508250661Sdavidcs                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2509250661Sdavidcs                } else {
2510250661Sdavidcs                        add_rcv->sds[i].intr_id =
2511250661Sdavidcs                                qla_host_to_le16(hw->intr_id[j]);
2512250661Sdavidcs                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2513250661Sdavidcs                }
2514250661Sdavidcs
2515250661Sdavidcs        }
2516284741Sdavidcs        for (i = 0; (i <  nsds); i++) {
2517250661Sdavidcs                j = i + sds_idx;
2518284741Sdavidcs
2519250661Sdavidcs                add_rcv->rds[i].paddr_std =
2520250661Sdavidcs                        qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2521284741Sdavidcs
2522284741Sdavidcs		if (ha->hw.enable_9kb)
2523284741Sdavidcs			add_rcv->rds[i].std_bsize =
2524284741Sdavidcs				qla_host_to_le64(MJUM9BYTES);
2525284741Sdavidcs		else
2526284741Sdavidcs                	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2527284741Sdavidcs
2528250661Sdavidcs                add_rcv->rds[i].std_nentries =
2529250661Sdavidcs                        qla_host_to_le32(NUM_RX_DESCRIPTORS);
2530250661Sdavidcs        }
2531250661Sdavidcs
2532250661Sdavidcs
2533250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2534250661Sdavidcs		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
2535250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2536250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2537250661Sdavidcs                return (-1);
2538250661Sdavidcs        }
2539250661Sdavidcs
2540250661Sdavidcs        add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2541250661Sdavidcs
2542250661Sdavidcs        err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2543250661Sdavidcs
2544250661Sdavidcs        if (err) {
2545250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2546250661Sdavidcs                return (-1);
2547250661Sdavidcs        }
2548250661Sdavidcs
2549284741Sdavidcs	for (i = 0; i < nsds; i++) {
2550284741Sdavidcs		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2551250661Sdavidcs	}
2552284741Sdavidcs
2553284741Sdavidcs	for (i = 0; i < nsds; i++) {
2554284741Sdavidcs		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2555250661Sdavidcs	}
2556284741Sdavidcs
2557250661Sdavidcs	return (0);
2558250661Sdavidcs}
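
/*
 * Note: every firmware mailbox command in this file follows the same
 * request/response pattern against the shared ha->hw.mbox buffer.  The
 * sketch below is illustrative only and not compiled; q80_rq_xyz_t,
 * q80_rsp_xyz_t and Q8_MBX_SOME_OPCODE are placeholder names, not real
 * structures or opcodes.
 */
#if 0
	q80_rq_xyz_t	*req;
	q80_rsp_xyz_t	*rsp;
	uint32_t	err;

	req = (q80_rq_xyz_t *)ha->hw.mbox;	/* build the request in mbox */
	bzero(req, sizeof (q80_rq_xyz_t));
	req->opcode = Q8_MBX_SOME_OPCODE;
	req->count_version = (sizeof (q80_rq_xyz_t) >> 2) | Q8_MBX_CMD_VERSION;

	if (qla_mbx_cmd(ha, (uint32_t *)req, (sizeof (q80_rq_xyz_t) >> 2),
		ha->hw.mbox, (sizeof (q80_rsp_xyz_t) >> 2), 0))
		return (-1);			/* mailbox transport failure */

	rsp = (q80_rsp_xyz_t *)ha->hw.mbox;	/* response overlays the mbox */
	err = Q8_MBX_RSP_STATUS(rsp->regcnt_status);
	if (err)
		return (-1);			/* firmware completion status */
#endif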
2559250661Sdavidcs
2560250661Sdavidcs/*
2561250661Sdavidcs * Name: qla_del_rcv_cntxt
2562250661Sdavidcs * Function: Destroys the Receive Context.
2563250661Sdavidcs */
2564250661Sdavidcsstatic void
2565250661Sdavidcsqla_del_rcv_cntxt(qla_host_t *ha)
2566250661Sdavidcs{
2567250661Sdavidcs	device_t			dev = ha->pci_dev;
2568250661Sdavidcs	q80_rcv_cntxt_destroy_t		*rcntxt;
2569250661Sdavidcs	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
2570250661Sdavidcs	uint32_t			err;
2571250661Sdavidcs	uint8_t				bcast_mac[6];
2572250661Sdavidcs
2573250661Sdavidcs	if (!ha->hw.flags.init_rx_cnxt)
2574250661Sdavidcs		return;
2575250661Sdavidcs
2576250661Sdavidcs	if (qla_hw_del_all_mcast(ha))
2577250661Sdavidcs		return;
2578250661Sdavidcs
2579250661Sdavidcs	if (ha->hw.flags.bcast_mac) {
2580250661Sdavidcs
2581250661Sdavidcs		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2582250661Sdavidcs		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2583250661Sdavidcs
2584250661Sdavidcs		if (qla_config_mac_addr(ha, bcast_mac, 0))
2585250661Sdavidcs			return;
2586250661Sdavidcs		ha->hw.flags.bcast_mac = 0;
2587250661Sdavidcs
2588250661Sdavidcs	}
2589250661Sdavidcs
2590250661Sdavidcs	if (ha->hw.flags.unicast_mac) {
2591250661Sdavidcs		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2592250661Sdavidcs			return;
2593250661Sdavidcs		ha->hw.flags.unicast_mac = 0;
2594250661Sdavidcs	}
2595250661Sdavidcs
2596250661Sdavidcs	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2597250661Sdavidcs	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2598250661Sdavidcs
2599250661Sdavidcs	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2600250661Sdavidcs	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2601250661Sdavidcs	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2602250661Sdavidcs
2603250661Sdavidcs	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2604250661Sdavidcs
2605250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2606250661Sdavidcs		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2607250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2608250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2609250661Sdavidcs                return;
2610250661Sdavidcs        }
2611250661Sdavidcs        rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2612250661Sdavidcs
2613250661Sdavidcs        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2614250661Sdavidcs
2615250661Sdavidcs        if (err) {
2616250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2617250661Sdavidcs        }
2618250661Sdavidcs
2619250661Sdavidcs	ha->hw.flags.init_rx_cnxt = 0;
2620250661Sdavidcs	return;
2621250661Sdavidcs}
2622250661Sdavidcs
2623250661Sdavidcs/*
2624250661Sdavidcs * Name: qla_init_xmt_cntxt
2625250661Sdavidcs * Function: Creates the Transmit Context.
2626250661Sdavidcs */
2627250661Sdavidcsstatic int
2628250661Sdavidcsqla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2629250661Sdavidcs{
2630250661Sdavidcs	device_t		dev;
2631250661Sdavidcs        qla_hw_t		*hw = &ha->hw;
2632250661Sdavidcs	q80_rq_tx_cntxt_t	*tcntxt;
2633250661Sdavidcs	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
2634250661Sdavidcs	uint32_t		err;
2635250661Sdavidcs	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2636250661Sdavidcs
2637250661Sdavidcs	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2638250661Sdavidcs
2639250661Sdavidcs	dev = ha->pci_dev;
2640250661Sdavidcs
2641250661Sdavidcs	/*
2642250661Sdavidcs	 * Create Transmit Context
2643250661Sdavidcs	 */
2644250661Sdavidcs	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2645250661Sdavidcs	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2646250661Sdavidcs
2647250661Sdavidcs	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2648250661Sdavidcs	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2649250661Sdavidcs	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2650250661Sdavidcs
2651284741Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV
2652284741Sdavidcs
2653284741Sdavidcs	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2654284741Sdavidcs				Q8_TX_CNTXT_CAP0_TC;
2655284741Sdavidcs
2656284741Sdavidcs	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2657284741Sdavidcs		tcntxt->traffic_class = 1;
2658284741Sdavidcs	}
2659284741Sdavidcs
2660284741Sdavidcs#else
2661284741Sdavidcs
2662250661Sdavidcs	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2663250661Sdavidcs
2664284741Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2665284741Sdavidcs
2666250661Sdavidcs	tcntxt->ntx_rings = 1;
2667250661Sdavidcs
2668250661Sdavidcs	tcntxt->tx_ring[0].paddr =
2669250661Sdavidcs		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2670250661Sdavidcs	tcntxt->tx_ring[0].tx_consumer =
2671250661Sdavidcs		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2672250661Sdavidcs	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2673250661Sdavidcs
2674250661Sdavidcs	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2675250661Sdavidcs	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2676250661Sdavidcs
2677250661Sdavidcs
2678250661Sdavidcs	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2679250661Sdavidcs	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2680250661Sdavidcs
2681250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2682250661Sdavidcs		(sizeof (q80_rq_tx_cntxt_t) >> 2),
2683250661Sdavidcs                ha->hw.mbox,
2684250661Sdavidcs		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2685250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2686250661Sdavidcs                return (-1);
2687250661Sdavidcs        }
2688250661Sdavidcs        tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2689250661Sdavidcs
2690250661Sdavidcs        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2691250661Sdavidcs
2692250661Sdavidcs        if (err) {
2693250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2694250661Sdavidcs		return -1;
2695250661Sdavidcs        }
2696250661Sdavidcs
2697250661Sdavidcs	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2698250661Sdavidcs	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2699250661Sdavidcs
2700284741Sdavidcs	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2701284741Sdavidcs		return (-1);
2702284741Sdavidcs
2703250661Sdavidcs	return (0);
2704250661Sdavidcs}
2705250661Sdavidcs
2706250661Sdavidcs
2707250661Sdavidcs/*
2708250661Sdavidcs * Name: qla_del_xmt_cntxt
2709250661Sdavidcs * Function: Destroys the Transmit Context.
2710250661Sdavidcs */
2711250661Sdavidcsstatic int
2712250661Sdavidcsqla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2713250661Sdavidcs{
2714250661Sdavidcs	device_t			dev = ha->pci_dev;
2715250661Sdavidcs	q80_tx_cntxt_destroy_t		*tcntxt;
2716250661Sdavidcs	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
2717250661Sdavidcs	uint32_t			err;
2718250661Sdavidcs
2719250661Sdavidcs	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2720250661Sdavidcs	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2721250661Sdavidcs
2722250661Sdavidcs	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2723250661Sdavidcs	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2724250661Sdavidcs	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2725250661Sdavidcs
2726250661Sdavidcs	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2727250661Sdavidcs
2728250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2729250661Sdavidcs		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
2730250661Sdavidcs                ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2731250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2732250661Sdavidcs                return (-1);
2733250661Sdavidcs        }
2734250661Sdavidcs        tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2735250661Sdavidcs
2736250661Sdavidcs        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2737250661Sdavidcs
2738250661Sdavidcs        if (err) {
2739250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2740250661Sdavidcs		return (-1);
2741250661Sdavidcs        }
2742250661Sdavidcs
2743250661Sdavidcs	return (0);
2744250661Sdavidcs}
2745250661Sdavidcsstatic void
2746250661Sdavidcsqla_del_xmt_cntxt(qla_host_t *ha)
2747250661Sdavidcs{
2748250661Sdavidcs	uint32_t i;
2749250661Sdavidcs
2750250661Sdavidcs	if (!ha->hw.flags.init_tx_cnxt)
2751250661Sdavidcs		return;
2752250661Sdavidcs
2753250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2754250661Sdavidcs		if (qla_del_xmt_cntxt_i(ha, i))
2755250661Sdavidcs			break;
2756250661Sdavidcs	}
2757250661Sdavidcs	ha->hw.flags.init_tx_cnxt = 0;
2758250661Sdavidcs}
2759250661Sdavidcs
2760250661Sdavidcsstatic int
2761250661Sdavidcsqla_init_xmt_cntxt(qla_host_t *ha)
2762250661Sdavidcs{
2763250661Sdavidcs	uint32_t i, j;
2764250661Sdavidcs
2765250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2766250661Sdavidcs		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2767250661Sdavidcs			for (j = 0; j < i; j++)
2768250661Sdavidcs				qla_del_xmt_cntxt_i(ha, j);
2769250661Sdavidcs			return (-1);
2770250661Sdavidcs		}
2771250661Sdavidcs	}
2772250661Sdavidcs	ha->hw.flags.init_tx_cnxt = 1;
2773250661Sdavidcs	return (0);
2774250661Sdavidcs}
2775250661Sdavidcs
2776250661Sdavidcsstatic int
2777250661Sdavidcsqla_hw_add_all_mcast(qla_host_t *ha)
2778250661Sdavidcs{
2779250661Sdavidcs	int i, nmcast;
2780250661Sdavidcs
2781250661Sdavidcs	nmcast = ha->hw.nmcast;
2782250661Sdavidcs
2783250661Sdavidcs	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2784250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] != 0) ||
2785250661Sdavidcs			(ha->hw.mcast[i].addr[1] != 0) ||
2786250661Sdavidcs			(ha->hw.mcast[i].addr[2] != 0) ||
2787250661Sdavidcs			(ha->hw.mcast[i].addr[3] != 0) ||
2788250661Sdavidcs			(ha->hw.mcast[i].addr[4] != 0) ||
2789250661Sdavidcs			(ha->hw.mcast[i].addr[5] != 0)) {
2790250661Sdavidcs
2791250661Sdavidcs			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2792250661Sdavidcs                		device_printf(ha->pci_dev, "%s: failed\n",
2793250661Sdavidcs					__func__);
2794250661Sdavidcs				return (-1);
2795250661Sdavidcs			}
2796250661Sdavidcs
2797250661Sdavidcs			nmcast--;
2798250661Sdavidcs		}
2799250661Sdavidcs	}
2800250661Sdavidcs	return 0;
2801250661Sdavidcs}
2802250661Sdavidcs
2803250661Sdavidcsstatic int
2804250661Sdavidcsqla_hw_del_all_mcast(qla_host_t *ha)
2805250661Sdavidcs{
2806250661Sdavidcs	int i, nmcast;
2807250661Sdavidcs
2808250661Sdavidcs	nmcast = ha->hw.nmcast;
2809250661Sdavidcs
2810250661Sdavidcs	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2811250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] != 0) ||
2812250661Sdavidcs			(ha->hw.mcast[i].addr[1] != 0) ||
2813250661Sdavidcs			(ha->hw.mcast[i].addr[2] != 0) ||
2814250661Sdavidcs			(ha->hw.mcast[i].addr[3] != 0) ||
2815250661Sdavidcs			(ha->hw.mcast[i].addr[4] != 0) ||
2816250661Sdavidcs			(ha->hw.mcast[i].addr[5] != 0)) {
2817250661Sdavidcs
2818250661Sdavidcs			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2819250661Sdavidcs				return (-1);
2820250661Sdavidcs
2821250661Sdavidcs			nmcast--;
2822250661Sdavidcs		}
2823250661Sdavidcs	}
2824250661Sdavidcs	return 0;
2825250661Sdavidcs}
2826250661Sdavidcs
2827250661Sdavidcsstatic int
2828250661Sdavidcsqla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2829250661Sdavidcs{
2830250661Sdavidcs	int i;
2831250661Sdavidcs
2832250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2833250661Sdavidcs
2834250661Sdavidcs		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2835250661Sdavidcs			return 0; /* it has already been added */
2836250661Sdavidcs	}
2837250661Sdavidcs
2838250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2839250661Sdavidcs
2840250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] == 0) &&
2841250661Sdavidcs			(ha->hw.mcast[i].addr[1] == 0) &&
2842250661Sdavidcs			(ha->hw.mcast[i].addr[2] == 0) &&
2843250661Sdavidcs			(ha->hw.mcast[i].addr[3] == 0) &&
2844250661Sdavidcs			(ha->hw.mcast[i].addr[4] == 0) &&
2845250661Sdavidcs			(ha->hw.mcast[i].addr[5] == 0)) {
2846250661Sdavidcs
2847250661Sdavidcs			if (qla_config_mac_addr(ha, mta, 1))
2848250661Sdavidcs				return (-1);
2849250661Sdavidcs
2850250661Sdavidcs			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2851250661Sdavidcs			ha->hw.nmcast++;
2852250661Sdavidcs
2853250661Sdavidcs			return 0;
2854250661Sdavidcs		}
2855250661Sdavidcs	}
2856250661Sdavidcs	return 0;
2857250661Sdavidcs}
2858250661Sdavidcs
2859250661Sdavidcsstatic int
2860250661Sdavidcsqla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2861250661Sdavidcs{
2862250661Sdavidcs	int i;
2863250661Sdavidcs
2864250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2865250661Sdavidcs		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2866250661Sdavidcs
2867250661Sdavidcs			if (qla_config_mac_addr(ha, mta, 0))
2868250661Sdavidcs				return (-1);
2869250661Sdavidcs
2870250661Sdavidcs			ha->hw.mcast[i].addr[0] = 0;
2871250661Sdavidcs			ha->hw.mcast[i].addr[1] = 0;
2872250661Sdavidcs			ha->hw.mcast[i].addr[2] = 0;
2873250661Sdavidcs			ha->hw.mcast[i].addr[3] = 0;
2874250661Sdavidcs			ha->hw.mcast[i].addr[4] = 0;
2875250661Sdavidcs			ha->hw.mcast[i].addr[5] = 0;
2876250661Sdavidcs
2877250661Sdavidcs			ha->hw.nmcast--;
2878250661Sdavidcs
2879250661Sdavidcs			return 0;
2880250661Sdavidcs		}
2881250661Sdavidcs	}
2882250661Sdavidcs	return 0;
2883250661Sdavidcs}
2884250661Sdavidcs
2885250661Sdavidcs/*
2886250661Sdavidcs * Name: ql_hw_set_multi
2887250661Sdavidcs * Function: Sets the Multicast Addresses provided by the host O.S. into the
2888250661Sdavidcs *	hardware (for the given interface)
2889250661Sdavidcs */
2890250661Sdavidcsint
2891250661Sdavidcsql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2892250661Sdavidcs	uint32_t add_mac)
2893250661Sdavidcs{
2894250661Sdavidcs	int i;
2895250661Sdavidcs	uint8_t *mta = mcast;
2896250661Sdavidcs	int ret = 0;
2897250661Sdavidcs
2898250661Sdavidcs	for (i = 0; i < mcnt; i++) {
2899250661Sdavidcs		if (add_mac) {
2900250661Sdavidcs			ret = qla_hw_add_mcast(ha, mta);
2901250661Sdavidcs			if (ret)
2902250661Sdavidcs				break;
2903250661Sdavidcs		} else {
2904250661Sdavidcs			ret = qla_hw_del_mcast(ha, mta);
2905250661Sdavidcs			if (ret)
2906250661Sdavidcs				break;
2907250661Sdavidcs		}
2908250661Sdavidcs
2909250661Sdavidcs		mta += Q8_MAC_ADDR_LEN;
2910250661Sdavidcs	}
2911250661Sdavidcs	return (ret);
2912250661Sdavidcs}
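
/*
 * Note: ql_hw_set_multi() expects 'mcast' to be a flat array of 'mcnt'
 * 6-byte MAC addresses laid out Q8_MAC_ADDR_LEN apart, with 'add_mac'
 * selecting add (1) or delete (0).  A hypothetical caller sketch follows
 * (not compiled; mcast_list and nentries are assumed names, not part of
 * this driver):
 */
#if 0
	uint8_t		mcast_list[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	uint32_t	nentries = 0;

	/* ... fill mcast_list with nentries addresses from the ifnet ... */

	if (ql_hw_set_multi(ha, mcast_list, nentries, 1))
		device_printf(ha->pci_dev, "%s: multicast add failed\n",
			__func__);
#endif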
2913250661Sdavidcs
2914250661Sdavidcs/*
2915250661Sdavidcs * Name: qla_hw_tx_done_locked
2916250661Sdavidcs * Function: Handle Transmit Completions
2917250661Sdavidcs */
2918250661Sdavidcsstatic void
2919250661Sdavidcsqla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2920250661Sdavidcs{
2921250661Sdavidcs	qla_tx_buf_t *txb;
2922250661Sdavidcs        qla_hw_t *hw = &ha->hw;
2923250661Sdavidcs	uint32_t comp_idx, comp_count = 0;
2924250661Sdavidcs	qla_hw_tx_cntxt_t *hw_tx_cntxt;
2925250661Sdavidcs
2926250661Sdavidcs	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2927250661Sdavidcs
2928250661Sdavidcs	/* retrieve index of last entry in tx ring completed */
2929250661Sdavidcs	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2930250661Sdavidcs
2931250661Sdavidcs	while (comp_idx != hw_tx_cntxt->txr_comp) {
2932250661Sdavidcs
2933250661Sdavidcs		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2934250661Sdavidcs
2935250661Sdavidcs		hw_tx_cntxt->txr_comp++;
2936250661Sdavidcs		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2937250661Sdavidcs			hw_tx_cntxt->txr_comp = 0;
2938250661Sdavidcs
2939250661Sdavidcs		comp_count++;
2940250661Sdavidcs
2941250661Sdavidcs		if (txb->m_head) {
2942271849Sglebius			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
2943250661Sdavidcs
2944250661Sdavidcs			bus_dmamap_sync(ha->tx_tag, txb->map,
2945250661Sdavidcs				BUS_DMASYNC_POSTWRITE);
2946250661Sdavidcs			bus_dmamap_unload(ha->tx_tag, txb->map);
2947250661Sdavidcs			m_freem(txb->m_head);
2948250661Sdavidcs
2949250661Sdavidcs			txb->m_head = NULL;
2950250661Sdavidcs		}
2951250661Sdavidcs	}
2952250661Sdavidcs
2953250661Sdavidcs	hw_tx_cntxt->txr_free += comp_count;
2954250661Sdavidcs	return;
2955250661Sdavidcs}
2956250661Sdavidcs
2957250661Sdavidcs/*
2958250661Sdavidcs * Name: ql_hw_tx_done
2959250661Sdavidcs * Function: Handle Transmit Completions
2960250661Sdavidcs */
2961250661Sdavidcsvoid
2962250661Sdavidcsql_hw_tx_done(qla_host_t *ha)
2963250661Sdavidcs{
2964250661Sdavidcs	int i;
2965250661Sdavidcs	uint32_t flag = 0;
2966250661Sdavidcs
2967250661Sdavidcs	if (!mtx_trylock(&ha->tx_lock)) {
2968250661Sdavidcs       		QL_DPRINT8(ha, (ha->pci_dev,
2969250661Sdavidcs			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2970250661Sdavidcs		return;
2971250661Sdavidcs	}
2972250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2973250661Sdavidcs		qla_hw_tx_done_locked(ha, i);
2974250661Sdavidcs		if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2975250661Sdavidcs			flag = 1;
2976250661Sdavidcs	}
2977250661Sdavidcs
2978250661Sdavidcs	if (!flag)
2979250661Sdavidcs		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2980250661Sdavidcs
2981250661Sdavidcs	QLA_TX_UNLOCK(ha);
2982250661Sdavidcs	return;
2983250661Sdavidcs}
2984250661Sdavidcs
2985250661Sdavidcsvoid
2986250661Sdavidcsql_update_link_state(qla_host_t *ha)
2987250661Sdavidcs{
2988250661Sdavidcs	uint32_t link_state;
2989250661Sdavidcs	uint32_t prev_link_state;
2990250661Sdavidcs
2991250661Sdavidcs	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2992250661Sdavidcs		ha->hw.link_up = 0;
2993250661Sdavidcs		return;
2994250661Sdavidcs	}
2995250661Sdavidcs	link_state = READ_REG32(ha, Q8_LINK_STATE);
2996250661Sdavidcs
2997250661Sdavidcs	prev_link_state =  ha->hw.link_up;
2998250661Sdavidcs
2999250661Sdavidcs	if (ha->pci_func == 0)
3000250661Sdavidcs		ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3001250661Sdavidcs	else
3002250661Sdavidcs		ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3003250661Sdavidcs
3004250661Sdavidcs	if (prev_link_state !=  ha->hw.link_up) {
3005250661Sdavidcs		if (ha->hw.link_up) {
3006250661Sdavidcs			if_link_state_change(ha->ifp, LINK_STATE_UP);
3007250661Sdavidcs		} else {
3008250661Sdavidcs			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3009250661Sdavidcs		}
3010250661Sdavidcs	}
3011250661Sdavidcs	return;
3012250661Sdavidcs}
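
/*
 * Note: Q8_LINK_STATE packs per-function link status into 4-bit fields;
 * as handled above, PCI function 0 reads bits [3:0] and the other
 * functions read bits [7:4], a value of 1 meaning link up.  Equivalently
 * (illustrative only, not compiled):
 */
#if 0
	link_up = (((link_state >> (ha->pci_func ? 4 : 0)) & 0xF) == 1);
#endif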
3013250661Sdavidcs
3014250661Sdavidcsvoid
3015250661Sdavidcsql_hw_stop_rcv(qla_host_t *ha)
3016250661Sdavidcs{
3017250661Sdavidcs	int i, done, count = 100;
3018250661Sdavidcs
3019305487Sdavidcs	ha->flags.stop_rcv = 1;
3020305487Sdavidcs
3021284741Sdavidcs	while (count) {
3022250661Sdavidcs		done = 1;
3023250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
3024250661Sdavidcs			if (ha->hw.sds[i].rcv_active)
3025250661Sdavidcs				done = 0;
3026250661Sdavidcs		}
3027250661Sdavidcs		if (done)
3028250661Sdavidcs			break;
3029250661Sdavidcs		else
3030250661Sdavidcs			qla_mdelay(__func__, 10);
3031284741Sdavidcs		count--;
3032250661Sdavidcs	}
3033250661Sdavidcs	if (!count)
3034250661Sdavidcs		device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3035250661Sdavidcs
3036250661Sdavidcs	return;
3037250661Sdavidcs}
3038250661Sdavidcs
3039250661Sdavidcsint
3040250661Sdavidcsql_hw_check_health(qla_host_t *ha)
3041250661Sdavidcs{
3042250661Sdavidcs	uint32_t val;
3043250661Sdavidcs
3044250661Sdavidcs	ha->hw.health_count++;
3045250661Sdavidcs
3046250661Sdavidcs	if (ha->hw.health_count < 1000)
3047250661Sdavidcs		return 0;
3048250661Sdavidcs
3049250661Sdavidcs	ha->hw.health_count = 0;
3050250661Sdavidcs
3051250661Sdavidcs	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3052250661Sdavidcs
3053250661Sdavidcs	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3054250661Sdavidcs		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3055250661Sdavidcs		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3056250661Sdavidcs			__func__, val);
3057250661Sdavidcs		return -1;
3058250661Sdavidcs	}
3059250661Sdavidcs
3060250661Sdavidcs	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3061250661Sdavidcs
3062250661Sdavidcs	if ((val != ha->hw.hbeat_value) &&
3063289635Sdavidcs		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3064250661Sdavidcs		ha->hw.hbeat_value = val;
3065250661Sdavidcs		return 0;
3066250661Sdavidcs	}
3067250661Sdavidcs	device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3068250661Sdavidcs		__func__, val);
3069250661Sdavidcs
3070250661Sdavidcs	return -1;
3071250661Sdavidcs}
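
/*
 * Note: ql_hw_check_health() is meant to be polled frequently; it only
 * reads the hardware every 1000th call and returns -1 when the ASIC
 * temperature is critical or the firmware heartbeat register has stopped
 * advancing.  A hypothetical watchdog caller sketch (not compiled):
 */
#if 0
	if (ql_hw_check_health(ha) != 0) {
		ql_minidump(ha);	/* capture state for post-mortem */
		/* ... then schedule an adapter reset/reinit ... */
	}
#endif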
3072250661Sdavidcs
3073250661Sdavidcsstatic int
3074284741Sdavidcsqla_init_nic_func(qla_host_t *ha)
3075284741Sdavidcs{
3076284741Sdavidcs        device_t                dev;
3077284741Sdavidcs        q80_init_nic_func_t     *init_nic;
3078284741Sdavidcs        q80_init_nic_func_rsp_t *init_nic_rsp;
3079284741Sdavidcs        uint32_t                err;
3080284741Sdavidcs
3081284741Sdavidcs        dev = ha->pci_dev;
3082284741Sdavidcs
3083284741Sdavidcs        init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3084284741Sdavidcs        bzero(init_nic, sizeof(q80_init_nic_func_t));
3085284741Sdavidcs
3086284741Sdavidcs        init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3087284741Sdavidcs        init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3088284741Sdavidcs        init_nic->count_version |= Q8_MBX_CMD_VERSION;
3089284741Sdavidcs
3090284741Sdavidcs        init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3091284741Sdavidcs        init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3092284741Sdavidcs        init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3093284741Sdavidcs
3094284741Sdavidcs//qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3095284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3096284741Sdavidcs                (sizeof (q80_init_nic_func_t) >> 2),
3097284741Sdavidcs                ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3098284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3099284741Sdavidcs                return -1;
3100284741Sdavidcs        }
3101284741Sdavidcs
3102284741Sdavidcs        init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3103284741Sdavidcs// qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3104284741Sdavidcs
3105284741Sdavidcs        err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3106284741Sdavidcs
3107284741Sdavidcs        if (err) {
3108284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3109284741Sdavidcs        }
3110284741Sdavidcs
3111284741Sdavidcs        return 0;
3112284741Sdavidcs}
3113284741Sdavidcs
3114284741Sdavidcsstatic int
3115284741Sdavidcsqla_stop_nic_func(qla_host_t *ha)
3116284741Sdavidcs{
3117284741Sdavidcs        device_t                dev;
3118284741Sdavidcs        q80_stop_nic_func_t     *stop_nic;
3119284741Sdavidcs        q80_stop_nic_func_rsp_t *stop_nic_rsp;
3120284741Sdavidcs        uint32_t                err;
3121284741Sdavidcs
3122284741Sdavidcs        dev = ha->pci_dev;
3123284741Sdavidcs
3124284741Sdavidcs        stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3125284741Sdavidcs        bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3126284741Sdavidcs
3127284741Sdavidcs        stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3128284741Sdavidcs        stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3129284741Sdavidcs        stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3130284741Sdavidcs
3131284741Sdavidcs        stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3132284741Sdavidcs        stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3133284741Sdavidcs
3134284741Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3135284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3136284741Sdavidcs                (sizeof (q80_stop_nic_func_t) >> 2),
3137284741Sdavidcs                ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3138284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3139284741Sdavidcs                return -1;
3140284741Sdavidcs        }
3141284741Sdavidcs
3142284741Sdavidcs        stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3143284741Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3144284741Sdavidcs
3145284741Sdavidcs        err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3146284741Sdavidcs
3147284741Sdavidcs        if (err) {
3148284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3149284741Sdavidcs        }
3150284741Sdavidcs
3151284741Sdavidcs        return 0;
3152284741Sdavidcs}
3153284741Sdavidcs
3154284741Sdavidcsstatic int
3155284741Sdavidcsqla_query_fw_dcbx_caps(qla_host_t *ha)
3156284741Sdavidcs{
3157284741Sdavidcs        device_t                        dev;
3158284741Sdavidcs        q80_query_fw_dcbx_caps_t        *fw_dcbx;
3159284741Sdavidcs        q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3160284741Sdavidcs        uint32_t                        err;
3161284741Sdavidcs
3162284741Sdavidcs        dev = ha->pci_dev;
3163284741Sdavidcs
3164284741Sdavidcs        fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3165284741Sdavidcs        bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3166284741Sdavidcs
3167284741Sdavidcs        fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3168284741Sdavidcs        fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3169284741Sdavidcs        fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3170284741Sdavidcs
3171284741Sdavidcs        ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3172284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3173284741Sdavidcs                (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3174284741Sdavidcs                ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3175284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3176284741Sdavidcs                return -1;
3177284741Sdavidcs        }
3178284741Sdavidcs
3179284741Sdavidcs        fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3180284741Sdavidcs        ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3181284741Sdavidcs                sizeof (q80_query_fw_dcbx_caps_rsp_t));
3182284741Sdavidcs
3183284741Sdavidcs        err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3184284741Sdavidcs
3185284741Sdavidcs        if (err) {
3186284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3187284741Sdavidcs        }
3188284741Sdavidcs
3189284741Sdavidcs        return 0;
3190284741Sdavidcs}
3191284741Sdavidcs
3192284741Sdavidcsstatic int
3193284741Sdavidcsqla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3194284741Sdavidcs        uint32_t aen_mb3, uint32_t aen_mb4)
3195284741Sdavidcs{
3196284741Sdavidcs        device_t                dev;
3197284741Sdavidcs        q80_idc_ack_t           *idc_ack;
3198284741Sdavidcs        q80_idc_ack_rsp_t       *idc_ack_rsp;
3199284741Sdavidcs        uint32_t                err;
3200284741Sdavidcs        int                     count = 300;
3201284741Sdavidcs
3202284741Sdavidcs        dev = ha->pci_dev;
3203284741Sdavidcs
3204284741Sdavidcs        idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3205284741Sdavidcs        bzero(idc_ack, sizeof(q80_idc_ack_t));
3206284741Sdavidcs
3207284741Sdavidcs        idc_ack->opcode = Q8_MBX_IDC_ACK;
3208284741Sdavidcs        idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3209284741Sdavidcs        idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3210284741Sdavidcs
3211284741Sdavidcs        idc_ack->aen_mb1 = aen_mb1;
3212284741Sdavidcs        idc_ack->aen_mb2 = aen_mb2;
3213284741Sdavidcs        idc_ack->aen_mb3 = aen_mb3;
3214284741Sdavidcs        idc_ack->aen_mb4 = aen_mb4;
3215284741Sdavidcs
3216284741Sdavidcs        ha->hw.imd_compl = 0;
3217284741Sdavidcs
3218284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3219284741Sdavidcs                (sizeof (q80_idc_ack_t) >> 2),
3220284741Sdavidcs                ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3221284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3222284741Sdavidcs                return -1;
3223284741Sdavidcs        }
3224284741Sdavidcs
3225284741Sdavidcs        idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3226284741Sdavidcs
3227284741Sdavidcs        err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3228284741Sdavidcs
3229284741Sdavidcs        if (err) {
3230284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3231284741Sdavidcs                return(-1);
3232284741Sdavidcs        }
3233284741Sdavidcs
3234284741Sdavidcs        while (count && !ha->hw.imd_compl) {
3235284741Sdavidcs                qla_mdelay(__func__, 100);
3236284741Sdavidcs                count--;
3237284741Sdavidcs        }
3238284741Sdavidcs
3239284741Sdavidcs        if (!count)
3240284741Sdavidcs                return -1;
3241284741Sdavidcs        else
3242284741Sdavidcs                device_printf(dev, "%s: count %d\n", __func__, count);
3243284741Sdavidcs
3244284741Sdavidcs        return (0);
3245284741Sdavidcs}
3246284741Sdavidcs
3247284741Sdavidcsstatic int
3248284741Sdavidcsqla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3249284741Sdavidcs{
3250284741Sdavidcs        device_t                dev;
3251284741Sdavidcs        q80_set_port_cfg_t      *pcfg;
3252284741Sdavidcs        q80_set_port_cfg_rsp_t  *pfg_rsp;
3253284741Sdavidcs        uint32_t                err;
3254284741Sdavidcs        int                     count = 300;
3255284741Sdavidcs
3256284741Sdavidcs        dev = ha->pci_dev;
3257284741Sdavidcs
3258284741Sdavidcs        pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3259284741Sdavidcs        bzero(pcfg, sizeof(q80_set_port_cfg_t));
3260284741Sdavidcs
3261284741Sdavidcs        pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3262284741Sdavidcs        pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3263284741Sdavidcs        pcfg->count_version |= Q8_MBX_CMD_VERSION;
3264284741Sdavidcs
3265284741Sdavidcs        pcfg->cfg_bits = cfg_bits;
3266284741Sdavidcs
3267284741Sdavidcs        device_printf(dev, "%s: cfg_bits"
3268284741Sdavidcs                " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3269284741Sdavidcs                " [0x%x, 0x%x, 0x%x]\n", __func__,
3270284741Sdavidcs                ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3271284741Sdavidcs                ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3272284741Sdavidcs                ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3273284741Sdavidcs
3274284741Sdavidcs        ha->hw.imd_compl = 0;
3275284741Sdavidcs
3276284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3277284741Sdavidcs                (sizeof (q80_set_port_cfg_t) >> 2),
3278284741Sdavidcs                ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3279284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3280284741Sdavidcs                return -1;
3281284741Sdavidcs        }
3282284741Sdavidcs
3283284741Sdavidcs        pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3284284741Sdavidcs
3285284741Sdavidcs        err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3286284741Sdavidcs
3287284741Sdavidcs        if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3288284741Sdavidcs                while (count && !ha->hw.imd_compl) {
3289284741Sdavidcs                        qla_mdelay(__func__, 100);
3290284741Sdavidcs                        count--;
3291284741Sdavidcs                }
3292284741Sdavidcs                if (count) {
3293284741Sdavidcs                        device_printf(dev, "%s: count %d\n", __func__, count);
3294284741Sdavidcs
3295284741Sdavidcs                        err = 0;
3296284741Sdavidcs                }
3297284741Sdavidcs        }
3298284741Sdavidcs
3299284741Sdavidcs        if (err) {
3300284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3301284741Sdavidcs                return(-1);
3302284741Sdavidcs        }
3303284741Sdavidcs
3304284741Sdavidcs        return (0);
3305284741Sdavidcs}
3306284741Sdavidcs
3307284741Sdavidcs
3308284741Sdavidcsstatic int
3309250661Sdavidcsqla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3310250661Sdavidcs{
3311250661Sdavidcs	uint32_t			err;
3312250661Sdavidcs	device_t			dev = ha->pci_dev;
3313250661Sdavidcs	q80_config_md_templ_size_t	*md_size;
3314250661Sdavidcs	q80_config_md_templ_size_rsp_t	*md_size_rsp;
3315250661Sdavidcs
3316305487Sdavidcs#ifndef QL_LDFLASH_FW
3317284741Sdavidcs
3318305487Sdavidcs	ql_minidump_template_hdr_t *hdr;
3319305487Sdavidcs
3320305487Sdavidcs	hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3321305487Sdavidcs	*size = hdr->size_of_template;
3322284741Sdavidcs	return (0);
3323284741Sdavidcs
3324284741Sdavidcs#endif /* #ifndef QL_LDFLASH_FW */
3325284741Sdavidcs
3326250661Sdavidcs	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3327250661Sdavidcs	bzero(md_size, sizeof(q80_config_md_templ_size_t));
3328250661Sdavidcs
3329250661Sdavidcs	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3330250661Sdavidcs	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3331250661Sdavidcs	md_size->count_version |= Q8_MBX_CMD_VERSION;
3332250661Sdavidcs
3333250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3334250661Sdavidcs		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3335250661Sdavidcs		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3336250661Sdavidcs
3337250661Sdavidcs		device_printf(dev, "%s: failed\n", __func__);
3338250661Sdavidcs
3339250661Sdavidcs		return (-1);
3340250661Sdavidcs	}
3341250661Sdavidcs
3342250661Sdavidcs	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3343250661Sdavidcs
3344250661Sdavidcs	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3345250661Sdavidcs
3346250661Sdavidcs        if (err) {
3347250661Sdavidcs		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3348250661Sdavidcs		return(-1);
3349250661Sdavidcs        }
3350250661Sdavidcs
3351250661Sdavidcs	*size = md_size_rsp->templ_size;
3352250661Sdavidcs
3353250661Sdavidcs	return (0);
3354250661Sdavidcs}
3355250661Sdavidcs
3356250661Sdavidcsstatic int
3357284741Sdavidcsqla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3358284741Sdavidcs{
3359284741Sdavidcs        device_t                dev;
3360284741Sdavidcs        q80_get_port_cfg_t      *pcfg;
3361284741Sdavidcs        q80_get_port_cfg_rsp_t  *pcfg_rsp;
3362284741Sdavidcs        uint32_t                err;
3363284741Sdavidcs
3364284741Sdavidcs        dev = ha->pci_dev;
3365284741Sdavidcs
3366284741Sdavidcs        pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3367284741Sdavidcs        bzero(pcfg, sizeof(q80_get_port_cfg_t));
3368284741Sdavidcs
3369284741Sdavidcs        pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3370284741Sdavidcs        pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3371284741Sdavidcs        pcfg->count_version |= Q8_MBX_CMD_VERSION;
3372284741Sdavidcs
3373284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3374284741Sdavidcs                (sizeof (q80_get_port_cfg_t) >> 2),
3375284741Sdavidcs                ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3376284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3377284741Sdavidcs                return -1;
3378284741Sdavidcs        }
3379284741Sdavidcs
3380284741Sdavidcs        pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3381284741Sdavidcs
3382284741Sdavidcs        err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3383284741Sdavidcs
3384284741Sdavidcs        if (err) {
3385284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3386284741Sdavidcs                return(-1);
3387284741Sdavidcs        }
3388284741Sdavidcs
3389284741Sdavidcs        device_printf(dev, "%s: [cfg_bits, port type]"
3390284741Sdavidcs                " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3391284741Sdavidcs                " [0x%x, 0x%x, 0x%x]\n", __func__,
3392284741Sdavidcs                pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3393284741Sdavidcs                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3394284741Sdavidcs                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3395284741Sdavidcs                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3396284741Sdavidcs                );
3397284741Sdavidcs
3398284741Sdavidcs        *cfg_bits = pcfg_rsp->cfg_bits;
3399284741Sdavidcs
3400284741Sdavidcs        return (0);
3401284741Sdavidcs}
3402284741Sdavidcs
3403284741Sdavidcsint
3404284741Sdavidcsqla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3405284741Sdavidcs{
3406284741Sdavidcs        struct ether_vlan_header        *eh;
3407284741Sdavidcs        uint16_t                        etype;
3408284741Sdavidcs        struct ip                       *ip = NULL;
3409284741Sdavidcs        struct ip6_hdr                  *ip6 = NULL;
3410284741Sdavidcs        struct tcphdr                   *th = NULL;
3411284741Sdavidcs        uint32_t                        hdrlen;
3412284741Sdavidcs        uint32_t                        offset;
3413284741Sdavidcs        uint8_t                         buf[sizeof(struct ip6_hdr)];
3414284741Sdavidcs
3415284741Sdavidcs        eh = mtod(mp, struct ether_vlan_header *);
3416284741Sdavidcs
3417284741Sdavidcs        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3418284741Sdavidcs                hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3419284741Sdavidcs                etype = ntohs(eh->evl_proto);
3420284741Sdavidcs        } else {
3421284741Sdavidcs                hdrlen = ETHER_HDR_LEN;
3422284741Sdavidcs                etype = ntohs(eh->evl_encap_proto);
3423284741Sdavidcs        }
3424284741Sdavidcs
3425284741Sdavidcs	if (etype == ETHERTYPE_IP) {
3426284741Sdavidcs
3427284741Sdavidcs		offset = (hdrlen + sizeof (struct ip));
3428284741Sdavidcs
3429284741Sdavidcs		if (mp->m_len >= offset) {
3430284741Sdavidcs                        ip = (struct ip *)(mp->m_data + hdrlen);
3431284741Sdavidcs		} else {
3432284741Sdavidcs			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3433284741Sdavidcs                        ip = (struct ip *)buf;
3434284741Sdavidcs		}
3435284741Sdavidcs
3436284741Sdavidcs                if (ip->ip_p == IPPROTO_TCP) {
3437284741Sdavidcs
3438284741Sdavidcs			hdrlen += ip->ip_hl << 2;
3439284741Sdavidcs			offset = hdrlen + 4;
3440284741Sdavidcs
3441284741Sdavidcs			if (mp->m_len >= offset) {
3442305487Sdavidcs				th = (struct tcphdr *)(mp->m_data + hdrlen);
3443284741Sdavidcs			} else {
3444284741Sdavidcs                                m_copydata(mp, hdrlen, 4, buf);
3445284741Sdavidcs				th = (struct tcphdr *)buf;
3446284741Sdavidcs			}
3447284741Sdavidcs                }
3448284741Sdavidcs
3449284741Sdavidcs	} else if (etype == ETHERTYPE_IPV6) {
3450284741Sdavidcs
3451284741Sdavidcs		offset = (hdrlen + sizeof (struct ip6_hdr));
3452284741Sdavidcs
3453284741Sdavidcs		if (mp->m_len >= offset) {
3454284741Sdavidcs                        ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3455284741Sdavidcs		} else {
3456284741Sdavidcs                        m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3457284741Sdavidcs                        ip6 = (struct ip6_hdr *)buf;
3458284741Sdavidcs		}
3459284741Sdavidcs
3460284741Sdavidcs                if (ip6->ip6_nxt == IPPROTO_TCP) {
3461284741Sdavidcs
3462284741Sdavidcs			hdrlen += sizeof(struct ip6_hdr);
3463284741Sdavidcs			offset = hdrlen + 4;
3464284741Sdavidcs
3465284741Sdavidcs			if (mp->m_len >= offset) {
3466305487Sdavidcs				th = (struct tcphdr *)(mp->m_data + hdrlen);
3467284741Sdavidcs			} else {
3468284741Sdavidcs				m_copydata(mp, hdrlen, 4, buf);
3469284741Sdavidcs				th = (struct tcphdr *)buf;
3470284741Sdavidcs			}
3471284741Sdavidcs                }
3472284741Sdavidcs	}
3473284741Sdavidcs
3474284741Sdavidcs        if (th != NULL) {
3475284741Sdavidcs                if ((th->th_sport == htons(3260)) ||
3476284741Sdavidcs                        (th->th_dport == htons(3260)))
3477284741Sdavidcs                        return 0;
3478284741Sdavidcs        }
3479284741Sdavidcs        return (-1);
3480284741Sdavidcs}
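
/*
 * Note: qla_iscsi_pdu() returns 0 when the frame is a TCP segment whose
 * source or destination port is 3260 (iSCSI) and -1 otherwise.  With
 * QL_ENABLE_ISCSI_TLV defined, the upper half of the Tx rings is created
 * with traffic_class = 1 (see qla_init_xmt_cntxt_i() above), so a
 * transmit path could steer iSCSI frames to those rings, e.g. (sketch
 * only, not compiled):
 */
#if 0
	uint32_t txr_idx = 0;

	if (qla_iscsi_pdu(ha, mp) == 0)
		txr_idx = ha->hw.num_tx_rings >> 1;	/* iSCSI-class rings */
#endif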
3481284741Sdavidcs
3482284741Sdavidcsvoid
3483284741Sdavidcsqla_hw_async_event(qla_host_t *ha)
3484284741Sdavidcs{
3485284741Sdavidcs        switch (ha->hw.aen_mb0) {
3486284741Sdavidcs        case 0x8101:
3487284741Sdavidcs                (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3488284741Sdavidcs                        ha->hw.aen_mb3, ha->hw.aen_mb4);
3489284741Sdavidcs
3490284741Sdavidcs                break;
3491284741Sdavidcs
3492284741Sdavidcs        default:
3493284741Sdavidcs                break;
3494284741Sdavidcs        }
3495284741Sdavidcs
3496284741Sdavidcs        return;
3497284741Sdavidcs}
3498284741Sdavidcs
3499284741Sdavidcs#ifdef QL_LDFLASH_FW
3500284741Sdavidcsstatic int
3501305487Sdavidcsql_get_minidump_template(qla_host_t *ha)
3502250661Sdavidcs{
3503250661Sdavidcs	uint32_t			err;
3504250661Sdavidcs	device_t			dev = ha->pci_dev;
3505250661Sdavidcs	q80_config_md_templ_cmd_t	*md_templ;
3506250661Sdavidcs	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
3507250661Sdavidcs
3508250661Sdavidcs	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3509250661Sdavidcs	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3510250661Sdavidcs
3511250661Sdavidcs	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3512250661Sdavidcs	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3513250661Sdavidcs	md_templ->count_version |= Q8_MBX_CMD_VERSION;
3514250661Sdavidcs
3515250661Sdavidcs	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3516250661Sdavidcs	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3517250661Sdavidcs
3518250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3519250661Sdavidcs		(sizeof(q80_config_md_templ_cmd_t) >> 2),
3520250661Sdavidcs		 ha->hw.mbox,
3521250661Sdavidcs		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3522250661Sdavidcs
3523250661Sdavidcs		device_printf(dev, "%s: failed\n", __func__);
3524250661Sdavidcs
3525250661Sdavidcs		return (-1);
3526250661Sdavidcs	}
3527250661Sdavidcs
3528250661Sdavidcs	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3529250661Sdavidcs
3530250661Sdavidcs	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3531250661Sdavidcs
3532250661Sdavidcs	if (err) {
3533250661Sdavidcs		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3534250661Sdavidcs		return (-1);
3535250661Sdavidcs	}
3536250661Sdavidcs
3537250661Sdavidcs	return (0);
3538250661Sdavidcs
3539250661Sdavidcs}
3540284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */
3541250661Sdavidcs
3542305487Sdavidcs/*
3543305487Sdavidcs * Minidump related functionality
3544305487Sdavidcs */
3545305487Sdavidcs
3546305487Sdavidcsstatic int ql_parse_template(qla_host_t *ha);
3547305487Sdavidcs
3548305487Sdavidcsstatic uint32_t ql_rdcrb(qla_host_t *ha,
3549305487Sdavidcs			ql_minidump_entry_rdcrb_t *crb_entry,
3550305487Sdavidcs			uint32_t * data_buff);
3551305487Sdavidcs
3552305487Sdavidcsstatic uint32_t ql_pollrd(qla_host_t *ha,
3553305487Sdavidcs			ql_minidump_entry_pollrd_t *entry,
3554305487Sdavidcs			uint32_t * data_buff);
3555305487Sdavidcs
3556305487Sdavidcsstatic uint32_t ql_pollrd_modify_write(qla_host_t *ha,
3557305487Sdavidcs			ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
3558305487Sdavidcs			uint32_t *data_buff);
3559305487Sdavidcs
3560305487Sdavidcsstatic uint32_t ql_L2Cache(qla_host_t *ha,
3561305487Sdavidcs			ql_minidump_entry_cache_t *cacheEntry,
3562305487Sdavidcs			uint32_t * data_buff);
3563305487Sdavidcs
3564305487Sdavidcsstatic uint32_t ql_L1Cache(qla_host_t *ha,
3565305487Sdavidcs			ql_minidump_entry_cache_t *cacheEntry,
3566305487Sdavidcs			uint32_t *data_buff);
3567305487Sdavidcs
3568305487Sdavidcsstatic uint32_t ql_rdocm(qla_host_t *ha,
3569305487Sdavidcs			ql_minidump_entry_rdocm_t *ocmEntry,
3570305487Sdavidcs			uint32_t *data_buff);
3571305487Sdavidcs
3572305487Sdavidcsstatic uint32_t ql_rdmem(qla_host_t *ha,
3573305487Sdavidcs			ql_minidump_entry_rdmem_t *mem_entry,
3574305487Sdavidcs			uint32_t *data_buff);
3575305487Sdavidcs
3576305487Sdavidcsstatic uint32_t ql_rdrom(qla_host_t *ha,
3577305487Sdavidcs			ql_minidump_entry_rdrom_t *romEntry,
3578305487Sdavidcs			uint32_t *data_buff);
3579305487Sdavidcs
3580305487Sdavidcsstatic uint32_t ql_rdmux(qla_host_t *ha,
3581305487Sdavidcs			ql_minidump_entry_mux_t *muxEntry,
3582305487Sdavidcs			uint32_t *data_buff);
3583305487Sdavidcs
3584305487Sdavidcsstatic uint32_t ql_rdmux2(qla_host_t *ha,
3585305487Sdavidcs			ql_minidump_entry_mux2_t *muxEntry,
3586305487Sdavidcs			uint32_t *data_buff);
3587305487Sdavidcs
3588305487Sdavidcsstatic uint32_t ql_rdqueue(qla_host_t *ha,
3589305487Sdavidcs			ql_minidump_entry_queue_t *queueEntry,
3590305487Sdavidcs			uint32_t *data_buff);
3591305487Sdavidcs
3592305487Sdavidcsstatic uint32_t ql_cntrl(qla_host_t *ha,
3593305487Sdavidcs			ql_minidump_template_hdr_t *template_hdr,
3594305487Sdavidcs			ql_minidump_entry_cntrl_t *crbEntry);
3595305487Sdavidcs
3596305487Sdavidcs
3597305487Sdavidcsstatic uint32_t
3598305487Sdavidcsql_minidump_size(qla_host_t *ha)
3599305487Sdavidcs{
3600305487Sdavidcs	uint32_t i, k;
3601305487Sdavidcs	uint32_t size = 0;
3602305487Sdavidcs	ql_minidump_template_hdr_t *hdr;
3603305487Sdavidcs
3604305487Sdavidcs	hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
3605305487Sdavidcs
3606305487Sdavidcs	i = 0x2;
3607305487Sdavidcs
3608305487Sdavidcs	for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
3609305487Sdavidcs		if (i & ha->hw.mdump_capture_mask)
3610305487Sdavidcs			size += hdr->capture_size_array[k];
3611305487Sdavidcs		i = i << 1;
3612305487Sdavidcs	}
3613305487Sdavidcs	return (size);
3614305487Sdavidcs}
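
/*
 * Note: ql_minidump_size() walks the capture-mask bits starting at 0x02
 * and sums the matching per-level sizes from the template header, so a
 * capture mask of, say, 0x1F selects capture_size_array[1] through [4].
 */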
3615305487Sdavidcs
3616305487Sdavidcsstatic void
3617305487Sdavidcsql_free_minidump_buffer(qla_host_t *ha)
3618305487Sdavidcs{
3619305487Sdavidcs	if (ha->hw.mdump_buffer != NULL) {
3620305487Sdavidcs		free(ha->hw.mdump_buffer, M_QLA83XXBUF);
3621305487Sdavidcs		ha->hw.mdump_buffer = NULL;
3622305487Sdavidcs		ha->hw.mdump_buffer_size = 0;
3623305487Sdavidcs	}
3624305487Sdavidcs	return;
3625305487Sdavidcs}
3626305487Sdavidcs
3627250661Sdavidcsstatic int
3628305487Sdavidcsql_alloc_minidump_buffer(qla_host_t *ha)
3629250661Sdavidcs{
3630305487Sdavidcs	ha->hw.mdump_buffer_size = ql_minidump_size(ha);
3631305487Sdavidcs
3632305487Sdavidcs	if (!ha->hw.mdump_buffer_size)
3633305487Sdavidcs		return (-1);
3634305487Sdavidcs
3635305487Sdavidcs	ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
3636305487Sdavidcs					M_NOWAIT);
3637305487Sdavidcs
3638305487Sdavidcs	if (ha->hw.mdump_buffer == NULL)
3639305487Sdavidcs		return (-1);
3640305487Sdavidcs
3641305487Sdavidcs	return (0);
3642305487Sdavidcs}
3643305487Sdavidcs
3644305487Sdavidcsstatic void
3645305487Sdavidcsql_free_minidump_template_buffer(qla_host_t *ha)
3646305487Sdavidcs{
3647305487Sdavidcs	if (ha->hw.mdump_template != NULL) {
3648305487Sdavidcs		free(ha->hw.mdump_template, M_QLA83XXBUF);
3649305487Sdavidcs		ha->hw.mdump_template = NULL;
3650305487Sdavidcs		ha->hw.mdump_template_size = 0;
3651305487Sdavidcs	}
3652305487Sdavidcs	return;
3653305487Sdavidcs}
3654305487Sdavidcs
3655305487Sdavidcsstatic int
3656305487Sdavidcsql_alloc_minidump_template_buffer(qla_host_t *ha)
3657305487Sdavidcs{
3658305487Sdavidcs	ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
3659305487Sdavidcs
3660305487Sdavidcs	ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
3661305487Sdavidcs					M_QLA83XXBUF, M_NOWAIT);
3662305487Sdavidcs
3663305487Sdavidcs	if (ha->hw.mdump_template == NULL)
3664305487Sdavidcs		return (-1);
3665305487Sdavidcs
3666305487Sdavidcs	return (0);
3667305487Sdavidcs}
3668305487Sdavidcs
3669305487Sdavidcsstatic int
3670305487Sdavidcsql_alloc_minidump_buffers(qla_host_t *ha)
3671305487Sdavidcs{
3672305487Sdavidcs	int ret;
3673305487Sdavidcs
3674305487Sdavidcs	ret = ql_alloc_minidump_template_buffer(ha);
3675305487Sdavidcs
3676305487Sdavidcs	if (ret)
3677305487Sdavidcs		return (ret);
3678305487Sdavidcs
3679305487Sdavidcs	ret = ql_alloc_minidump_buffer(ha);
3680305487Sdavidcs
3681305487Sdavidcs	if (ret)
3682305487Sdavidcs		ql_free_minidump_template_buffer(ha);
3683305487Sdavidcs
3684305487Sdavidcs	return (ret);
3685305487Sdavidcs}
3686305487Sdavidcs
3687305487Sdavidcs
3688305487Sdavidcsstatic uint32_t
3689305487Sdavidcsql_validate_minidump_checksum(qla_host_t *ha)
3690305487Sdavidcs{
3691305487Sdavidcs        uint64_t sum = 0;
3692305487Sdavidcs	int count;
3693305487Sdavidcs	uint32_t *template_buff;
3694305487Sdavidcs
3695305487Sdavidcs	count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
3696305487Sdavidcs	template_buff = ha->hw.dma_buf.minidump.dma_b;
3697305487Sdavidcs
3698305487Sdavidcs	while (count-- > 0) {
3699305487Sdavidcs		sum += *template_buff++;
3700305487Sdavidcs	}
3701305487Sdavidcs
3702305487Sdavidcs	while (sum >> 32) {
3703305487Sdavidcs		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
3704305487Sdavidcs	}
3705305487Sdavidcs
3706305487Sdavidcs	return (~sum);
3707305487Sdavidcs}
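
/*
 * Note: ql_validate_minidump_checksum() adds every 32-bit word of the
 * template into a 64-bit accumulator, folds the carries back in, and
 * returns the one's complement; it returns 0 for a correctly checksummed
 * template, which is what ql_minidump_init() below checks for.
 */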
3708305487Sdavidcs
3709305487Sdavidcsint
3710305487Sdavidcsql_minidump_init(qla_host_t *ha)
3711305487Sdavidcs{
3712284741Sdavidcs	int		ret = 0;
3713250661Sdavidcs	uint32_t	template_size = 0;
3714250661Sdavidcs	device_t	dev = ha->pci_dev;
3715250661Sdavidcs
3716250661Sdavidcs	/*
3717250661Sdavidcs	 * Get Minidump Template Size
3718250661Sdavidcs 	 */
3719250661Sdavidcs	ret = qla_get_minidump_tmplt_size(ha, &template_size);
3720250661Sdavidcs
3721250661Sdavidcs	if (ret || (template_size == 0)) {
3722250661Sdavidcs		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3723250661Sdavidcs			template_size);
3724250661Sdavidcs		return (-1);
3725250661Sdavidcs	}
3726250661Sdavidcs
3727250661Sdavidcs	/*
3728250661Sdavidcs	 * Allocate Memory for Minidump Template
3729250661Sdavidcs	 */
3730250661Sdavidcs
3731250661Sdavidcs	ha->hw.dma_buf.minidump.alignment = 8;
3732250661Sdavidcs	ha->hw.dma_buf.minidump.size = template_size;
3733250661Sdavidcs
3734284741Sdavidcs#ifdef QL_LDFLASH_FW
3735250661Sdavidcs	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3736250661Sdavidcs
3737250661Sdavidcs		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3738250661Sdavidcs
3739250661Sdavidcs		return (-1);
3740250661Sdavidcs	}
3741250661Sdavidcs	ha->hw.dma_buf.flags.minidump = 1;
3742250661Sdavidcs
3743250661Sdavidcs	/*
3744250661Sdavidcs	 * Retrieve Minidump Template
3745250661Sdavidcs	 */
3746305487Sdavidcs	ret = ql_get_minidump_template(ha);
3747284741Sdavidcs#else
3748284741Sdavidcs	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
3749305487Sdavidcs
3750284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */
3751250661Sdavidcs
3752305487Sdavidcs	if (ret == 0) {
3753305487Sdavidcs
3754305487Sdavidcs		ret = ql_validate_minidump_checksum(ha);
3755305487Sdavidcs
3756305487Sdavidcs		if (ret == 0) {
3757305487Sdavidcs
3758305487Sdavidcs			ret = ql_alloc_minidump_buffers(ha);
3759305487Sdavidcs
3760305487Sdavidcs			if (ret == 0)
3761305487Sdavidcs				ha->hw.mdump_init = 1;
3762305487Sdavidcs			else
3763305487Sdavidcs				device_printf(dev,
3764305487Sdavidcs					"%s: ql_alloc_minidump_buffers"
3765305487Sdavidcs					" failed\n", __func__);
3766305487Sdavidcs		} else {
3767305487Sdavidcs			device_printf(dev, "%s: ql_validate_minidump_checksum"
3768305487Sdavidcs				" failed\n", __func__);
3769305487Sdavidcs		}
3770250661Sdavidcs	} else {
3771305487Sdavidcs		device_printf(dev, "%s: ql_get_minidump_template failed\n",
3772305487Sdavidcs			 __func__);
3773250661Sdavidcs	}
3774250661Sdavidcs
3775305487Sdavidcs	if (ret)
3776305487Sdavidcs		ql_minidump_free(ha);
3777305487Sdavidcs
3778250661Sdavidcs	return (ret);
3779250661Sdavidcs}
3780250661Sdavidcs
3781250661Sdavidcsstatic void
3782305487Sdavidcsql_minidump_free(qla_host_t *ha)
3783250661Sdavidcs{
3784250661Sdavidcs	ha->hw.mdump_init = 0;
3785250661Sdavidcs	if (ha->hw.dma_buf.flags.minidump) {
3786250661Sdavidcs		ha->hw.dma_buf.flags.minidump = 0;
3787250661Sdavidcs		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
3788250661Sdavidcs	}
3789305487Sdavidcs
3790305487Sdavidcs	ql_free_minidump_template_buffer(ha);
3791305487Sdavidcs	ql_free_minidump_buffer(ha);
3792305487Sdavidcs
3793250661Sdavidcs	return;
3794250661Sdavidcs}
3795250661Sdavidcs
3796250661Sdavidcsvoid
3797250661Sdavidcsql_minidump(qla_host_t *ha)
3798250661Sdavidcs{
3799250661Sdavidcs	if (!ha->hw.mdump_init)
3800250661Sdavidcs		return;
3801250661Sdavidcs
3802305487Sdavidcs	if (ha->hw.mdump_done)
3803250661Sdavidcs		return;
3804250661Sdavidcs
3805250661Sdavidcs	ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
3806250661Sdavidcs
3807305487Sdavidcs	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
3808305487Sdavidcs	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
3809305487Sdavidcs
3810305487Sdavidcs	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
3811305487Sdavidcs		ha->hw.mdump_template_size);
3812305487Sdavidcs
3813305487Sdavidcs	ql_parse_template(ha);
3814305487Sdavidcs
3815250661Sdavidcs	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
3816250661Sdavidcs
3817305487Sdavidcs	ha->hw.mdump_done = 1;
3818305487Sdavidcs
3819250661Sdavidcs	return;
3820250661Sdavidcs}
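
/*
 * Note: ql_minidump() captures at most once (mdump_done latches); it
 * quiesces the chip via ql_stop_sequence(), copies the firmware-provided
 * template into mdump_template, walks it with ql_parse_template() to fill
 * mdump_buffer, and then resumes the chip with ql_start_sequence().
 */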
3821305487Sdavidcs
3822305487Sdavidcs
3823305487Sdavidcs/*
3824305487Sdavidcs * helper routines
3825305487Sdavidcs */
3826305487Sdavidcsstatic void
3827305487Sdavidcsql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
3828305487Sdavidcs{
3829305487Sdavidcs	if (esize != entry->hdr.entry_capture_size) {
3830305487Sdavidcs		entry->hdr.entry_capture_size = esize;
3831305487Sdavidcs		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
3832305487Sdavidcs	}
3833305487Sdavidcs	return;
3834305487Sdavidcs}
3835305487Sdavidcs
3836305487Sdavidcs
3837305487Sdavidcsstatic int
3838305487Sdavidcsql_parse_template(qla_host_t *ha)
3839305487Sdavidcs{
3840305487Sdavidcs	uint32_t num_of_entries, buff_level, e_cnt, esize;
3841305487Sdavidcs	uint32_t end_cnt, rv = 0;
3842305487Sdavidcs	char *dump_buff, *dbuff;
3843305487Sdavidcs	int sane_start = 0, sane_end = 0;
3844305487Sdavidcs	ql_minidump_template_hdr_t *template_hdr;
3845305487Sdavidcs	ql_minidump_entry_t *entry;
3846305487Sdavidcs	uint32_t capture_mask;
3847305487Sdavidcs	uint32_t dump_size;
3848305487Sdavidcs
3849305487Sdavidcs	/* Setup parameters */
3850305487Sdavidcs	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
3851305487Sdavidcs
3852305487Sdavidcs	if (template_hdr->entry_type == TLHDR)
3853305487Sdavidcs		sane_start = 1;
3854305487Sdavidcs
3855305487Sdavidcs	dump_buff = (char *) ha->hw.mdump_buffer;
3856305487Sdavidcs
3857305487Sdavidcs	num_of_entries = template_hdr->num_of_entries;
3858305487Sdavidcs
3859305487Sdavidcs	entry = (ql_minidump_entry_t *) ((char *)template_hdr
3860305487Sdavidcs			+ template_hdr->first_entry_offset );
3861305487Sdavidcs
3862305487Sdavidcs	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
3863305487Sdavidcs		template_hdr->ocm_window_array[ha->pci_func];
3864305487Sdavidcs	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
3865305487Sdavidcs
3866305487Sdavidcs	capture_mask = ha->hw.mdump_capture_mask;
3867305487Sdavidcs	dump_size = ha->hw.mdump_buffer_size;
3868305487Sdavidcs
3869305487Sdavidcs	template_hdr->driver_capture_mask = capture_mask;
3870305487Sdavidcs
3871305487Sdavidcs	QL_DPRINT80(ha, (ha->pci_dev,
3872305487Sdavidcs		"%s: sane_start = %d num_of_entries = %d "
3873305487Sdavidcs		"capture_mask = 0x%x dump_size = %d \n",
3874305487Sdavidcs		__func__, sane_start, num_of_entries, capture_mask, dump_size));
3875305487Sdavidcs
3876305487Sdavidcs	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
3877305487Sdavidcs
3878305487Sdavidcs		/*
3879305487Sdavidcs		 * If the capture_mask of the entry does not match the capture mask,
3880305487Sdavidcs		 * skip the entry after marking the driver_flags indicator.
3881305487Sdavidcs		 */
3882305487Sdavidcs
3883305487Sdavidcs		if (!(entry->hdr.entry_capture_mask & capture_mask)) {
3884305487Sdavidcs
3885305487Sdavidcs			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3886305487Sdavidcs			entry = (ql_minidump_entry_t *) ((char *) entry
3887305487Sdavidcs					+ entry->hdr.entry_size);
3888305487Sdavidcs			continue;
3889305487Sdavidcs		}
3890305487Sdavidcs
3891305487Sdavidcs		/*
3892305487Sdavidcs		 * This is ONLY needed when the allocated capture buffer is
3893305487Sdavidcs		 * too small to hold all of the entries required for a given
3894305487Sdavidcs		 * capture mask.  Ideally the buffer contents would be
3895305487Sdavidcs		 * emptied to a file, if possible, before processing the
3896305487Sdavidcs		 * next entry.  Once the buffer is full, no further capture
3897305487Sdavidcs		 * takes place and all remaining non-control entries are
3898305487Sdavidcs		 * skipped.
3899305487Sdavidcs		 */
3900305487Sdavidcs		if (entry->hdr.entry_capture_size != 0) {
3901305487Sdavidcs			if ((buff_level + entry->hdr.entry_capture_size) >
3902305487Sdavidcs				dump_size) {
3903305487Sdavidcs				/* Entry does not fit in the remaining buffer; skip it */
3904305487Sdavidcs				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3905305487Sdavidcs				entry = (ql_minidump_entry_t *) ((char *) entry
3906305487Sdavidcs						+ entry->hdr.entry_size);
3907305487Sdavidcs				continue;
3908305487Sdavidcs			}
3909305487Sdavidcs		}
3910305487Sdavidcs
3911305487Sdavidcs		/*
3912305487Sdavidcs		 * Decode the entry type and process it accordingly
3913305487Sdavidcs		 */
3914305487Sdavidcs
3915305487Sdavidcs		switch (entry->hdr.entry_type) {
3916305487Sdavidcs		case RDNOP:
3917305487Sdavidcs			break;
3918305487Sdavidcs
3919305487Sdavidcs		case RDEND:
3920305487Sdavidcs			if (sane_end == 0) {
3921305487Sdavidcs				end_cnt = e_cnt;
3922305487Sdavidcs			}
3923305487Sdavidcs			sane_end++;
3924305487Sdavidcs			break;
3925305487Sdavidcs
3926305487Sdavidcs		case RDCRB:
3927305487Sdavidcs			dbuff = dump_buff + buff_level;
3928305487Sdavidcs			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
3929305487Sdavidcs			ql_entry_err_chk(entry, esize);
3930305487Sdavidcs			buff_level += esize;
3931305487Sdavidcs			break;
3932305487Sdavidcs
3933305487Sdavidcs		case POLLRD:
3934305487Sdavidcs			dbuff = dump_buff + buff_level;
3935305487Sdavidcs			esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
3936305487Sdavidcs			ql_entry_err_chk(entry, esize);
3937305487Sdavidcs			buff_level += esize;
3938305487Sdavidcs			break;
3939305487Sdavidcs
3940305487Sdavidcs		case POLLRDMWR:
3941305487Sdavidcs			dbuff = dump_buff + buff_level;
3942305487Sdavidcs			esize = ql_pollrd_modify_write(ha, (void *)entry,
3943305487Sdavidcs					(void *)dbuff);
3944305487Sdavidcs			ql_entry_err_chk(entry, esize);
3945305487Sdavidcs			buff_level += esize;
3946305487Sdavidcs			break;
3947305487Sdavidcs
3948305487Sdavidcs		case L2ITG:
3949305487Sdavidcs		case L2DTG:
3950305487Sdavidcs		case L2DAT:
3951305487Sdavidcs		case L2INS:
3952305487Sdavidcs			dbuff = dump_buff + buff_level;
3953305487Sdavidcs			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
3954305487Sdavidcs			if (esize == -1) {
3955305487Sdavidcs				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
3956305487Sdavidcs			} else {
3957305487Sdavidcs				ql_entry_err_chk(entry, esize);
3958305487Sdavidcs				buff_level += esize;
3959305487Sdavidcs			}
3960305487Sdavidcs			break;
3961305487Sdavidcs
3962305487Sdavidcs		case L1DAT:
3963305487Sdavidcs		case L1INS:
3964305487Sdavidcs			dbuff = dump_buff + buff_level;
3965305487Sdavidcs			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
3966305487Sdavidcs			ql_entry_err_chk(entry, esize);
3967305487Sdavidcs			buff_level += esize;
3968305487Sdavidcs			break;
3969305487Sdavidcs
3970305487Sdavidcs		case RDOCM:
3971305487Sdavidcs			dbuff = dump_buff + buff_level;
3972305487Sdavidcs			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
3973305487Sdavidcs			ql_entry_err_chk(entry, esize);
3974305487Sdavidcs			buff_level += esize;
3975305487Sdavidcs			break;
3976305487Sdavidcs
3977305487Sdavidcs		case RDMEM:
3978305487Sdavidcs			dbuff = dump_buff + buff_level;
3979305487Sdavidcs			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
3980305487Sdavidcs			ql_entry_err_chk(entry, esize);
3981305487Sdavidcs			buff_level += esize;
3982305487Sdavidcs			break;
3983305487Sdavidcs
3984305487Sdavidcs		case BOARD:
3985305487Sdavidcs		case RDROM:
3986305487Sdavidcs			dbuff = dump_buff + buff_level;
3987305487Sdavidcs			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
3988305487Sdavidcs			ql_entry_err_chk(entry, esize);
3989305487Sdavidcs			buff_level += esize;
3990305487Sdavidcs			break;
3991305487Sdavidcs
3992305487Sdavidcs		case RDMUX:
3993305487Sdavidcs			dbuff = dump_buff + buff_level;
3994305487Sdavidcs			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
3995305487Sdavidcs			ql_entry_err_chk(entry, esize);
3996305487Sdavidcs			buff_level += esize;
3997305487Sdavidcs			break;
3998305487Sdavidcs
3999305487Sdavidcs		case RDMUX2:
4000305487Sdavidcs			dbuff = dump_buff + buff_level;
4001305487Sdavidcs			esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4002305487Sdavidcs			ql_entry_err_chk(entry, esize);
4003305487Sdavidcs			buff_level += esize;
4004305487Sdavidcs			break;
4005305487Sdavidcs
4006305487Sdavidcs		case QUEUE:
4007305487Sdavidcs			dbuff = dump_buff + buff_level;
4008305487Sdavidcs			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4009305487Sdavidcs			ql_entry_err_chk(entry, esize);
4010305487Sdavidcs			buff_level += esize;
4011305487Sdavidcs			break;
4012305487Sdavidcs
4013305487Sdavidcs		case CNTRL:
4014305487Sdavidcs			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4015305487Sdavidcs				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4016305487Sdavidcs			}
4017305487Sdavidcs			break;
4018305487Sdavidcs		default:
4019305487Sdavidcs			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4020305487Sdavidcs			break;
4021305487Sdavidcs		}
4022305487Sdavidcs		/*  next entry in the template */
4023305487Sdavidcs		entry = (ql_minidump_entry_t *) ((char *) entry
4024305487Sdavidcs						+ entry->hdr.entry_size);
4025305487Sdavidcs	}
4026305487Sdavidcs
4027305487Sdavidcs	if (!sane_start || (sane_end > 1)) {
4028305487Sdavidcs		device_printf(ha->pci_dev,
4029305487Sdavidcs			"\n%s: Template configuration error. Check Template\n",
4030305487Sdavidcs			__func__);
4031305487Sdavidcs	}
4032305487Sdavidcs
4033305487Sdavidcs	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4034305487Sdavidcs		__func__, template_hdr->num_of_entries));
4035305487Sdavidcs
4036305487Sdavidcs	return 0;
4037305487Sdavidcs}
4038305487Sdavidcs
4039305487Sdavidcs/*
4040305487Sdavidcs * Read CRB operation.
4041305487Sdavidcs */
4042305487Sdavidcsstatic uint32_t
4043305487Sdavidcsql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4044305487Sdavidcs	uint32_t * data_buff)
4045305487Sdavidcs{
4046305487Sdavidcs	int loop_cnt;
4047305487Sdavidcs	int ret;
4048305487Sdavidcs	uint32_t op_count, addr, stride, value = 0;
4049305487Sdavidcs
4050305487Sdavidcs	addr = crb_entry->addr;
4051305487Sdavidcs	op_count = crb_entry->op_count;
4052305487Sdavidcs	stride = crb_entry->addr_stride;
4053305487Sdavidcs
4054305487Sdavidcs	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4055305487Sdavidcs
4056305487Sdavidcs		ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4057305487Sdavidcs
4058305487Sdavidcs		if (ret)
4059305487Sdavidcs			return (0);
4060305487Sdavidcs
4061305487Sdavidcs		*data_buff++ = addr;
4062305487Sdavidcs		*data_buff++ = value;
4063305487Sdavidcs		addr = addr + stride;
4064305487Sdavidcs	}
4065305487Sdavidcs
4066305487Sdavidcs	/*
4067305487Sdavidcs	 * Return the number of bytes written to the capture buffer.
4068305487Sdavidcs	 */
4069305487Sdavidcs	return (op_count * (2 * sizeof(uint32_t)));
4070305487Sdavidcs}
4071305487Sdavidcs
4072305487Sdavidcs/*
4073305487Sdavidcs * Handle L2 Cache.
4074305487Sdavidcs */
4075305487Sdavidcs
4076305487Sdavidcsstatic uint32_t
4077305487Sdavidcsql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4078305487Sdavidcs	uint32_t * data_buff)
4079305487Sdavidcs{
4080305487Sdavidcs	int i, k;
4081305487Sdavidcs	int loop_cnt;
4082305487Sdavidcs	int ret;
4083305487Sdavidcs
4084305487Sdavidcs	uint32_t read_value;
4085305487Sdavidcs	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4086305487Sdavidcs	uint32_t tag_value, read_cnt;
4087305487Sdavidcs	volatile uint8_t cntl_value_r;
4088305487Sdavidcs	long timeout;
4089305487Sdavidcs	uint32_t data;
4090305487Sdavidcs
4091305487Sdavidcs	loop_cnt = cacheEntry->op_count;
4092305487Sdavidcs
4093305487Sdavidcs	read_addr = cacheEntry->read_addr;
4094305487Sdavidcs	cntrl_addr = cacheEntry->control_addr;
4095305487Sdavidcs	cntl_value_w = (uint32_t) cacheEntry->write_value;
4096305487Sdavidcs
4097305487Sdavidcs	tag_reg_addr = cacheEntry->tag_reg_addr;
4098305487Sdavidcs
4099305487Sdavidcs	tag_value = cacheEntry->init_tag_value;
4100305487Sdavidcs	read_cnt = cacheEntry->read_addr_cnt;
4101305487Sdavidcs
4102305487Sdavidcs	for (i = 0; i < loop_cnt; i++) {
4103305487Sdavidcs
4104305487Sdavidcs		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4105305487Sdavidcs		if (ret)
4106305487Sdavidcs			return (0);
4107305487Sdavidcs
4108305487Sdavidcs		if (cacheEntry->write_value != 0) {
4109305487Sdavidcs
4110305487Sdavidcs			ret = ql_rdwr_indreg32(ha, cntrl_addr,
4111305487Sdavidcs					&cntl_value_w, 0);
4112305487Sdavidcs			if (ret)
4113305487Sdavidcs				return (0);
4114305487Sdavidcs		}
4115305487Sdavidcs
4116305487Sdavidcs		if (cacheEntry->poll_mask != 0) {
4117305487Sdavidcs
4118305487Sdavidcs			timeout = cacheEntry->poll_wait;
4119305487Sdavidcs
4120305487Sdavidcs			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4121305487Sdavidcs			if (ret)
4122305487Sdavidcs				return (0);
4123305487Sdavidcs
4124305487Sdavidcs			cntl_value_r = (uint8_t)data;
4125305487Sdavidcs
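			/*
			 * Poll until the mask bits clear, waiting up to
			 * poll_wait milliseconds.
			 */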
4126305487Sdavidcs			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4127305487Sdavidcs
4128305487Sdavidcs				if (timeout) {
4129305487Sdavidcs					qla_mdelay(__func__, 1);
4130305487Sdavidcs					timeout--;
4131305487Sdavidcs				} else
4132305487Sdavidcs					break;
4133305487Sdavidcs
4134305487Sdavidcs				ret = ql_rdwr_indreg32(ha, cntrl_addr,
4135305487Sdavidcs						&data, 1);
4136305487Sdavidcs				if (ret)
4137305487Sdavidcs					return (0);
4138305487Sdavidcs
4139305487Sdavidcs				cntl_value_r = (uint8_t)data;
4140305487Sdavidcs			}
4141305487Sdavidcs			if (!timeout) {
4142305487Sdavidcs				/*
4143305487Sdavidcs				 * Timed out waiting for the poll mask to
4144305487Sdavidcs				 * clear; core dump capture failed.  Skip
4145305487Sdavidcs				 * remaining entries, write the buffer out
4146305487Sdavidcs				 * to a file and use the driver specific
4147305487Sdavidcs				 * fields in the template header to report it.
4148305487Sdavidcs				 */
4149305487Sdavidcs				return (-1);
4150305487Sdavidcs			}
4151305487Sdavidcs		}
4152305487Sdavidcs
4153305487Sdavidcs		addr = read_addr;
4154305487Sdavidcs		for (k = 0; k < read_cnt; k++) {
4155305487Sdavidcs
4156305487Sdavidcs			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4157305487Sdavidcs			if (ret)
4158305487Sdavidcs				return (0);
4159305487Sdavidcs
4160305487Sdavidcs			*data_buff++ = read_value;
4161305487Sdavidcs			addr += cacheEntry->read_addr_stride;
4162305487Sdavidcs		}
4163305487Sdavidcs
4164305487Sdavidcs		tag_value += cacheEntry->tag_value_stride;
4165305487Sdavidcs	}
4166305487Sdavidcs
4167305487Sdavidcs	return (read_cnt * loop_cnt * sizeof(uint32_t));
4168305487Sdavidcs}
4169305487Sdavidcs
4170305487Sdavidcs/*
4171305487Sdavidcs * Handle L1 Cache.
4172305487Sdavidcs */
4173305487Sdavidcs
4174305487Sdavidcsstatic uint32_t
4175305487Sdavidcsql_L1Cache(qla_host_t *ha,
4176305487Sdavidcs	ql_minidump_entry_cache_t *cacheEntry,
4177305487Sdavidcs	uint32_t *data_buff)
4178305487Sdavidcs{
4179305487Sdavidcs	int ret;
4180305487Sdavidcs	int i, k;
4181305487Sdavidcs	int loop_cnt;
4182305487Sdavidcs
4183305487Sdavidcs	uint32_t read_value;
4184305487Sdavidcs	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4185305487Sdavidcs	uint32_t tag_value, read_cnt;
4186305487Sdavidcs	uint32_t cntl_value_w;
4187305487Sdavidcs
4188305487Sdavidcs	loop_cnt = cacheEntry->op_count;
4189305487Sdavidcs
4190305487Sdavidcs	read_addr = cacheEntry->read_addr;
4191305487Sdavidcs	cntrl_addr = cacheEntry->control_addr;
4192305487Sdavidcs	cntl_value_w = (uint32_t) cacheEntry->write_value;
4193305487Sdavidcs
4194305487Sdavidcs	tag_reg_addr = cacheEntry->tag_reg_addr;
4195305487Sdavidcs
4196305487Sdavidcs	tag_value = cacheEntry->init_tag_value;
4197305487Sdavidcs	read_cnt = cacheEntry->read_addr_cnt;
4198305487Sdavidcs
4199305487Sdavidcs	for (i = 0; i < loop_cnt; i++) {
4200305487Sdavidcs
4201305487Sdavidcs		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4202305487Sdavidcs		if (ret)
4203305487Sdavidcs			return (0);
4204305487Sdavidcs
4205305487Sdavidcs		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4206305487Sdavidcs		if (ret)
4207305487Sdavidcs			return (0);
4208305487Sdavidcs
4209305487Sdavidcs		addr = read_addr;
4210305487Sdavidcs		for (k = 0; k < read_cnt; k++) {
4211305487Sdavidcs
4212305487Sdavidcs			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4213305487Sdavidcs			if (ret)
4214305487Sdavidcs				return (0);
4215305487Sdavidcs
4216305487Sdavidcs			*data_buff++ = read_value;
4217305487Sdavidcs			addr += cacheEntry->read_addr_stride;
4218305487Sdavidcs		}
4219305487Sdavidcs
4220305487Sdavidcs		tag_value += cacheEntry->tag_value_stride;
4221305487Sdavidcs	}
4222305487Sdavidcs
4223305487Sdavidcs	return (read_cnt * loop_cnt * sizeof(uint32_t));
4224305487Sdavidcs}
4225305487Sdavidcs
4226305487Sdavidcs/*
4227305487Sdavidcs * Reading OCM memory
4228305487Sdavidcs */
4229305487Sdavidcs
4230305487Sdavidcsstatic uint32_t
4231305487Sdavidcsql_rdocm(qla_host_t *ha,
4232305487Sdavidcs	ql_minidump_entry_rdocm_t *ocmEntry,
4233305487Sdavidcs	uint32_t *data_buff)
4234305487Sdavidcs{
4235305487Sdavidcs	int i, loop_cnt;
4236305487Sdavidcs	volatile uint32_t addr;
4237305487Sdavidcs	volatile uint32_t value;
4238305487Sdavidcs
4239305487Sdavidcs	addr = ocmEntry->read_addr;
4240305487Sdavidcs	loop_cnt = ocmEntry->op_count;
4241305487Sdavidcs
4242305487Sdavidcs	for (i = 0; i < loop_cnt; i++) {
4243305487Sdavidcs		value = READ_REG32(ha, addr);
4244305487Sdavidcs		*data_buff++ = value;
4245305487Sdavidcs		addr += ocmEntry->read_addr_stride;
4246305487Sdavidcs	}
4247305487Sdavidcs	return (loop_cnt * sizeof(value));
4248305487Sdavidcs}
4249305487Sdavidcs
4250305487Sdavidcs/*
4251305487Sdavidcs * Read memory
4252305487Sdavidcs */
4253305487Sdavidcs
4254305487Sdavidcsstatic uint32_t
4255305487Sdavidcsql_rdmem(qla_host_t *ha,
4256305487Sdavidcs	ql_minidump_entry_rdmem_t *mem_entry,
4257305487Sdavidcs	uint32_t *data_buff)
4258305487Sdavidcs{
4259305487Sdavidcs	int ret;
4260305487Sdavidcs        int i, loop_cnt;
4261305487Sdavidcs        volatile uint32_t addr;
4262305487Sdavidcs	q80_offchip_mem_val_t val;
4263305487Sdavidcs
4264305487Sdavidcs        addr = mem_entry->read_addr;
4265305487Sdavidcs
4266305487Sdavidcs	/* read_data_size is in bytes; each iteration reads 16 bytes */
4267305487Sdavidcs        loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4268305487Sdavidcs
4269305487Sdavidcs        for (i = 0; i < loop_cnt; i++) {
4270305487Sdavidcs
4271305487Sdavidcs		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4272305487Sdavidcs		if (ret)
4273305487Sdavidcs			return (0);
4274305487Sdavidcs
4275305487Sdavidcs                *data_buff++ = val.data_lo;
4276305487Sdavidcs                *data_buff++ = val.data_hi;
4277305487Sdavidcs                *data_buff++ = val.data_ulo;
4278305487Sdavidcs                *data_buff++ = val.data_uhi;
4279305487Sdavidcs
4280305487Sdavidcs                addr += (sizeof(uint32_t) * 4);
4281305487Sdavidcs        }
4282305487Sdavidcs
4283305487Sdavidcs        return (loop_cnt * (sizeof(uint32_t) * 4));
4284305487Sdavidcs}
4285305487Sdavidcs
4286305487Sdavidcs/*
4287305487Sdavidcs * Read Rom
4288305487Sdavidcs */
4289305487Sdavidcs
4290305487Sdavidcsstatic uint32_t
4291305487Sdavidcsql_rdrom(qla_host_t *ha,
4292305487Sdavidcs	ql_minidump_entry_rdrom_t *romEntry,
4293305487Sdavidcs	uint32_t *data_buff)
4294305487Sdavidcs{
4295305487Sdavidcs	int ret;
4296305487Sdavidcs	int i, loop_cnt;
4297305487Sdavidcs	uint32_t addr;
4298305487Sdavidcs	uint32_t value;
4299305487Sdavidcs
4300305487Sdavidcs	addr = romEntry->read_addr;
4301305487Sdavidcs	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4302305487Sdavidcs	loop_cnt /= sizeof(value);
4303305487Sdavidcs
4304305487Sdavidcs	for (i = 0; i < loop_cnt; i++) {
4305305487Sdavidcs
4306305487Sdavidcs		ret = ql_rd_flash32(ha, addr, &value);
4307305487Sdavidcs		if (ret)
4308305487Sdavidcs			return (0);
4309305487Sdavidcs
4310305487Sdavidcs		*data_buff++ = value;
4311305487Sdavidcs		addr += sizeof(value);
4312305487Sdavidcs	}
4313305487Sdavidcs
4314305487Sdavidcs	return (loop_cnt * sizeof(value));
4315305487Sdavidcs}
4316305487Sdavidcs
4317305487Sdavidcs/*
4318305487Sdavidcs * Read MUX data
4319305487Sdavidcs */
4320305487Sdavidcs
4321305487Sdavidcsstatic uint32_t
4322305487Sdavidcsql_rdmux(qla_host_t *ha,
4323305487Sdavidcs	ql_minidump_entry_mux_t *muxEntry,
4324305487Sdavidcs	uint32_t *data_buff)
4325305487Sdavidcs{
4326305487Sdavidcs	int ret;
4327305487Sdavidcs	int loop_cnt;
4328305487Sdavidcs	uint32_t read_value, sel_value;
4329305487Sdavidcs	uint32_t read_addr, select_addr;
4330305487Sdavidcs
4331305487Sdavidcs	select_addr = muxEntry->select_addr;
4332305487Sdavidcs	sel_value = muxEntry->select_value;
4333305487Sdavidcs	read_addr = muxEntry->read_addr;
4334305487Sdavidcs
4335305487Sdavidcs	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4336305487Sdavidcs
4337305487Sdavidcs		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4338305487Sdavidcs		if (ret)
4339305487Sdavidcs			return (0);
4340305487Sdavidcs
4341305487Sdavidcs		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4342305487Sdavidcs		if (ret)
4343305487Sdavidcs			return (0);
4344305487Sdavidcs
4345305487Sdavidcs		*data_buff++ = sel_value;
4346305487Sdavidcs		*data_buff++ = read_value;
4347305487Sdavidcs
4348305487Sdavidcs		sel_value += muxEntry->select_value_stride;
4349305487Sdavidcs	}
4350305487Sdavidcs
4351305487Sdavidcs	return (loop_cnt * (2 * sizeof(uint32_t)));
4352305487Sdavidcs}
4353305487Sdavidcs
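/*
 * Read MUX data using a pair of select registers (MUX2 entries).
 */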
4354305487Sdavidcsstatic uint32_t
4355305487Sdavidcsql_rdmux2(qla_host_t *ha,
4356305487Sdavidcs	ql_minidump_entry_mux2_t *muxEntry,
4357305487Sdavidcs	uint32_t *data_buff)
4358305487Sdavidcs{
4359305487Sdavidcs	int ret;
4360305487Sdavidcs        int loop_cnt;
4361305487Sdavidcs
4362305487Sdavidcs        uint32_t select_addr_1, select_addr_2;
4363305487Sdavidcs        uint32_t select_value_1, select_value_2;
4364305487Sdavidcs        uint32_t select_value_count, select_value_mask;
4365305487Sdavidcs        uint32_t read_addr, read_value;
4366305487Sdavidcs
4367305487Sdavidcs        select_addr_1 = muxEntry->select_addr_1;
4368305487Sdavidcs        select_addr_2 = muxEntry->select_addr_2;
4369305487Sdavidcs        select_value_1 = muxEntry->select_value_1;
4370305487Sdavidcs        select_value_2 = muxEntry->select_value_2;
4371305487Sdavidcs        select_value_count = muxEntry->select_value_count;
4372305487Sdavidcs        select_value_mask  = muxEntry->select_value_mask;
4373305487Sdavidcs
4374305487Sdavidcs        read_addr = muxEntry->read_addr;
4375305487Sdavidcs
4376305487Sdavidcs        for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4377305487Sdavidcs		loop_cnt++) {
4378305487Sdavidcs
4379305487Sdavidcs                uint32_t temp_sel_val;
4380305487Sdavidcs
4381305487Sdavidcs		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4382305487Sdavidcs		if (ret)
4383305487Sdavidcs			return (0);
4384305487Sdavidcs
4385305487Sdavidcs                temp_sel_val = select_value_1 & select_value_mask;
4386305487Sdavidcs
4387305487Sdavidcs		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4388305487Sdavidcs		if (ret)
4389305487Sdavidcs			return (0);
4390305487Sdavidcs
4391305487Sdavidcs		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4392305487Sdavidcs		if (ret)
4393305487Sdavidcs			return (0);
4394305487Sdavidcs
4395305487Sdavidcs                *data_buff++ = temp_sel_val;
4396305487Sdavidcs                *data_buff++ = read_value;
4397305487Sdavidcs
4398305487Sdavidcs		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
4399305487Sdavidcs		if (ret)
4400305487Sdavidcs			return (0);
4401305487Sdavidcs
4402305487Sdavidcs                temp_sel_val = select_value_2 & select_value_mask;
4403305487Sdavidcs
4404305487Sdavidcs		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
4405305487Sdavidcs		if (ret)
4406305487Sdavidcs			return (0);
4407305487Sdavidcs
4408305487Sdavidcs		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4409305487Sdavidcs		if (ret)
4410305487Sdavidcs			return (0);
4411305487Sdavidcs
4412305487Sdavidcs                *data_buff++ = temp_sel_val;
4413305487Sdavidcs                *data_buff++ = read_value;
4414305487Sdavidcs
4415305487Sdavidcs                select_value_1 += muxEntry->select_value_stride;
4416305487Sdavidcs                select_value_2 += muxEntry->select_value_stride;
4417305487Sdavidcs        }
4418305487Sdavidcs
4419305487Sdavidcs        return (loop_cnt * (4 * sizeof(uint32_t)));
4420305487Sdavidcs}
4421305487Sdavidcs
4422305487Sdavidcs/*
4423305487Sdavidcs * Handling Queue State Reads.
4424305487Sdavidcs */
4425305487Sdavidcs
4426305487Sdavidcsstatic uint32_t
4427305487Sdavidcsql_rdqueue(qla_host_t *ha,
4428305487Sdavidcs	ql_minidump_entry_queue_t *queueEntry,
4429305487Sdavidcs	uint32_t *data_buff)
4430305487Sdavidcs{
4431305487Sdavidcs	int ret;
4432305487Sdavidcs	int loop_cnt, k;
4433305487Sdavidcs	uint32_t read_value;
4434305487Sdavidcs	uint32_t read_addr, read_stride, select_addr;
4435305487Sdavidcs	uint32_t queue_id, read_cnt;
4436305487Sdavidcs
4437305487Sdavidcs	read_cnt = queueEntry->read_addr_cnt;
4438305487Sdavidcs	read_stride = queueEntry->read_addr_stride;
4439305487Sdavidcs	select_addr = queueEntry->select_addr;
4440305487Sdavidcs
4441305487Sdavidcs	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
4442305487Sdavidcs		loop_cnt++) {
4443305487Sdavidcs
4444305487Sdavidcs		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
4445305487Sdavidcs		if (ret)
4446305487Sdavidcs			return (0);
4447305487Sdavidcs
4448305487Sdavidcs		read_addr = queueEntry->read_addr;
4449305487Sdavidcs
4450305487Sdavidcs		for (k = 0; k < read_cnt; k++) {
4451305487Sdavidcs
4452305487Sdavidcs			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4453305487Sdavidcs			if (ret)
4454305487Sdavidcs				return (0);
4455305487Sdavidcs
4456305487Sdavidcs			*data_buff++ = read_value;
4457305487Sdavidcs			read_addr += read_stride;
4458305487Sdavidcs		}
4459305487Sdavidcs
4460305487Sdavidcs		queue_id += queueEntry->queue_id_stride;
4461305487Sdavidcs	}
4462305487Sdavidcs
4463305487Sdavidcs	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
4464305487Sdavidcs}
4465305487Sdavidcs
4466305487Sdavidcs/*
4467305487Sdavidcs * Handling control entries.
4468305487Sdavidcs */
4469305487Sdavidcs
4470305487Sdavidcsstatic uint32_t
4471305487Sdavidcsql_cntrl(qla_host_t *ha,
4472305487Sdavidcs	ql_minidump_template_hdr_t *template_hdr,
4473305487Sdavidcs	ql_minidump_entry_cntrl_t *crbEntry)
4474305487Sdavidcs{
4475305487Sdavidcs	int ret;
4476305487Sdavidcs	int count;
4477305487Sdavidcs	uint32_t opcode, read_value, addr, entry_addr;
4478305487Sdavidcs	long timeout;
4479305487Sdavidcs
4480305487Sdavidcs	entry_addr = crbEntry->addr;
4481305487Sdavidcs
4482305487Sdavidcs	for (count = 0; count < crbEntry->op_count; count++) {
4483305487Sdavidcs		opcode = crbEntry->opcode;
4484305487Sdavidcs
4485305487Sdavidcs		if (opcode & QL_DBG_OPCODE_WR) {
4486305487Sdavidcs
4487305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, entry_addr,
4488305487Sdavidcs					&crbEntry->value_1, 0);
4489305487Sdavidcs			if (ret)
4490305487Sdavidcs				return (0);
4491305487Sdavidcs
4492305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_WR;
4493305487Sdavidcs		}
4494305487Sdavidcs
4495305487Sdavidcs		if (opcode & QL_DBG_OPCODE_RW) {
4496305487Sdavidcs
4497305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4498305487Sdavidcs			if (ret)
4499305487Sdavidcs				return (0);
4500305487Sdavidcs
4501305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4502305487Sdavidcs			if (ret)
4503305487Sdavidcs				return (0);
4504305487Sdavidcs
4505305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_RW;
4506305487Sdavidcs		}
4507305487Sdavidcs
4508305487Sdavidcs		if (opcode & QL_DBG_OPCODE_AND) {
4509305487Sdavidcs
4510305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4511305487Sdavidcs			if (ret)
4512305487Sdavidcs				return (0);
4513305487Sdavidcs
4514305487Sdavidcs			read_value &= crbEntry->value_2;
4515305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_AND;
4516305487Sdavidcs
4517305487Sdavidcs			if (opcode & QL_DBG_OPCODE_OR) {
4518305487Sdavidcs				read_value |= crbEntry->value_3;
4519305487Sdavidcs				opcode &= ~QL_DBG_OPCODE_OR;
4520305487Sdavidcs			}
4521305487Sdavidcs
4522305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4523305487Sdavidcs			if (ret)
4524305487Sdavidcs				return (0);
4525305487Sdavidcs		}
4526305487Sdavidcs
4527305487Sdavidcs		if (opcode & QL_DBG_OPCODE_OR) {
4528305487Sdavidcs
4529305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
4530305487Sdavidcs			if (ret)
4531305487Sdavidcs				return (0);
4532305487Sdavidcs
4533305487Sdavidcs			read_value |= crbEntry->value_3;
4534305487Sdavidcs
4535305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
4536305487Sdavidcs			if (ret)
4537305487Sdavidcs				return (0);
4538305487Sdavidcs
4539305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_OR;
4540305487Sdavidcs		}
4541305487Sdavidcs
4542305487Sdavidcs		if (opcode & QL_DBG_OPCODE_POLL) {
4543305487Sdavidcs
4544305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_POLL;
4545305487Sdavidcs			timeout = crbEntry->poll_timeout;
4546305487Sdavidcs			addr = entry_addr;
4547305487Sdavidcs
4548305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4549305487Sdavidcs			if (ret)
4550305487Sdavidcs				return (0);
4551305487Sdavidcs
4552305487Sdavidcs			while ((read_value & crbEntry->value_2)
4553305487Sdavidcs				!= crbEntry->value_1) {
4554305487Sdavidcs
4555305487Sdavidcs				if (timeout) {
4556305487Sdavidcs					qla_mdelay(__func__, 1);
4557305487Sdavidcs					timeout--;
4558305487Sdavidcs				} else
4559305487Sdavidcs					break;
4560305487Sdavidcs
4561305487Sdavidcs                		ret = ql_rdwr_indreg32(ha, addr,
4562305487Sdavidcs						&read_value, 1);
4563305487Sdavidcs				if (ret)
4564305487Sdavidcs					return (0);
4565305487Sdavidcs			}
4566305487Sdavidcs
4567305487Sdavidcs			if (!timeout) {
4568305487Sdavidcs				/*
4569305487Sdavidcs				 * Timed out waiting for the poll condition;
4570305487Sdavidcs				 * core dump capture failed.  Skip the
4571305487Sdavidcs				 * remaining entries, write the buffer out
4572305487Sdavidcs				 * to a file, and use the driver specific
4573305487Sdavidcs				 * fields in the template header to report
4574305487Sdavidcs				 * this error.
4575305487Sdavidcs				 */
4576305487Sdavidcs				return (-1);
4577305487Sdavidcs			}
4578305487Sdavidcs		}
4579305487Sdavidcs
4580305487Sdavidcs		if (opcode & QL_DBG_OPCODE_RDSTATE) {
4581305487Sdavidcs			/*
4582305487Sdavidcs			 * decide which address to use.
4583305487Sdavidcs			 */
4584305487Sdavidcs			if (crbEntry->state_index_a) {
4585305487Sdavidcs				addr = template_hdr->saved_state_array[
4586305487Sdavidcs						crbEntry->state_index_a];
4587305487Sdavidcs			} else {
4588305487Sdavidcs				addr = entry_addr;
4589305487Sdavidcs			}
4590305487Sdavidcs
4591305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4592305487Sdavidcs			if (ret)
4593305487Sdavidcs				return (0);
4594305487Sdavidcs
4595305487Sdavidcs			template_hdr->saved_state_array[crbEntry->state_index_v]
4596305487Sdavidcs					= read_value;
4597305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_RDSTATE;
4598305487Sdavidcs		}
4599305487Sdavidcs
4600305487Sdavidcs		if (opcode & QL_DBG_OPCODE_WRSTATE) {
4601305487Sdavidcs			/*
4602305487Sdavidcs			 * decide which value to use.
4603305487Sdavidcs			 */
4604305487Sdavidcs			if (crbEntry->state_index_v) {
4605305487Sdavidcs				read_value = template_hdr->saved_state_array[
4606305487Sdavidcs						crbEntry->state_index_v];
4607305487Sdavidcs			} else {
4608305487Sdavidcs				read_value = crbEntry->value_1;
4609305487Sdavidcs			}
4610305487Sdavidcs			/*
4611305487Sdavidcs			 * decide which address to use.
4612305487Sdavidcs			 */
4613305487Sdavidcs			if (crbEntry->state_index_a) {
4614305487Sdavidcs				addr = template_hdr->saved_state_array[
4615305487Sdavidcs						crbEntry->state_index_a];
4616305487Sdavidcs			} else {
4617305487Sdavidcs				addr = entry_addr;
4618305487Sdavidcs			}
4619305487Sdavidcs
4620305487Sdavidcs                	ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
4621305487Sdavidcs			if (ret)
4622305487Sdavidcs				return (0);
4623305487Sdavidcs
4624305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_WRSTATE;
4625305487Sdavidcs		}
4626305487Sdavidcs
4627305487Sdavidcs		if (opcode & QL_DBG_OPCODE_MDSTATE) {
4628305487Sdavidcs			/*  Read value from saved state using index */
4629305487Sdavidcs			read_value = template_hdr->saved_state_array[
4630305487Sdavidcs						crbEntry->state_index_v];
4631305487Sdavidcs
4632305487Sdavidcs			read_value <<= crbEntry->shl; /*Shift left operation */
4633305487Sdavidcs			read_value >>= crbEntry->shr; /*Shift right operation */
4634305487Sdavidcs
4635305487Sdavidcs			if (crbEntry->value_2) {
4636305487Sdavidcs				/* check if AND mask is provided */
4637305487Sdavidcs				read_value &= crbEntry->value_2;
4638305487Sdavidcs			}
4639305487Sdavidcs
4640305487Sdavidcs			read_value |= crbEntry->value_3; /* OR operation */
4641305487Sdavidcs			read_value += crbEntry->value_1; /* increment op */
4642305487Sdavidcs
4643305487Sdavidcs			/* Write value back to state area. */
4644305487Sdavidcs
4645305487Sdavidcs			template_hdr->saved_state_array[crbEntry->state_index_v]
4646305487Sdavidcs					= read_value;
4647305487Sdavidcs			opcode &= ~QL_DBG_OPCODE_MDSTATE;
4648305487Sdavidcs		}
4649305487Sdavidcs
4650305487Sdavidcs		entry_addr += crbEntry->addr_stride;
4651305487Sdavidcs	}
4652305487Sdavidcs
4653305487Sdavidcs	return (0);
4654305487Sdavidcs}
4655305487Sdavidcs
4656305487Sdavidcs/*
4657305487Sdavidcs * Handling rd poll entry.
4658305487Sdavidcs */
4659305487Sdavidcs
4660305487Sdavidcsstatic uint32_t
4661305487Sdavidcsql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
4662305487Sdavidcs	uint32_t *data_buff)
4663305487Sdavidcs{
4664305487Sdavidcs        int ret;
4665305487Sdavidcs        int loop_cnt;
4666305487Sdavidcs        uint32_t op_count, select_addr, select_value_stride, select_value;
4667305487Sdavidcs        uint32_t read_addr, poll, mask, data_size, data;
4668305487Sdavidcs        uint32_t wait_count = 0;
4669305487Sdavidcs
4670305487Sdavidcs        select_addr            = entry->select_addr;
4671305487Sdavidcs        read_addr              = entry->read_addr;
4672305487Sdavidcs        select_value           = entry->select_value;
4673305487Sdavidcs        select_value_stride    = entry->select_value_stride;
4674305487Sdavidcs        op_count               = entry->op_count;
4675305487Sdavidcs        poll                   = entry->poll;
4676305487Sdavidcs        mask                   = entry->mask;
4677305487Sdavidcs        data_size              = entry->data_size;
4678305487Sdavidcs
4679305487Sdavidcs        for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4680305487Sdavidcs
4681305487Sdavidcs                ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
4682305487Sdavidcs		if (ret)
4683305487Sdavidcs			return (0);
4684305487Sdavidcs
4685305487Sdavidcs                wait_count = 0;
4686305487Sdavidcs
4687305487Sdavidcs                while (wait_count < poll) {
4688305487Sdavidcs
4689305487Sdavidcs                        uint32_t temp;
4690305487Sdavidcs
4691305487Sdavidcs			ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
4692305487Sdavidcs			if (ret)
4693305487Sdavidcs				return (0);
4694305487Sdavidcs
4695305487Sdavidcs                        if ( (temp & mask) != 0 ) {
4696305487Sdavidcs                                break;
4697305487Sdavidcs                        }
4698305487Sdavidcs                        wait_count++;
4699305487Sdavidcs                }
4700305487Sdavidcs
4701305487Sdavidcs                if (wait_count == poll) {
4702305487Sdavidcs                        device_printf(ha->pci_dev,
4703305487Sdavidcs				"%s: Error in processing entry\n", __func__);
4704305487Sdavidcs                        device_printf(ha->pci_dev,
4705305487Sdavidcs				"%s: wait_count <0x%x> poll <0x%x>\n",
4706305487Sdavidcs				__func__, wait_count, poll);
4707305487Sdavidcs                        return 0;
4708305487Sdavidcs                }
4709305487Sdavidcs
4710305487Sdavidcs		ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
4711305487Sdavidcs		if (ret)
4712305487Sdavidcs			return (0);
4713305487Sdavidcs
4714305487Sdavidcs                *data_buff++ = select_value;
4715305487Sdavidcs                *data_buff++ = data;
4716305487Sdavidcs                select_value = select_value + select_value_stride;
4717305487Sdavidcs        }
4718305487Sdavidcs
4719305487Sdavidcs        /*
4720305487Sdavidcs         * Return the number of bytes written to the capture buffer.
4721305487Sdavidcs         */
4722305487Sdavidcs        return (loop_cnt * (2 * sizeof(uint32_t)));
4723305487Sdavidcs}
4724305487Sdavidcs
4725305487Sdavidcs
4726305487Sdavidcs/*
4727305487Sdavidcs * Handling rd modify write poll entry.
4728305487Sdavidcs */
4729305487Sdavidcs
4730305487Sdavidcsstatic uint32_t
4731305487Sdavidcsql_pollrd_modify_write(qla_host_t *ha,
4732305487Sdavidcs	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4733305487Sdavidcs	uint32_t *data_buff)
4734305487Sdavidcs{
4735305487Sdavidcs	int ret;
4736305487Sdavidcs        uint32_t addr_1, addr_2, value_1, value_2, data;
4737305487Sdavidcs        uint32_t poll, mask, data_size, modify_mask;
4738305487Sdavidcs        uint32_t wait_count = 0;
4739305487Sdavidcs
4740305487Sdavidcs        addr_1		= entry->addr_1;
4741305487Sdavidcs        addr_2		= entry->addr_2;
4742305487Sdavidcs        value_1		= entry->value_1;
4743305487Sdavidcs        value_2		= entry->value_2;
4744305487Sdavidcs
4745305487Sdavidcs        poll		= entry->poll;
4746305487Sdavidcs        mask		= entry->mask;
4747305487Sdavidcs        modify_mask	= entry->modify_mask;
4748305487Sdavidcs        data_size	= entry->data_size;
4749305487Sdavidcs
4750305487Sdavidcs
4751305487Sdavidcs	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
4752305487Sdavidcs	if (ret)
4753305487Sdavidcs		return (0);
4754305487Sdavidcs
4755305487Sdavidcs        wait_count = 0;
4756305487Sdavidcs        while (wait_count < poll) {
4757305487Sdavidcs
4758305487Sdavidcs		uint32_t temp;
4759305487Sdavidcs
4760305487Sdavidcs		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
4761305487Sdavidcs		if (ret)
4762305487Sdavidcs			return (0);
4763305487Sdavidcs
4764305487Sdavidcs                if ( (temp & mask) != 0 ) {
4765305487Sdavidcs                        break;
4766305487Sdavidcs                }
4767305487Sdavidcs                wait_count++;
4768305487Sdavidcs        }
4769305487Sdavidcs
4770305487Sdavidcs        if (wait_count == poll) {
4771305487Sdavidcs                device_printf(ha->pci_dev, "%s Error in processing entry\n",
4772305487Sdavidcs			__func__);
4773305487Sdavidcs        } else {
4774305487Sdavidcs
4775305487Sdavidcs		ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
4776305487Sdavidcs		if (ret)
4777305487Sdavidcs			return (0);
4778305487Sdavidcs
4779305487Sdavidcs                data = (data & modify_mask);
4780305487Sdavidcs
4781305487Sdavidcs		ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
4782305487Sdavidcs		if (ret)
4783305487Sdavidcs			return (0);
4784305487Sdavidcs
4785305487Sdavidcs		ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
4786305487Sdavidcs		if (ret)
4787305487Sdavidcs			return (0);
4788305487Sdavidcs
4789305487Sdavidcs                /* Poll again */
4790305487Sdavidcs                wait_count = 0;
4791305487Sdavidcs                while (wait_count < poll) {
4792305487Sdavidcs
4793305487Sdavidcs                        uint32_t temp;
4794305487Sdavidcs
4795305487Sdavidcs			ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
4796305487Sdavidcs			if (ret)
4797305487Sdavidcs				return (0);
4798305487Sdavidcs
4799305487Sdavidcs                        if ( (temp & mask) != 0 ) {
4800305487Sdavidcs                                break;
4801305487Sdavidcs                        }
4802305487Sdavidcs                        wait_count++;
4803305487Sdavidcs                }
4804305487Sdavidcs                *data_buff++ = addr_2;
4805305487Sdavidcs                *data_buff++ = data;
4806305487Sdavidcs        }
4807305487Sdavidcs
4808305487Sdavidcs        /*
4809305487Sdavidcs         * Return the number of bytes written to the capture buffer.
4810305487Sdavidcs         */
4811305487Sdavidcs        return (2 * sizeof(uint32_t));
4812305487Sdavidcs}
4813305487Sdavidcs
4814305487Sdavidcs
4815