ql_hw.c revision 298955
1250661Sdavidcs/*
2284741Sdavidcs * Copyright (c) 2013-2016 Qlogic Corporation
3250661Sdavidcs * All rights reserved.
4250661Sdavidcs *
5250661Sdavidcs *  Redistribution and use in source and binary forms, with or without
6250661Sdavidcs *  modification, are permitted provided that the following conditions
7250661Sdavidcs *  are met:
8250661Sdavidcs *
9250661Sdavidcs *  1. Redistributions of source code must retain the above copyright
10250661Sdavidcs *     notice, this list of conditions and the following disclaimer.
11250661Sdavidcs *  2. Redistributions in binary form must reproduce the above copyright
12250661Sdavidcs *     notice, this list of conditions and the following disclaimer in the
13250661Sdavidcs *     documentation and/or other materials provided with the distribution.
14250661Sdavidcs *
15250661Sdavidcs *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16250661Sdavidcs *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17250661Sdavidcs *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18250661Sdavidcs *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19250661Sdavidcs *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20250661Sdavidcs *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21250661Sdavidcs *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22250661Sdavidcs *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23250661Sdavidcs *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24250661Sdavidcs *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25250661Sdavidcs *  POSSIBILITY OF SUCH DAMAGE.
26250661Sdavidcs */
27250661Sdavidcs
28250661Sdavidcs/*
29250661Sdavidcs * File: ql_hw.c
30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31298955Spfg * Content: Contains hardware-dependent functions
32250661Sdavidcs */
33250661Sdavidcs
34250661Sdavidcs#include <sys/cdefs.h>
35250661Sdavidcs__FBSDID("$FreeBSD: head/sys/dev/qlxgbe/ql_hw.c 298955 2016-05-03 03:41:25Z pfg $");
36250661Sdavidcs
37250661Sdavidcs#include "ql_os.h"
38250661Sdavidcs#include "ql_hw.h"
39250661Sdavidcs#include "ql_def.h"
40250661Sdavidcs#include "ql_inline.h"
41250661Sdavidcs#include "ql_ver.h"
42250661Sdavidcs#include "ql_glbl.h"
43250661Sdavidcs#include "ql_dbg.h"
44250661Sdavidcs
45250661Sdavidcs/*
46250661Sdavidcs * Static Functions
47250661Sdavidcs */
48250661Sdavidcs
49250661Sdavidcsstatic void qla_del_rcv_cntxt(qla_host_t *ha);
50250661Sdavidcsstatic int qla_init_rcv_cntxt(qla_host_t *ha);
51250661Sdavidcsstatic void qla_del_xmt_cntxt(qla_host_t *ha);
52250661Sdavidcsstatic int qla_init_xmt_cntxt(qla_host_t *ha);
53250661Sdavidcsstatic void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
54250661Sdavidcsstatic int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
55250661Sdavidcs	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
56284741Sdavidcsstatic int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
57284741Sdavidcs	uint32_t num_intrs, uint32_t create);
58250661Sdavidcsstatic int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
59250661Sdavidcsstatic int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
60284741Sdavidcs	int tenable, int rcv);
61250661Sdavidcsstatic int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
62250661Sdavidcsstatic int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
63250661Sdavidcs
64250661Sdavidcsstatic int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
65250661Sdavidcs		uint8_t *hdr);
66250661Sdavidcsstatic int qla_hw_add_all_mcast(qla_host_t *ha);
67250661Sdavidcsstatic int qla_hw_del_all_mcast(qla_host_t *ha);
68284741Sdavidcsstatic int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);
69250661Sdavidcs
70284741Sdavidcsstatic int qla_init_nic_func(qla_host_t *ha);
71284741Sdavidcsstatic int qla_stop_nic_func(qla_host_t *ha);
72284741Sdavidcsstatic int qla_query_fw_dcbx_caps(qla_host_t *ha);
73284741Sdavidcsstatic int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
74284741Sdavidcsstatic int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
75284741Sdavidcsstatic void qla_get_quick_stats(qla_host_t *ha);
76284741Sdavidcs
77250661Sdavidcsstatic int qla_minidump_init(qla_host_t *ha);
78250661Sdavidcsstatic void qla_minidump_free(qla_host_t *ha);
79250661Sdavidcs
80250661Sdavidcs
81250661Sdavidcsstatic int
82250661Sdavidcsqla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
83250661Sdavidcs{
84250661Sdavidcs        int err = 0, ret;
85250661Sdavidcs        qla_host_t *ha;
86250661Sdavidcs	uint32_t i;
87250661Sdavidcs
88250661Sdavidcs        err = sysctl_handle_int(oidp, &ret, 0, req);
89250661Sdavidcs
90250661Sdavidcs        if (err || !req->newptr)
91250661Sdavidcs                return (err);
92250661Sdavidcs
93250661Sdavidcs        if (ret == 1) {
94250661Sdavidcs
95250661Sdavidcs                ha = (qla_host_t *)arg1;
96250661Sdavidcs
97250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++)
98250661Sdavidcs			device_printf(ha->pci_dev,
99250661Sdavidcs				"%s: sds_ring[%d] = %p\n", __func__,i,
100250661Sdavidcs				(void *)ha->hw.sds[i].intr_count);
101250661Sdavidcs
102250661Sdavidcs		for (i = 0; i < ha->hw.num_tx_rings; i++)
103250661Sdavidcs			device_printf(ha->pci_dev,
104250661Sdavidcs				"%s: tx[%d] = %p\n", __func__,i,
105250661Sdavidcs				(void *)ha->tx_ring[i].count);
106250661Sdavidcs
107250661Sdavidcs		for (i = 0; i < ha->hw.num_rds_rings; i++)
108250661Sdavidcs			device_printf(ha->pci_dev,
109250661Sdavidcs				"%s: rds_ring[%d] = %p\n", __func__,i,
110250661Sdavidcs				(void *)ha->hw.rds[i].count);
111250661Sdavidcs
112250661Sdavidcs		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
113250661Sdavidcs			(void *)ha->lro_pkt_count);
114250661Sdavidcs
115250661Sdavidcs		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
116250661Sdavidcs			(void *)ha->lro_bytes);
117284741Sdavidcs
118284741Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV
119284741Sdavidcs		device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__,
120284741Sdavidcs			(void *)ha->hw.iscsi_pkt_count);
121284741Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
122284741Sdavidcs
123250661Sdavidcs	}
124250661Sdavidcs	return (err);
125250661Sdavidcs}
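
/*
 * Writing 1 to the "drvr_stats" sysctl node (created in ql_hw_add_sysctls()
 * below) dumps the per-ring counters above to the console.  Illustrative
 * shell usage, assuming the first instance attaches as ql0 (the exact node
 * name depends on the system):
 *
 *     sysctl dev.ql.0.drvr_stats=1
 */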
126250661Sdavidcs
127284741Sdavidcsstatic int
128284741Sdavidcsqla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS)
129284741Sdavidcs{
130284741Sdavidcs	int err, ret = 0;
131284741Sdavidcs	qla_host_t *ha;
132284741Sdavidcs
133284741Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
134284741Sdavidcs
135284741Sdavidcs	if (err || !req->newptr)
136284741Sdavidcs		return (err);
137284741Sdavidcs
138284741Sdavidcs	if (ret == 1) {
139284741Sdavidcs		ha = (qla_host_t *)arg1;
140284741Sdavidcs		qla_get_quick_stats(ha);
141284741Sdavidcs	}
142284741Sdavidcs	return (err);
143284741Sdavidcs}
144284741Sdavidcs
145250661Sdavidcs#ifdef QL_DBG
146250661Sdavidcs
147250661Sdavidcsstatic void
148250661Sdavidcsqla_stop_pegs(qla_host_t *ha)
149250661Sdavidcs{
150250661Sdavidcs        uint32_t val = 1;
151250661Sdavidcs
152250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
153250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
154250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
155250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
156250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
157250661Sdavidcs        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
158250661Sdavidcs}
159250661Sdavidcs
160250661Sdavidcsstatic int
161250661Sdavidcsqla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
162250661Sdavidcs{
163250661Sdavidcs	int err, ret = 0;
164250661Sdavidcs	qla_host_t *ha;
165250661Sdavidcs
166250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
167250661Sdavidcs
168250661Sdavidcs
169250661Sdavidcs	if (err || !req->newptr)
170250661Sdavidcs		return (err);
171250661Sdavidcs
172250661Sdavidcs	if (ret == 1) {
173250661Sdavidcs		ha = (qla_host_t *)arg1;
174250661Sdavidcs		(void)QLA_LOCK(ha, __func__, 0);
175250661Sdavidcs		qla_stop_pegs(ha);
176250661Sdavidcs		QLA_UNLOCK(ha, __func__);
177250661Sdavidcs	}
178250661Sdavidcs
179250661Sdavidcs	return err;
180250661Sdavidcs}
181250661Sdavidcs#endif /* #ifdef QL_DBG */
182250661Sdavidcs
183284741Sdavidcsstatic int
184284741Sdavidcsqla_validate_set_port_cfg_bit(uint32_t bits)
185284741Sdavidcs{
186284741Sdavidcs        if ((bits & 0xF) > 1)
187284741Sdavidcs                return (-1);
188284741Sdavidcs
189284741Sdavidcs        if (((bits >> 4) & 0xF) > 2)
190284741Sdavidcs                return (-1);
191284741Sdavidcs
192284741Sdavidcs        if (((bits >> 8) & 0xF) > 2)
193284741Sdavidcs                return (-1);
194284741Sdavidcs
195284741Sdavidcs        return (0);
196284741Sdavidcs}
197284741Sdavidcs
198284741Sdavidcsstatic int
199284741Sdavidcsqla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
200284741Sdavidcs{
201284741Sdavidcs        int err, ret = 0;
202284741Sdavidcs        qla_host_t *ha;
203284741Sdavidcs        uint32_t cfg_bits;
204284741Sdavidcs
205284741Sdavidcs        err = sysctl_handle_int(oidp, &ret, 0, req);
206284741Sdavidcs
207284741Sdavidcs        if (err || !req->newptr)
208284741Sdavidcs                return (err);
209284741Sdavidcs
210284741Sdavidcs        if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) {
211284741Sdavidcs
212284741Sdavidcs                ha = (qla_host_t *)arg1;
213284741Sdavidcs
214284741Sdavidcs                err = qla_get_port_config(ha, &cfg_bits);
215284741Sdavidcs
216284741Sdavidcs                if (err)
217284741Sdavidcs                        goto qla_sysctl_set_port_cfg_exit;
218284741Sdavidcs
219284741Sdavidcs                if (ret & 0x1) {
220284741Sdavidcs                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
221284741Sdavidcs                } else {
222284741Sdavidcs                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
223284741Sdavidcs                }
224284741Sdavidcs
225284741Sdavidcs                ret = ret >> 4;
226284741Sdavidcs                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;
227284741Sdavidcs
228284741Sdavidcs                if ((ret & 0xF) == 0) {
229284741Sdavidcs                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
230284741Sdavidcs                } else if ((ret & 0xF) == 1){
231284741Sdavidcs                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
232284741Sdavidcs                } else {
233284741Sdavidcs                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
234284741Sdavidcs                }
235284741Sdavidcs
236284741Sdavidcs                ret = ret >> 4;
237284741Sdavidcs                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;
238284741Sdavidcs
239284741Sdavidcs                if (ret == 0) {
240284741Sdavidcs                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
241284741Sdavidcs                } else if (ret == 1){
242284741Sdavidcs                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
243284741Sdavidcs                } else {
244284741Sdavidcs                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
245284741Sdavidcs                }
246284741Sdavidcs
247284741Sdavidcs                err = qla_set_port_config(ha, cfg_bits);
248284741Sdavidcs        } else {
249284741Sdavidcs                ha = (qla_host_t *)arg1;
250284741Sdavidcs
251284741Sdavidcs                err = qla_get_port_config(ha, &cfg_bits);
252284741Sdavidcs        }
253284741Sdavidcs
254284741Sdavidcsqla_sysctl_set_port_cfg_exit:
255284741Sdavidcs        return err;
256284741Sdavidcs}
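
/*
 * Layout of the value accepted by the "port_cfg" sysctl handler above
 * (a worked example, illustrative only):
 *
 *     bits  0-3 : DCBX        (0 = disable, 1 = enable)
 *     bits  4-7 : pause mode  (0 = none, 1 = std, 2 = ppm)
 *     bits  8-11: std pause direction (0 = xmt+rcv, 1 = xmt, 2 = rcv)
 *
 *     uint32_t v = 0x211;
 *     ((v >> 0) & 0xF) == 1;    // DCBX enabled
 *     ((v >> 4) & 0xF) == 1;    // standard pause
 *     ((v >> 8) & 0xF) == 2;    // pause honored on receive only
 *
 * A value that fails qla_validate_set_port_cfg_bit() falls through to a
 * plain qla_get_port_config() instead of setting anything.
 */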
257284741Sdavidcs
258250661Sdavidcs/*
259250661Sdavidcs * Name: ql_hw_add_sysctls
260250661Sdavidcs * Function: Add P3Plus specific sysctls
261250661Sdavidcs */
262250661Sdavidcsvoid
263250661Sdavidcsql_hw_add_sysctls(qla_host_t *ha)
264250661Sdavidcs{
265250661Sdavidcs        device_t	dev;
266250661Sdavidcs
267250661Sdavidcs        dev = ha->pci_dev;
268250661Sdavidcs
269250661Sdavidcs	ha->hw.num_sds_rings = MAX_SDS_RINGS;
270250661Sdavidcs	ha->hw.num_rds_rings = MAX_RDS_RINGS;
271250661Sdavidcs	ha->hw.num_tx_rings = NUM_TX_RINGS;
272250661Sdavidcs
273250661Sdavidcs	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
274250661Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
275250661Sdavidcs		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
276250661Sdavidcs		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
277250661Sdavidcs
278250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
279250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
280250661Sdavidcs                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
281250661Sdavidcs		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
282250661Sdavidcs
283250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
284250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
285250661Sdavidcs                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
286250661Sdavidcs		ha->hw.num_tx_rings, "Number of Transmit Rings");
287250661Sdavidcs
288250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
289250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
290250661Sdavidcs                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
291250661Sdavidcs		ha->txr_idx, "Tx Ring Used");
292250661Sdavidcs
293250661Sdavidcs	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
294250661Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
295250661Sdavidcs		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
296250661Sdavidcs		(void *)ha, 0,
297250661Sdavidcs		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
298250661Sdavidcs
299284741Sdavidcs        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
300284741Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
301284741Sdavidcs                OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW,
302284741Sdavidcs                (void *)ha, 0,
303284741Sdavidcs                qla_sysctl_get_quick_stats, "I", "Quick Statistics");
304284741Sdavidcs
305250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
306250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
307250661Sdavidcs                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
308250661Sdavidcs		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
309250661Sdavidcs
310250661Sdavidcs	ha->hw.sds_cidx_thres = 32;
311250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
312250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
313250661Sdavidcs                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
314250661Sdavidcs		ha->hw.sds_cidx_thres,
315250661Sdavidcs		"Number of SDS entries to process before updating"
316250661Sdavidcs		" SDS Ring Consumer Index");
317250661Sdavidcs
318250661Sdavidcs	ha->hw.rds_pidx_thres = 32;
319250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
320250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
321250661Sdavidcs                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
322250661Sdavidcs		ha->hw.rds_pidx_thres,
323250661Sdavidcs		"Number of Rcv Rings Entries to post before updating"
324250661Sdavidcs		" RDS Ring Producer Index");
325250661Sdavidcs
326284741Sdavidcs        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
327284741Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
328284741Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
329284741Sdavidcs                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
330284741Sdavidcs                &ha->hw.rcv_intr_coalesce,
331284741Sdavidcs                ha->hw.rcv_intr_coalesce,
332284741Sdavidcs                "Rcv Intr Coalescing Parameters\n"
333284741Sdavidcs                "\tbits 15:0 max packets\n"
334284741Sdavidcs                "\tbits 31:16 max microseconds to wait\n"
335284741Sdavidcs                "\tplease run\n"
336284741Sdavidcs                "\tifconfig <if> down && ifconfig <if> up\n"
337284741Sdavidcs                "\tfor the change to take effect\n");
338258155Sdavidcs
339284741Sdavidcs        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
340284741Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
341284741Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
342284741Sdavidcs                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
343284741Sdavidcs                &ha->hw.xmt_intr_coalesce,
344284741Sdavidcs                ha->hw.xmt_intr_coalesce,
345284741Sdavidcs                "Xmt Intr Coalescing Parameters\n"
346284741Sdavidcs                "\tbits 15:0 max packets\n"
347284741Sdavidcs                "\tbits 31:16 max microseconds to wait\n"
348284741Sdavidcs                "\tplease run\n"
349284741Sdavidcs                "\tifconfig <if> down && ifconfig <if> up\n"
350284741Sdavidcs                "\tfor the change to take effect\n");
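
        /*
         * Both coalescing controls above pack two fields into a single
         * 32-bit value: bits 15:0 are the packet threshold and bits 31:16
         * the wait time in microseconds.  A minimal sketch (illustrative
         * only):
         *
         *     uint32_t usecs = 3, pkts = 256;
         *     uint32_t val = (usecs << 16) | pkts;     // 0x00030100, the rcv default
         *     uint32_t p   = val & 0xFFFF;             // 256 packets
         *     uint32_t u   = (val >> 16) & 0xFFFF;     // 3 microseconds
         */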
351284741Sdavidcs
352284741Sdavidcs        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
353284741Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
354284741Sdavidcs                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
355284741Sdavidcs                (void *)ha, 0,
356284741Sdavidcs                qla_sysctl_port_cfg, "I",
357284741Sdavidcs                        "Set Port Configuration if a valid value is written; "
358284741Sdavidcs                        "otherwise Get Port Configuration\n"
359284741Sdavidcs                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
360284741Sdavidcs                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
361284741Sdavidcs                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
362284741Sdavidcs                        " 1 = xmt only; 2 = rcv only;\n"
363284741Sdavidcs                );
364284741Sdavidcs
365284741Sdavidcs        ha->hw.enable_9kb = 1;
366284741Sdavidcs
367284741Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
368284741Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
369284741Sdavidcs                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
370284741Sdavidcs                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");
371284741Sdavidcs
372250661Sdavidcs	ha->hw.mdump_active = 0;
373250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
374250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
375250661Sdavidcs                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
376250661Sdavidcs		ha->hw.mdump_active,
377250661Sdavidcs		"Minidump Utility is Active \n"
378250661Sdavidcs		"\t 0 = Minidump Utility is not active\n"
379250661Sdavidcs		"\t 1 = Minidump Utility is retrieved on this port\n"
380250661Sdavidcs		"\t 2 = Minidump Utility is retrieved on the other port\n");
381250661Sdavidcs
382250661Sdavidcs	ha->hw.mdump_start = 0;
383250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
384250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
385250661Sdavidcs                OID_AUTO, "minidump_start", CTLFLAG_RW,
386250661Sdavidcs		&ha->hw.mdump_start, ha->hw.mdump_start,
387250661Sdavidcs		"Minidump Utility can start minidump process");
388250661Sdavidcs#ifdef QL_DBG
389250661Sdavidcs
390289635Sdavidcs	ha->err_inject = 0;
391250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
392250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
393250661Sdavidcs                OID_AUTO, "err_inject",
394250661Sdavidcs                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
395250661Sdavidcs                "Error to be injected\n"
396250661Sdavidcs                "\t\t\t 0: No Errors\n"
397250661Sdavidcs                "\t\t\t 1: rcv: rxb struct invalid\n"
398250661Sdavidcs                "\t\t\t 2: rcv: mp == NULL\n"
399250661Sdavidcs                "\t\t\t 3: lro: rxb struct invalid\n"
400250661Sdavidcs                "\t\t\t 4: lro: mp == NULL\n"
401250661Sdavidcs                "\t\t\t 5: rcv: num handles invalid\n"
402250661Sdavidcs                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
403250661Sdavidcs                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
404250661Sdavidcs                "\t\t\t 8: mbx: mailbox command failure\n"
405250661Sdavidcs                "\t\t\t 9: heartbeat failure\n"
406250661Sdavidcs                "\t\t\t A: temperature failure\n" );
407250661Sdavidcs
408250661Sdavidcs	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
409250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
410250661Sdavidcs                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
411250661Sdavidcs                (void *)ha, 0,
412250661Sdavidcs                qla_sysctl_stop_pegs, "I", "Peg Stop");
413250661Sdavidcs
414250661Sdavidcs#endif /* #ifdef QL_DBG */
415250661Sdavidcs
416284741Sdavidcs        ha->hw.user_pri_nic = 0;
417284741Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
418284741Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
419284741Sdavidcs                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
420284741Sdavidcs                ha->hw.user_pri_nic,
421284741Sdavidcs                "VLAN Tag User Priority for Normal Ethernet Packets");
422284741Sdavidcs
423284741Sdavidcs        ha->hw.user_pri_iscsi = 4;
424284741Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
425284741Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
426284741Sdavidcs                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
427284741Sdavidcs                ha->hw.user_pri_iscsi,
428284741Sdavidcs                "VLAN Tag User Priority for iSCSI Packets");
429284741Sdavidcs
430250661Sdavidcs}
431250661Sdavidcs
432250661Sdavidcsvoid
433250661Sdavidcsql_hw_link_status(qla_host_t *ha)
434250661Sdavidcs{
435250661Sdavidcs	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
436250661Sdavidcs
437250661Sdavidcs	if (ha->hw.link_up) {
438250661Sdavidcs		device_printf(ha->pci_dev, "link Up\n");
439250661Sdavidcs	} else {
440250661Sdavidcs		device_printf(ha->pci_dev, "link Down\n");
441250661Sdavidcs	}
442250661Sdavidcs
443250661Sdavidcs	if (ha->hw.flags.fduplex) {
444250661Sdavidcs		device_printf(ha->pci_dev, "Full Duplex\n");
445250661Sdavidcs	} else {
446250661Sdavidcs		device_printf(ha->pci_dev, "Half Duplex\n");
447250661Sdavidcs	}
448250661Sdavidcs
449250661Sdavidcs	if (ha->hw.flags.autoneg) {
450250661Sdavidcs		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
451250661Sdavidcs	} else {
452250661Sdavidcs		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
453250661Sdavidcs	}
454250661Sdavidcs
455250661Sdavidcs	switch (ha->hw.link_speed) {
456250661Sdavidcs	case 0x710:
457250661Sdavidcs		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
458250661Sdavidcs		break;
459250661Sdavidcs
460250661Sdavidcs	case 0x3E8:
461250661Sdavidcs		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
462250661Sdavidcs		break;
463250661Sdavidcs
464250661Sdavidcs	case 0x64:
465250661Sdavidcs		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
466250661Sdavidcs		break;
467250661Sdavidcs
468250661Sdavidcs	default:
469250661Sdavidcs		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
470250661Sdavidcs		break;
471250661Sdavidcs	}
472250661Sdavidcs
473250661Sdavidcs	switch (ha->hw.module_type) {
474250661Sdavidcs
475250661Sdavidcs	case 0x01:
476250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
477250661Sdavidcs		break;
478250661Sdavidcs
479250661Sdavidcs	case 0x02:
480250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
481250661Sdavidcs		break;
482250661Sdavidcs
483250661Sdavidcs	case 0x03:
484250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
485250661Sdavidcs		break;
486250661Sdavidcs
487250661Sdavidcs	case 0x04:
488250661Sdavidcs		device_printf(ha->pci_dev,
489250661Sdavidcs			"Module Type 10GE Passive Copper (Compliant)[%d m]\n",
490250661Sdavidcs			ha->hw.cable_length);
491250661Sdavidcs		break;
492250661Sdavidcs
493250661Sdavidcs	case 0x05:
494250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 10GE Active"
495250661Sdavidcs			" Limiting Copper (Compliant)[%d m]\n",
496250661Sdavidcs			ha->hw.cable_length);
497250661Sdavidcs		break;
498250661Sdavidcs
499250661Sdavidcs	case 0x06:
500250661Sdavidcs		device_printf(ha->pci_dev,
501250661Sdavidcs			"Module Type 10GE Passive Copper"
502250661Sdavidcs			" (Legacy, Best Effort)[%d m]\n",
503250661Sdavidcs			ha->hw.cable_length);
504250661Sdavidcs		break;
505250661Sdavidcs
506250661Sdavidcs	case 0x07:
507250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
508250661Sdavidcs		break;
509250661Sdavidcs
510250661Sdavidcs	case 0x08:
511250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
512250661Sdavidcs		break;
513250661Sdavidcs
514250661Sdavidcs	case 0x09:
515250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
516250661Sdavidcs		break;
517250661Sdavidcs
518250661Sdavidcs	case 0x0A:
519250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
520250661Sdavidcs		break;
521250661Sdavidcs
522250661Sdavidcs	case 0x0B:
523250661Sdavidcs		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
524250661Sdavidcs			" (Legacy, Best Effort)\n");
525250661Sdavidcs		break;
526250661Sdavidcs
527250661Sdavidcs	default:
528250661Sdavidcs		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
529250661Sdavidcs			ha->hw.module_type);
530250661Sdavidcs		break;
531250661Sdavidcs	}
532250661Sdavidcs
533250661Sdavidcs	if (ha->hw.link_faults == 1)
534250661Sdavidcs		device_printf(ha->pci_dev, "SFP Power Fault\n");
535250661Sdavidcs}
536250661Sdavidcs
537250661Sdavidcs/*
538250661Sdavidcs * Name: ql_free_dma
539250661Sdavidcs * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
540250661Sdavidcs */
541250661Sdavidcsvoid
542250661Sdavidcsql_free_dma(qla_host_t *ha)
543250661Sdavidcs{
544250661Sdavidcs	uint32_t i;
545250661Sdavidcs
546250661Sdavidcs        if (ha->hw.dma_buf.flags.sds_ring) {
547250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
548250661Sdavidcs			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
549250661Sdavidcs		}
550250661Sdavidcs        	ha->hw.dma_buf.flags.sds_ring = 0;
551250661Sdavidcs	}
552250661Sdavidcs
553250661Sdavidcs        if (ha->hw.dma_buf.flags.rds_ring) {
554250661Sdavidcs		for (i = 0; i < ha->hw.num_rds_rings; i++) {
555250661Sdavidcs			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
556250661Sdavidcs		}
557250661Sdavidcs        	ha->hw.dma_buf.flags.rds_ring = 0;
558250661Sdavidcs	}
559250661Sdavidcs
560250661Sdavidcs        if (ha->hw.dma_buf.flags.tx_ring) {
561250661Sdavidcs		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
562250661Sdavidcs        	ha->hw.dma_buf.flags.tx_ring = 0;
563250661Sdavidcs	}
564250661Sdavidcs	qla_minidump_free(ha);
565250661Sdavidcs}
566250661Sdavidcs
567250661Sdavidcs/*
568250661Sdavidcs * Name: ql_alloc_dma
569250661Sdavidcs * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
570250661Sdavidcs */
571250661Sdavidcsint
572250661Sdavidcsql_alloc_dma(qla_host_t *ha)
573250661Sdavidcs{
574250661Sdavidcs        device_t                dev;
575250661Sdavidcs	uint32_t		i, j, size, tx_ring_size;
576250661Sdavidcs	qla_hw_t		*hw;
577250661Sdavidcs	qla_hw_tx_cntxt_t	*tx_cntxt;
578250661Sdavidcs	uint8_t			*vaddr;
579250661Sdavidcs	bus_addr_t		paddr;
580250661Sdavidcs
581250661Sdavidcs        dev = ha->pci_dev;
582250661Sdavidcs
583250661Sdavidcs        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
584250661Sdavidcs
585250661Sdavidcs	hw = &ha->hw;
586250661Sdavidcs	/*
587250661Sdavidcs	 * Allocate Transmit Ring
588250661Sdavidcs	 */
589250661Sdavidcs	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
590250661Sdavidcs	size = (tx_ring_size * ha->hw.num_tx_rings);
591250661Sdavidcs
592250661Sdavidcs	hw->dma_buf.tx_ring.alignment = 8;
593250661Sdavidcs	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
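	/*
	 * The extra PAGE_SIZE tacked onto the transmit ring allocation holds
	 * the per-ring tx_cons consumer indices; the rings themselves are
	 * carved out first below, followed by one uint32_t tx_cons per ring.
	 */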
594250661Sdavidcs
595250661Sdavidcs        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
596250661Sdavidcs                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
597250661Sdavidcs                goto ql_alloc_dma_exit;
598250661Sdavidcs        }
599250661Sdavidcs
600250661Sdavidcs	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
601250661Sdavidcs	paddr = hw->dma_buf.tx_ring.dma_addr;
602250661Sdavidcs
603250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
604250661Sdavidcs		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
605250661Sdavidcs
606250661Sdavidcs		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
607250661Sdavidcs		tx_cntxt->tx_ring_paddr = paddr;
608250661Sdavidcs
609250661Sdavidcs		vaddr += tx_ring_size;
610250661Sdavidcs		paddr += tx_ring_size;
611250661Sdavidcs	}
612250661Sdavidcs
613250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
614250661Sdavidcs		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
615250661Sdavidcs
616250661Sdavidcs		tx_cntxt->tx_cons = (uint32_t *)vaddr;
617250661Sdavidcs		tx_cntxt->tx_cons_paddr = paddr;
618250661Sdavidcs
619250661Sdavidcs		vaddr += sizeof (uint32_t);
620250661Sdavidcs		paddr += sizeof (uint32_t);
621250661Sdavidcs	}
622250661Sdavidcs
623250661Sdavidcs        ha->hw.dma_buf.flags.tx_ring = 1;
624250661Sdavidcs
625250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
626250661Sdavidcs		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
627250661Sdavidcs		hw->dma_buf.tx_ring.dma_b));
628250661Sdavidcs	/*
629250661Sdavidcs	 * Allocate Receive Descriptor Rings
630250661Sdavidcs	 */
631250661Sdavidcs
632250661Sdavidcs	for (i = 0; i < hw->num_rds_rings; i++) {
633250661Sdavidcs
634250661Sdavidcs		hw->dma_buf.rds_ring[i].alignment = 8;
635250661Sdavidcs		hw->dma_buf.rds_ring[i].size =
636250661Sdavidcs			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
637250661Sdavidcs
638250661Sdavidcs		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
639250661Sdavidcs			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
640250661Sdavidcs				__func__, i);
641250661Sdavidcs
642250661Sdavidcs			for (j = 0; j < i; j++)
643250661Sdavidcs				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
644250661Sdavidcs
645250661Sdavidcs			goto ql_alloc_dma_exit;
646250661Sdavidcs		}
647250661Sdavidcs		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
648250661Sdavidcs			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
649250661Sdavidcs			hw->dma_buf.rds_ring[i].dma_b));
650250661Sdavidcs	}
651250661Sdavidcs
652250661Sdavidcs	hw->dma_buf.flags.rds_ring = 1;
653250661Sdavidcs
654250661Sdavidcs	/*
655250661Sdavidcs	 * Allocate Status Descriptor Rings
656250661Sdavidcs	 */
657250661Sdavidcs
658250661Sdavidcs	for (i = 0; i < hw->num_sds_rings; i++) {
659250661Sdavidcs		hw->dma_buf.sds_ring[i].alignment = 8;
660250661Sdavidcs		hw->dma_buf.sds_ring[i].size =
661250661Sdavidcs			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
662250661Sdavidcs
663250661Sdavidcs		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
664250661Sdavidcs			device_printf(dev, "%s: sds ring alloc failed\n",
665250661Sdavidcs				__func__);
666250661Sdavidcs
667250661Sdavidcs			for (j = 0; j < i; j++)
668250661Sdavidcs				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
669250661Sdavidcs
670250661Sdavidcs			goto ql_alloc_dma_exit;
671250661Sdavidcs		}
672250661Sdavidcs		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
673250661Sdavidcs			__func__, i,
674250661Sdavidcs			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
675250661Sdavidcs			hw->dma_buf.sds_ring[i].dma_b));
676250661Sdavidcs	}
677250661Sdavidcs	for (i = 0; i < hw->num_sds_rings; i++) {
678250661Sdavidcs		hw->sds[i].sds_ring_base =
679250661Sdavidcs			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
680250661Sdavidcs	}
681250661Sdavidcs
682250661Sdavidcs	hw->dma_buf.flags.sds_ring = 1;
683250661Sdavidcs
684250661Sdavidcs	return 0;
685250661Sdavidcs
686250661Sdavidcsql_alloc_dma_exit:
687250661Sdavidcs	ql_free_dma(ha);
688250661Sdavidcs	return -1;
689250661Sdavidcs}
690250661Sdavidcs
691250661Sdavidcs#define Q8_MBX_MSEC_DELAY	5000
692250661Sdavidcs
693250661Sdavidcsstatic int
694250661Sdavidcsqla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
695250661Sdavidcs	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
696250661Sdavidcs{
697250661Sdavidcs	uint32_t i;
698250661Sdavidcs	uint32_t data;
699250661Sdavidcs	int ret = 0;
700250661Sdavidcs
701250661Sdavidcs	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
702250661Sdavidcs		ret = -3;
703250661Sdavidcs		ha->qla_initiate_recovery = 1;
704250661Sdavidcs		goto exit_qla_mbx_cmd;
705250661Sdavidcs	}
706250661Sdavidcs
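	/*
	 * Wait for the firmware to consume any previously posted host
	 * mailbox command (Q8_HOST_MBOX_CNTRL reads 0 when the mailbox is
	 * free): up to ~1 second in 1ms steps when no_pause is set,
	 * otherwise up to Q8_MBX_MSEC_DELAY milliseconds.
	 */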
707250661Sdavidcs	if (no_pause)
708250661Sdavidcs		i = 1000;
709250661Sdavidcs	else
710250661Sdavidcs		i = Q8_MBX_MSEC_DELAY;
711250661Sdavidcs
712250661Sdavidcs	while (i) {
713250661Sdavidcs		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
714250661Sdavidcs		if (data == 0)
715250661Sdavidcs			break;
716250661Sdavidcs		if (no_pause) {
717250661Sdavidcs			DELAY(1000);
718250661Sdavidcs		} else {
719250661Sdavidcs			qla_mdelay(__func__, 1);
720250661Sdavidcs		}
721250661Sdavidcs		i--;
722250661Sdavidcs	}
723250661Sdavidcs
724250661Sdavidcs	if (i == 0) {
725250661Sdavidcs		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
726250661Sdavidcs			__func__, data);
727250661Sdavidcs		ret = -1;
728250661Sdavidcs		ha->qla_initiate_recovery = 1;
729250661Sdavidcs		goto exit_qla_mbx_cmd;
730250661Sdavidcs	}
731250661Sdavidcs
732250661Sdavidcs	for (i = 0; i < n_hmbox; i++) {
733250661Sdavidcs		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
734250661Sdavidcs		h_mbox++;
735250661Sdavidcs	}
736250661Sdavidcs
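	/* Signal the firmware that a new command is in the host mailbox. */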
737250661Sdavidcs	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
738250661Sdavidcs
739250661Sdavidcs
740250661Sdavidcs	i = Q8_MBX_MSEC_DELAY;
741250661Sdavidcs	while (i) {
742250661Sdavidcs		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
743250661Sdavidcs
744250661Sdavidcs		if ((data & 0x3) == 1) {
745250661Sdavidcs			data = READ_REG32(ha, Q8_FW_MBOX0);
746250661Sdavidcs			if ((data & 0xF000) != 0x8000)
747250661Sdavidcs				break;
748250661Sdavidcs		}
749250661Sdavidcs		if (no_pause) {
750250661Sdavidcs			DELAY(1000);
751250661Sdavidcs		} else {
752250661Sdavidcs			qla_mdelay(__func__, 1);
753250661Sdavidcs		}
754250661Sdavidcs		i--;
755250661Sdavidcs	}
756250661Sdavidcs	if (i == 0) {
757250661Sdavidcs		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
758250661Sdavidcs			__func__, data);
759250661Sdavidcs		ret = -2;
760250661Sdavidcs		ha->qla_initiate_recovery = 1;
761250661Sdavidcs		goto exit_qla_mbx_cmd;
762250661Sdavidcs	}
763250661Sdavidcs
764250661Sdavidcs	for (i = 0; i < n_fwmbox; i++) {
765250661Sdavidcs		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
766250661Sdavidcs	}
767250661Sdavidcs
768250661Sdavidcs	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
769250661Sdavidcs	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
770250661Sdavidcs
771250661Sdavidcsexit_qla_mbx_cmd:
772250661Sdavidcs	return (ret);
773250661Sdavidcs}
774250661Sdavidcs
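
/*
 * Every mailbox command below follows the pattern first used in
 * qla_get_nic_partition(): overlay a q80_* request structure on ha->hw.mbox,
 * fill in opcode and count_version, call qla_mbx_cmd(), then check
 * Q8_MBX_RSP_STATUS() on the response written back into ha->hw.mbox.
 * A condensed sketch using the link-event request defined later in this
 * file (illustrative only):
 *
 *     q80_link_event_t *req = (q80_link_event_t *)ha->hw.mbox;
 *
 *     bzero(req, sizeof (q80_link_event_t));
 *     req->opcode = Q8_MBX_LINK_EVENT_REQ;
 *     req->count_version = (sizeof (q80_link_event_t) >> 2) |
 *                             Q8_MBX_CMD_VERSION;
 *     if (qla_mbx_cmd(ha, (uint32_t *)req, sizeof (q80_link_event_t) >> 2,
 *             ha->hw.mbox, sizeof (q80_link_event_rsp_t) >> 2, 0) == 0) {
 *             err = Q8_MBX_RSP_STATUS(
 *                     ((q80_link_event_rsp_t *)ha->hw.mbox)->regcnt_status);
 *     }
 */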
775284741Sdavidcsint
776284741Sdavidcsqla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
777284741Sdavidcs	uint32_t *num_rcvq)
778250661Sdavidcs{
779250661Sdavidcs	uint32_t *mbox, err;
780250661Sdavidcs	device_t dev = ha->pci_dev;
781250661Sdavidcs
782250661Sdavidcs	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
783250661Sdavidcs
784250661Sdavidcs	mbox = ha->hw.mbox;
785250661Sdavidcs
786250661Sdavidcs	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
787250661Sdavidcs
788250661Sdavidcs	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
789250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
790250661Sdavidcs		return (-1);
791250661Sdavidcs	}
792250661Sdavidcs	err = mbox[0] >> 25;
793250661Sdavidcs
794284741Sdavidcs	if (supports_9kb != NULL) {
795284741Sdavidcs		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
796284741Sdavidcs			*supports_9kb = 1;
797284741Sdavidcs		else
798284741Sdavidcs			*supports_9kb = 0;
799284741Sdavidcs	}
800284741Sdavidcs
801284741Sdavidcs	if (num_rcvq != NULL)
802284741Sdavidcs		*num_rcvq =  ((mbox[6] >> 16) & 0xFFFF);
803284741Sdavidcs
804250661Sdavidcs	if ((err != 1) && (err != 0)) {
805250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
806250661Sdavidcs		return (-1);
807250661Sdavidcs	}
808250661Sdavidcs	return 0;
809250661Sdavidcs}
810250661Sdavidcs
811250661Sdavidcsstatic int
812284741Sdavidcsqla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
813284741Sdavidcs	uint32_t create)
814250661Sdavidcs{
815250661Sdavidcs	uint32_t i, err;
816250661Sdavidcs	device_t dev = ha->pci_dev;
817250661Sdavidcs	q80_config_intr_t *c_intr;
818250661Sdavidcs	q80_config_intr_rsp_t *c_intr_rsp;
819250661Sdavidcs
820250661Sdavidcs	c_intr = (q80_config_intr_t *)ha->hw.mbox;
821250661Sdavidcs	bzero(c_intr, (sizeof (q80_config_intr_t)));
822250661Sdavidcs
823250661Sdavidcs	c_intr->opcode = Q8_MBX_CONFIG_INTR;
824250661Sdavidcs
825250661Sdavidcs	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
826250661Sdavidcs	c_intr->count_version |= Q8_MBX_CMD_VERSION;
827250661Sdavidcs
828250661Sdavidcs	c_intr->nentries = num_intrs;
829250661Sdavidcs
830250661Sdavidcs	for (i = 0; i < num_intrs; i++) {
831250661Sdavidcs		if (create) {
832250661Sdavidcs			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
833284741Sdavidcs			c_intr->intr[i].msix_index = start_idx + 1 + i;
834250661Sdavidcs		} else {
835250661Sdavidcs			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
836284741Sdavidcs			c_intr->intr[i].msix_index =
837284741Sdavidcs				ha->hw.intr_id[(start_idx + i)];
838250661Sdavidcs		}
839250661Sdavidcs
840250661Sdavidcs		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
841250661Sdavidcs	}
842250661Sdavidcs
843250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
844250661Sdavidcs		(sizeof (q80_config_intr_t) >> 2),
845250661Sdavidcs		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
846250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
847250661Sdavidcs		return (-1);
848250661Sdavidcs	}
849250661Sdavidcs
850250661Sdavidcs	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
851250661Sdavidcs
852250661Sdavidcs	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
853250661Sdavidcs
854250661Sdavidcs	if (err) {
855250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
856250661Sdavidcs			c_intr_rsp->nentries);
857250661Sdavidcs
858250661Sdavidcs		for (i = 0; i < c_intr_rsp->nentries; i++) {
859250661Sdavidcs			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
860250661Sdavidcs				__func__, i,
861250661Sdavidcs				c_intr_rsp->intr[i].status,
862250661Sdavidcs				c_intr_rsp->intr[i].intr_id,
863250661Sdavidcs				c_intr_rsp->intr[i].intr_src);
864250661Sdavidcs		}
865250661Sdavidcs
866250661Sdavidcs		return (-1);
867250661Sdavidcs	}
868250661Sdavidcs
869250661Sdavidcs	for (i = 0; ((i < num_intrs) && create); i++) {
870250661Sdavidcs		if (!c_intr_rsp->intr[i].status) {
871284741Sdavidcs			ha->hw.intr_id[(start_idx + i)] =
872284741Sdavidcs				c_intr_rsp->intr[i].intr_id;
873284741Sdavidcs			ha->hw.intr_src[(start_idx + i)] =
874284741Sdavidcs				c_intr_rsp->intr[i].intr_src;
875250661Sdavidcs		}
876250661Sdavidcs	}
877250661Sdavidcs
878250661Sdavidcs	return (0);
879250661Sdavidcs}
880250661Sdavidcs
881250661Sdavidcs/*
882250661Sdavidcs * Name: qla_config_rss
883250661Sdavidcs * Function: Configure RSS for the context/interface.
884250661Sdavidcs */
885250661Sdavidcsstatic const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
886250661Sdavidcs			0x8030f20c77cb2da3ULL,
887250661Sdavidcs			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
888250661Sdavidcs			0x255b0ec26d5a56daULL };
889250661Sdavidcs
890250661Sdavidcsstatic int
891250661Sdavidcsqla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
892250661Sdavidcs{
893250661Sdavidcs	q80_config_rss_t	*c_rss;
894250661Sdavidcs	q80_config_rss_rsp_t	*c_rss_rsp;
895250661Sdavidcs	uint32_t		err, i;
896250661Sdavidcs	device_t		dev = ha->pci_dev;
897250661Sdavidcs
898250661Sdavidcs	c_rss = (q80_config_rss_t *)ha->hw.mbox;
899250661Sdavidcs	bzero(c_rss, (sizeof (q80_config_rss_t)));
900250661Sdavidcs
901250661Sdavidcs	c_rss->opcode = Q8_MBX_CONFIG_RSS;
902250661Sdavidcs
903250661Sdavidcs	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
904250661Sdavidcs	c_rss->count_version |= Q8_MBX_CMD_VERSION;
905250661Sdavidcs
906250661Sdavidcs	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
907250661Sdavidcs				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
908284741Sdavidcs	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
909284741Sdavidcs	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);
910250661Sdavidcs
911250661Sdavidcs	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
912250661Sdavidcs	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
913250661Sdavidcs
914250661Sdavidcs	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
915250661Sdavidcs
916250661Sdavidcs	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
917250661Sdavidcs	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
918250661Sdavidcs
919250661Sdavidcs	c_rss->cntxt_id = cntxt_id;
920250661Sdavidcs
921250661Sdavidcs	for (i = 0; i < 5; i++) {
922250661Sdavidcs		c_rss->rss_key[i] = rss_key[i];
923250661Sdavidcs	}
924250661Sdavidcs
925250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
926250661Sdavidcs		(sizeof (q80_config_rss_t) >> 2),
927250661Sdavidcs		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
928250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
929250661Sdavidcs		return (-1);
930250661Sdavidcs	}
931250661Sdavidcs	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
932250661Sdavidcs
933250661Sdavidcs	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
934250661Sdavidcs
935250661Sdavidcs	if (err) {
936250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
937250661Sdavidcs		return (-1);
938250661Sdavidcs	}
939250661Sdavidcs	return 0;
940250661Sdavidcs}
941250661Sdavidcs
942250661Sdavidcsstatic int
943250661Sdavidcsqla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
944250661Sdavidcs        uint16_t cntxt_id, uint8_t *ind_table)
945250661Sdavidcs{
946250661Sdavidcs        q80_config_rss_ind_table_t      *c_rss_ind;
947250661Sdavidcs        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
948250661Sdavidcs        uint32_t                        err;
949250661Sdavidcs        device_t                        dev = ha->pci_dev;
950250661Sdavidcs
951250661Sdavidcs	if ((count > Q8_RSS_IND_TBL_SIZE) ||
952250661Sdavidcs		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
953250661Sdavidcs		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
954250661Sdavidcs			start_idx, count);
955250661Sdavidcs		return (-1);
956250661Sdavidcs	}
957250661Sdavidcs
958250661Sdavidcs        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
959250661Sdavidcs        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
960250661Sdavidcs
961250661Sdavidcs        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
962250661Sdavidcs        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
963250661Sdavidcs        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
964250661Sdavidcs
965250661Sdavidcs	c_rss_ind->start_idx = start_idx;
966250661Sdavidcs	c_rss_ind->end_idx = start_idx + count - 1;
967250661Sdavidcs	c_rss_ind->cntxt_id = cntxt_id;
968250661Sdavidcs	bcopy(ind_table, c_rss_ind->ind_table, count);
969250661Sdavidcs
970250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
971250661Sdavidcs		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
972250661Sdavidcs		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
973250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
974250661Sdavidcs		return (-1);
975250661Sdavidcs	}
976250661Sdavidcs
977250661Sdavidcs	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
978250661Sdavidcs	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
979250661Sdavidcs
980250661Sdavidcs	if (err) {
981250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
982250661Sdavidcs		return (-1);
983250661Sdavidcs	}
984250661Sdavidcs	return 0;
985250661Sdavidcs}
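
/*
 * The indirection table entries select which SDS/RSS ring a given hash
 * bucket is steered to.  A minimal round-robin fill across the configured
 * SDS rings would look like this (a sketch, not the driver's exact table
 * setup):
 *
 *     uint8_t  ind_table[Q8_RSS_IND_TBL_SIZE];
 *     uint32_t i;
 *
 *     for (i = 0; i < Q8_RSS_IND_TBL_SIZE; i++)
 *             ind_table[i] = i % ha->hw.num_sds_rings;
 *
 *     (void) qla_set_rss_ind_table(ha, 0, Q8_RSS_IND_TBL_SIZE,
 *                 ha->hw.rcv_cntxt_id, ind_table);
 */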
986250661Sdavidcs
987250661Sdavidcs/*
988250661Sdavidcs * Name: qla_config_intr_coalesce
989250661Sdavidcs * Function: Configure Interrupt Coalescing.
990250661Sdavidcs */
991250661Sdavidcsstatic int
992284741Sdavidcsqla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
993284741Sdavidcs	int rcv)
994250661Sdavidcs{
995250661Sdavidcs	q80_config_intr_coalesc_t	*intrc;
996250661Sdavidcs	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
997250661Sdavidcs	uint32_t			err, i;
998250661Sdavidcs	device_t			dev = ha->pci_dev;
999250661Sdavidcs
1000250661Sdavidcs	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
1001250661Sdavidcs	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
1002250661Sdavidcs
1003250661Sdavidcs	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
1004250661Sdavidcs	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
1005250661Sdavidcs	intrc->count_version |= Q8_MBX_CMD_VERSION;
1006250661Sdavidcs
1007284741Sdavidcs	if (rcv) {
1008284741Sdavidcs		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
1009284741Sdavidcs		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
1010284741Sdavidcs		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
1011284741Sdavidcs	} else {
1012284741Sdavidcs		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
1013284741Sdavidcs		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
1014284741Sdavidcs		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
1015284741Sdavidcs	}
1016284741Sdavidcs
1017250661Sdavidcs	intrc->cntxt_id = cntxt_id;
1018250661Sdavidcs
1019250661Sdavidcs	if (tenable) {
1020250661Sdavidcs		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
1021250661Sdavidcs		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
1022250661Sdavidcs
1023250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
1024250661Sdavidcs			intrc->sds_ring_mask |= (1 << i);
1025250661Sdavidcs		}
1026250661Sdavidcs		intrc->ms_timeout = 1000;
1027250661Sdavidcs	}
1028250661Sdavidcs
1029250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
1030250661Sdavidcs		(sizeof (q80_config_intr_coalesc_t) >> 2),
1031250661Sdavidcs		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
1032250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
1033250661Sdavidcs		return (-1);
1034250661Sdavidcs	}
1035250661Sdavidcs	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
1036250661Sdavidcs
1037250661Sdavidcs	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
1038250661Sdavidcs
1039250661Sdavidcs	if (err) {
1040250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1041250661Sdavidcs		return (-1);
1042250661Sdavidcs	}
1043250661Sdavidcs
1044250661Sdavidcs	return 0;
1045250661Sdavidcs}
1046250661Sdavidcs
1047250661Sdavidcs
1048250661Sdavidcs/*
1049250661Sdavidcs * Name: qla_config_mac_addr
1050250661Sdavidcs * Function: binds a MAC address to the context/interface.
1051250661Sdavidcs *	Can be unicast, multicast or broadcast.
1052250661Sdavidcs */
1053250661Sdavidcsstatic int
1054250661Sdavidcsqla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
1055250661Sdavidcs{
1056250661Sdavidcs	q80_config_mac_addr_t		*cmac;
1057250661Sdavidcs	q80_config_mac_addr_rsp_t	*cmac_rsp;
1058250661Sdavidcs	uint32_t			err;
1059250661Sdavidcs	device_t			dev = ha->pci_dev;
1060250661Sdavidcs
1061250661Sdavidcs	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
1062250661Sdavidcs	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
1063250661Sdavidcs
1064250661Sdavidcs	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
1065250661Sdavidcs	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
1066250661Sdavidcs	cmac->count_version |= Q8_MBX_CMD_VERSION;
1067250661Sdavidcs
1068250661Sdavidcs	if (add_mac)
1069250661Sdavidcs		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
1070250661Sdavidcs	else
1071250661Sdavidcs		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
1072250661Sdavidcs
1073250661Sdavidcs	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
1074250661Sdavidcs
1075250661Sdavidcs	cmac->nmac_entries = 1;
1076250661Sdavidcs	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
1077250661Sdavidcs	bcopy(mac_addr, cmac->mac_addr[0].addr, 6);
1078250661Sdavidcs
1079250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
1080250661Sdavidcs		(sizeof (q80_config_mac_addr_t) >> 2),
1081250661Sdavidcs		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
1082250661Sdavidcs		device_printf(dev, "%s: %s failed0\n", __func__,
1083250661Sdavidcs			(add_mac ? "Add" : "Del"));
1084250661Sdavidcs		return (-1);
1085250661Sdavidcs	}
1086250661Sdavidcs	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
1087250661Sdavidcs
1088250661Sdavidcs	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
1089250661Sdavidcs
1090250661Sdavidcs	if (err) {
1091250661Sdavidcs		device_printf(dev, "%s: %s "
1092250661Sdavidcs			"%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
1093250661Sdavidcs			__func__, (add_mac ? "Add" : "Del"),
1094250661Sdavidcs			mac_addr[0], mac_addr[1], mac_addr[2],
1095250661Sdavidcs			mac_addr[3], mac_addr[4], mac_addr[5], err);
1096250661Sdavidcs		return (-1);
1097250661Sdavidcs	}
1098250661Sdavidcs
1099250661Sdavidcs	return 0;
1100250661Sdavidcs}
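
/*
 * Receive context bring-up typically programs both the port's own MAC
 * address and the broadcast address through this routine.  A sketch,
 * assuming ha->hw.mac_addr holds the permanent port address (illustrative
 * only):
 *
 *     uint8_t bcast[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 *
 *     if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
 *             return (-1);
 *     if (qla_config_mac_addr(ha, bcast, 1))
 *             return (-1);
 */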
1101250661Sdavidcs
1102250661Sdavidcs
1103250661Sdavidcs/*
1104250661Sdavidcs * Name: qla_set_mac_rcv_mode
1105298955Spfg * Function: Enable/Disable AllMulticast and Promiscuous Modes.
1106250661Sdavidcs */
1107250661Sdavidcsstatic int
1108250661Sdavidcsqla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
1109250661Sdavidcs{
1110250661Sdavidcs	q80_config_mac_rcv_mode_t	*rcv_mode;
1111250661Sdavidcs	uint32_t			err;
1112250661Sdavidcs	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
1113250661Sdavidcs	device_t			dev = ha->pci_dev;
1114250661Sdavidcs
1115250661Sdavidcs	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
1116250661Sdavidcs	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
1117250661Sdavidcs
1118250661Sdavidcs	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
1119250661Sdavidcs	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
1120250661Sdavidcs	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
1121250661Sdavidcs
1122250661Sdavidcs	rcv_mode->mode = mode;
1123250661Sdavidcs
1124250661Sdavidcs	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
1125250661Sdavidcs
1126250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
1127250661Sdavidcs		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
1128250661Sdavidcs		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
1129250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
1130250661Sdavidcs		return (-1);
1131250661Sdavidcs	}
1132250661Sdavidcs	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
1133250661Sdavidcs
1134250661Sdavidcs	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
1135250661Sdavidcs
1136250661Sdavidcs	if (err) {
1137250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1138250661Sdavidcs		return (-1);
1139250661Sdavidcs	}
1140250661Sdavidcs
1141250661Sdavidcs	return 0;
1142250661Sdavidcs}
1143250661Sdavidcs
1144250661Sdavidcsint
1145250661Sdavidcsql_set_promisc(qla_host_t *ha)
1146250661Sdavidcs{
1147250661Sdavidcs	int ret;
1148250661Sdavidcs
1149250661Sdavidcs	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1150250661Sdavidcs	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1151250661Sdavidcs	return (ret);
1152250661Sdavidcs}
1153250661Sdavidcs
1154284741Sdavidcsvoid
1155284741Sdavidcsqla_reset_promisc(qla_host_t *ha)
1156284741Sdavidcs{
1157284741Sdavidcs	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
1158284741Sdavidcs	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1159284741Sdavidcs}
1160284741Sdavidcs
1161250661Sdavidcsint
1162250661Sdavidcsql_set_allmulti(qla_host_t *ha)
1163250661Sdavidcs{
1164250661Sdavidcs	int ret;
1165250661Sdavidcs
1166250661Sdavidcs	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
1167250661Sdavidcs	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1168250661Sdavidcs	return (ret);
1169250661Sdavidcs}
1170250661Sdavidcs
1171284741Sdavidcsvoid
1172284741Sdavidcsqla_reset_allmulti(qla_host_t *ha)
1173284741Sdavidcs{
1174284741Sdavidcs	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
1175284741Sdavidcs	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
1176284741Sdavidcs}
1177250661Sdavidcs
1178250661Sdavidcs/*
1179250661Sdavidcs * Name: ql_set_max_mtu
1180250661Sdavidcs * Function:
1181250661Sdavidcs *	Sets the maximum transfer unit size for the specified rcv context.
1182250661Sdavidcs */
1183250661Sdavidcsint
1184250661Sdavidcsql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
1185250661Sdavidcs{
1186250661Sdavidcs	device_t		dev;
1187250661Sdavidcs	q80_set_max_mtu_t	*max_mtu;
1188250661Sdavidcs	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
1189250661Sdavidcs	uint32_t		err;
1190250661Sdavidcs
1191250661Sdavidcs	dev = ha->pci_dev;
1192250661Sdavidcs
1193250661Sdavidcs	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
1194250661Sdavidcs	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
1195250661Sdavidcs
1196250661Sdavidcs	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
1197250661Sdavidcs	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
1198250661Sdavidcs	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
1199250661Sdavidcs
1200250661Sdavidcs	max_mtu->cntxt_id = cntxt_id;
1201250661Sdavidcs	max_mtu->mtu = mtu;
1202250661Sdavidcs
1203250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1204250661Sdavidcs		(sizeof (q80_set_max_mtu_t) >> 2),
1205250661Sdavidcs                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1206250661Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
1207250661Sdavidcs                return -1;
1208250661Sdavidcs        }
1209250661Sdavidcs
1210250661Sdavidcs	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1211250661Sdavidcs
1212250661Sdavidcs        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1213250661Sdavidcs
1214250661Sdavidcs        if (err) {
1215250661Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1216250661Sdavidcs        }
1217250661Sdavidcs
1218250661Sdavidcs	return 0;
1219250661Sdavidcs}
1220250661Sdavidcs
1221250661Sdavidcsstatic int
1222250661Sdavidcsqla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1223250661Sdavidcs{
1224250661Sdavidcs	device_t		dev;
1225250661Sdavidcs	q80_link_event_t	*lnk;
1226250661Sdavidcs	q80_link_event_rsp_t	*lnk_rsp;
1227250661Sdavidcs	uint32_t		err;
1228250661Sdavidcs
1229250661Sdavidcs	dev = ha->pci_dev;
1230250661Sdavidcs
1231250661Sdavidcs	lnk = (q80_link_event_t *)ha->hw.mbox;
1232250661Sdavidcs	bzero(lnk, (sizeof (q80_link_event_t)));
1233250661Sdavidcs
1234250661Sdavidcs	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1235250661Sdavidcs	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1236250661Sdavidcs	lnk->count_version |= Q8_MBX_CMD_VERSION;
1237250661Sdavidcs
1238250661Sdavidcs	lnk->cntxt_id = cntxt_id;
1239250661Sdavidcs	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1240250661Sdavidcs
1241250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1242250661Sdavidcs                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1243250661Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
1244250661Sdavidcs                return -1;
1245250661Sdavidcs        }
1246250661Sdavidcs
1247250661Sdavidcs	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1248250661Sdavidcs
1249250661Sdavidcs        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1250250661Sdavidcs
1251250661Sdavidcs        if (err) {
1252250661Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1253250661Sdavidcs        }
1254250661Sdavidcs
1255250661Sdavidcs	return 0;
1256250661Sdavidcs}
1257250661Sdavidcs
1258250661Sdavidcsstatic int
1259250661Sdavidcsqla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1260250661Sdavidcs{
1261250661Sdavidcs	device_t		dev;
1262250661Sdavidcs	q80_config_fw_lro_t	*fw_lro;
1263250661Sdavidcs	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
1264250661Sdavidcs	uint32_t		err;
1265250661Sdavidcs
1266250661Sdavidcs	dev = ha->pci_dev;
1267250661Sdavidcs
1268250661Sdavidcs	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1269250661Sdavidcs	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1270250661Sdavidcs
1271250661Sdavidcs	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1272250661Sdavidcs	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1273250661Sdavidcs	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1274250661Sdavidcs
1275250661Sdavidcs	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1276284741Sdavidcs	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;
1277250661Sdavidcs
1278250661Sdavidcs	fw_lro->cntxt_id = cntxt_id;
1279250661Sdavidcs
1280250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1281250661Sdavidcs		(sizeof (q80_config_fw_lro_t) >> 2),
1282250661Sdavidcs		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1283250661Sdavidcs		device_printf(dev, "%s: failed\n", __func__);
1284250661Sdavidcs		return -1;
1285250661Sdavidcs	}
1286250661Sdavidcs
1287250661Sdavidcs	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1288250661Sdavidcs
1289250661Sdavidcs	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1290250661Sdavidcs
1291250661Sdavidcs	if (err) {
1292250661Sdavidcs		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1293250661Sdavidcs	}
1294250661Sdavidcs
1295250661Sdavidcs	return 0;
1296250661Sdavidcs}
1297250661Sdavidcs
1298250661Sdavidcsstatic void
1299284741Sdavidcsqla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
1300250661Sdavidcs{
1301250661Sdavidcs	device_t dev = ha->pci_dev;
1302250661Sdavidcs
1303284741Sdavidcs	if (i < ha->hw.num_tx_rings) {
1304284741Sdavidcs		device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
1305284741Sdavidcs			__func__, i, xstat->total_bytes);
1306284741Sdavidcs		device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
1307284741Sdavidcs			__func__, i, xstat->total_pkts);
1308284741Sdavidcs		device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
1309284741Sdavidcs			__func__, i, xstat->errors);
1310284741Sdavidcs		device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
1311284741Sdavidcs			__func__, i, xstat->pkts_dropped);
1312284741Sdavidcs		device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
1313284741Sdavidcs			__func__, i, xstat->switch_pkts);
1314284741Sdavidcs		device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
1315284741Sdavidcs			__func__, i, xstat->num_buffers);
1316284741Sdavidcs	} else {
1317284741Sdavidcs		device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
1318284741Sdavidcs			__func__, xstat->total_bytes);
1319284741Sdavidcs		device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
1320284741Sdavidcs			__func__, xstat->total_pkts);
1321284741Sdavidcs		device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
1322284741Sdavidcs			__func__, xstat->errors);
1323284741Sdavidcs		device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
1324284741Sdavidcs			__func__, xstat->pkts_dropped);
1325284741Sdavidcs		device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
1326284741Sdavidcs			__func__, xstat->switch_pkts);
1327284741Sdavidcs		device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
1328284741Sdavidcs			__func__, xstat->num_buffers);
1329284741Sdavidcs	}
1330250661Sdavidcs}
1331250661Sdavidcs
1332250661Sdavidcsstatic void
1333250661Sdavidcsqla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1334250661Sdavidcs{
1335250661Sdavidcs	device_t dev = ha->pci_dev;
1336250661Sdavidcs
1337250661Sdavidcs	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1338250661Sdavidcs		rstat->total_bytes);
1339250661Sdavidcs	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1340250661Sdavidcs		rstat->total_pkts);
1341250661Sdavidcs	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1342250661Sdavidcs		rstat->lro_pkt_count);
1343284741Sdavidcs	device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
1344250661Sdavidcs		rstat->sw_pkt_count);
1345250661Sdavidcs	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1346250661Sdavidcs		rstat->ip_chksum_err);
1347250661Sdavidcs	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1348250661Sdavidcs		rstat->pkts_wo_acntxts);
1349250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1350250661Sdavidcs		__func__, rstat->pkts_dropped_no_sds_card);
1351250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1352250661Sdavidcs		__func__, rstat->pkts_dropped_no_sds_host);
1353250661Sdavidcs	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1354250661Sdavidcs		rstat->oversized_pkts);
1355250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1356250661Sdavidcs		__func__, rstat->pkts_dropped_no_rds);
1357250661Sdavidcs	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1358250661Sdavidcs		__func__, rstat->unxpctd_mcast_pkts);
1359250661Sdavidcs	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1360250661Sdavidcs		rstat->re1_fbq_error);
1361250661Sdavidcs	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1362250661Sdavidcs		rstat->invalid_mac_addr);
1363250661Sdavidcs	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1364250661Sdavidcs		rstat->rds_prime_trys);
1365250661Sdavidcs	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1366250661Sdavidcs		rstat->rds_prime_success);
1367250661Sdavidcs	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1368250661Sdavidcs		rstat->lro_flows_added);
1369250661Sdavidcs	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1370250661Sdavidcs		rstat->lro_flows_deleted);
1371250661Sdavidcs	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1372250661Sdavidcs		rstat->lro_flows_active);
1373250661Sdavidcs	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1374250661Sdavidcs		__func__, rstat->pkts_droped_unknown);
1375250661Sdavidcs}
1376250661Sdavidcs
1377250661Sdavidcsstatic void
1378250661Sdavidcsqla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1379250661Sdavidcs{
1380250661Sdavidcs	device_t dev = ha->pci_dev;
1381250661Sdavidcs
1382250661Sdavidcs	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1383250661Sdavidcs		mstat->xmt_frames);
1384250661Sdavidcs	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1385250661Sdavidcs		mstat->xmt_bytes);
1386250661Sdavidcs	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1387250661Sdavidcs		mstat->xmt_mcast_pkts);
1388250661Sdavidcs	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1389250661Sdavidcs		mstat->xmt_bcast_pkts);
1390250661Sdavidcs	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1391250661Sdavidcs		mstat->xmt_pause_frames);
1392250661Sdavidcs	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1393250661Sdavidcs		mstat->xmt_cntrl_pkts);
1394250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1395250661Sdavidcs		__func__, mstat->xmt_pkt_lt_64bytes);
1396250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1397250661Sdavidcs		__func__, mstat->xmt_pkt_lt_127bytes);
1398250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1399250661Sdavidcs		__func__, mstat->xmt_pkt_lt_255bytes);
1400250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1401250661Sdavidcs		__func__, mstat->xmt_pkt_lt_511bytes);
1402284741Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1403250661Sdavidcs		__func__, mstat->xmt_pkt_lt_1023bytes);
1404284741Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1405250661Sdavidcs		__func__, mstat->xmt_pkt_lt_1518bytes);
1406284741Sdavidcs	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1407250661Sdavidcs		__func__, mstat->xmt_pkt_gt_1518bytes);
1408250661Sdavidcs
1409250661Sdavidcs	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1410250661Sdavidcs		mstat->rcv_frames);
1411250661Sdavidcs	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1412250661Sdavidcs		mstat->rcv_bytes);
1413250661Sdavidcs	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1414250661Sdavidcs		mstat->rcv_mcast_pkts);
1415250661Sdavidcs	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1416250661Sdavidcs		mstat->rcv_bcast_pkts);
1417250661Sdavidcs	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1418250661Sdavidcs		mstat->rcv_pause_frames);
1419250661Sdavidcs	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1420250661Sdavidcs		mstat->rcv_cntrl_pkts);
1421250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1422250661Sdavidcs		__func__, mstat->rcv_pkt_lt_64bytes);
1423250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1424250661Sdavidcs		__func__, mstat->rcv_pkt_lt_127bytes);
1425250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1426250661Sdavidcs		__func__, mstat->rcv_pkt_lt_255bytes);
1427250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1428250661Sdavidcs		__func__, mstat->rcv_pkt_lt_511bytes);
1429284741Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n",
1430250661Sdavidcs		__func__, mstat->rcv_pkt_lt_1023bytes);
1431284741Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n",
1432250661Sdavidcs		__func__, mstat->rcv_pkt_lt_1518bytes);
1433284741Sdavidcs	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n",
1434250661Sdavidcs		__func__, mstat->rcv_pkt_gt_1518bytes);
1435250661Sdavidcs
1436250661Sdavidcs	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1437250661Sdavidcs		mstat->rcv_len_error);
1438250661Sdavidcs	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1439250661Sdavidcs		mstat->rcv_len_small);
1440250661Sdavidcs	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1441250661Sdavidcs		mstat->rcv_len_large);
1442250661Sdavidcs	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1443250661Sdavidcs		mstat->rcv_jabber);
1444250661Sdavidcs	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1445250661Sdavidcs		mstat->rcv_dropped);
1446250661Sdavidcs	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1447250661Sdavidcs		mstat->fcs_error);
1448250661Sdavidcs	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1449250661Sdavidcs		mstat->align_error);
1450250661Sdavidcs}
1451250661Sdavidcs
1452250661Sdavidcs
1453250661Sdavidcsstatic int
1454284741Sdavidcsqla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
1455250661Sdavidcs{
1456250661Sdavidcs	device_t		dev;
1457250661Sdavidcs	q80_get_stats_t		*stat;
1458250661Sdavidcs	q80_get_stats_rsp_t	*stat_rsp;
1459250661Sdavidcs	uint32_t		err;
1460250661Sdavidcs
1461250661Sdavidcs	dev = ha->pci_dev;
1462250661Sdavidcs
1463250661Sdavidcs	stat = (q80_get_stats_t *)ha->hw.mbox;
1464250661Sdavidcs	bzero(stat, (sizeof (q80_get_stats_t)));
1465250661Sdavidcs
1466250661Sdavidcs	stat->opcode = Q8_MBX_GET_STATS;
1467250661Sdavidcs	stat->count_version = 2;
1468250661Sdavidcs	stat->count_version |= Q8_MBX_CMD_VERSION;
1469250661Sdavidcs
1470250661Sdavidcs	stat->cmd = cmd;
1471250661Sdavidcs
1472250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1473284741Sdavidcs                ha->hw.mbox, (rsp_size >> 2), 0)) {
1474250661Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
1475250661Sdavidcs                return -1;
1476250661Sdavidcs        }
1477250661Sdavidcs
1478250661Sdavidcs	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1479250661Sdavidcs
1480250661Sdavidcs        err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1481250661Sdavidcs
1482250661Sdavidcs        if (err) {
1483250661Sdavidcs                return -1;
1484250661Sdavidcs        }
1485250661Sdavidcs
1486250661Sdavidcs	return 0;
1487250661Sdavidcs}
1488250661Sdavidcs
1489250661Sdavidcsvoid
1490250661Sdavidcsql_get_stats(qla_host_t *ha)
1491250661Sdavidcs{
1492250661Sdavidcs	q80_get_stats_rsp_t	*stat_rsp;
1493250661Sdavidcs	q80_mac_stats_t		*mstat;
1494250661Sdavidcs	q80_xmt_stats_t		*xstat;
1495250661Sdavidcs	q80_rcv_stats_t		*rstat;
1496250661Sdavidcs	uint32_t		cmd;
1497284741Sdavidcs	int			i;
1498250661Sdavidcs
1499250661Sdavidcs	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1500250661Sdavidcs	/*
1501250661Sdavidcs	 * Get MAC Statistics
1502250661Sdavidcs	 */
1503250661Sdavidcs	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1504284741Sdavidcs//	cmd |= Q8_GET_STATS_CMD_CLEAR;
1505250661Sdavidcs
1506250661Sdavidcs	cmd |= ((ha->pci_func & 0x1) << 16);
1507250661Sdavidcs
1508284741Sdavidcs	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1509250661Sdavidcs		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1510250661Sdavidcs		qla_mac_stats(ha, mstat);
1511250661Sdavidcs	} else {
1512250661Sdavidcs                device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1513250661Sdavidcs			__func__, ha->hw.mbox[0]);
1514250661Sdavidcs	}
1515250661Sdavidcs	/*
1516250661Sdavidcs	 * Get RCV Statistics
1517250661Sdavidcs	 */
1518250661Sdavidcs	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1519284741Sdavidcs//	cmd |= Q8_GET_STATS_CMD_CLEAR;
1520250661Sdavidcs	cmd |= (ha->hw.rcv_cntxt_id << 16);
1521250661Sdavidcs
1522284741Sdavidcs	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
1523250661Sdavidcs		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1524250661Sdavidcs		qla_rcv_stats(ha, rstat);
1525250661Sdavidcs	} else {
1526250661Sdavidcs                device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1527250661Sdavidcs			__func__, ha->hw.mbox[0]);
1528250661Sdavidcs	}
1529250661Sdavidcs	/*
1530250661Sdavidcs	 * Get XMT Statistics
1531250661Sdavidcs	 */
1532284741Sdavidcs	for (i = 0 ; i < ha->hw.num_tx_rings; i++) {
1533284741Sdavidcs		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1534284741Sdavidcs//		cmd |= Q8_GET_STATS_CMD_CLEAR;
1535284741Sdavidcs		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);
1536250661Sdavidcs
1537284741Sdavidcs		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
1538284741Sdavidcs			== 0) {
1539284741Sdavidcs			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1540284741Sdavidcs			qla_xmt_stats(ha, xstat, i);
1541284741Sdavidcs		} else {
1542284741Sdavidcs			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1543284741Sdavidcs				__func__, ha->hw.mbox[0]);
1544284741Sdavidcs		}
1545284741Sdavidcs	}
1546284741Sdavidcs	return;
1547284741Sdavidcs}
1548250661Sdavidcs
1549284741Sdavidcsstatic void
1550284741Sdavidcsqla_get_quick_stats(qla_host_t *ha)
1551284741Sdavidcs{
1552284741Sdavidcs	q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp;
1553284741Sdavidcs	q80_mac_stats_t         *mstat;
1554284741Sdavidcs	q80_xmt_stats_t         *xstat;
1555284741Sdavidcs	q80_rcv_stats_t         *rstat;
1556284741Sdavidcs	uint32_t                cmd;
1557284741Sdavidcs
1558284741Sdavidcs	stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox;
1559284741Sdavidcs
1560284741Sdavidcs	cmd = Q8_GET_STATS_CMD_TYPE_ALL;
1561284741Sdavidcs//      cmd |= Q8_GET_STATS_CMD_CLEAR;
1562284741Sdavidcs
1563284741Sdavidcs//      cmd |= ((ha->pci_func & 0x3) << 16);
1564284741Sdavidcs	cmd |= (0xFFFF << 16);
1565284741Sdavidcs
1566284741Sdavidcs	if (qla_get_hw_stats(ha, cmd,
1567284741Sdavidcs			sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) {
1568284741Sdavidcs
1569284741Sdavidcs		mstat = (q80_mac_stats_t *)&stat_rsp->mac;
1570284741Sdavidcs		rstat = (q80_rcv_stats_t *)&stat_rsp->rcv;
1571284741Sdavidcs		xstat = (q80_xmt_stats_t *)&stat_rsp->xmt;
1572284741Sdavidcs		qla_mac_stats(ha, mstat);
1573284741Sdavidcs		qla_rcv_stats(ha, rstat);
1574284741Sdavidcs		qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings);
1575250661Sdavidcs	} else {
1576284741Sdavidcs		device_printf(ha->pci_dev, "%s: failed [0x%08x]\n",
1577250661Sdavidcs			__func__, ha->hw.mbox[0]);
1578250661Sdavidcs	}
1579284741Sdavidcs	return;
1580250661Sdavidcs}
1581250661Sdavidcs
1582250661Sdavidcs/*
1583250661Sdavidcs * Name: qla_tx_tso
1584250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for
1585250661Sdavidcs *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1586250661Sdavidcs *	Ring Structure are plugged in.
1587250661Sdavidcs */
1588250661Sdavidcsstatic int
1589250661Sdavidcsqla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1590250661Sdavidcs{
1591250661Sdavidcs	struct ether_vlan_header *eh;
1592250661Sdavidcs	struct ip *ip = NULL;
1593250661Sdavidcs	struct ip6_hdr *ip6 = NULL;
1594250661Sdavidcs	struct tcphdr *th = NULL;
1595250661Sdavidcs	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1596250661Sdavidcs	uint16_t etype, opcode, offload = 1;
1597250661Sdavidcs	device_t dev;
1598250661Sdavidcs
1599250661Sdavidcs	dev = ha->pci_dev;
1600250661Sdavidcs
1601250661Sdavidcs
1602250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1603250661Sdavidcs
1604250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1605250661Sdavidcs		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1606250661Sdavidcs		etype = ntohs(eh->evl_proto);
1607250661Sdavidcs	} else {
1608250661Sdavidcs		ehdrlen = ETHER_HDR_LEN;
1609250661Sdavidcs		etype = ntohs(eh->evl_encap_proto);
1610250661Sdavidcs	}
1611250661Sdavidcs
1612250661Sdavidcs	hdrlen = 0;
1613250661Sdavidcs
1614250661Sdavidcs	switch (etype) {
1615250661Sdavidcs		case ETHERTYPE_IP:
1616250661Sdavidcs
1617250661Sdavidcs			tcp_opt_off = ehdrlen + sizeof(struct ip) +
1618250661Sdavidcs					sizeof(struct tcphdr);
1619250661Sdavidcs
1620250661Sdavidcs			if (mp->m_len < tcp_opt_off) {
1621250661Sdavidcs				m_copydata(mp, 0, tcp_opt_off, hdr);
1622250661Sdavidcs				ip = (struct ip *)(hdr + ehdrlen);
1623250661Sdavidcs			} else {
1624250661Sdavidcs				ip = (struct ip *)(mp->m_data + ehdrlen);
1625250661Sdavidcs			}
1626250661Sdavidcs
1627250661Sdavidcs			ip_hlen = ip->ip_hl << 2;
1628250661Sdavidcs			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1629250661Sdavidcs
1630250661Sdavidcs
1631250661Sdavidcs			if ((ip->ip_p != IPPROTO_TCP) ||
1632250661Sdavidcs				(ip_hlen != sizeof (struct ip))){
1633250661Sdavidcs				/* IP Options are not supported */
1634250661Sdavidcs
1635250661Sdavidcs				offload = 0;
1636250661Sdavidcs			} else
1637250661Sdavidcs				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1638250661Sdavidcs
1639250661Sdavidcs		break;
1640250661Sdavidcs
1641250661Sdavidcs		case ETHERTYPE_IPV6:
1642250661Sdavidcs
1643250661Sdavidcs			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1644250661Sdavidcs					sizeof (struct tcphdr);
1645250661Sdavidcs
1646250661Sdavidcs			if (mp->m_len < tcp_opt_off) {
1647250661Sdavidcs				m_copydata(mp, 0, tcp_opt_off, hdr);
1648250661Sdavidcs				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1649250661Sdavidcs			} else {
1650250661Sdavidcs				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1651250661Sdavidcs			}
1652250661Sdavidcs
1653250661Sdavidcs			ip_hlen = sizeof(struct ip6_hdr);
1654250661Sdavidcs			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1655250661Sdavidcs
1656250661Sdavidcs			if (ip6->ip6_nxt != IPPROTO_TCP) {
1657250661Sdavidcs				//device_printf(dev, "%s: ipv6\n", __func__);
1658250661Sdavidcs				offload = 0;
1659250661Sdavidcs			} else
1660250661Sdavidcs				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1661250661Sdavidcs		break;
1662250661Sdavidcs
1663250661Sdavidcs		default:
1664250661Sdavidcs			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1665250661Sdavidcs			offload = 0;
1666250661Sdavidcs		break;
1667250661Sdavidcs	}
1668250661Sdavidcs
1669250661Sdavidcs	if (!offload)
1670250661Sdavidcs		return (-1);
1671250661Sdavidcs
1672250661Sdavidcs	tcp_hlen = th->th_off << 2;
1673250661Sdavidcs	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1674250661Sdavidcs
1675250661Sdavidcs        if (mp->m_len < hdrlen) {
1676250661Sdavidcs                if (mp->m_len < tcp_opt_off) {
1677250661Sdavidcs                        if (tcp_hlen > sizeof(struct tcphdr)) {
1678250661Sdavidcs                                m_copydata(mp, tcp_opt_off,
1679250661Sdavidcs                                        (tcp_hlen - sizeof(struct tcphdr)),
1680250661Sdavidcs                                        &hdr[tcp_opt_off]);
1681250661Sdavidcs                        }
1682250661Sdavidcs                } else {
1683250661Sdavidcs                        m_copydata(mp, 0, hdrlen, hdr);
1684250661Sdavidcs                }
1685250661Sdavidcs        }
1686250661Sdavidcs
1687250661Sdavidcs	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1688250661Sdavidcs
1689250661Sdavidcs	tx_cmd->flags_opcode = opcode ;
1690250661Sdavidcs	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1691250661Sdavidcs	tx_cmd->total_hdr_len = hdrlen;
1692250661Sdavidcs
1693250661Sdavidcs	/* Check for multicast: least significant bit of MSB == 1 */
1694250661Sdavidcs	if (eh->evl_dhost[0] & 0x01) {
1695250661Sdavidcs		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1696250661Sdavidcs	}
1697250661Sdavidcs
1698250661Sdavidcs	if (mp->m_len < hdrlen) {
1699250661Sdavidcs		printf("%d\n", hdrlen);
1700250661Sdavidcs		return (1);
1701250661Sdavidcs	}
1702250661Sdavidcs
1703250661Sdavidcs	return (0);
1704250661Sdavidcs}
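/*
 * Illustrative note (not part of the driver): the header length that
 * qla_tx_tso() programs into tx_cmd->total_hdr_len is the sum of the
 * L2, L3 and L4 header sizes. Assuming an untagged IPv4/TCP frame with
 * no IP options and no TCP options:
 *
 *	ehdrlen  = ETHER_HDR_LEN                 = 14
 *	ip_hlen  = sizeof(struct ip)             = 20
 *	tcp_hlen = th->th_off << 2               = 20
 *	hdrlen   = ehdrlen + ip_hlen + tcp_hlen  = 54
 *
 * For a VLAN-tagged mbuf the caller (ql_hw_send) later adds
 * ETHER_VLAN_ENCAP_LEN (4) to total_hdr_len.
 */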
1705250661Sdavidcs
1706250661Sdavidcs/*
1707250661Sdavidcs * Name: qla_tx_chksum
1708250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for
1709250661Sdavidcs *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1710250661Sdavidcs *	Ring Structure are plugged in.
1711250661Sdavidcs */
1712250661Sdavidcsstatic int
1713250661Sdavidcsqla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1714250661Sdavidcs	uint32_t *tcp_hdr_off)
1715250661Sdavidcs{
1716250661Sdavidcs	struct ether_vlan_header *eh;
1717250661Sdavidcs	struct ip *ip;
1718250661Sdavidcs	struct ip6_hdr *ip6;
1719250661Sdavidcs	uint32_t ehdrlen, ip_hlen;
1720250661Sdavidcs	uint16_t etype, opcode, offload = 1;
1721250661Sdavidcs	device_t dev;
1722250661Sdavidcs	uint8_t buf[sizeof(struct ip6_hdr)];
1723250661Sdavidcs
1724250661Sdavidcs	dev = ha->pci_dev;
1725250661Sdavidcs
1726250661Sdavidcs	*op_code = 0;
1727250661Sdavidcs
1728250661Sdavidcs	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1729250661Sdavidcs		return (-1);
1730250661Sdavidcs
1731250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1732250661Sdavidcs
1733250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1734250661Sdavidcs		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1735250661Sdavidcs		etype = ntohs(eh->evl_proto);
1736250661Sdavidcs	} else {
1737250661Sdavidcs		ehdrlen = ETHER_HDR_LEN;
1738250661Sdavidcs		etype = ntohs(eh->evl_encap_proto);
1739250661Sdavidcs	}
1740250661Sdavidcs
1741250661Sdavidcs
1742250661Sdavidcs	switch (etype) {
1743250661Sdavidcs		case ETHERTYPE_IP:
1744250661Sdavidcs			ip = (struct ip *)(mp->m_data + ehdrlen);
1745250661Sdavidcs
1746250661Sdavidcs			ip_hlen = sizeof (struct ip);
1747250661Sdavidcs
1748250661Sdavidcs			if (mp->m_len < (ehdrlen + ip_hlen)) {
1749250661Sdavidcs				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1750250661Sdavidcs				ip = (struct ip *)buf;
1751250661Sdavidcs			}
1752250661Sdavidcs
1753250661Sdavidcs			if (ip->ip_p == IPPROTO_TCP)
1754250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1755250661Sdavidcs			else if (ip->ip_p == IPPROTO_UDP)
1756250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1757250661Sdavidcs			else {
1758250661Sdavidcs				//device_printf(dev, "%s: ipv4\n", __func__);
1759250661Sdavidcs				offload = 0;
1760250661Sdavidcs			}
1761250661Sdavidcs		break;
1762250661Sdavidcs
1763250661Sdavidcs		case ETHERTYPE_IPV6:
1764250661Sdavidcs			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1765250661Sdavidcs
1766250661Sdavidcs			ip_hlen = sizeof(struct ip6_hdr);
1767250661Sdavidcs
1768250661Sdavidcs			if (mp->m_len < (ehdrlen + ip_hlen)) {
1769250661Sdavidcs				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1770250661Sdavidcs					buf);
1771250661Sdavidcs				ip6 = (struct ip6_hdr *)buf;
1772250661Sdavidcs			}
1773250661Sdavidcs
1774250661Sdavidcs			if (ip6->ip6_nxt == IPPROTO_TCP)
1775250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1776250661Sdavidcs			else if (ip6->ip6_nxt == IPPROTO_UDP)
1777250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1778250661Sdavidcs			else {
1779250661Sdavidcs				//device_printf(dev, "%s: ipv6\n", __func__);
1780250661Sdavidcs				offload = 0;
1781250661Sdavidcs			}
1782250661Sdavidcs		break;
1783250661Sdavidcs
1784250661Sdavidcs		default:
1785250661Sdavidcs			offload = 0;
1786250661Sdavidcs		break;
1787250661Sdavidcs	}
1788250661Sdavidcs	if (!offload)
1789250661Sdavidcs		return (-1);
1790250661Sdavidcs
1791250661Sdavidcs	*op_code = opcode;
1792250661Sdavidcs	*tcp_hdr_off = (ip_hlen + ehdrlen);
1793250661Sdavidcs
1794250661Sdavidcs	return (0);
1795250661Sdavidcs}
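/*
 * Illustrative note (not part of the driver): qla_tx_chksum() reports an
 * offload opcode only when the stack requested it, i.e. when CSUM_TCP or
 * CSUM_UDP is set in mp->m_pkthdr.csum_flags. The opcode encodes both the
 * L3 and the L4 protocol:
 *
 *	IPv4 + TCP	Q8_TX_CMD_OP_XMT_TCP_CHKSUM
 *	IPv4 + UDP	Q8_TX_CMD_OP_XMT_UDP_CHKSUM
 *	IPv6 + TCP	Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6
 *	IPv6 + UDP	Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6
 *
 * and *tcp_hdr_off is set to ehdrlen + ip_hlen, the offset of the L4
 * header from the start of the frame.
 */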
1796250661Sdavidcs
1797250661Sdavidcs#define QLA_TX_MIN_FREE 2
1798250661Sdavidcs/*
1799250661Sdavidcs * Name: ql_hw_send
1800250661Sdavidcs * Function: Transmits a packet. It first checks if the packet is a
1801250661Sdavidcs *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1802250661Sdavidcs *	offload. If either of these creteria are not met, it is transmitted
1803250661Sdavidcs *	offload. If neither of these criteria is met, it is transmitted
1804250661Sdavidcs *	as a regular Ethernet frame.
1805250661Sdavidcsint
1806250661Sdavidcsql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1807284741Sdavidcs	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
1808250661Sdavidcs{
1809250661Sdavidcs	struct ether_vlan_header *eh;
1810250661Sdavidcs	qla_hw_t *hw = &ha->hw;
1811250661Sdavidcs	q80_tx_cmd_t *tx_cmd, tso_cmd;
1812250661Sdavidcs	bus_dma_segment_t *c_seg;
1813250661Sdavidcs	uint32_t num_tx_cmds, hdr_len = 0;
1814250661Sdavidcs	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1815250661Sdavidcs	device_t dev;
1816250661Sdavidcs	int i, ret;
1817250661Sdavidcs	uint8_t *src = NULL, *dst = NULL;
1818250661Sdavidcs	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1819250661Sdavidcs	uint32_t op_code = 0;
1820250661Sdavidcs	uint32_t tcp_hdr_off = 0;
1821250661Sdavidcs
1822250661Sdavidcs	dev = ha->pci_dev;
1823250661Sdavidcs
1824250661Sdavidcs	/*
1825250661Sdavidcs	 * Always make sure there is at least one empty slot in the tx_ring;
1826250661Sdavidcs	 * the tx_ring is considered full when only one entry is available.
1827250661Sdavidcs	 */
1828250661Sdavidcs        num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
1829250661Sdavidcs
1830250661Sdavidcs	total_length = mp->m_pkthdr.len;
1831250661Sdavidcs	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1832250661Sdavidcs		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1833250661Sdavidcs			__func__, total_length);
1834250661Sdavidcs		return (-1);
1835250661Sdavidcs	}
1836250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1837250661Sdavidcs
1838250661Sdavidcs	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1839250661Sdavidcs
1840250661Sdavidcs		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1841250661Sdavidcs
1842250661Sdavidcs		src = frame_hdr;
1843250661Sdavidcs		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1844250661Sdavidcs
1845250661Sdavidcs		if (!(ret & ~1)) {
1846250661Sdavidcs			/* find the additional tx_cmd descriptors required */
1847250661Sdavidcs
1848250661Sdavidcs			if (mp->m_flags & M_VLANTAG)
1849250661Sdavidcs				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1850250661Sdavidcs
1851250661Sdavidcs			hdr_len = tso_cmd.total_hdr_len;
1852250661Sdavidcs
1853250661Sdavidcs			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1854250661Sdavidcs			bytes = QL_MIN(bytes, hdr_len);
1855250661Sdavidcs
1856250661Sdavidcs			num_tx_cmds++;
1857250661Sdavidcs			hdr_len -= bytes;
1858250661Sdavidcs
1859250661Sdavidcs			while (hdr_len) {
1860250661Sdavidcs				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1861250661Sdavidcs				hdr_len -= bytes;
1862250661Sdavidcs				num_tx_cmds++;
1863250661Sdavidcs			}
1864250661Sdavidcs			hdr_len = tso_cmd.total_hdr_len;
1865250661Sdavidcs
1866250661Sdavidcs			if (ret == 0)
1867250661Sdavidcs				src = (uint8_t *)eh;
1868250661Sdavidcs		} else
1869250661Sdavidcs			return (EINVAL);
1870250661Sdavidcs	} else {
1871250661Sdavidcs		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1872250661Sdavidcs	}
1873250661Sdavidcs
1874284741Sdavidcs	if (iscsi_pdu)
1875284741Sdavidcs		ha->hw.iscsi_pkt_count++;
1876284741Sdavidcs
1877250661Sdavidcs	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1878250661Sdavidcs		qla_hw_tx_done_locked(ha, txr_idx);
1879250661Sdavidcs		if (hw->tx_cntxt[txr_idx].txr_free <=
1880250661Sdavidcs				(num_tx_cmds + QLA_TX_MIN_FREE)) {
1881250661Sdavidcs        		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1882250661Sdavidcs				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1883250661Sdavidcs				__func__));
1884250661Sdavidcs			return (-1);
1885250661Sdavidcs		}
1886250661Sdavidcs	}
1887250661Sdavidcs
1888250661Sdavidcs	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1889250661Sdavidcs
1890250661Sdavidcs        if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1891250661Sdavidcs
1892250661Sdavidcs                if (nsegs > ha->hw.max_tx_segs)
1893250661Sdavidcs                        ha->hw.max_tx_segs = nsegs;
1894250661Sdavidcs
1895250661Sdavidcs                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1896250661Sdavidcs
1897250661Sdavidcs                if (op_code) {
1898250661Sdavidcs                        tx_cmd->flags_opcode = op_code;
1899250661Sdavidcs                        tx_cmd->tcp_hdr_off = tcp_hdr_off;
1900250661Sdavidcs
1901250661Sdavidcs                } else {
1902250661Sdavidcs                        tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1903250661Sdavidcs                }
1904250661Sdavidcs	} else {
1905250661Sdavidcs		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1906250661Sdavidcs		ha->tx_tso_frames++;
1907250661Sdavidcs	}
1908250661Sdavidcs
1909250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1910250661Sdavidcs        	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1911284741Sdavidcs
1912284741Sdavidcs		if (iscsi_pdu)
1913284741Sdavidcs			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;
1914284741Sdavidcs
1915250661Sdavidcs	} else if (mp->m_flags & M_VLANTAG) {
1916250661Sdavidcs
1917250661Sdavidcs		if (hdr_len) { /* TSO */
1918250661Sdavidcs			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1919250661Sdavidcs						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1920250661Sdavidcs			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1921250661Sdavidcs		} else
1922250661Sdavidcs			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1923250661Sdavidcs
1924250661Sdavidcs		ha->hw_vlan_tx_frames++;
1925250661Sdavidcs		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1926284741Sdavidcs
1927284741Sdavidcs		if (iscsi_pdu) {
1928284741Sdavidcs			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
1929284741Sdavidcs			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
1930284741Sdavidcs		}
1931250661Sdavidcs	}
1932250661Sdavidcs
1933250661Sdavidcs
1934250661Sdavidcs        tx_cmd->n_bufs = (uint8_t)nsegs;
1935250661Sdavidcs        tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1936250661Sdavidcs        tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
1937250661Sdavidcs	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1938250661Sdavidcs
1939250661Sdavidcs	c_seg = segs;
1940250661Sdavidcs
1941250661Sdavidcs	while (1) {
1942250661Sdavidcs		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1943250661Sdavidcs
1944250661Sdavidcs			switch (i) {
1945250661Sdavidcs			case 0:
1946250661Sdavidcs				tx_cmd->buf1_addr = c_seg->ds_addr;
1947250661Sdavidcs				tx_cmd->buf1_len = c_seg->ds_len;
1948250661Sdavidcs				break;
1949250661Sdavidcs
1950250661Sdavidcs			case 1:
1951250661Sdavidcs				tx_cmd->buf2_addr = c_seg->ds_addr;
1952250661Sdavidcs				tx_cmd->buf2_len = c_seg->ds_len;
1953250661Sdavidcs				break;
1954250661Sdavidcs
1955250661Sdavidcs			case 2:
1956250661Sdavidcs				tx_cmd->buf3_addr = c_seg->ds_addr;
1957250661Sdavidcs				tx_cmd->buf3_len = c_seg->ds_len;
1958250661Sdavidcs				break;
1959250661Sdavidcs
1960250661Sdavidcs			case 3:
1961250661Sdavidcs				tx_cmd->buf4_addr = c_seg->ds_addr;
1962250661Sdavidcs				tx_cmd->buf4_len = c_seg->ds_len;
1963250661Sdavidcs				break;
1964250661Sdavidcs			}
1965250661Sdavidcs
1966250661Sdavidcs			c_seg++;
1967250661Sdavidcs			nsegs--;
1968250661Sdavidcs		}
1969250661Sdavidcs
1970250661Sdavidcs		txr_next = hw->tx_cntxt[txr_idx].txr_next =
1971250661Sdavidcs			(hw->tx_cntxt[txr_idx].txr_next + 1) &
1972250661Sdavidcs				(NUM_TX_DESCRIPTORS - 1);
1973250661Sdavidcs		tx_cmd_count++;
1974250661Sdavidcs
1975250661Sdavidcs		if (!nsegs)
1976250661Sdavidcs			break;
1977250661Sdavidcs
1978250661Sdavidcs		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1979250661Sdavidcs		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1980250661Sdavidcs	}
1981250661Sdavidcs
1982250661Sdavidcs	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1983250661Sdavidcs
1984250661Sdavidcs		/* TSO: copy the header into the following tx cmd descriptors */
1985250661Sdavidcs
1986250661Sdavidcs		txr_next = hw->tx_cntxt[txr_idx].txr_next;
1987250661Sdavidcs
1988250661Sdavidcs		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1989250661Sdavidcs		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1990250661Sdavidcs
1991250661Sdavidcs		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1992250661Sdavidcs		bytes = QL_MIN(bytes, hdr_len);
1993250661Sdavidcs
1994250661Sdavidcs		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1995250661Sdavidcs
1996250661Sdavidcs		if (mp->m_flags & M_VLANTAG) {
1997250661Sdavidcs			/* first copy the src/dst MAC addresses */
1998250661Sdavidcs			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
1999250661Sdavidcs			dst += (ETHER_ADDR_LEN * 2);
2000250661Sdavidcs			src += (ETHER_ADDR_LEN * 2);
2001250661Sdavidcs
2002250661Sdavidcs			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
2003250661Sdavidcs			dst += 2;
2004250661Sdavidcs			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
2005250661Sdavidcs			dst += 2;
2006250661Sdavidcs
2007250661Sdavidcs			/* bytes left in src header */
2008250661Sdavidcs			hdr_len -= ((ETHER_ADDR_LEN * 2) +
2009250661Sdavidcs					ETHER_VLAN_ENCAP_LEN);
2010250661Sdavidcs
2011250661Sdavidcs			/* bytes left in TxCmd Entry */
2012250661Sdavidcs			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
2013250661Sdavidcs
2014250661Sdavidcs
2015250661Sdavidcs			bcopy(src, dst, bytes);
2016250661Sdavidcs			src += bytes;
2017250661Sdavidcs			hdr_len -= bytes;
2018250661Sdavidcs		} else {
2019250661Sdavidcs			bcopy(src, dst, bytes);
2020250661Sdavidcs			src += bytes;
2021250661Sdavidcs			hdr_len -= bytes;
2022250661Sdavidcs		}
2023250661Sdavidcs
2024250661Sdavidcs		txr_next = hw->tx_cntxt[txr_idx].txr_next =
2025250661Sdavidcs				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2026250661Sdavidcs					(NUM_TX_DESCRIPTORS - 1);
2027250661Sdavidcs		tx_cmd_count++;
2028250661Sdavidcs
2029250661Sdavidcs		while (hdr_len) {
2030250661Sdavidcs			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
2031250661Sdavidcs			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
2032250661Sdavidcs
2033250661Sdavidcs			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
2034250661Sdavidcs
2035250661Sdavidcs			bcopy(src, tx_cmd, bytes);
2036250661Sdavidcs			src += bytes;
2037250661Sdavidcs			hdr_len -= bytes;
2038250661Sdavidcs
2039250661Sdavidcs			txr_next = hw->tx_cntxt[txr_idx].txr_next =
2040250661Sdavidcs				(hw->tx_cntxt[txr_idx].txr_next + 1) &
2041250661Sdavidcs					(NUM_TX_DESCRIPTORS - 1);
2042250661Sdavidcs			tx_cmd_count++;
2043250661Sdavidcs		}
2044250661Sdavidcs	}
2045250661Sdavidcs
2046250661Sdavidcs	hw->tx_cntxt[txr_idx].txr_free =
2047250661Sdavidcs		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
2048250661Sdavidcs
2049250661Sdavidcs	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
2050250661Sdavidcs		txr_idx);
2051250661Sdavidcs       	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
2052250661Sdavidcs
2053250661Sdavidcs	return (0);
2054250661Sdavidcs}
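/*
 * Illustrative sketch, compiled out (not part of the driver): each
 * q80_tx_cmd_t entry describes up to four DMA segments (buf1..buf4
 * above), so the data portion of a packet mapped into nsegs segments
 * needs ceil(nsegs / 4) command entries, which is what
 * (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2 computes in ql_hw_send().
 * TSO consumes additional entries to carry the copied packet header.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int nsegs;

	for (nsegs = 1; nsegs <= 8; nsegs++)
		printf("nsegs %u -> %u tx cmd entries\n", nsegs,
		    (nsegs + 3) >> 2);
	return (0);
}
#endif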
2055250661Sdavidcs
2056250661Sdavidcs
2057284741Sdavidcs
2058284741Sdavidcs#define Q8_CONFIG_IND_TBL_SIZE	32 /* must be < Q8_RSS_IND_TBL_SIZE and a power of 2 */
2059250661Sdavidcsstatic int
2060250661Sdavidcsqla_config_rss_ind_table(qla_host_t *ha)
2061250661Sdavidcs{
2062250661Sdavidcs	uint32_t i, count;
2063284741Sdavidcs	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];
2064250661Sdavidcs
2065250661Sdavidcs
2066284741Sdavidcs	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
2067250661Sdavidcs		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
2068250661Sdavidcs	}
2069250661Sdavidcs
2070284741Sdavidcs	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
2071284741Sdavidcs		i = i + Q8_CONFIG_IND_TBL_SIZE) {
2072250661Sdavidcs
2073284741Sdavidcs		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
2074250661Sdavidcs			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
2075250661Sdavidcs		} else {
2076284741Sdavidcs			count = Q8_CONFIG_IND_TBL_SIZE;
2077250661Sdavidcs		}
2078250661Sdavidcs
2079250661Sdavidcs		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
2080250661Sdavidcs			rss_ind_tbl))
2081250661Sdavidcs			return (-1);
2082250661Sdavidcs	}
2083250661Sdavidcs
2084250661Sdavidcs	return (0);
2085250661Sdavidcs}
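/*
 * Illustrative sketch, compiled out (not part of the driver): the RSS
 * indirection table is filled round-robin across the SDS rings
 * (entry i maps to ring i % num_sds_rings) and is written to the
 * firmware in Q8_CONFIG_IND_TBL_SIZE-entry chunks until
 * Q8_RSS_IND_TBL_MAX_IDX is covered. With four SDS rings the pattern
 * printed below is 0 1 2 3 0 1 2 3 ...
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned char rss_ind_tbl[32];		/* Q8_CONFIG_IND_TBL_SIZE */
	unsigned int i, num_sds_rings = 4;	/* example ring count */

	for (i = 0; i < 32; i++) {
		rss_ind_tbl[i] = i % num_sds_rings;
		printf("%u ", (unsigned int)rss_ind_tbl[i]);
	}
	printf("\n");
	return (0);
}
#endif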
2086250661Sdavidcs
2087250661Sdavidcs/*
2088250661Sdavidcs * Name: ql_del_hw_if
2089250661Sdavidcs * Function: Destroys the hardware-specific entities corresponding to an
2090250661Sdavidcs *	Ethernet Interface
2091250661Sdavidcs */
2092250661Sdavidcsvoid
2093250661Sdavidcsql_del_hw_if(qla_host_t *ha)
2094250661Sdavidcs{
2095284741Sdavidcs	uint32_t i;
2096284741Sdavidcs	uint32_t num_msix;
2097250661Sdavidcs
2098284741Sdavidcs	(void)qla_stop_nic_func(ha);
2099284741Sdavidcs
2100250661Sdavidcs	qla_del_rcv_cntxt(ha);
2101250661Sdavidcs	qla_del_xmt_cntxt(ha);
2102250661Sdavidcs
2103250661Sdavidcs	if (ha->hw.flags.init_intr_cnxt) {
2104284741Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; ) {
2105284741Sdavidcs
2106284741Sdavidcs			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2107284741Sdavidcs				num_msix = Q8_MAX_INTR_VECTORS;
2108284741Sdavidcs			else
2109284741Sdavidcs				num_msix = ha->hw.num_sds_rings - i;
2110284741Sdavidcs			qla_config_intr_cntxt(ha, i, num_msix, 0);
2111284741Sdavidcs
2112284741Sdavidcs			i += num_msix;
2113284741Sdavidcs		}
2114284741Sdavidcs
2115250661Sdavidcs		ha->hw.flags.init_intr_cnxt = 0;
2116250661Sdavidcs	}
2117284741Sdavidcs	return;
2118250661Sdavidcs}
2119250661Sdavidcs
2120284741Sdavidcsvoid
2121284741Sdavidcsqla_confirm_9kb_enable(qla_host_t *ha)
2122284741Sdavidcs{
2123284741Sdavidcs	uint32_t supports_9kb = 0;
2124284741Sdavidcs
2125284741Sdavidcs	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
2126284741Sdavidcs
2127284741Sdavidcs	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
2128284741Sdavidcs	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
2129284741Sdavidcs	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
2130284741Sdavidcs
2131284741Sdavidcs	qla_get_nic_partition(ha, &supports_9kb, NULL);
2132284741Sdavidcs
2133284741Sdavidcs	if (!supports_9kb)
2134284741Sdavidcs		ha->hw.enable_9kb = 0;
2135284741Sdavidcs
2136284741Sdavidcs	return;
2137284741Sdavidcs}
2138284741Sdavidcs
2139284741Sdavidcs
2140250661Sdavidcs/*
2141250661Sdavidcs * Name: ql_init_hw_if
2142250661Sdavidcs * Function: Creates the hardware-specific entities corresponding to an
2143250661Sdavidcs *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
2144250661Sdavidcs *	corresponding to the interface. Enables LRO if allowed.
2145250661Sdavidcs */
2146250661Sdavidcsint
2147250661Sdavidcsql_init_hw_if(qla_host_t *ha)
2148250661Sdavidcs{
2149250661Sdavidcs	device_t	dev;
2150250661Sdavidcs	uint32_t	i;
2151250661Sdavidcs	uint8_t		bcast_mac[6];
2152250661Sdavidcs	qla_rdesc_t	*rdesc;
2153284741Sdavidcs	uint32_t	num_msix;
2154250661Sdavidcs
2155250661Sdavidcs	dev = ha->pci_dev;
2156250661Sdavidcs
2157250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
2158250661Sdavidcs		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
2159250661Sdavidcs			ha->hw.dma_buf.sds_ring[i].size);
2160250661Sdavidcs	}
2161250661Sdavidcs
2162284741Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; ) {
2163250661Sdavidcs
2164284741Sdavidcs		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
2165284741Sdavidcs			num_msix = Q8_MAX_INTR_VECTORS;
2166284741Sdavidcs		else
2167284741Sdavidcs			num_msix = ha->hw.num_sds_rings - i;
2168250661Sdavidcs
2169284741Sdavidcs		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {
2170250661Sdavidcs
2171284741Sdavidcs			if (i > 0) {
2172284741Sdavidcs
2173284741Sdavidcs				num_msix = i;
2174284741Sdavidcs
2175284741Sdavidcs				for (i = 0; i < num_msix; ) {
2176284741Sdavidcs					qla_config_intr_cntxt(ha, i,
2177284741Sdavidcs						Q8_MAX_INTR_VECTORS, 0);
2178284741Sdavidcs					i += Q8_MAX_INTR_VECTORS;
2179284741Sdavidcs				}
2180284741Sdavidcs			}
2181284741Sdavidcs			return (-1);
2182284741Sdavidcs		}
2183284741Sdavidcs
2184284741Sdavidcs		i = i + num_msix;
2185284741Sdavidcs	}
2186284741Sdavidcs
2187284741Sdavidcs        ha->hw.flags.init_intr_cnxt = 1;
2188284741Sdavidcs
2189250661Sdavidcs	if (ha->hw.mdump_init == 0) {
2190250661Sdavidcs		qla_minidump_init(ha);
2191250661Sdavidcs	}
2192250661Sdavidcs
2193250661Sdavidcs	/*
2194250661Sdavidcs	 * Create Receive Context
2195250661Sdavidcs	 */
2196250661Sdavidcs	if (qla_init_rcv_cntxt(ha)) {
2197250661Sdavidcs		return (-1);
2198250661Sdavidcs	}
2199250661Sdavidcs
2200250661Sdavidcs	for (i = 0; i < ha->hw.num_rds_rings; i++) {
2201250661Sdavidcs		rdesc = &ha->hw.rds[i];
2202250661Sdavidcs		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
2203250661Sdavidcs		rdesc->rx_in = 0;
2204250661Sdavidcs		/* Update the RDS Producer Indices */
2205250661Sdavidcs		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
2206250661Sdavidcs			rdesc->rx_next);
2207250661Sdavidcs	}
2208250661Sdavidcs
2209250661Sdavidcs
2210250661Sdavidcs	/*
2211250661Sdavidcs	 * Create Transmit Context
2212250661Sdavidcs	 */
2213250661Sdavidcs	if (qla_init_xmt_cntxt(ha)) {
2214250661Sdavidcs		qla_del_rcv_cntxt(ha);
2215250661Sdavidcs		return (-1);
2216250661Sdavidcs	}
2217250661Sdavidcs	ha->hw.max_tx_segs = 0;
2218250661Sdavidcs
2219250661Sdavidcs	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
2220250661Sdavidcs		return(-1);
2221250661Sdavidcs
2222250661Sdavidcs	ha->hw.flags.unicast_mac = 1;
2223250661Sdavidcs
2224250661Sdavidcs	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2225250661Sdavidcs	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2226250661Sdavidcs
2227250661Sdavidcs	if (qla_config_mac_addr(ha, bcast_mac, 1))
2228250661Sdavidcs		return (-1);
2229250661Sdavidcs
2230250661Sdavidcs	ha->hw.flags.bcast_mac = 1;
2231250661Sdavidcs
2232250661Sdavidcs	/*
2233250661Sdavidcs	 * program any cached multicast addresses
2234250661Sdavidcs	 */
2235250661Sdavidcs	if (qla_hw_add_all_mcast(ha))
2236250661Sdavidcs		return (-1);
2237250661Sdavidcs
2238250661Sdavidcs	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
2239250661Sdavidcs		return (-1);
2240250661Sdavidcs
2241250661Sdavidcs	if (qla_config_rss_ind_table(ha))
2242250661Sdavidcs		return (-1);
2243250661Sdavidcs
2244284741Sdavidcs	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
2245250661Sdavidcs		return (-1);
2246250661Sdavidcs
2247250661Sdavidcs	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
2248250661Sdavidcs		return (-1);
2249250661Sdavidcs
2250250661Sdavidcs	if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
2251250661Sdavidcs		return (-1);
2252250661Sdavidcs
2253284741Sdavidcs        if (qla_init_nic_func(ha))
2254284741Sdavidcs                return (-1);
2255284741Sdavidcs
2256284741Sdavidcs        if (qla_query_fw_dcbx_caps(ha))
2257284741Sdavidcs                return (-1);
2258284741Sdavidcs
2259250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++)
2260250661Sdavidcs		QL_ENABLE_INTERRUPTS(ha, i);
2261250661Sdavidcs
2262250661Sdavidcs	return (0);
2263250661Sdavidcs}
2264250661Sdavidcs
2265250661Sdavidcsstatic int
2266284741Sdavidcsqla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
2267250661Sdavidcs{
2268250661Sdavidcs        device_t                dev = ha->pci_dev;
2269250661Sdavidcs        q80_rq_map_sds_to_rds_t *map_rings;
2270284741Sdavidcs	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
2271250661Sdavidcs        uint32_t                i, err;
2272250661Sdavidcs        qla_hw_t                *hw = &ha->hw;
2273250661Sdavidcs
2274250661Sdavidcs        map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
2275250661Sdavidcs        bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
2276250661Sdavidcs
2277250661Sdavidcs        map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
2278250661Sdavidcs        map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
2279250661Sdavidcs        map_rings->count_version |= Q8_MBX_CMD_VERSION;
2280250661Sdavidcs
2281250661Sdavidcs        map_rings->cntxt_id = hw->rcv_cntxt_id;
2282284741Sdavidcs        map_rings->num_rings = num_idx;
2283250661Sdavidcs
2284284741Sdavidcs	for (i = 0; i < num_idx; i++) {
2285284741Sdavidcs		map_rings->sds_rds[i].sds_ring = i + start_idx;
2286284741Sdavidcs		map_rings->sds_rds[i].rds_ring = i + start_idx;
2287284741Sdavidcs	}
2288250661Sdavidcs
2289250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
2290250661Sdavidcs                (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
2291250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
2292250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2293250661Sdavidcs                return (-1);
2294250661Sdavidcs        }
2295250661Sdavidcs
2296284741Sdavidcs        map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;
2297250661Sdavidcs
2298250661Sdavidcs        err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
2299250661Sdavidcs
2300250661Sdavidcs        if (err) {
2301250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2302250661Sdavidcs                return (-1);
2303250661Sdavidcs        }
2304250661Sdavidcs
2305250661Sdavidcs        return (0);
2306250661Sdavidcs}
2307250661Sdavidcs
2308250661Sdavidcs/*
2309250661Sdavidcs * Name: qla_init_rcv_cntxt
2310250661Sdavidcs * Function: Creates the Receive Context.
2311250661Sdavidcs */
2312250661Sdavidcsstatic int
2313250661Sdavidcsqla_init_rcv_cntxt(qla_host_t *ha)
2314250661Sdavidcs{
2315250661Sdavidcs	q80_rq_rcv_cntxt_t	*rcntxt;
2316250661Sdavidcs	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
2317250661Sdavidcs	q80_stat_desc_t		*sdesc;
2318250661Sdavidcs	int			i, j;
2319250661Sdavidcs        qla_hw_t		*hw = &ha->hw;
2320250661Sdavidcs	device_t		dev;
2321250661Sdavidcs	uint32_t		err;
2322250661Sdavidcs	uint32_t		rcntxt_sds_rings;
2323250661Sdavidcs	uint32_t		rcntxt_rds_rings;
2324284741Sdavidcs	uint32_t		max_idx;
2325250661Sdavidcs
2326250661Sdavidcs	dev = ha->pci_dev;
2327250661Sdavidcs
2328250661Sdavidcs	/*
2329250661Sdavidcs	 * Create Receive Context
2330250661Sdavidcs	 */
2331250661Sdavidcs
2332250661Sdavidcs	for (i = 0; i < hw->num_sds_rings; i++) {
2333250661Sdavidcs		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2334250661Sdavidcs
2335250661Sdavidcs		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2336250661Sdavidcs			sdesc->data[0] = 1ULL;
2337250661Sdavidcs			sdesc->data[1] = 1ULL;
2338250661Sdavidcs		}
2339250661Sdavidcs	}
2340250661Sdavidcs
2341250661Sdavidcs	rcntxt_sds_rings = hw->num_sds_rings;
2342250661Sdavidcs	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2343250661Sdavidcs		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2344250661Sdavidcs
2345250661Sdavidcs	rcntxt_rds_rings = hw->num_rds_rings;
2346250661Sdavidcs
2347250661Sdavidcs	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2348250661Sdavidcs		rcntxt_rds_rings = MAX_RDS_RING_SETS;
2349250661Sdavidcs
2350250661Sdavidcs	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2351250661Sdavidcs	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2352250661Sdavidcs
2353250661Sdavidcs	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2354250661Sdavidcs	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2355250661Sdavidcs	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2356250661Sdavidcs
2357250661Sdavidcs	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2358250661Sdavidcs			Q8_RCV_CNTXT_CAP0_LRO |
2359250661Sdavidcs			Q8_RCV_CNTXT_CAP0_HW_LRO |
2360250661Sdavidcs			Q8_RCV_CNTXT_CAP0_RSS |
2361250661Sdavidcs			Q8_RCV_CNTXT_CAP0_SGL_LRO;
2362250661Sdavidcs
2363284741Sdavidcs	if (ha->hw.enable_9kb)
2364284741Sdavidcs		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
2365284741Sdavidcs	else
2366284741Sdavidcs		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;
2367284741Sdavidcs
2368250661Sdavidcs	if (ha->hw.num_rds_rings > 1) {
2369250661Sdavidcs		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2370250661Sdavidcs		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2371250661Sdavidcs	} else
2372250661Sdavidcs		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2373250661Sdavidcs
2374250661Sdavidcs	rcntxt->nsds_rings = rcntxt_sds_rings;
2375250661Sdavidcs
2376250661Sdavidcs	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2377250661Sdavidcs
2378250661Sdavidcs	rcntxt->rcv_vpid = 0;
2379250661Sdavidcs
2380250661Sdavidcs	for (i = 0; i <  rcntxt_sds_rings; i++) {
2381250661Sdavidcs		rcntxt->sds[i].paddr =
2382250661Sdavidcs			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2383250661Sdavidcs		rcntxt->sds[i].size =
2384250661Sdavidcs			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2385250661Sdavidcs		if (ha->msix_count == 2) {
2386250661Sdavidcs			rcntxt->sds[i].intr_id =
2387250661Sdavidcs				qla_host_to_le16(hw->intr_id[0]);
2388250661Sdavidcs			rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2389250661Sdavidcs		} else {
2390250661Sdavidcs			rcntxt->sds[i].intr_id =
2391250661Sdavidcs				qla_host_to_le16(hw->intr_id[i]);
2392250661Sdavidcs			rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2393250661Sdavidcs		}
2394250661Sdavidcs	}
2395250661Sdavidcs
2396250661Sdavidcs	for (i = 0; i <  rcntxt_rds_rings; i++) {
2397250661Sdavidcs		rcntxt->rds[i].paddr_std =
2398250661Sdavidcs			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2399284741Sdavidcs
2400284741Sdavidcs		if (ha->hw.enable_9kb)
2401284741Sdavidcs			rcntxt->rds[i].std_bsize =
2402284741Sdavidcs				qla_host_to_le64(MJUM9BYTES);
2403284741Sdavidcs		else
2404284741Sdavidcs			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2405284741Sdavidcs
2406250661Sdavidcs		rcntxt->rds[i].std_nentries =
2407250661Sdavidcs			qla_host_to_le32(NUM_RX_DESCRIPTORS);
2408250661Sdavidcs	}
2409250661Sdavidcs
2410250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2411250661Sdavidcs		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
2412250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2413250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2414250661Sdavidcs                return (-1);
2415250661Sdavidcs        }
2416250661Sdavidcs
2417250661Sdavidcs        rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2418250661Sdavidcs
2419250661Sdavidcs        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2420250661Sdavidcs
2421250661Sdavidcs        if (err) {
2422250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2423250661Sdavidcs                return (-1);
2424250661Sdavidcs        }
2425250661Sdavidcs
2426250661Sdavidcs	for (i = 0; i <  rcntxt_sds_rings; i++) {
2427250661Sdavidcs		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2428250661Sdavidcs	}
2429250661Sdavidcs
2430250661Sdavidcs	for (i = 0; i <  rcntxt_rds_rings; i++) {
2431250661Sdavidcs		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2432250661Sdavidcs	}
2433250661Sdavidcs
2434250661Sdavidcs	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2435250661Sdavidcs
2436250661Sdavidcs	ha->hw.flags.init_rx_cnxt = 1;
2437250661Sdavidcs
2438250661Sdavidcs	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2439284741Sdavidcs
2440284741Sdavidcs		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {
2441284741Sdavidcs
2442284741Sdavidcs			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
2443284741Sdavidcs				max_idx = MAX_RCNTXT_SDS_RINGS;
2444284741Sdavidcs			else
2445284741Sdavidcs				max_idx = hw->num_sds_rings - i;
2446284741Sdavidcs
2447284741Sdavidcs			err = qla_add_rcv_rings(ha, i, max_idx);
2448284741Sdavidcs			if (err)
2449284741Sdavidcs				return -1;
2450284741Sdavidcs
2451284741Sdavidcs			i += max_idx;
2452284741Sdavidcs		}
2453250661Sdavidcs	}
2454250661Sdavidcs
2455284741Sdavidcs	if (hw->num_rds_rings > 1) {
2456284741Sdavidcs
2457284741Sdavidcs		for (i = 0; i < hw->num_rds_rings; ) {
2458284741Sdavidcs
2459284741Sdavidcs			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
2460284741Sdavidcs				max_idx = MAX_SDS_TO_RDS_MAP;
2461284741Sdavidcs			else
2462284741Sdavidcs				max_idx = hw->num_rds_rings - i;
2463284741Sdavidcs
2464284741Sdavidcs			err = qla_map_sds_to_rds(ha, i, max_idx);
2465284741Sdavidcs			if (err)
2466284741Sdavidcs				return -1;
2467284741Sdavidcs
2468284741Sdavidcs			i += max_idx;
2469284741Sdavidcs		}
2470250661Sdavidcs	}
2471250661Sdavidcs
2472250661Sdavidcs	return (0);
2473250661Sdavidcs}
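/*
 * Illustrative note (ring counts below are assumed for illustration
 * only): when the interface has more SDS rings than fit in a single
 * create request, the remainder is added in chunks. For example, with
 * hw->num_sds_rings = 12 and MAX_RCNTXT_SDS_RINGS = 8, the first 8
 * rings are described in the Q8_MBX_CREATE_RX_CNTXT request above and
 * qla_add_rcv_rings(ha, 8, 4) adds the remaining 4. Likewise, with more
 * than one RDS ring, qla_map_sds_to_rds() is issued in chunks of at
 * most MAX_SDS_TO_RDS_MAP rings.
 */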
2474250661Sdavidcs
2475250661Sdavidcsstatic int
2476284741Sdavidcsqla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
2477250661Sdavidcs{
2478250661Sdavidcs	device_t		dev = ha->pci_dev;
2479250661Sdavidcs	q80_rq_add_rcv_rings_t	*add_rcv;
2480250661Sdavidcs	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
2481250661Sdavidcs	uint32_t		i,j, err;
2482250661Sdavidcs        qla_hw_t		*hw = &ha->hw;
2483250661Sdavidcs
2484250661Sdavidcs	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2485250661Sdavidcs	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2486250661Sdavidcs
2487250661Sdavidcs	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2488250661Sdavidcs	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2489250661Sdavidcs	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2490250661Sdavidcs
2491284741Sdavidcs	add_rcv->nrds_sets_rings = nsds | (1 << 5);
2492250661Sdavidcs	add_rcv->nsds_rings = nsds;
2493250661Sdavidcs	add_rcv->cntxt_id = hw->rcv_cntxt_id;
2494250661Sdavidcs
2495250661Sdavidcs        for (i = 0; i <  nsds; i++) {
2496250661Sdavidcs
2497250661Sdavidcs		j = i + sds_idx;
2498250661Sdavidcs
2499250661Sdavidcs                add_rcv->sds[i].paddr =
2500250661Sdavidcs                        qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2501250661Sdavidcs
2502250661Sdavidcs                add_rcv->sds[i].size =
2503250661Sdavidcs                        qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2504250661Sdavidcs
2505250661Sdavidcs                if (ha->msix_count == 2) {
2506250661Sdavidcs                        add_rcv->sds[i].intr_id =
2507250661Sdavidcs                                qla_host_to_le16(hw->intr_id[0]);
2508250661Sdavidcs                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2509250661Sdavidcs                } else {
2510250661Sdavidcs                        add_rcv->sds[i].intr_id =
2511250661Sdavidcs                                qla_host_to_le16(hw->intr_id[j]);
2512250661Sdavidcs                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2513250661Sdavidcs                }
2514250661Sdavidcs
2515250661Sdavidcs        }
2516284741Sdavidcs        for (i = 0; (i <  nsds); i++) {
2517250661Sdavidcs                j = i + sds_idx;
2518284741Sdavidcs
2519250661Sdavidcs                add_rcv->rds[i].paddr_std =
2520250661Sdavidcs                        qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2521284741Sdavidcs
2522284741Sdavidcs		if (ha->hw.enable_9kb)
2523284741Sdavidcs			add_rcv->rds[i].std_bsize =
2524284741Sdavidcs				qla_host_to_le64(MJUM9BYTES);
2525284741Sdavidcs		else
2526284741Sdavidcs                	add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2527284741Sdavidcs
2528250661Sdavidcs                add_rcv->rds[i].std_nentries =
2529250661Sdavidcs                        qla_host_to_le32(NUM_RX_DESCRIPTORS);
2530250661Sdavidcs        }
2531250661Sdavidcs
2532250661Sdavidcs
2533250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2534250661Sdavidcs		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
2535250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2536250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2537250661Sdavidcs                return (-1);
2538250661Sdavidcs        }
2539250661Sdavidcs
2540250661Sdavidcs        add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2541250661Sdavidcs
2542250661Sdavidcs        err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2543250661Sdavidcs
2544250661Sdavidcs        if (err) {
2545250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2546250661Sdavidcs                return (-1);
2547250661Sdavidcs        }
2548250661Sdavidcs
2549284741Sdavidcs	for (i = 0; i < nsds; i++) {
2550284741Sdavidcs		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
2551250661Sdavidcs	}
2552284741Sdavidcs
2553284741Sdavidcs	for (i = 0; i < nsds; i++) {
2554284741Sdavidcs		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
2555250661Sdavidcs	}
2556284741Sdavidcs
2557250661Sdavidcs	return (0);
2558250661Sdavidcs}
2559250661Sdavidcs
2560250661Sdavidcs/*
2561250661Sdavidcs * Name: qla_del_rcv_cntxt
2562250661Sdavidcs * Function: Destroys the Receive Context.
2563250661Sdavidcs */
2564250661Sdavidcsstatic void
2565250661Sdavidcsqla_del_rcv_cntxt(qla_host_t *ha)
2566250661Sdavidcs{
2567250661Sdavidcs	device_t			dev = ha->pci_dev;
2568250661Sdavidcs	q80_rcv_cntxt_destroy_t		*rcntxt;
2569250661Sdavidcs	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
2570250661Sdavidcs	uint32_t			err;
2571250661Sdavidcs	uint8_t				bcast_mac[6];
2572250661Sdavidcs
2573250661Sdavidcs	if (!ha->hw.flags.init_rx_cnxt)
2574250661Sdavidcs		return;
2575250661Sdavidcs
2576250661Sdavidcs	if (qla_hw_del_all_mcast(ha))
2577250661Sdavidcs		return;
2578250661Sdavidcs
2579250661Sdavidcs	if (ha->hw.flags.bcast_mac) {
2580250661Sdavidcs
2581250661Sdavidcs		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2582250661Sdavidcs		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2583250661Sdavidcs
2584250661Sdavidcs		if (qla_config_mac_addr(ha, bcast_mac, 0))
2585250661Sdavidcs			return;
2586250661Sdavidcs		ha->hw.flags.bcast_mac = 0;
2587250661Sdavidcs
2588250661Sdavidcs	}
2589250661Sdavidcs
2590250661Sdavidcs	if (ha->hw.flags.unicast_mac) {
2591250661Sdavidcs		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2592250661Sdavidcs			return;
2593250661Sdavidcs		ha->hw.flags.unicast_mac = 0;
2594250661Sdavidcs	}
2595250661Sdavidcs
2596250661Sdavidcs	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2597250661Sdavidcs	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2598250661Sdavidcs
2599250661Sdavidcs	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2600250661Sdavidcs	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2601250661Sdavidcs	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2602250661Sdavidcs
2603250661Sdavidcs	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2604250661Sdavidcs
2605250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2606250661Sdavidcs		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2607250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2608250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2609250661Sdavidcs                return;
2610250661Sdavidcs        }
2611250661Sdavidcs        rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2612250661Sdavidcs
2613250661Sdavidcs        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2614250661Sdavidcs
2615250661Sdavidcs        if (err) {
2616250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2617250661Sdavidcs        }
2618250661Sdavidcs
2619250661Sdavidcs	ha->hw.flags.init_rx_cnxt = 0;
2620250661Sdavidcs	return;
2621250661Sdavidcs}
2622250661Sdavidcs
2623250661Sdavidcs/*
2624250661Sdavidcs * Name: qla_init_xmt_cntxt_i
2625250661Sdavidcs * Function: Creates the Transmit Context for the given Transmit Ring.
2626250661Sdavidcs */
2627250661Sdavidcsstatic int
2628250661Sdavidcsqla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2629250661Sdavidcs{
2630250661Sdavidcs	device_t		dev;
2631250661Sdavidcs        qla_hw_t		*hw = &ha->hw;
2632250661Sdavidcs	q80_rq_tx_cntxt_t	*tcntxt;
2633250661Sdavidcs	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
2634250661Sdavidcs	uint32_t		err;
2635250661Sdavidcs	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2636250661Sdavidcs
2637250661Sdavidcs	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2638250661Sdavidcs
2639250661Sdavidcs	dev = ha->pci_dev;
2640250661Sdavidcs
2641250661Sdavidcs	/*
2642250661Sdavidcs	 * Create Transmit Context
2643250661Sdavidcs	 */
2644250661Sdavidcs	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2645250661Sdavidcs	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2646250661Sdavidcs
2647250661Sdavidcs	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2648250661Sdavidcs	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2649250661Sdavidcs	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2650250661Sdavidcs
2651284741Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV
2652284741Sdavidcs
2653284741Sdavidcs	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
2654284741Sdavidcs				Q8_TX_CNTXT_CAP0_TC;
2655284741Sdavidcs
2656284741Sdavidcs	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
2657284741Sdavidcs		tcntxt->traffic_class = 1;
2658284741Sdavidcs	}
2659284741Sdavidcs
2660284741Sdavidcs#else
2661284741Sdavidcs
2662250661Sdavidcs	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2663250661Sdavidcs
2664284741Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
2665284741Sdavidcs
2666250661Sdavidcs	tcntxt->ntx_rings = 1;
2667250661Sdavidcs
2668250661Sdavidcs	tcntxt->tx_ring[0].paddr =
2669250661Sdavidcs		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2670250661Sdavidcs	tcntxt->tx_ring[0].tx_consumer =
2671250661Sdavidcs		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2672250661Sdavidcs	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2673250661Sdavidcs
2674250661Sdavidcs	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2675250661Sdavidcs	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2676250661Sdavidcs
2677250661Sdavidcs
2678250661Sdavidcs	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2679250661Sdavidcs	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2680250661Sdavidcs
2681250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2682250661Sdavidcs		(sizeof (q80_rq_tx_cntxt_t) >> 2),
2683250661Sdavidcs                ha->hw.mbox,
2684250661Sdavidcs		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2685250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2686250661Sdavidcs                return (-1);
2687250661Sdavidcs        }
2688250661Sdavidcs        tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2689250661Sdavidcs
2690250661Sdavidcs        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2691250661Sdavidcs
2692250661Sdavidcs        if (err) {
2693250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2694250661Sdavidcs		return -1;
2695250661Sdavidcs        }
2696250661Sdavidcs
2697250661Sdavidcs	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2698250661Sdavidcs	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2699250661Sdavidcs
2700284741Sdavidcs	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
2701284741Sdavidcs		return (-1);
2702284741Sdavidcs
2703250661Sdavidcs	return (0);
2704250661Sdavidcs}
2705250661Sdavidcs
2706250661Sdavidcs
2707250661Sdavidcs/*
2708250661Sdavidcs * Name: qla_del_xmt_cntxt_i
2709250661Sdavidcs * Function: Destroys the Transmit Context for the given Transmit Ring.
2710250661Sdavidcs */
2711250661Sdavidcsstatic int
2712250661Sdavidcsqla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2713250661Sdavidcs{
2714250661Sdavidcs	device_t			dev = ha->pci_dev;
2715250661Sdavidcs	q80_tx_cntxt_destroy_t		*tcntxt;
2716250661Sdavidcs	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
2717250661Sdavidcs	uint32_t			err;
2718250661Sdavidcs
2719250661Sdavidcs	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2720250661Sdavidcs	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2721250661Sdavidcs
2722250661Sdavidcs	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2723250661Sdavidcs	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2724250661Sdavidcs	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2725250661Sdavidcs
2726250661Sdavidcs	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2727250661Sdavidcs
2728250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2729250661Sdavidcs		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
2730250661Sdavidcs                ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2731250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2732250661Sdavidcs                return (-1);
2733250661Sdavidcs        }
2734250661Sdavidcs        tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2735250661Sdavidcs
2736250661Sdavidcs        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2737250661Sdavidcs
2738250661Sdavidcs        if (err) {
2739250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2740250661Sdavidcs		return (-1);
2741250661Sdavidcs        }
2742250661Sdavidcs
2743250661Sdavidcs	return (0);
2744250661Sdavidcs}
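
/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Contexts of all Transmit Rings.
 */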
2745250661Sdavidcsstatic void
2746250661Sdavidcsqla_del_xmt_cntxt(qla_host_t *ha)
2747250661Sdavidcs{
2748250661Sdavidcs	uint32_t i;
2749250661Sdavidcs
2750250661Sdavidcs	if (!ha->hw.flags.init_tx_cnxt)
2751250661Sdavidcs		return;
2752250661Sdavidcs
2753250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2754250661Sdavidcs		if (qla_del_xmt_cntxt_i(ha, i))
2755250661Sdavidcs			break;
2756250661Sdavidcs	}
2757250661Sdavidcs	ha->hw.flags.init_tx_cnxt = 0;
2758250661Sdavidcs}
2759250661Sdavidcs
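/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Contexts for all Transmit Rings.
 */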
2760250661Sdavidcsstatic int
2761250661Sdavidcsqla_init_xmt_cntxt(qla_host_t *ha)
2762250661Sdavidcs{
2763250661Sdavidcs	uint32_t i, j;
2764250661Sdavidcs
2765250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2766250661Sdavidcs		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2767250661Sdavidcs			for (j = 0; j < i; j++)
2768250661Sdavidcs				qla_del_xmt_cntxt_i(ha, j);
2769250661Sdavidcs			return (-1);
2770250661Sdavidcs		}
2771250661Sdavidcs	}
2772250661Sdavidcs	ha->hw.flags.init_tx_cnxt = 1;
2773250661Sdavidcs	return (0);
2774250661Sdavidcs}
2775250661Sdavidcs
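/*
 * Name: qla_hw_add_all_mcast
 * Function: Programs all cached multicast MAC addresses into the hardware.
 */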
2776250661Sdavidcsstatic int
2777250661Sdavidcsqla_hw_add_all_mcast(qla_host_t *ha)
2778250661Sdavidcs{
2779250661Sdavidcs	int i, nmcast;
2780250661Sdavidcs
2781250661Sdavidcs	nmcast = ha->hw.nmcast;
2782250661Sdavidcs
2783250661Sdavidcs	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2784250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] != 0) ||
2785250661Sdavidcs			(ha->hw.mcast[i].addr[1] != 0) ||
2786250661Sdavidcs			(ha->hw.mcast[i].addr[2] != 0) ||
2787250661Sdavidcs			(ha->hw.mcast[i].addr[3] != 0) ||
2788250661Sdavidcs			(ha->hw.mcast[i].addr[4] != 0) ||
2789250661Sdavidcs			(ha->hw.mcast[i].addr[5] != 0)) {
2790250661Sdavidcs
2791250661Sdavidcs			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2792250661Sdavidcs                		device_printf(ha->pci_dev, "%s: failed\n",
2793250661Sdavidcs					__func__);
2794250661Sdavidcs				return (-1);
2795250661Sdavidcs			}
2796250661Sdavidcs
2797250661Sdavidcs			nmcast--;
2798250661Sdavidcs		}
2799250661Sdavidcs	}
2800250661Sdavidcs	return 0;
2801250661Sdavidcs}
2802250661Sdavidcs
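/*
 * Name: qla_hw_del_all_mcast
 * Function: Removes all cached multicast MAC addresses from the hardware.
 */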
2803250661Sdavidcsstatic int
2804250661Sdavidcsqla_hw_del_all_mcast(qla_host_t *ha)
2805250661Sdavidcs{
2806250661Sdavidcs	int i, nmcast;
2807250661Sdavidcs
2808250661Sdavidcs	nmcast = ha->hw.nmcast;
2809250661Sdavidcs
2810250661Sdavidcs	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2811250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] != 0) ||
2812250661Sdavidcs			(ha->hw.mcast[i].addr[1] != 0) ||
2813250661Sdavidcs			(ha->hw.mcast[i].addr[2] != 0) ||
2814250661Sdavidcs			(ha->hw.mcast[i].addr[3] != 0) ||
2815250661Sdavidcs			(ha->hw.mcast[i].addr[4] != 0) ||
2816250661Sdavidcs			(ha->hw.mcast[i].addr[5] != 0)) {
2817250661Sdavidcs
2818250661Sdavidcs			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2819250661Sdavidcs				return (-1);
2820250661Sdavidcs
2821250661Sdavidcs			nmcast--;
2822250661Sdavidcs		}
2823250661Sdavidcs	}
2824250661Sdavidcs	return 0;
2825250661Sdavidcs}
2826250661Sdavidcs
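/*
 * Name: qla_hw_add_mcast
 * Function: Programs a multicast MAC address into the hardware and caches
 *	it, unless it has already been added.
 */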
2827250661Sdavidcsstatic int
2828250661Sdavidcsqla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2829250661Sdavidcs{
2830250661Sdavidcs	int i;
2831250661Sdavidcs
2832250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2833250661Sdavidcs
2834250661Sdavidcs		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2835250661Sdavidcs			return 0; /* it has already been added */
2836250661Sdavidcs	}
2837250661Sdavidcs
2838250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2839250661Sdavidcs
2840250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] == 0) &&
2841250661Sdavidcs			(ha->hw.mcast[i].addr[1] == 0) &&
2842250661Sdavidcs			(ha->hw.mcast[i].addr[2] == 0) &&
2843250661Sdavidcs			(ha->hw.mcast[i].addr[3] == 0) &&
2844250661Sdavidcs			(ha->hw.mcast[i].addr[4] == 0) &&
2845250661Sdavidcs			(ha->hw.mcast[i].addr[5] == 0)) {
2846250661Sdavidcs
2847250661Sdavidcs			if (qla_config_mac_addr(ha, mta, 1))
2848250661Sdavidcs				return (-1);
2849250661Sdavidcs
2850250661Sdavidcs			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2851250661Sdavidcs			ha->hw.nmcast++;
2852250661Sdavidcs
2853250661Sdavidcs			return 0;
2854250661Sdavidcs		}
2855250661Sdavidcs	}
2856250661Sdavidcs	return 0;
2857250661Sdavidcs}
2858250661Sdavidcs
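/*
 * Name: qla_hw_del_mcast
 * Function: Removes a multicast MAC address from the hardware and from the
 *	cached list.
 */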
2859250661Sdavidcsstatic int
2860250661Sdavidcsqla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2861250661Sdavidcs{
2862250661Sdavidcs	int i;
2863250661Sdavidcs
2864250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2865250661Sdavidcs		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2866250661Sdavidcs
2867250661Sdavidcs			if (qla_config_mac_addr(ha, mta, 0))
2868250661Sdavidcs				return (-1);
2869250661Sdavidcs
2870250661Sdavidcs			ha->hw.mcast[i].addr[0] = 0;
2871250661Sdavidcs			ha->hw.mcast[i].addr[1] = 0;
2872250661Sdavidcs			ha->hw.mcast[i].addr[2] = 0;
2873250661Sdavidcs			ha->hw.mcast[i].addr[3] = 0;
2874250661Sdavidcs			ha->hw.mcast[i].addr[4] = 0;
2875250661Sdavidcs			ha->hw.mcast[i].addr[5] = 0;
2876250661Sdavidcs
2877250661Sdavidcs			ha->hw.nmcast--;
2878250661Sdavidcs
2879250661Sdavidcs			return 0;
2880250661Sdavidcs		}
2881250661Sdavidcs	}
2882250661Sdavidcs	return 0;
2883250661Sdavidcs}
2884250661Sdavidcs
2885250661Sdavidcs/*
2886250661Sdavidcs * Name: ql_hw_set_multi
2887250661Sdavidcs * Function: Sets the Multicast Addresses provided by the host O.S. into the
2888250661Sdavidcs *	hardware (for the given interface)
2889250661Sdavidcs */
2890250661Sdavidcsint
2891250661Sdavidcsql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2892250661Sdavidcs	uint32_t add_mac)
2893250661Sdavidcs{
2894250661Sdavidcs	int i;
2895250661Sdavidcs	uint8_t *mta = mcast;
2896250661Sdavidcs	int ret = 0;
2897250661Sdavidcs
2898250661Sdavidcs	for (i = 0; i < mcnt; i++) {
2899250661Sdavidcs		if (add_mac) {
2900250661Sdavidcs			ret = qla_hw_add_mcast(ha, mta);
2901250661Sdavidcs			if (ret)
2902250661Sdavidcs				break;
2903250661Sdavidcs		} else {
2904250661Sdavidcs			ret = qla_hw_del_mcast(ha, mta);
2905250661Sdavidcs			if (ret)
2906250661Sdavidcs				break;
2907250661Sdavidcs		}
2908250661Sdavidcs
2909250661Sdavidcs		mta += Q8_MAC_ADDR_LEN;
2910250661Sdavidcs	}
2911250661Sdavidcs	return (ret);
2912250661Sdavidcs}
2913250661Sdavidcs
2914250661Sdavidcs/*
2915250661Sdavidcs * Name: qla_hw_tx_done_locked
2916250661Sdavidcs * Function: Handle Transmit Completions
2917250661Sdavidcs */
2918250661Sdavidcsstatic void
2919250661Sdavidcsqla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2920250661Sdavidcs{
2921250661Sdavidcs	qla_tx_buf_t *txb;
2922250661Sdavidcs        qla_hw_t *hw = &ha->hw;
2923250661Sdavidcs	uint32_t comp_idx, comp_count = 0;
2924250661Sdavidcs	qla_hw_tx_cntxt_t *hw_tx_cntxt;
2925250661Sdavidcs
2926250661Sdavidcs	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2927250661Sdavidcs
2928250661Sdavidcs	/* retrieve index of last entry in tx ring completed */
2929250661Sdavidcs	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2930250661Sdavidcs
2931250661Sdavidcs	while (comp_idx != hw_tx_cntxt->txr_comp) {
2932250661Sdavidcs
2933250661Sdavidcs		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2934250661Sdavidcs
2935250661Sdavidcs		hw_tx_cntxt->txr_comp++;
2936250661Sdavidcs		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2937250661Sdavidcs			hw_tx_cntxt->txr_comp = 0;
2938250661Sdavidcs
2939250661Sdavidcs		comp_count++;
2940250661Sdavidcs
2941250661Sdavidcs		if (txb->m_head) {
2942271849Sglebius			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
2943250661Sdavidcs
2944250661Sdavidcs			bus_dmamap_sync(ha->tx_tag, txb->map,
2945250661Sdavidcs				BUS_DMASYNC_POSTWRITE);
2946250661Sdavidcs			bus_dmamap_unload(ha->tx_tag, txb->map);
2947250661Sdavidcs			m_freem(txb->m_head);
2948250661Sdavidcs
2949250661Sdavidcs			txb->m_head = NULL;
2950250661Sdavidcs		}
2951250661Sdavidcs	}
2952250661Sdavidcs
2953250661Sdavidcs	hw_tx_cntxt->txr_free += comp_count;
2954250661Sdavidcs	return;
2955250661Sdavidcs}
2956250661Sdavidcs
2957250661Sdavidcs/*
2958250661Sdavidcs * Name: ql_hw_tx_done
2959250661Sdavidcs * Function: Handle Transmit Completions
2960250661Sdavidcs */
2961250661Sdavidcsvoid
2962250661Sdavidcsql_hw_tx_done(qla_host_t *ha)
2963250661Sdavidcs{
2964250661Sdavidcs	int i;
2965250661Sdavidcs	uint32_t flag = 0;
2966250661Sdavidcs
2967250661Sdavidcs	if (!mtx_trylock(&ha->tx_lock)) {
2968250661Sdavidcs       		QL_DPRINT8(ha, (ha->pci_dev,
2969250661Sdavidcs			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2970250661Sdavidcs		return;
2971250661Sdavidcs	}
2972250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2973250661Sdavidcs		qla_hw_tx_done_locked(ha, i);
2974250661Sdavidcs		if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2975250661Sdavidcs			flag = 1;
2976250661Sdavidcs	}
2977250661Sdavidcs
2978250661Sdavidcs	if (!flag)
2979250661Sdavidcs		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2980250661Sdavidcs
2981250661Sdavidcs	QLA_TX_UNLOCK(ha);
2982250661Sdavidcs	return;
2983250661Sdavidcs}
2984250661Sdavidcs
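/*
 * Name: ql_update_link_state
 * Function: Reads the Link State register and notifies the network stack
 *	when the link state changes.
 */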
2985250661Sdavidcsvoid
2986250661Sdavidcsql_update_link_state(qla_host_t *ha)
2987250661Sdavidcs{
2988250661Sdavidcs	uint32_t link_state;
2989250661Sdavidcs	uint32_t prev_link_state;
2990250661Sdavidcs
2991250661Sdavidcs	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2992250661Sdavidcs		ha->hw.link_up = 0;
2993250661Sdavidcs		return;
2994250661Sdavidcs	}
2995250661Sdavidcs	link_state = READ_REG32(ha, Q8_LINK_STATE);
2996250661Sdavidcs
2997250661Sdavidcs	prev_link_state =  ha->hw.link_up;
2998250661Sdavidcs
2999250661Sdavidcs	if (ha->pci_func == 0)
3000250661Sdavidcs		ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
3001250661Sdavidcs	else
3002250661Sdavidcs		ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
3003250661Sdavidcs
3004250661Sdavidcs	if (prev_link_state !=  ha->hw.link_up) {
3005250661Sdavidcs		if (ha->hw.link_up) {
3006250661Sdavidcs			if_link_state_change(ha->ifp, LINK_STATE_UP);
3007250661Sdavidcs		} else {
3008250661Sdavidcs			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
3009250661Sdavidcs		}
3010250661Sdavidcs	}
3011250661Sdavidcs	return;
3012250661Sdavidcs}
3013250661Sdavidcs
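/*
 * Name: ql_hw_stop_rcv
 * Function: Waits for receive processing on all Status Rings to become
 *	inactive (up to approximately 1 second).
 */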
3014250661Sdavidcsvoid
3015250661Sdavidcsql_hw_stop_rcv(qla_host_t *ha)
3016250661Sdavidcs{
3017250661Sdavidcs	int i, done, count = 100;
3018250661Sdavidcs
3019284741Sdavidcs	while (count) {
3020250661Sdavidcs		done = 1;
3021250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
3022250661Sdavidcs			if (ha->hw.sds[i].rcv_active)
3023250661Sdavidcs				done = 0;
3024250661Sdavidcs		}
3025250661Sdavidcs		if (done)
3026250661Sdavidcs			break;
3027250661Sdavidcs		else
3028250661Sdavidcs			qla_mdelay(__func__, 10);
3029284741Sdavidcs		count--;
3030250661Sdavidcs	}
3031250661Sdavidcs	if (!count)
3032250661Sdavidcs		device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
3033250661Sdavidcs
3034250661Sdavidcs	return;
3035250661Sdavidcs}
3036250661Sdavidcs
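/*
 * Name: ql_hw_check_health
 * Function: Periodically checks the ASIC temperature and the firmware
 *	heartbeat; returns -1 if either indicates a failure.
 */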
3037250661Sdavidcsint
3038250661Sdavidcsql_hw_check_health(qla_host_t *ha)
3039250661Sdavidcs{
3040250661Sdavidcs	uint32_t val;
3041250661Sdavidcs
3042250661Sdavidcs	ha->hw.health_count++;
3043250661Sdavidcs
3044250661Sdavidcs	if (ha->hw.health_count < 1000)
3045250661Sdavidcs		return 0;
3046250661Sdavidcs
3047250661Sdavidcs	ha->hw.health_count = 0;
3048250661Sdavidcs
3049250661Sdavidcs	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3050250661Sdavidcs
3051250661Sdavidcs	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3052250661Sdavidcs		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3053250661Sdavidcs		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3054250661Sdavidcs			__func__, val);
3055250661Sdavidcs		return -1;
3056250661Sdavidcs	}
3057250661Sdavidcs
3058250661Sdavidcs	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3059250661Sdavidcs
3060250661Sdavidcs	if ((val != ha->hw.hbeat_value) &&
3061289635Sdavidcs		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3062250661Sdavidcs		ha->hw.hbeat_value = val;
3063250661Sdavidcs		return 0;
3064250661Sdavidcs	}
3065250661Sdavidcs	device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3066250661Sdavidcs		__func__, val);
3067250661Sdavidcs
3068250661Sdavidcs	return -1;
3069250661Sdavidcs}
3070250661Sdavidcs
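/*
 * Name: qla_init_nic_func
 * Function: Initializes the NIC function and registers for DCBX Change,
 *	SFP Change and IDC Asynchronous Event Notifications.
 */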
3071250661Sdavidcsstatic int
3072284741Sdavidcsqla_init_nic_func(qla_host_t *ha)
3073284741Sdavidcs{
3074284741Sdavidcs        device_t                dev;
3075284741Sdavidcs        q80_init_nic_func_t     *init_nic;
3076284741Sdavidcs        q80_init_nic_func_rsp_t *init_nic_rsp;
3077284741Sdavidcs        uint32_t                err;
3078284741Sdavidcs
3079284741Sdavidcs        dev = ha->pci_dev;
3080284741Sdavidcs
3081284741Sdavidcs        init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3082284741Sdavidcs        bzero(init_nic, sizeof(q80_init_nic_func_t));
3083284741Sdavidcs
3084284741Sdavidcs        init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3085284741Sdavidcs        init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3086284741Sdavidcs        init_nic->count_version |= Q8_MBX_CMD_VERSION;
3087284741Sdavidcs
3088284741Sdavidcs        init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3089284741Sdavidcs        init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3090284741Sdavidcs        init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3091284741Sdavidcs
3092284741Sdavidcs//qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3093284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3094284741Sdavidcs                (sizeof (q80_init_nic_func_t) >> 2),
3095284741Sdavidcs                ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3096284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3097284741Sdavidcs                return -1;
3098284741Sdavidcs        }
3099284741Sdavidcs
3100284741Sdavidcs        init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3101284741Sdavidcs// qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3102284741Sdavidcs
3103284741Sdavidcs        err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3104284741Sdavidcs
3105284741Sdavidcs        if (err) {
3106284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3107284741Sdavidcs        }
3108284741Sdavidcs
3109284741Sdavidcs        return 0;
3110284741Sdavidcs}
3111284741Sdavidcs
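/*
 * Name: qla_stop_nic_func
 * Function: Stops the NIC function and deregisters the DCBX Change and
 *	SFP Change Asynchronous Event Notifications.
 */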
3112284741Sdavidcsstatic int
3113284741Sdavidcsqla_stop_nic_func(qla_host_t *ha)
3114284741Sdavidcs{
3115284741Sdavidcs        device_t                dev;
3116284741Sdavidcs        q80_stop_nic_func_t     *stop_nic;
3117284741Sdavidcs        q80_stop_nic_func_rsp_t *stop_nic_rsp;
3118284741Sdavidcs        uint32_t                err;
3119284741Sdavidcs
3120284741Sdavidcs        dev = ha->pci_dev;
3121284741Sdavidcs
3122284741Sdavidcs        stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3123284741Sdavidcs        bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3124284741Sdavidcs
3125284741Sdavidcs        stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3126284741Sdavidcs        stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3127284741Sdavidcs        stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3128284741Sdavidcs
3129284741Sdavidcs        stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3130284741Sdavidcs        stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3131284741Sdavidcs
3132284741Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3133284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3134284741Sdavidcs                (sizeof (q80_stop_nic_func_t) >> 2),
3135284741Sdavidcs                ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3136284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3137284741Sdavidcs                return -1;
3138284741Sdavidcs        }
3139284741Sdavidcs
3140284741Sdavidcs        stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
3141284741Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3142284741Sdavidcs
3143284741Sdavidcs        err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3144284741Sdavidcs
3145284741Sdavidcs        if (err) {
3146284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3147284741Sdavidcs        }
3148284741Sdavidcs
3149284741Sdavidcs        return 0;
3150284741Sdavidcs}
3151284741Sdavidcs
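/*
 * Name: qla_query_fw_dcbx_caps
 * Function: Queries the firmware for its DCBX capabilities.
 */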
3152284741Sdavidcsstatic int
3153284741Sdavidcsqla_query_fw_dcbx_caps(qla_host_t *ha)
3154284741Sdavidcs{
3155284741Sdavidcs        device_t                        dev;
3156284741Sdavidcs        q80_query_fw_dcbx_caps_t        *fw_dcbx;
3157284741Sdavidcs        q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3158284741Sdavidcs        uint32_t                        err;
3159284741Sdavidcs
3160284741Sdavidcs        dev = ha->pci_dev;
3161284741Sdavidcs
3162284741Sdavidcs        fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3163284741Sdavidcs        bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3164284741Sdavidcs
3165284741Sdavidcs        fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3166284741Sdavidcs        fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3167284741Sdavidcs        fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3168284741Sdavidcs
3169284741Sdavidcs        ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3170284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3171284741Sdavidcs                (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3172284741Sdavidcs                ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3173284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3174284741Sdavidcs                return -1;
3175284741Sdavidcs        }
3176284741Sdavidcs
3177284741Sdavidcs        fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3178284741Sdavidcs        ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3179284741Sdavidcs                sizeof (q80_query_fw_dcbx_caps_rsp_t));
3180284741Sdavidcs
3181284741Sdavidcs        err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3182284741Sdavidcs
3183284741Sdavidcs        if (err) {
3184284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3185284741Sdavidcs        }
3186284741Sdavidcs
3187284741Sdavidcs        return 0;
3188284741Sdavidcs}
3189284741Sdavidcs
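/*
 * Name: qla_idc_ack
 * Function: Acknowledges an Inter Driver Communication (IDC) Asynchronous
 *	Event and waits until the firmware signals completion (imd_compl).
 */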
3190284741Sdavidcsstatic int
3191284741Sdavidcsqla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3192284741Sdavidcs        uint32_t aen_mb3, uint32_t aen_mb4)
3193284741Sdavidcs{
3194284741Sdavidcs        device_t                dev;
3195284741Sdavidcs        q80_idc_ack_t           *idc_ack;
3196284741Sdavidcs        q80_idc_ack_rsp_t       *idc_ack_rsp;
3197284741Sdavidcs        uint32_t                err;
3198284741Sdavidcs        int                     count = 300;
3199284741Sdavidcs
3200284741Sdavidcs        dev = ha->pci_dev;
3201284741Sdavidcs
3202284741Sdavidcs        idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3203284741Sdavidcs        bzero(idc_ack, sizeof(q80_idc_ack_t));
3204284741Sdavidcs
3205284741Sdavidcs        idc_ack->opcode = Q8_MBX_IDC_ACK;
3206284741Sdavidcs        idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3207284741Sdavidcs        idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3208284741Sdavidcs
3209284741Sdavidcs        idc_ack->aen_mb1 = aen_mb1;
3210284741Sdavidcs        idc_ack->aen_mb2 = aen_mb2;
3211284741Sdavidcs        idc_ack->aen_mb3 = aen_mb3;
3212284741Sdavidcs        idc_ack->aen_mb4 = aen_mb4;
3213284741Sdavidcs
3214284741Sdavidcs        ha->hw.imd_compl= 0;
3215284741Sdavidcs
3216284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3217284741Sdavidcs                (sizeof (q80_idc_ack_t) >> 2),
3218284741Sdavidcs                ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3219284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3220284741Sdavidcs                return -1;
3221284741Sdavidcs        }
3222284741Sdavidcs
3223284741Sdavidcs        idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3224284741Sdavidcs
3225284741Sdavidcs        err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3226284741Sdavidcs
3227284741Sdavidcs        if (err) {
3228284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3229284741Sdavidcs                return(-1);
3230284741Sdavidcs        }
3231284741Sdavidcs
3232284741Sdavidcs        while (count && !ha->hw.imd_compl) {
3233284741Sdavidcs                qla_mdelay(__func__, 100);
3234284741Sdavidcs                count--;
3235284741Sdavidcs        }
3236284741Sdavidcs
3237284741Sdavidcs        if (!count)
3238284741Sdavidcs                return -1;
3239284741Sdavidcs        else
3240284741Sdavidcs                device_printf(dev, "%s: count %d\n", __func__, count);
3241284741Sdavidcs
3242284741Sdavidcs        return (0);
3243284741Sdavidcs}
3244284741Sdavidcs
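/*
 * Name: qla_set_port_config
 * Function: Sets the Port Configuration (pause and DCBX settings) in the
 *	firmware.
 */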
3245284741Sdavidcsstatic int
3246284741Sdavidcsqla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3247284741Sdavidcs{
3248284741Sdavidcs        device_t                dev;
3249284741Sdavidcs        q80_set_port_cfg_t      *pcfg;
3250284741Sdavidcs        q80_set_port_cfg_rsp_t  *pfg_rsp;
3251284741Sdavidcs        uint32_t                err;
3252284741Sdavidcs        int                     count = 300;
3253284741Sdavidcs
3254284741Sdavidcs        dev = ha->pci_dev;
3255284741Sdavidcs
3256284741Sdavidcs        pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3257284741Sdavidcs        bzero(pcfg, sizeof(q80_set_port_cfg_t));
3258284741Sdavidcs
3259284741Sdavidcs        pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3260284741Sdavidcs        pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3261284741Sdavidcs        pcfg->count_version |= Q8_MBX_CMD_VERSION;
3262284741Sdavidcs
3263284741Sdavidcs        pcfg->cfg_bits = cfg_bits;
3264284741Sdavidcs
3265284741Sdavidcs        device_printf(dev, "%s: cfg_bits"
3266284741Sdavidcs                " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3267284741Sdavidcs                " [0x%x, 0x%x, 0x%x]\n", __func__,
3268284741Sdavidcs                ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3269284741Sdavidcs                ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3270284741Sdavidcs                ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3271284741Sdavidcs
3272284741Sdavidcs        ha->hw.imd_compl= 0;
3273284741Sdavidcs
3274284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3275284741Sdavidcs                (sizeof (q80_set_port_cfg_t) >> 2),
3276284741Sdavidcs                ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3277284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3278284741Sdavidcs                return -1;
3279284741Sdavidcs        }
3280284741Sdavidcs
3281284741Sdavidcs        pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3282284741Sdavidcs
3283284741Sdavidcs        err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3284284741Sdavidcs
3285284741Sdavidcs        if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3286284741Sdavidcs                while (count && !ha->hw.imd_compl) {
3287284741Sdavidcs                        qla_mdelay(__func__, 100);
3288284741Sdavidcs                        count--;
3289284741Sdavidcs                }
3290284741Sdavidcs                if (count) {
3291284741Sdavidcs                        device_printf(dev, "%s: count %d\n", __func__, count);
3292284741Sdavidcs
3293284741Sdavidcs                        err = 0;
3294284741Sdavidcs                }
3295284741Sdavidcs        }
3296284741Sdavidcs
3297284741Sdavidcs        if (err) {
3298284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3299284741Sdavidcs                return(-1);
3300284741Sdavidcs        }
3301284741Sdavidcs
3302284741Sdavidcs        return (0);
3303284741Sdavidcs}
3304284741Sdavidcs
3305284741Sdavidcs
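/*
 * Name: qla_get_minidump_tmplt_size
 * Function: Returns the size of the Minidump Template.
 */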
3306284741Sdavidcsstatic int
3307250661Sdavidcsqla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3308250661Sdavidcs{
3309250661Sdavidcs	uint32_t			err;
3310250661Sdavidcs	device_t			dev = ha->pci_dev;
3311250661Sdavidcs	q80_config_md_templ_size_t	*md_size;
3312250661Sdavidcs	q80_config_md_templ_size_rsp_t	*md_size_rsp;
3313250661Sdavidcs
3314284741Sdavidcs#ifdef QL_LDFLASH_FW
3315284741Sdavidcs
3316284741Sdavidcs	*size = ql83xx_minidump_len;
3317284741Sdavidcs	return (0);
3318284741Sdavidcs
3319284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */
3320284741Sdavidcs
3321250661Sdavidcs	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3322250661Sdavidcs	bzero(md_size, sizeof(q80_config_md_templ_size_t));
3323250661Sdavidcs
3324250661Sdavidcs	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3325250661Sdavidcs	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3326250661Sdavidcs	md_size->count_version |= Q8_MBX_CMD_VERSION;
3327250661Sdavidcs
3328250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3329250661Sdavidcs		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3330250661Sdavidcs		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3331250661Sdavidcs
3332250661Sdavidcs		device_printf(dev, "%s: failed\n", __func__);
3333250661Sdavidcs
3334250661Sdavidcs		return (-1);
3335250661Sdavidcs	}
3336250661Sdavidcs
3337250661Sdavidcs	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3338250661Sdavidcs
3339250661Sdavidcs	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3340250661Sdavidcs
3341250661Sdavidcs        if (err) {
3342250661Sdavidcs		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3343250661Sdavidcs		return(-1);
3344250661Sdavidcs        }
3345250661Sdavidcs
3346250661Sdavidcs	*size = md_size_rsp->templ_size;
3347250661Sdavidcs
3348250661Sdavidcs	return (0);
3349250661Sdavidcs}
3350250661Sdavidcs
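/*
 * Name: qla_get_port_config
 * Function: Retrieves the current Port Configuration from the firmware.
 */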
3351250661Sdavidcsstatic int
3352284741Sdavidcsqla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3353284741Sdavidcs{
3354284741Sdavidcs        device_t                dev;
3355284741Sdavidcs        q80_get_port_cfg_t      *pcfg;
3356284741Sdavidcs        q80_get_port_cfg_rsp_t  *pcfg_rsp;
3357284741Sdavidcs        uint32_t                err;
3358284741Sdavidcs
3359284741Sdavidcs        dev = ha->pci_dev;
3360284741Sdavidcs
3361284741Sdavidcs        pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3362284741Sdavidcs        bzero(pcfg, sizeof(q80_get_port_cfg_t));
3363284741Sdavidcs
3364284741Sdavidcs        pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3365284741Sdavidcs        pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3366284741Sdavidcs        pcfg->count_version |= Q8_MBX_CMD_VERSION;
3367284741Sdavidcs
3368284741Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3369284741Sdavidcs                (sizeof (q80_get_port_cfg_t) >> 2),
3370284741Sdavidcs                ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3371284741Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
3372284741Sdavidcs                return -1;
3373284741Sdavidcs        }
3374284741Sdavidcs
3375284741Sdavidcs        pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3376284741Sdavidcs
3377284741Sdavidcs        err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3378284741Sdavidcs
3379284741Sdavidcs        if (err) {
3380284741Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3381284741Sdavidcs                return(-1);
3382284741Sdavidcs        }
3383284741Sdavidcs
3384284741Sdavidcs        device_printf(dev, "%s: [cfg_bits, port type]"
3385284741Sdavidcs                " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3386284741Sdavidcs                " [0x%x, 0x%x, 0x%x]\n", __func__,
3387284741Sdavidcs                pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
3388284741Sdavidcs                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3389284741Sdavidcs                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3390284741Sdavidcs                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
3391284741Sdavidcs                );
3392284741Sdavidcs
3393284741Sdavidcs        *cfg_bits = pcfg_rsp->cfg_bits;
3394284741Sdavidcs
3395284741Sdavidcs        return (0);
3396284741Sdavidcs}
3397284741Sdavidcs
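/*
 * Name: qla_iscsi_pdu
 * Function: Returns 0 if the mbuf is a TCP segment to or from the iSCSI
 *	well-known port (3260); returns -1 otherwise.
 */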
3398284741Sdavidcsint
3399284741Sdavidcsqla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
3400284741Sdavidcs{
3401284741Sdavidcs        struct ether_vlan_header        *eh;
3402284741Sdavidcs        uint16_t                        etype;
3403284741Sdavidcs        struct ip                       *ip = NULL;
3404284741Sdavidcs        struct ip6_hdr                  *ip6 = NULL;
3405284741Sdavidcs        struct tcphdr                   *th = NULL;
3406284741Sdavidcs        uint32_t                        hdrlen;
3407284741Sdavidcs        uint32_t                        offset;
3408284741Sdavidcs        uint8_t                         buf[sizeof(struct ip6_hdr)];
3409284741Sdavidcs
3410284741Sdavidcs        eh = mtod(mp, struct ether_vlan_header *);
3411284741Sdavidcs
3412284741Sdavidcs        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3413284741Sdavidcs                hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3414284741Sdavidcs                etype = ntohs(eh->evl_proto);
3415284741Sdavidcs        } else {
3416284741Sdavidcs                hdrlen = ETHER_HDR_LEN;
3417284741Sdavidcs                etype = ntohs(eh->evl_encap_proto);
3418284741Sdavidcs        }
3419284741Sdavidcs
3420284741Sdavidcs	if (etype == ETHERTYPE_IP) {
3421284741Sdavidcs
3422284741Sdavidcs		offset = (hdrlen + sizeof (struct ip));
3423284741Sdavidcs
3424284741Sdavidcs		if (mp->m_len >= offset) {
3425284741Sdavidcs                        ip = (struct ip *)(mp->m_data + hdrlen);
3426284741Sdavidcs		} else {
3427284741Sdavidcs			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
3428284741Sdavidcs                        ip = (struct ip *)buf;
3429284741Sdavidcs		}
3430284741Sdavidcs
3431284741Sdavidcs                if (ip->ip_p == IPPROTO_TCP) {
3432284741Sdavidcs
3433284741Sdavidcs			hdrlen += ip->ip_hl << 2;
3434284741Sdavidcs			offset = hdrlen + 4;
3435284741Sdavidcs
3436284741Sdavidcs			if (mp->m_len >= offset) {
3437297793Spfg				th = (struct tcphdr *)(mp->m_data + hdrlen);
3438284741Sdavidcs			} else {
3439284741Sdavidcs                                m_copydata(mp, hdrlen, 4, buf);
3440284741Sdavidcs				th = (struct tcphdr *)buf;
3441284741Sdavidcs			}
3442284741Sdavidcs                }
3443284741Sdavidcs
3444284741Sdavidcs	} else if (etype == ETHERTYPE_IPV6) {
3445284741Sdavidcs
3446284741Sdavidcs		offset = (hdrlen + sizeof (struct ip6_hdr));
3447284741Sdavidcs
3448284741Sdavidcs		if (mp->m_len >= offset) {
3449284741Sdavidcs                        ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
3450284741Sdavidcs		} else {
3451284741Sdavidcs                        m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
3452284741Sdavidcs                        ip6 = (struct ip6_hdr *)buf;
3453284741Sdavidcs		}
3454284741Sdavidcs
3455284741Sdavidcs                if (ip6->ip6_nxt == IPPROTO_TCP) {
3456284741Sdavidcs
3457284741Sdavidcs			hdrlen += sizeof(struct ip6_hdr);
3458284741Sdavidcs			offset = hdrlen + 4;
3459284741Sdavidcs
3460284741Sdavidcs			if (mp->m_len >= offset) {
3461297793Spfg				th = (struct tcphdr *)(mp->m_data + hdrlen);
3462284741Sdavidcs			} else {
3463284741Sdavidcs				m_copydata(mp, hdrlen, 4, buf);
3464284741Sdavidcs				th = (struct tcphdr *)buf;
3465284741Sdavidcs			}
3466284741Sdavidcs                }
3467284741Sdavidcs	}
3468284741Sdavidcs
3469284741Sdavidcs        if (th != NULL) {
3470284741Sdavidcs                if ((th->th_sport == htons(3260)) ||
3471284741Sdavidcs                        (th->th_dport == htons(3260)))
3472284741Sdavidcs                        return 0;
3473284741Sdavidcs        }
3474284741Sdavidcs        return (-1);
3475284741Sdavidcs}
3476284741Sdavidcs
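/*
 * Name: qla_hw_async_event
 * Function: Handles Asynchronous Event Notifications posted by the
 *	firmware; AEN 0x8101 is acknowledged via qla_idc_ack().
 */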
3477284741Sdavidcsvoid
3478284741Sdavidcsqla_hw_async_event(qla_host_t *ha)
3479284741Sdavidcs{
3480284741Sdavidcs        switch (ha->hw.aen_mb0) {
3481284741Sdavidcs        case 0x8101:
3482284741Sdavidcs                (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
3483284741Sdavidcs                        ha->hw.aen_mb3, ha->hw.aen_mb4);
3484284741Sdavidcs
3485284741Sdavidcs                break;
3486284741Sdavidcs
3487284741Sdavidcs        default:
3488284741Sdavidcs                break;
3489284741Sdavidcs        }
3490284741Sdavidcs
3491284741Sdavidcs        return;
3492284741Sdavidcs}
3493284741Sdavidcs
3494284741Sdavidcs#ifdef QL_LDFLASH_FW
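/*
 * Name: qla_get_minidump_template
 * Function: Retrieves the Minidump Template from the firmware into the
 *	preallocated minidump DMA buffer.
 */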
3495284741Sdavidcsstatic int
3496250661Sdavidcsqla_get_minidump_template(qla_host_t *ha)
3497250661Sdavidcs{
3498250661Sdavidcs	uint32_t			err;
3499250661Sdavidcs	device_t			dev = ha->pci_dev;
3500250661Sdavidcs	q80_config_md_templ_cmd_t	*md_templ;
3501250661Sdavidcs	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
3502250661Sdavidcs
3503250661Sdavidcs	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
3504250661Sdavidcs	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
3505250661Sdavidcs
3506250661Sdavidcs	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
3507250661Sdavidcs	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
3508250661Sdavidcs	md_templ->count_version |= Q8_MBX_CMD_VERSION;
3509250661Sdavidcs
3510250661Sdavidcs	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
3511250661Sdavidcs	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
3512250661Sdavidcs
3513250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
3514250661Sdavidcs		(sizeof(q80_config_md_templ_cmd_t) >> 2),
3515250661Sdavidcs		 ha->hw.mbox,
3516250661Sdavidcs		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
3517250661Sdavidcs
3518250661Sdavidcs		device_printf(dev, "%s: failed\n", __func__);
3519250661Sdavidcs
3520250661Sdavidcs		return (-1);
3521250661Sdavidcs	}
3522250661Sdavidcs
3523250661Sdavidcs	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
3524250661Sdavidcs
3525250661Sdavidcs	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
3526250661Sdavidcs
3527250661Sdavidcs	if (err) {
3528250661Sdavidcs		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3529250661Sdavidcs		return (-1);
3530250661Sdavidcs	}
3531250661Sdavidcs
3532250661Sdavidcs	return (0);
3533250661Sdavidcs
3534250661Sdavidcs}
3535284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */
3536250661Sdavidcs
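/*
 * Name: qla_minidump_init
 * Function: Sets up the Minidump Template used for capturing firmware
 *	dumps.
 */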
3537250661Sdavidcsstatic int
3538250661Sdavidcsqla_minidump_init(qla_host_t *ha)
3539250661Sdavidcs{
3540284741Sdavidcs	int		ret = 0;
3541250661Sdavidcs	uint32_t	template_size = 0;
3542250661Sdavidcs	device_t	dev = ha->pci_dev;
3543250661Sdavidcs
3544250661Sdavidcs	/*
3545250661Sdavidcs	 * Get Minidump Template Size
3546250661Sdavidcs 	 */
3547250661Sdavidcs	ret = qla_get_minidump_tmplt_size(ha, &template_size);
3548250661Sdavidcs
3549250661Sdavidcs	if (ret || (template_size == 0)) {
3550250661Sdavidcs		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
3551250661Sdavidcs			template_size);
3552250661Sdavidcs		return (-1);
3553250661Sdavidcs	}
3554250661Sdavidcs
3555250661Sdavidcs	/*
3556250661Sdavidcs	 * Allocate Memory for Minidump Template
3557250661Sdavidcs	 */
3558250661Sdavidcs
3559250661Sdavidcs	ha->hw.dma_buf.minidump.alignment = 8;
3560250661Sdavidcs	ha->hw.dma_buf.minidump.size = template_size;
3561250661Sdavidcs
3562284741Sdavidcs#ifdef QL_LDFLASH_FW
3563250661Sdavidcs	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
3564250661Sdavidcs
3565250661Sdavidcs		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
3566250661Sdavidcs
3567250661Sdavidcs		return (-1);
3568250661Sdavidcs	}
3569250661Sdavidcs	ha->hw.dma_buf.flags.minidump = 1;
3570250661Sdavidcs
3571250661Sdavidcs	/*
3572250661Sdavidcs	 * Retrieve Minidump Template
3573250661Sdavidcs	 */
3574250661Sdavidcs	ret = qla_get_minidump_template(ha);
3575284741Sdavidcs#else
3576284741Sdavidcs	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
3577284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */
3578250661Sdavidcs
3579250661Sdavidcs	if (ret) {
3580250661Sdavidcs		qla_minidump_free(ha);
3581250661Sdavidcs	} else {
3582250661Sdavidcs		ha->hw.mdump_init = 1;
3583250661Sdavidcs	}
3584250661Sdavidcs
3585250661Sdavidcs	return (ret);
3586250661Sdavidcs}
3587250661Sdavidcs
3588250661Sdavidcs
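/*
 * Name: qla_minidump_free
 * Function: Frees the resources allocated by qla_minidump_init().
 */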
3589250661Sdavidcsstatic void
3590250661Sdavidcsqla_minidump_free(qla_host_t *ha)
3591250661Sdavidcs{
3592250661Sdavidcs	ha->hw.mdump_init = 0;
3593250661Sdavidcs	if (ha->hw.dma_buf.flags.minidump) {
3594250661Sdavidcs		ha->hw.dma_buf.flags.minidump = 0;
3595250661Sdavidcs		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
3596250661Sdavidcs	}
3597250661Sdavidcs	return;
3598250661Sdavidcs}
3599250661Sdavidcs
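/*
 * Name: ql_minidump
 * Function: Coordinates the capture of a firmware Minidump when one has
 *	been requested (mdump_active).
 */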
3600250661Sdavidcsvoid
3601250661Sdavidcsql_minidump(qla_host_t *ha)
3602250661Sdavidcs{
3603250661Sdavidcs	uint32_t delay = 6000;
3604250661Sdavidcs
3605250661Sdavidcs	if (!ha->hw.mdump_init)
3606250661Sdavidcs		return;
3607250661Sdavidcs
3608250661Sdavidcs	if (!ha->hw.mdump_active)
3609250661Sdavidcs		return;
3610250661Sdavidcs
3611250661Sdavidcs	if (ha->hw.mdump_active == 1) {
3612250661Sdavidcs		ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
3613250661Sdavidcs		ha->hw.mdump_start = 1;
3614250661Sdavidcs	}
3615250661Sdavidcs
3616250661Sdavidcs	while (delay-- && ha->hw.mdump_active) {
3617250661Sdavidcs		qla_mdelay(__func__, 100);
3618250661Sdavidcs	}
3619250661Sdavidcs	ha->hw.mdump_start = 0;
3620250661Sdavidcs	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
3621250661Sdavidcs
3622250661Sdavidcs	return;
3623250661Sdavidcs}
3624