/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */
33250661Sdavidcs
34250661Sdavidcs#include <sys/cdefs.h>
35250661Sdavidcs__FBSDID("$FreeBSD$");
36250661Sdavidcs
37250661Sdavidcs#include "ql_os.h"
38250661Sdavidcs#include "ql_hw.h"
39250661Sdavidcs#include "ql_def.h"
40250661Sdavidcs#include "ql_inline.h"
41250661Sdavidcs#include "ql_ver.h"
42250661Sdavidcs#include "ql_glbl.h"
43250661Sdavidcs#include "ql_dbg.h"
44250661Sdavidcs
/*
 * Static Functions
 */

/* Receive / transmit hardware context creation and teardown. */
static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);

/* Tx completion processing (the "_locked" suffix suggests the caller
 * holds the tx ring lock — confirm at call sites). */
static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);

/* Firmware mailbox command issue/response helper. */
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);

/* Firmware configuration helpers (interrupts, partition, RSS, coalescing,
 * receive mode, link events). */
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs,
	uint32_t create);
static int qla_get_nic_partition(qla_host_t *ha);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

/* TSO header handling for transmit commands. */
static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
		uint8_t *hdr);

/* Multicast filter and receive-ring replenish helpers. */
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_hw_del_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx);

/* Firmware minidump buffer management. */
static int qla_minidump_init(qla_host_t *ha);
static void qla_minidump_free(qla_host_t *ha);
74250661Sdavidcs
75250661Sdavidcsstatic int
76250661Sdavidcsqla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
77250661Sdavidcs{
78250661Sdavidcs        int err = 0, ret;
79250661Sdavidcs        qla_host_t *ha;
80250661Sdavidcs	uint32_t i;
81250661Sdavidcs
82250661Sdavidcs        err = sysctl_handle_int(oidp, &ret, 0, req);
83250661Sdavidcs
84250661Sdavidcs        if (err || !req->newptr)
85250661Sdavidcs                return (err);
86250661Sdavidcs
87250661Sdavidcs        if (ret == 1) {
88250661Sdavidcs
89250661Sdavidcs                ha = (qla_host_t *)arg1;
90250661Sdavidcs
91250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++)
92250661Sdavidcs			device_printf(ha->pci_dev,
93250661Sdavidcs				"%s: sds_ring[%d] = %p\n", __func__,i,
94250661Sdavidcs				(void *)ha->hw.sds[i].intr_count);
95250661Sdavidcs
96250661Sdavidcs		for (i = 0; i < ha->hw.num_tx_rings; i++)
97250661Sdavidcs			device_printf(ha->pci_dev,
98250661Sdavidcs				"%s: tx[%d] = %p\n", __func__,i,
99250661Sdavidcs				(void *)ha->tx_ring[i].count);
100250661Sdavidcs
101250661Sdavidcs		for (i = 0; i < ha->hw.num_rds_rings; i++)
102250661Sdavidcs			device_printf(ha->pci_dev,
103250661Sdavidcs				"%s: rds_ring[%d] = %p\n", __func__,i,
104250661Sdavidcs				(void *)ha->hw.rds[i].count);
105250661Sdavidcs
106250661Sdavidcs		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
107250661Sdavidcs			(void *)ha->lro_pkt_count);
108250661Sdavidcs
109250661Sdavidcs		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
110250661Sdavidcs			(void *)ha->lro_bytes);
111250661Sdavidcs	}
112250661Sdavidcs	return (err);
113250661Sdavidcs}
114250661Sdavidcs
115250661Sdavidcs#ifdef QL_DBG
116250661Sdavidcs
117250661Sdavidcsstatic void
118250661Sdavidcsqla_stop_pegs(qla_host_t *ha)
119250661Sdavidcs{
120250661Sdavidcs        uint32_t val = 1;
121250661Sdavidcs
122250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
123250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
124250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
125250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
126250661Sdavidcs        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
127250661Sdavidcs        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
128250661Sdavidcs}
129250661Sdavidcs
130250661Sdavidcsstatic int
131250661Sdavidcsqla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
132250661Sdavidcs{
133250661Sdavidcs	int err, ret = 0;
134250661Sdavidcs	qla_host_t *ha;
135250661Sdavidcs
136250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
137250661Sdavidcs
138250661Sdavidcs
139250661Sdavidcs	if (err || !req->newptr)
140250661Sdavidcs		return (err);
141250661Sdavidcs
142250661Sdavidcs	if (ret == 1) {
143250661Sdavidcs		ha = (qla_host_t *)arg1;
144250661Sdavidcs		(void)QLA_LOCK(ha, __func__, 0);
145250661Sdavidcs		qla_stop_pegs(ha);
146250661Sdavidcs		QLA_UNLOCK(ha, __func__);
147250661Sdavidcs	}
148250661Sdavidcs
149250661Sdavidcs	return err;
150250661Sdavidcs}
151250661Sdavidcs#endif /* #ifdef QL_DBG */
152250661Sdavidcs
/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 *
 * Initializes the default ring counts and tunable thresholds in ha->hw,
 * then registers the per-device sysctl nodes that expose them (ring
 * counts, coalescing thresholds, minidump controls, and - when built
 * with QL_DBG - error-injection and peg-stop debug knobs).
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t	dev;

        dev = ha->pci_dev;

	/* Default ring counts; exported read-only below. */
	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
		ha->hw.num_tx_rings, "Number of Transmit Rings");

	/* Writable: selects which tx ring the driver uses. */
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
		ha->txr_idx, "Tx Ring Used");

	/* Procedure node: writing 1 dumps driver statistics (see
	 * qla_sysctl_get_drvr_stats). */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

	/* Batch threshold before the SDS consumer index is written back. */
	ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
		ha->hw.sds_cidx_thres,
		"Number of SDS entries to process before updating"
		" SDS Ring Consumer Index");

	/* Batch threshold before the RDS producer index is written back. */
	ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
		ha->hw.rds_pidx_thres,
		"Number of Rcv Rings Entries to post before updating"
		" RDS Ring Producer Index");

	ha->hw.min_lro_pkt_size = 512;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "min_lro_pkt_size", CTLFLAG_RD, &ha->hw.min_lro_pkt_size,
		ha->hw.min_lro_pkt_size, "minimum packet size to trigger lro");

	/* Minidump state flags, toggled by the external minidump utility. */
	ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
		ha->hw.mdump_active,
		"Minidump Utility is Active \n"
		"\t 0 = Minidump Utility is not active\n"
		"\t 1 = Minidump Utility is retrieved on this port\n"
		"\t 2 = Minidump Utility is retrieved on the other port\n");

	ha->hw.mdump_start = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_start", CTLFLAG_RW,
		&ha->hw.mdump_start, ha->hw.mdump_start,
		"Minidump Utility can start minidump process");
#ifdef QL_DBG

	/* Error-injection selector consumed by QL_ERR_INJECT() checks. */
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n" );

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

}
265250661Sdavidcs
/*
 * ql_hw_link_status
 * Prints the link parameters cached in ha->hw (link state, duplex,
 * autonegotiation, speed, SFP module type, cable data) to the console.
 * This routine only reports values; they are updated elsewhere.
 */
void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "link Up\n");
	} else {
		device_printf(ha->pci_dev, "link Down\n");
	}

	if (ha->hw.flags.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.flags.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	/*
	 * Speed codes: 0x3E8 (1000 decimal) and 0x64 (100 decimal) are
	 * Mbps values; 0x710 is presumably the firmware's code for 10G
	 * operation — confirm against the firmware interface spec.
	 */
	switch (ha->hw.link_speed) {
	case 0x710:
		device_printf(ha->pci_dev, "link speed\t\t 10Gps\n");
		break;

	case 0x3E8:
		device_printf(ha->pci_dev, "link speed\t\t 1Gps\n");
		break;

	case 0x64:
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	/* Module type codes as reported by the adapter firmware. */
	switch (ha->hw.module_type) {

	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			"(Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}
370250661Sdavidcs
371250661Sdavidcs/*
372250661Sdavidcs * Name: ql_free_dma
373250661Sdavidcs * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
374250661Sdavidcs */
375250661Sdavidcsvoid
376250661Sdavidcsql_free_dma(qla_host_t *ha)
377250661Sdavidcs{
378250661Sdavidcs	uint32_t i;
379250661Sdavidcs
380250661Sdavidcs        if (ha->hw.dma_buf.flags.sds_ring) {
381250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
382250661Sdavidcs			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
383250661Sdavidcs		}
384250661Sdavidcs        	ha->hw.dma_buf.flags.sds_ring = 0;
385250661Sdavidcs	}
386250661Sdavidcs
387250661Sdavidcs        if (ha->hw.dma_buf.flags.rds_ring) {
388250661Sdavidcs		for (i = 0; i < ha->hw.num_rds_rings; i++) {
389250661Sdavidcs			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
390250661Sdavidcs		}
391250661Sdavidcs        	ha->hw.dma_buf.flags.rds_ring = 0;
392250661Sdavidcs	}
393250661Sdavidcs
394250661Sdavidcs        if (ha->hw.dma_buf.flags.tx_ring) {
395250661Sdavidcs		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
396250661Sdavidcs        	ha->hw.dma_buf.flags.tx_ring = 0;
397250661Sdavidcs	}
398250661Sdavidcs	qla_minidump_free(ha);
399250661Sdavidcs}
400250661Sdavidcs
/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 *
 * Returns 0 on success, -1 on failure.  On failure all memory allocated
 * so far is released via ql_free_dma() before returning.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
	uint32_t		i, j, size, tx_ring_size;
	qla_hw_t		*hw;
	qla_hw_tx_cntxt_t	*tx_cntxt;
	uint8_t			*vaddr;
	bus_addr_t		paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	/*
	 * All tx rings share one contiguous DMA buffer.  The extra
	 * PAGE_SIZE provides room for the per-ring tx consumer-index
	 * words carved out after the rings (second loop below).
	 */
	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	/* Carve one descriptor ring per tx context out of the buffer. */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	/*
	 * Carve a 32-bit tx consumer-index word per ring from the space
	 * immediately following the rings (covered by the PAGE_SIZE slack).
	 */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

        ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < hw->num_rds_rings; i++) {

		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			/* Unwind the rds rings allocated so far; the
			 * rds_ring flag is still 0 so ql_free_dma() will
			 * not double-free them. */
			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			/* Same partial-unwind pattern as the rds rings. */
			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}

	/* Publish each ring's kernel virtual base for the fast path. */
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return 0;

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return -1;
}
524250661Sdavidcs
/* Maximum time (in 1 ms polling iterations) to wait for the mailbox. */
#define Q8_MBX_MSEC_DELAY	5000

/*
 * qla_mbx_cmd
 * Issues a mailbox command to the firmware and collects the response.
 *
 * h_mbox/n_hmbox : command words to write into the host mailbox registers.
 * fw_mbox/n_fwmbox : buffer/count for the firmware response words.
 * no_pause : when non-zero, busy-wait with DELAY() instead of sleeping
 *            (for contexts that must not sleep) and use a shorter
 *            1000-iteration ownership timeout.
 *
 * Returns 0 on success; -1/-2 on host/firmware mailbox timeout, -3 on
 * injected failure.  Any failure also sets qla_initiate_recovery.
 */
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	int ret = 0;

	/* QL_DBG error injection: simulate a mailbox failure. */
	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
		ret = -3;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	/* Wait for the host mailbox to be free (control register == 0). */
	while (i) {
		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -1;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	/* Write the command words into consecutive host mailbox registers. */
	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	/* Ring the doorbell: hand the mailbox to the firmware. */
	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);


	/* Poll for the firmware response. */
	i = Q8_MBX_MSEC_DELAY;
	while (i) {
		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		/*
		 * Low two bits == 1 indicates a firmware completion is
		 * posted.  Responses whose MBOX0 has 0x8000 in bits 12-15
		 * are skipped — presumably async event notifications
		 * rather than this command's reply (confirm against the
		 * firmware interface spec).
		 */
		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -2;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	/* Copy the response words out of the firmware mailbox registers. */
	for (i = 0; i < n_fwmbox; i++) {
		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	/* Release the firmware mailbox and clear the mbx interrupt mask. */
	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
	return (ret);
}
608250661Sdavidcs
609250661Sdavidcsstatic int
610250661Sdavidcsqla_get_nic_partition(qla_host_t *ha)
611250661Sdavidcs{
612250661Sdavidcs	uint32_t *mbox, err;
613250661Sdavidcs	device_t dev = ha->pci_dev;
614250661Sdavidcs
615250661Sdavidcs	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
616250661Sdavidcs
617250661Sdavidcs	mbox = ha->hw.mbox;
618250661Sdavidcs
619250661Sdavidcs	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
620250661Sdavidcs
621250661Sdavidcs	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
622250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
623250661Sdavidcs		return (-1);
624250661Sdavidcs	}
625250661Sdavidcs	err = mbox[0] >> 25;
626250661Sdavidcs
627250661Sdavidcs	if ((err != 1) && (err != 0)) {
628250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
629250661Sdavidcs		return (-1);
630250661Sdavidcs	}
631250661Sdavidcs	return 0;
632250661Sdavidcs}
633250661Sdavidcs
/*
 * qla_config_intr_cntxt
 * Creates or deletes 'num_intrs' MSI-X interrupt contexts in firmware
 * via the CONFIG_INTR mailbox command.
 *
 * create : non-zero to create contexts, zero to delete them.  On create,
 *          msix_index is i + 1 (index 0 is presumably reserved — confirm
 *          against the interrupt setup code); the firmware-assigned
 *          intr_id/intr_src values are saved into ha->hw.  On delete, the
 *          previously saved intr_id values are passed back.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs, uint32_t create)
{
	uint32_t i, err;
	device_t dev = ha->pci_dev;
	q80_config_intr_t *c_intr;
	q80_config_intr_rsp_t *c_intr_rsp;

	/* The mailbox buffer doubles as the command structure. */
	c_intr = (q80_config_intr_t *)ha->hw.mbox;
	bzero(c_intr, (sizeof (q80_config_intr_t)));

	c_intr->opcode = Q8_MBX_CONFIG_INTR;

	/* Command length in 32-bit words, plus the interface version. */
	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
	c_intr->count_version |= Q8_MBX_CMD_VERSION;

	c_intr->nentries = num_intrs;

	for (i = 0; i < num_intrs; i++) {
		if (create) {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
			c_intr->intr[i].msix_index = i + 1;
		} else {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
			c_intr->intr[i].msix_index = ha->hw.intr_id[i];
		}

		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
		(sizeof (q80_config_intr_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

	if (err) {
		/* Dump the per-entry status for diagnosis. */
		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
			c_intr_rsp->nentries);

		for (i = 0; i < c_intr_rsp->nentries; i++) {
			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
				__func__, i,
				c_intr_rsp->intr[i].status,
				c_intr_rsp->intr[i].intr_id,
				c_intr_rsp->intr[i].intr_src);
		}

		return (-1);
	}

	/* On create, record the firmware-assigned id/source per entry. */
	for (i = 0; ((i < num_intrs) && create); i++) {
		if (!c_intr_rsp->intr[i].status) {
			ha->hw.intr_id[i] = c_intr_rsp->intr[i].intr_id;
			ha->hw.intr_src[i] = c_intr_rsp->intr[i].intr_src;
		}
	}

	return (0);
}
699250661Sdavidcs
/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
/*
 * 40-byte Toeplitz hash key expressed as five 64-bit words.  Appears to
 * be the well-known Microsoft RSS sample key in word-swapped form — TODO
 * confirm byte ordering against the firmware interface spec.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
			0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };

/*
 * qla_config_rss
 * Issues the CONFIG_RSS mailbox command for the given receive context:
 * enables RSS with TCP/IPv4 and TCP/IPv6 hashing, an indirection table,
 * and the hash key above.  Returns 0 on success, -1 on failure.
 */
static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	q80_config_rss_t	*c_rss;
	q80_config_rss_rsp_t	*c_rss_rsp;
	uint32_t		err, i;
	device_t		dev = ha->pci_dev;

	/* The mailbox buffer doubles as the command structure. */
	c_rss = (q80_config_rss_t *)ha->hw.mbox;
	bzero(c_rss, (sizeof (q80_config_rss_t)));

	c_rss->opcode = Q8_MBX_CONFIG_RSS;

	/* Command length in 32-bit words, plus the interface version. */
	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
	c_rss->count_version |= Q8_MBX_CMD_VERSION;

	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);

	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

	c_rss->cntxt_id = cntxt_id;

	/* Load the 5 x 64-bit Toeplitz key words. */
	for (i = 0; i < 5; i++) {
		c_rss->rss_key[i] = rss_key[i];
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
		(sizeof (q80_config_rss_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}
758250661Sdavidcs
759250661Sdavidcsstatic uint8_t rss_ind_default_table[Q8_RSS_IND_TBL_SIZE];
760250661Sdavidcs
761250661Sdavidcsstatic int
762250661Sdavidcsqla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
763250661Sdavidcs        uint16_t cntxt_id, uint8_t *ind_table)
764250661Sdavidcs{
765250661Sdavidcs        q80_config_rss_ind_table_t      *c_rss_ind;
766250661Sdavidcs        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
767250661Sdavidcs        uint32_t                        err;
768250661Sdavidcs        device_t                        dev = ha->pci_dev;
769250661Sdavidcs
770250661Sdavidcs	if ((count > Q8_RSS_IND_TBL_SIZE) ||
771250661Sdavidcs		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
772250661Sdavidcs		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
773250661Sdavidcs			start_idx, count);
774250661Sdavidcs		return (-1);
775250661Sdavidcs	}
776250661Sdavidcs
777250661Sdavidcs        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
778250661Sdavidcs        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
779250661Sdavidcs
780250661Sdavidcs        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
781250661Sdavidcs        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
782250661Sdavidcs        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
783250661Sdavidcs
784250661Sdavidcs	c_rss_ind->start_idx = start_idx;
785250661Sdavidcs	c_rss_ind->end_idx = start_idx + count - 1;
786250661Sdavidcs	c_rss_ind->cntxt_id = cntxt_id;
787250661Sdavidcs	bcopy(ind_table, c_rss_ind->ind_table, count);
788250661Sdavidcs
789250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
790250661Sdavidcs		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
791250661Sdavidcs		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
792250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
793250661Sdavidcs		return (-1);
794250661Sdavidcs	}
795250661Sdavidcs
796250661Sdavidcs	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
797250661Sdavidcs	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
798250661Sdavidcs
799250661Sdavidcs	if (err) {
800250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
801250661Sdavidcs		return (-1);
802250661Sdavidcs	}
803250661Sdavidcs	return 0;
804250661Sdavidcs}
805250661Sdavidcs
806250661Sdavidcs/*
807250661Sdavidcs * Name: qla_config_intr_coalesce
808250661Sdavidcs * Function: Configure Interrupt Coalescing.
809250661Sdavidcs */
810250661Sdavidcsstatic int
811250661Sdavidcsqla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
812250661Sdavidcs{
813250661Sdavidcs	q80_config_intr_coalesc_t	*intrc;
814250661Sdavidcs	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
815250661Sdavidcs	uint32_t			err, i;
816250661Sdavidcs	device_t			dev = ha->pci_dev;
817250661Sdavidcs
818250661Sdavidcs	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
819250661Sdavidcs	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
820250661Sdavidcs
821250661Sdavidcs	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
822250661Sdavidcs	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
823250661Sdavidcs	intrc->count_version |= Q8_MBX_CMD_VERSION;
824250661Sdavidcs
825250661Sdavidcs	intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
826250661Sdavidcs	intrc->cntxt_id = cntxt_id;
827250661Sdavidcs
828250661Sdavidcs	intrc->max_pkts = 256;
829250661Sdavidcs	intrc->max_mswait = 3;
830250661Sdavidcs
831250661Sdavidcs	if (tenable) {
832250661Sdavidcs		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
833250661Sdavidcs		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
834250661Sdavidcs
835250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
836250661Sdavidcs			intrc->sds_ring_mask |= (1 << i);
837250661Sdavidcs		}
838250661Sdavidcs		intrc->ms_timeout = 1000;
839250661Sdavidcs	}
840250661Sdavidcs
841250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
842250661Sdavidcs		(sizeof (q80_config_intr_coalesc_t) >> 2),
843250661Sdavidcs		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
844250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
845250661Sdavidcs		return (-1);
846250661Sdavidcs	}
847250661Sdavidcs	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
848250661Sdavidcs
849250661Sdavidcs	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
850250661Sdavidcs
851250661Sdavidcs	if (err) {
852250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
853250661Sdavidcs		return (-1);
854250661Sdavidcs	}
855250661Sdavidcs
856250661Sdavidcs	return 0;
857250661Sdavidcs}
858250661Sdavidcs
859250661Sdavidcs
860250661Sdavidcs/*
861250661Sdavidcs * Name: qla_config_mac_addr
862250661Sdavidcs * Function: binds a MAC address to the context/interface.
863250661Sdavidcs *	Can be unicast, multicast or broadcast.
864250661Sdavidcs */
865250661Sdavidcsstatic int
866250661Sdavidcsqla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
867250661Sdavidcs{
868250661Sdavidcs	q80_config_mac_addr_t		*cmac;
869250661Sdavidcs	q80_config_mac_addr_rsp_t	*cmac_rsp;
870250661Sdavidcs	uint32_t			err;
871250661Sdavidcs	device_t			dev = ha->pci_dev;
872250661Sdavidcs
873250661Sdavidcs	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
874250661Sdavidcs	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
875250661Sdavidcs
876250661Sdavidcs	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
877250661Sdavidcs	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
878250661Sdavidcs	cmac->count_version |= Q8_MBX_CMD_VERSION;
879250661Sdavidcs
880250661Sdavidcs	if (add_mac)
881250661Sdavidcs		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
882250661Sdavidcs	else
883250661Sdavidcs		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
884250661Sdavidcs
885250661Sdavidcs	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
886250661Sdavidcs
887250661Sdavidcs	cmac->nmac_entries = 1;
888250661Sdavidcs	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
889250661Sdavidcs	bcopy(mac_addr, cmac->mac_addr[0].addr, 6);
890250661Sdavidcs
891250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
892250661Sdavidcs		(sizeof (q80_config_mac_addr_t) >> 2),
893250661Sdavidcs		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
894250661Sdavidcs		device_printf(dev, "%s: %s failed0\n", __func__,
895250661Sdavidcs			(add_mac ? "Add" : "Del"));
896250661Sdavidcs		return (-1);
897250661Sdavidcs	}
898250661Sdavidcs	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
899250661Sdavidcs
900250661Sdavidcs	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
901250661Sdavidcs
902250661Sdavidcs	if (err) {
903250661Sdavidcs		device_printf(dev, "%s: %s "
904250661Sdavidcs			"%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
905250661Sdavidcs			__func__, (add_mac ? "Add" : "Del"),
906250661Sdavidcs			mac_addr[0], mac_addr[1], mac_addr[2],
907250661Sdavidcs			mac_addr[3], mac_addr[4], mac_addr[5], err);
908250661Sdavidcs		return (-1);
909250661Sdavidcs	}
910250661Sdavidcs
911250661Sdavidcs	return 0;
912250661Sdavidcs}
913250661Sdavidcs
914250661Sdavidcs
915250661Sdavidcs/*
916250661Sdavidcs * Name: qla_set_mac_rcv_mode
917250661Sdavidcs * Function: Enable/Disable AllMulticast and Promiscous Modes.
918250661Sdavidcs */
919250661Sdavidcsstatic int
920250661Sdavidcsqla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
921250661Sdavidcs{
922250661Sdavidcs	q80_config_mac_rcv_mode_t	*rcv_mode;
923250661Sdavidcs	uint32_t			err;
924250661Sdavidcs	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
925250661Sdavidcs	device_t			dev = ha->pci_dev;
926250661Sdavidcs
927250661Sdavidcs	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
928250661Sdavidcs	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
929250661Sdavidcs
930250661Sdavidcs	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
931250661Sdavidcs	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
932250661Sdavidcs	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
933250661Sdavidcs
934250661Sdavidcs	rcv_mode->mode = mode;
935250661Sdavidcs
936250661Sdavidcs	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
937250661Sdavidcs
938250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
939250661Sdavidcs		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
940250661Sdavidcs		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
941250661Sdavidcs		device_printf(dev, "%s: failed0\n", __func__);
942250661Sdavidcs		return (-1);
943250661Sdavidcs	}
944250661Sdavidcs	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
945250661Sdavidcs
946250661Sdavidcs	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
947250661Sdavidcs
948250661Sdavidcs	if (err) {
949250661Sdavidcs		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
950250661Sdavidcs		return (-1);
951250661Sdavidcs	}
952250661Sdavidcs
953250661Sdavidcs	return 0;
954250661Sdavidcs}
955250661Sdavidcs
956250661Sdavidcsint
957250661Sdavidcsql_set_promisc(qla_host_t *ha)
958250661Sdavidcs{
959250661Sdavidcs	int ret;
960250661Sdavidcs
961250661Sdavidcs	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
962250661Sdavidcs	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
963250661Sdavidcs	return (ret);
964250661Sdavidcs}
965250661Sdavidcs
966250661Sdavidcsint
967250661Sdavidcsql_set_allmulti(qla_host_t *ha)
968250661Sdavidcs{
969250661Sdavidcs	int ret;
970250661Sdavidcs
971250661Sdavidcs	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
972250661Sdavidcs	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
973250661Sdavidcs	return (ret);
974250661Sdavidcs}
975250661Sdavidcs
976250661Sdavidcs
977250661Sdavidcs/*
978250661Sdavidcs * Name: ql_set_max_mtu
979250661Sdavidcs * Function:
980250661Sdavidcs *	Sets the maximum transfer unit size for the specified rcv context.
981250661Sdavidcs */
982250661Sdavidcsint
983250661Sdavidcsql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
984250661Sdavidcs{
985250661Sdavidcs	device_t		dev;
986250661Sdavidcs	q80_set_max_mtu_t	*max_mtu;
987250661Sdavidcs	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
988250661Sdavidcs	uint32_t		err;
989250661Sdavidcs
990250661Sdavidcs	dev = ha->pci_dev;
991250661Sdavidcs
992250661Sdavidcs	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
993250661Sdavidcs	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
994250661Sdavidcs
995250661Sdavidcs	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
996250661Sdavidcs	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
997250661Sdavidcs	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
998250661Sdavidcs
999250661Sdavidcs	max_mtu->cntxt_id = cntxt_id;
1000250661Sdavidcs	max_mtu->mtu = mtu;
1001250661Sdavidcs
1002250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1003250661Sdavidcs		(sizeof (q80_set_max_mtu_t) >> 2),
1004250661Sdavidcs                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1005250661Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
1006250661Sdavidcs                return -1;
1007250661Sdavidcs        }
1008250661Sdavidcs
1009250661Sdavidcs	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1010250661Sdavidcs
1011250661Sdavidcs        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1012250661Sdavidcs
1013250661Sdavidcs        if (err) {
1014250661Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1015250661Sdavidcs        }
1016250661Sdavidcs
1017250661Sdavidcs	return 0;
1018250661Sdavidcs}
1019250661Sdavidcs
1020250661Sdavidcsstatic int
1021250661Sdavidcsqla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1022250661Sdavidcs{
1023250661Sdavidcs	device_t		dev;
1024250661Sdavidcs	q80_link_event_t	*lnk;
1025250661Sdavidcs	q80_link_event_rsp_t	*lnk_rsp;
1026250661Sdavidcs	uint32_t		err;
1027250661Sdavidcs
1028250661Sdavidcs	dev = ha->pci_dev;
1029250661Sdavidcs
1030250661Sdavidcs	lnk = (q80_link_event_t *)ha->hw.mbox;
1031250661Sdavidcs	bzero(lnk, (sizeof (q80_link_event_t)));
1032250661Sdavidcs
1033250661Sdavidcs	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1034250661Sdavidcs	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1035250661Sdavidcs	lnk->count_version |= Q8_MBX_CMD_VERSION;
1036250661Sdavidcs
1037250661Sdavidcs	lnk->cntxt_id = cntxt_id;
1038250661Sdavidcs	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1039250661Sdavidcs
1040250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1041250661Sdavidcs                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1042250661Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
1043250661Sdavidcs                return -1;
1044250661Sdavidcs        }
1045250661Sdavidcs
1046250661Sdavidcs	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1047250661Sdavidcs
1048250661Sdavidcs        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1049250661Sdavidcs
1050250661Sdavidcs        if (err) {
1051250661Sdavidcs                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1052250661Sdavidcs        }
1053250661Sdavidcs
1054250661Sdavidcs	return 0;
1055250661Sdavidcs}
1056250661Sdavidcs
1057250661Sdavidcsstatic int
1058250661Sdavidcsqla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1059250661Sdavidcs{
1060250661Sdavidcs	device_t		dev;
1061250661Sdavidcs	q80_config_fw_lro_t	*fw_lro;
1062250661Sdavidcs	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
1063250661Sdavidcs	uint32_t		err;
1064250661Sdavidcs
1065250661Sdavidcs	dev = ha->pci_dev;
1066250661Sdavidcs
1067250661Sdavidcs	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1068250661Sdavidcs	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1069250661Sdavidcs
1070250661Sdavidcs	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1071250661Sdavidcs	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1072250661Sdavidcs	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1073250661Sdavidcs
1074250661Sdavidcs	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1075250661Sdavidcs
1076250661Sdavidcs	fw_lro->cntxt_id = cntxt_id;
1077250661Sdavidcs
1078258898Sdavidcs	if (ha->hw.min_lro_pkt_size) {
1079258898Sdavidcs		fw_lro->flags |= Q8_MBX_FW_LRO_LOW_THRESHOLD;
1080258898Sdavidcs		fw_lro->low_threshold = ha->hw.min_lro_pkt_size;
1081258898Sdavidcs	}
1082258898Sdavidcs
1083250661Sdavidcs	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1084250661Sdavidcs		(sizeof (q80_config_fw_lro_t) >> 2),
1085250661Sdavidcs		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1086250661Sdavidcs		device_printf(dev, "%s: failed\n", __func__);
1087250661Sdavidcs		return -1;
1088250661Sdavidcs	}
1089250661Sdavidcs
1090250661Sdavidcs	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1091250661Sdavidcs
1092250661Sdavidcs	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1093250661Sdavidcs
1094250661Sdavidcs	if (err) {
1095250661Sdavidcs		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1096250661Sdavidcs	}
1097250661Sdavidcs
1098250661Sdavidcs	return 0;
1099250661Sdavidcs}
1100250661Sdavidcs
1101250661Sdavidcsstatic void
1102250661Sdavidcsqla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat)
1103250661Sdavidcs{
1104250661Sdavidcs	device_t dev = ha->pci_dev;
1105250661Sdavidcs
1106250661Sdavidcs	device_printf(dev, "%s: total_bytes\t\t%" PRIu64 "\n", __func__,
1107250661Sdavidcs		xstat->total_bytes);
1108250661Sdavidcs	device_printf(dev, "%s: total_pkts\t\t%" PRIu64 "\n", __func__,
1109250661Sdavidcs		xstat->total_pkts);
1110250661Sdavidcs	device_printf(dev, "%s: errors\t\t%" PRIu64 "\n", __func__,
1111250661Sdavidcs		xstat->errors);
1112250661Sdavidcs	device_printf(dev, "%s: pkts_dropped\t%" PRIu64 "\n", __func__,
1113250661Sdavidcs		xstat->pkts_dropped);
1114250661Sdavidcs	device_printf(dev, "%s: switch_pkts\t\t%" PRIu64 "\n", __func__,
1115250661Sdavidcs		xstat->switch_pkts);
1116250661Sdavidcs	device_printf(dev, "%s: num_buffers\t\t%" PRIu64 "\n", __func__,
1117250661Sdavidcs		xstat->num_buffers);
1118250661Sdavidcs}
1119250661Sdavidcs
1120250661Sdavidcsstatic void
1121250661Sdavidcsqla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1122250661Sdavidcs{
1123250661Sdavidcs	device_t dev = ha->pci_dev;
1124250661Sdavidcs
1125250661Sdavidcs	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1126250661Sdavidcs		rstat->total_bytes);
1127250661Sdavidcs	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1128250661Sdavidcs		rstat->total_pkts);
1129250661Sdavidcs	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1130250661Sdavidcs		rstat->lro_pkt_count);
1131250661Sdavidcs	device_printf(dev, "%s: sw_pkt_count\t\t%" PRIu64 "\n", __func__,
1132250661Sdavidcs		rstat->sw_pkt_count);
1133250661Sdavidcs	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1134250661Sdavidcs		rstat->ip_chksum_err);
1135250661Sdavidcs	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1136250661Sdavidcs		rstat->pkts_wo_acntxts);
1137250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1138250661Sdavidcs		__func__, rstat->pkts_dropped_no_sds_card);
1139250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1140250661Sdavidcs		__func__, rstat->pkts_dropped_no_sds_host);
1141250661Sdavidcs	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1142250661Sdavidcs		rstat->oversized_pkts);
1143250661Sdavidcs	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1144250661Sdavidcs		__func__, rstat->pkts_dropped_no_rds);
1145250661Sdavidcs	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1146250661Sdavidcs		__func__, rstat->unxpctd_mcast_pkts);
1147250661Sdavidcs	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1148250661Sdavidcs		rstat->re1_fbq_error);
1149250661Sdavidcs	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1150250661Sdavidcs		rstat->invalid_mac_addr);
1151250661Sdavidcs	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1152250661Sdavidcs		rstat->rds_prime_trys);
1153250661Sdavidcs	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1154250661Sdavidcs		rstat->rds_prime_success);
1155250661Sdavidcs	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1156250661Sdavidcs		rstat->lro_flows_added);
1157250661Sdavidcs	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1158250661Sdavidcs		rstat->lro_flows_deleted);
1159250661Sdavidcs	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1160250661Sdavidcs		rstat->lro_flows_active);
1161250661Sdavidcs	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1162250661Sdavidcs		__func__, rstat->pkts_droped_unknown);
1163250661Sdavidcs}
1164250661Sdavidcs
1165250661Sdavidcsstatic void
1166250661Sdavidcsqla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1167250661Sdavidcs{
1168250661Sdavidcs	device_t dev = ha->pci_dev;
1169250661Sdavidcs
1170250661Sdavidcs	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1171250661Sdavidcs		mstat->xmt_frames);
1172250661Sdavidcs	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1173250661Sdavidcs		mstat->xmt_bytes);
1174250661Sdavidcs	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1175250661Sdavidcs		mstat->xmt_mcast_pkts);
1176250661Sdavidcs	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1177250661Sdavidcs		mstat->xmt_bcast_pkts);
1178250661Sdavidcs	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1179250661Sdavidcs		mstat->xmt_pause_frames);
1180250661Sdavidcs	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1181250661Sdavidcs		mstat->xmt_cntrl_pkts);
1182250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1183250661Sdavidcs		__func__, mstat->xmt_pkt_lt_64bytes);
1184250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1185250661Sdavidcs		__func__, mstat->xmt_pkt_lt_127bytes);
1186250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1187250661Sdavidcs		__func__, mstat->xmt_pkt_lt_255bytes);
1188250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1189250661Sdavidcs		__func__, mstat->xmt_pkt_lt_511bytes);
1190250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t%" PRIu64 "\n",
1191250661Sdavidcs		__func__, mstat->xmt_pkt_lt_1023bytes);
1192250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t%" PRIu64 "\n",
1193250661Sdavidcs		__func__, mstat->xmt_pkt_lt_1518bytes);
1194250661Sdavidcs	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t%" PRIu64 "\n",
1195250661Sdavidcs		__func__, mstat->xmt_pkt_gt_1518bytes);
1196250661Sdavidcs
1197250661Sdavidcs	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1198250661Sdavidcs		mstat->rcv_frames);
1199250661Sdavidcs	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1200250661Sdavidcs		mstat->rcv_bytes);
1201250661Sdavidcs	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1202250661Sdavidcs		mstat->rcv_mcast_pkts);
1203250661Sdavidcs	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1204250661Sdavidcs		mstat->rcv_bcast_pkts);
1205250661Sdavidcs	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1206250661Sdavidcs		mstat->rcv_pause_frames);
1207250661Sdavidcs	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1208250661Sdavidcs		mstat->rcv_cntrl_pkts);
1209250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1210250661Sdavidcs		__func__, mstat->rcv_pkt_lt_64bytes);
1211250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1212250661Sdavidcs		__func__, mstat->rcv_pkt_lt_127bytes);
1213250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1214250661Sdavidcs		__func__, mstat->rcv_pkt_lt_255bytes);
1215250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1216250661Sdavidcs		__func__, mstat->rcv_pkt_lt_511bytes);
1217250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t%" PRIu64 "\n",
1218250661Sdavidcs		__func__, mstat->rcv_pkt_lt_1023bytes);
1219250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t%" PRIu64 "\n",
1220250661Sdavidcs		__func__, mstat->rcv_pkt_lt_1518bytes);
1221250661Sdavidcs	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t%" PRIu64 "\n",
1222250661Sdavidcs		__func__, mstat->rcv_pkt_gt_1518bytes);
1223250661Sdavidcs
1224250661Sdavidcs	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1225250661Sdavidcs		mstat->rcv_len_error);
1226250661Sdavidcs	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1227250661Sdavidcs		mstat->rcv_len_small);
1228250661Sdavidcs	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1229250661Sdavidcs		mstat->rcv_len_large);
1230250661Sdavidcs	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1231250661Sdavidcs		mstat->rcv_jabber);
1232250661Sdavidcs	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1233250661Sdavidcs		mstat->rcv_dropped);
1234250661Sdavidcs	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1235250661Sdavidcs		mstat->fcs_error);
1236250661Sdavidcs	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1237250661Sdavidcs		mstat->align_error);
1238250661Sdavidcs}
1239250661Sdavidcs
1240250661Sdavidcs
1241250661Sdavidcsstatic int
1242250661Sdavidcsqla_get_hw_stats(qla_host_t *ha, uint32_t cmd)
1243250661Sdavidcs{
1244250661Sdavidcs	device_t		dev;
1245250661Sdavidcs	q80_get_stats_t		*stat;
1246250661Sdavidcs	q80_get_stats_rsp_t	*stat_rsp;
1247250661Sdavidcs	uint32_t		err;
1248250661Sdavidcs
1249250661Sdavidcs	dev = ha->pci_dev;
1250250661Sdavidcs
1251250661Sdavidcs	stat = (q80_get_stats_t *)ha->hw.mbox;
1252250661Sdavidcs	bzero(stat, (sizeof (q80_get_stats_t)));
1253250661Sdavidcs
1254250661Sdavidcs	stat->opcode = Q8_MBX_GET_STATS;
1255250661Sdavidcs	stat->count_version = 2;
1256250661Sdavidcs	stat->count_version |= Q8_MBX_CMD_VERSION;
1257250661Sdavidcs
1258250661Sdavidcs	stat->cmd = cmd;
1259250661Sdavidcs
1260250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1261250661Sdavidcs                ha->hw.mbox, (sizeof (q80_get_stats_rsp_t) >> 2), 0)) {
1262250661Sdavidcs                device_printf(dev, "%s: failed\n", __func__);
1263250661Sdavidcs                return -1;
1264250661Sdavidcs        }
1265250661Sdavidcs
1266250661Sdavidcs	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1267250661Sdavidcs
1268250661Sdavidcs        err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1269250661Sdavidcs
1270250661Sdavidcs        if (err) {
1271250661Sdavidcs                return -1;
1272250661Sdavidcs        }
1273250661Sdavidcs
1274250661Sdavidcs	return 0;
1275250661Sdavidcs}
1276250661Sdavidcs
1277250661Sdavidcsvoid
1278250661Sdavidcsql_get_stats(qla_host_t *ha)
1279250661Sdavidcs{
1280250661Sdavidcs	q80_get_stats_rsp_t	*stat_rsp;
1281250661Sdavidcs	q80_mac_stats_t		*mstat;
1282250661Sdavidcs	q80_xmt_stats_t		*xstat;
1283250661Sdavidcs	q80_rcv_stats_t		*rstat;
1284250661Sdavidcs	uint32_t		cmd;
1285250661Sdavidcs
1286250661Sdavidcs	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1287250661Sdavidcs	/*
1288250661Sdavidcs	 * Get MAC Statistics
1289250661Sdavidcs	 */
1290250661Sdavidcs	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1291250661Sdavidcs
1292250661Sdavidcs	cmd |= ((ha->pci_func & 0x1) << 16);
1293250661Sdavidcs
1294250661Sdavidcs	if (qla_get_hw_stats(ha, cmd) == 0) {
1295250661Sdavidcs		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1296250661Sdavidcs		qla_mac_stats(ha, mstat);
1297250661Sdavidcs	} else {
1298250661Sdavidcs                device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1299250661Sdavidcs			__func__, ha->hw.mbox[0]);
1300250661Sdavidcs	}
1301250661Sdavidcs	/*
1302250661Sdavidcs	 * Get RCV Statistics
1303250661Sdavidcs	 */
1304250661Sdavidcs	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1305250661Sdavidcs	cmd |= (ha->hw.rcv_cntxt_id << 16);
1306250661Sdavidcs
1307250661Sdavidcs	if (qla_get_hw_stats(ha, cmd) == 0) {
1308250661Sdavidcs		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1309250661Sdavidcs		qla_rcv_stats(ha, rstat);
1310250661Sdavidcs	} else {
1311250661Sdavidcs                device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1312250661Sdavidcs			__func__, ha->hw.mbox[0]);
1313250661Sdavidcs	}
1314250661Sdavidcs	/*
1315250661Sdavidcs	 * Get XMT Statistics
1316250661Sdavidcs	 */
1317250661Sdavidcs	cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1318250661Sdavidcs	cmd |= (ha->hw.tx_cntxt[ha->txr_idx].tx_cntxt_id << 16);
1319250661Sdavidcs
1320250661Sdavidcs
1321250661Sdavidcs	if (qla_get_hw_stats(ha, cmd) == 0) {
1322250661Sdavidcs		xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1323250661Sdavidcs		qla_xmt_stats(ha, xstat);
1324250661Sdavidcs	} else {
1325250661Sdavidcs                device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1326250661Sdavidcs			__func__, ha->hw.mbox[0]);
1327250661Sdavidcs	}
1328250661Sdavidcs}
1329250661Sdavidcs
1330250661Sdavidcs/*
1331250661Sdavidcs * Name: qla_tx_tso
1332250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for
1333250661Sdavidcs *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1334250661Sdavidcs *	Ring Structure are plugged in.
1335250661Sdavidcs */
1336250661Sdavidcsstatic int
1337250661Sdavidcsqla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1338250661Sdavidcs{
1339250661Sdavidcs	struct ether_vlan_header *eh;
1340250661Sdavidcs	struct ip *ip = NULL;
1341250661Sdavidcs	struct ip6_hdr *ip6 = NULL;
1342250661Sdavidcs	struct tcphdr *th = NULL;
1343250661Sdavidcs	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1344250661Sdavidcs	uint16_t etype, opcode, offload = 1;
1345250661Sdavidcs	device_t dev;
1346250661Sdavidcs
1347250661Sdavidcs	dev = ha->pci_dev;
1348250661Sdavidcs
1349250661Sdavidcs
1350250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1351250661Sdavidcs
1352250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1353250661Sdavidcs		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1354250661Sdavidcs		etype = ntohs(eh->evl_proto);
1355250661Sdavidcs	} else {
1356250661Sdavidcs		ehdrlen = ETHER_HDR_LEN;
1357250661Sdavidcs		etype = ntohs(eh->evl_encap_proto);
1358250661Sdavidcs	}
1359250661Sdavidcs
1360250661Sdavidcs	hdrlen = 0;
1361250661Sdavidcs
1362250661Sdavidcs	switch (etype) {
1363250661Sdavidcs		case ETHERTYPE_IP:
1364250661Sdavidcs
1365250661Sdavidcs			tcp_opt_off = ehdrlen + sizeof(struct ip) +
1366250661Sdavidcs					sizeof(struct tcphdr);
1367250661Sdavidcs
1368250661Sdavidcs			if (mp->m_len < tcp_opt_off) {
1369250661Sdavidcs				m_copydata(mp, 0, tcp_opt_off, hdr);
1370250661Sdavidcs				ip = (struct ip *)(hdr + ehdrlen);
1371250661Sdavidcs			} else {
1372250661Sdavidcs				ip = (struct ip *)(mp->m_data + ehdrlen);
1373250661Sdavidcs			}
1374250661Sdavidcs
1375250661Sdavidcs			ip_hlen = ip->ip_hl << 2;
1376250661Sdavidcs			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1377250661Sdavidcs
1378250661Sdavidcs
1379250661Sdavidcs			if ((ip->ip_p != IPPROTO_TCP) ||
1380250661Sdavidcs				(ip_hlen != sizeof (struct ip))){
1381250661Sdavidcs				/* IP Options are not supported */
1382250661Sdavidcs
1383250661Sdavidcs				offload = 0;
1384250661Sdavidcs			} else
1385250661Sdavidcs				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1386250661Sdavidcs
1387250661Sdavidcs		break;
1388250661Sdavidcs
1389250661Sdavidcs		case ETHERTYPE_IPV6:
1390250661Sdavidcs
1391250661Sdavidcs			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1392250661Sdavidcs					sizeof (struct tcphdr);
1393250661Sdavidcs
1394250661Sdavidcs			if (mp->m_len < tcp_opt_off) {
1395250661Sdavidcs				m_copydata(mp, 0, tcp_opt_off, hdr);
1396250661Sdavidcs				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1397250661Sdavidcs			} else {
1398250661Sdavidcs				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1399250661Sdavidcs			}
1400250661Sdavidcs
1401250661Sdavidcs			ip_hlen = sizeof(struct ip6_hdr);
1402250661Sdavidcs			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1403250661Sdavidcs
1404250661Sdavidcs			if (ip6->ip6_nxt != IPPROTO_TCP) {
1405250661Sdavidcs				//device_printf(dev, "%s: ipv6\n", __func__);
1406250661Sdavidcs				offload = 0;
1407250661Sdavidcs			} else
1408250661Sdavidcs				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1409250661Sdavidcs		break;
1410250661Sdavidcs
1411250661Sdavidcs		default:
1412250661Sdavidcs			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1413250661Sdavidcs			offload = 0;
1414250661Sdavidcs		break;
1415250661Sdavidcs	}
1416250661Sdavidcs
1417250661Sdavidcs	if (!offload)
1418250661Sdavidcs		return (-1);
1419250661Sdavidcs
1420250661Sdavidcs	tcp_hlen = th->th_off << 2;
1421250661Sdavidcs	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1422250661Sdavidcs
1423250661Sdavidcs        if (mp->m_len < hdrlen) {
1424250661Sdavidcs                if (mp->m_len < tcp_opt_off) {
1425250661Sdavidcs                        if (tcp_hlen > sizeof(struct tcphdr)) {
1426250661Sdavidcs                                m_copydata(mp, tcp_opt_off,
1427250661Sdavidcs                                        (tcp_hlen - sizeof(struct tcphdr)),
1428250661Sdavidcs                                        &hdr[tcp_opt_off]);
1429250661Sdavidcs                        }
1430250661Sdavidcs                } else {
1431250661Sdavidcs                        m_copydata(mp, 0, hdrlen, hdr);
1432250661Sdavidcs                }
1433250661Sdavidcs        }
1434250661Sdavidcs
1435250661Sdavidcs	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1436250661Sdavidcs
1437250661Sdavidcs	tx_cmd->flags_opcode = opcode ;
1438250661Sdavidcs	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1439250661Sdavidcs	tx_cmd->total_hdr_len = hdrlen;
1440250661Sdavidcs
1441250661Sdavidcs	/* Check for Multicast least significant bit of MSB == 1 */
1442250661Sdavidcs	if (eh->evl_dhost[0] & 0x01) {
1443250661Sdavidcs		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1444250661Sdavidcs	}
1445250661Sdavidcs
1446250661Sdavidcs	if (mp->m_len < hdrlen) {
1447250661Sdavidcs		printf("%d\n", hdrlen);
1448250661Sdavidcs		return (1);
1449250661Sdavidcs	}
1450250661Sdavidcs
1451250661Sdavidcs	return (0);
1452250661Sdavidcs}
1453250661Sdavidcs
1454250661Sdavidcs/*
1455250661Sdavidcs * Name: qla_tx_chksum
1456250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for
1457250661Sdavidcs *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1458250661Sdavidcs *	Ring Structure are plugged in.
1459250661Sdavidcs */
1460250661Sdavidcsstatic int
1461250661Sdavidcsqla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1462250661Sdavidcs	uint32_t *tcp_hdr_off)
1463250661Sdavidcs{
1464250661Sdavidcs	struct ether_vlan_header *eh;
1465250661Sdavidcs	struct ip *ip;
1466250661Sdavidcs	struct ip6_hdr *ip6;
1467250661Sdavidcs	uint32_t ehdrlen, ip_hlen;
1468250661Sdavidcs	uint16_t etype, opcode, offload = 1;
1469250661Sdavidcs	device_t dev;
1470250661Sdavidcs	uint8_t buf[sizeof(struct ip6_hdr)];
1471250661Sdavidcs
1472250661Sdavidcs	dev = ha->pci_dev;
1473250661Sdavidcs
1474250661Sdavidcs	*op_code = 0;
1475250661Sdavidcs
1476250661Sdavidcs	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1477250661Sdavidcs		return (-1);
1478250661Sdavidcs
1479250661Sdavidcs	eh = mtod(mp, struct ether_vlan_header *);
1480250661Sdavidcs
1481250661Sdavidcs	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1482250661Sdavidcs		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1483250661Sdavidcs		etype = ntohs(eh->evl_proto);
1484250661Sdavidcs	} else {
1485250661Sdavidcs		ehdrlen = ETHER_HDR_LEN;
1486250661Sdavidcs		etype = ntohs(eh->evl_encap_proto);
1487250661Sdavidcs	}
1488250661Sdavidcs
1489250661Sdavidcs
1490250661Sdavidcs	switch (etype) {
1491250661Sdavidcs		case ETHERTYPE_IP:
1492250661Sdavidcs			ip = (struct ip *)(mp->m_data + ehdrlen);
1493250661Sdavidcs
1494250661Sdavidcs			ip_hlen = sizeof (struct ip);
1495250661Sdavidcs
1496250661Sdavidcs			if (mp->m_len < (ehdrlen + ip_hlen)) {
1497250661Sdavidcs				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1498250661Sdavidcs				ip = (struct ip *)buf;
1499250661Sdavidcs			}
1500250661Sdavidcs
1501250661Sdavidcs			if (ip->ip_p == IPPROTO_TCP)
1502250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1503250661Sdavidcs			else if (ip->ip_p == IPPROTO_UDP)
1504250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1505250661Sdavidcs			else {
1506250661Sdavidcs				//device_printf(dev, "%s: ipv4\n", __func__);
1507250661Sdavidcs				offload = 0;
1508250661Sdavidcs			}
1509250661Sdavidcs		break;
1510250661Sdavidcs
1511250661Sdavidcs		case ETHERTYPE_IPV6:
1512250661Sdavidcs			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1513250661Sdavidcs
1514250661Sdavidcs			ip_hlen = sizeof(struct ip6_hdr);
1515250661Sdavidcs
1516250661Sdavidcs			if (mp->m_len < (ehdrlen + ip_hlen)) {
1517250661Sdavidcs				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1518250661Sdavidcs					buf);
1519250661Sdavidcs				ip6 = (struct ip6_hdr *)buf;
1520250661Sdavidcs			}
1521250661Sdavidcs
1522250661Sdavidcs			if (ip6->ip6_nxt == IPPROTO_TCP)
1523250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1524250661Sdavidcs			else if (ip6->ip6_nxt == IPPROTO_UDP)
1525250661Sdavidcs				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1526250661Sdavidcs			else {
1527250661Sdavidcs				//device_printf(dev, "%s: ipv6\n", __func__);
1528250661Sdavidcs				offload = 0;
1529250661Sdavidcs			}
1530250661Sdavidcs		break;
1531250661Sdavidcs
1532250661Sdavidcs		default:
1533250661Sdavidcs			offload = 0;
1534250661Sdavidcs		break;
1535250661Sdavidcs	}
1536250661Sdavidcs	if (!offload)
1537250661Sdavidcs		return (-1);
1538250661Sdavidcs
1539250661Sdavidcs	*op_code = opcode;
1540250661Sdavidcs	*tcp_hdr_off = (ip_hlen + ehdrlen);
1541250661Sdavidcs
1542250661Sdavidcs	return (0);
1543250661Sdavidcs}
1544250661Sdavidcs
1545250661Sdavidcs#define QLA_TX_MIN_FREE 2
/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If either of these criteria are not met, it is transmitted
 *	as a regular ethernet frame.
 *
 * Parameters:
 *	ha      - adapter softc
 *	segs    - DMA segments the mbuf chain was mapped into
 *	nsegs   - number of entries in segs
 *	tx_idx  - ring slot at which to place the first Tx command
 *	mp      - the packet being transmitted
 *	txr_idx - index of the Tx ring to use
 * Returns: 0 on success; -1 on failure (ring full / oversize / bad TSO hdr).
 */
int
ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t tx_idx,  struct mbuf *mp, uint32_t txr_idx)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
	device_t dev;
	int i, ret;
	uint8_t *src = NULL, *dst = NULL;
	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
	uint32_t op_code = 0;
	uint32_t tcp_hdr_off = 0;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is at least one empty slot in the tx_ring;
	 * the tx_ring is considered full when only one entry is available.
	 *
	 * Each Tx command descriptor carries up to Q8_TX_CMD_MAX_SEGMENTS
	 * DMA segments.  NOTE(review): the ">> 2" hard-codes
	 * Q8_TX_CMD_MAX_SEGMENTS == 4 - confirm against the definition.
	 */
        num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		/*
		 * qla_tx_tso() fills tso_cmd and, when the frame header is
		 * not contiguous in the first mbuf, copies it into frame_hdr.
		 * ret == 0: header readable in place; ret == 1: header was
		 * copied into frame_hdr; anything else: not TSO-able.
		 */
		src = frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

		if (!(ret & ~1)) {	/* ret is 0 or 1 */
			/* find the additional tx_cmd descriptors required */

			if (mp->m_flags & M_VLANTAG)
				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;

			hdr_len = tso_cmd.total_hdr_len;

			/*
			 * The first header chunk shares a descriptor with
			 * Q8_TX_CMD_TSO_ALIGN bytes of command data; each
			 * further chunk consumes a whole descriptor.
			 */
			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			/* Header is contiguous: copy it straight from the frame. */
			if (ret == 0)
				src = (uint8_t *)eh;
		} else
			return (EINVAL);
	} else {
		/* Non-TSO: see if plain TCP/UDP checksum offload applies. */
		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
	}

	/*
	 * Not enough free slots: reclaim completed commands and re-check.
	 */
	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		qla_hw_tx_done_locked(ha, txr_idx);
		if (hw->tx_cntxt[txr_idx].txr_free <=
				(num_tx_cmds + QLA_TX_MIN_FREE)) {
        		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];

        if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {

		/* Track the largest scatter list seen (diagnostics). */
                if (nsegs > ha->hw.max_tx_segs)
                        ha->hw.max_tx_segs = nsegs;

                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

                if (op_code) {
                        tx_cmd->flags_opcode = op_code;
                        tx_cmd->tcp_hdr_off = tcp_hdr_off;

                } else {
                        tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
                }
	} else {
		/* TSO: the command was pre-built by qla_tx_tso() above. */
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
		ha->tx_tso_frames++;
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* VLAN tag already present in the frame itself. */
        	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
	} else if (mp->m_flags & M_VLANTAG) {
		/* Tag lives in the pkthdr; have the hardware insert it. */

		if (hdr_len) { /* TSO */
			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
		} else
			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;

		ha->hw_vlan_tx_frames++;
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
	}


        tx_cmd->n_bufs = (uint8_t)nsegs;
        tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
        tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	/*
	 * Scatter the DMA segments into the command descriptors, four
	 * address/length pairs per descriptor, allocating continuation
	 * descriptors as needed.
	 */
	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {

			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		/* Advance the ring's producer (power-of-2 ring: mask wraps). */
		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		/* TSO : Copy the header in the following tx cmd descriptors */

		txr_next = hw->tx_cntxt[txr_idx].txr_next;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		/* First chunk starts after the Q8_TX_CMD_TSO_ALIGN prefix. */
		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/*
			 * The frame on the wire carries a VLAN tag the mbuf
			 * does not: synthesize the 802.1Q header in place.
			 */
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);

			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
			dst += 2;

			/* bytes left in src header */
			hdr_len -= ((ETHER_ADDR_LEN * 2) +
					ETHER_VLAN_ENCAP_LEN);

			/* bytes left in TxCmd Entry */
			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);


			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		/* Remaining header bytes fill whole descriptors. */
		while (hdr_len) {
			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;

			txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->tx_cntxt[txr_idx].txr_free =
		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;

	/* Publish the new producer index to the hardware. */
	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
		txr_idx);
       	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));

	return (0);
}
1791250661Sdavidcs
1792250661Sdavidcs
1793250661Sdavidcsstatic int
1794250661Sdavidcsqla_config_rss_ind_table(qla_host_t *ha)
1795250661Sdavidcs{
1796250661Sdavidcs	uint32_t i, count;
1797250661Sdavidcs	uint8_t rss_ind_tbl[16];
1798250661Sdavidcs
1799250661Sdavidcs	bzero(rss_ind_default_table, sizeof(rss_ind_default_table));
1800250661Sdavidcs
1801250661Sdavidcs
1802250661Sdavidcs	for (i = 0; i < 16; i++) {
1803250661Sdavidcs		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
1804250661Sdavidcs	}
1805250661Sdavidcs
1806250661Sdavidcs	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; i = i + 16) {
1807250661Sdavidcs
1808250661Sdavidcs		if ((i + 16) > Q8_RSS_IND_TBL_MAX_IDX) {
1809250661Sdavidcs			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
1810250661Sdavidcs		} else {
1811250661Sdavidcs			count = 16;
1812250661Sdavidcs		}
1813250661Sdavidcs
1814250661Sdavidcs		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
1815250661Sdavidcs			rss_ind_tbl))
1816250661Sdavidcs			return (-1);
1817250661Sdavidcs	}
1818250661Sdavidcs
1819250661Sdavidcs	return (0);
1820250661Sdavidcs}
1821250661Sdavidcs
1822250661Sdavidcs/*
1823250661Sdavidcs * Name: ql_del_hw_if
1824250661Sdavidcs * Function: Destroys the hardware specific entities corresponding to an
1825250661Sdavidcs *	Ethernet Interface
1826250661Sdavidcs */
1827250661Sdavidcsvoid
1828250661Sdavidcsql_del_hw_if(qla_host_t *ha)
1829250661Sdavidcs{
1830250661Sdavidcs
1831250661Sdavidcs	qla_del_rcv_cntxt(ha);
1832250661Sdavidcs	qla_del_xmt_cntxt(ha);
1833250661Sdavidcs
1834250661Sdavidcs	if (ha->hw.flags.init_intr_cnxt) {
1835250661Sdavidcs		qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 0);
1836250661Sdavidcs		ha->hw.flags.init_intr_cnxt = 0;
1837250661Sdavidcs	}
1838250661Sdavidcs}
1839250661Sdavidcs
/*
 * Name: ql_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
 * Returns: 0 on success, -1 on any failure.
 */
int
ql_init_hw_if(qla_host_t *ha)
{
	device_t	dev;
	uint32_t	i;
	uint8_t		bcast_mac[6];
	qla_rdesc_t	*rdesc;

	dev = ha->pci_dev;

	/* Clear every status (SDS) ring before the firmware uses them. */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}
	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);

	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

	qla_get_nic_partition(ha);

	if (qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 1) == 0) {
		ha->hw.flags.init_intr_cnxt = 1;
	} else
		return (-1);

	/* Initialize the minidump capture state once per driver lifetime. */
	if (ha->hw.mdump_init == 0) {
		qla_minidump_init(ha);
	}

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	/*
	 * Prime each receive ring: publish all but two descriptors to the
	 * hardware through the RDS producer index.
	 */
	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rdesc = &ha->hw.rds[i];
		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
		rdesc->rx_in = 0;
		/* Update the RDS Producer Indices */
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
			rdesc->rx_next);
	}


	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		qla_del_rcv_cntxt(ha);
		return (-1);
	}
	ha->hw.max_tx_segs = 0;

	/*
	 * NOTE(review): the failure paths below return -1 without tearing
	 * down the receive/transmit/interrupt contexts created above -
	 * confirm callers invoke ql_del_hw_if() on failure.
	 */
	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
		return(-1);

	ha->hw.flags.unicast_mac = 1;

	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

	if (qla_config_mac_addr(ha, bcast_mac, 1))
		return (-1);

	ha->hw.flags.bcast_mac = 1;

	/*
	 * program any cached multicast addresses
	 */
	if (qla_hw_add_all_mcast(ha))
		return (-1);

	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss_ind_table(ha))
		return (-1);

	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0))
		return (-1);

	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	/* Everything is in place; unmask the per-ring interrupts. */
	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}
1942250661Sdavidcs
1943250661Sdavidcsstatic int
1944250661Sdavidcsqla_map_sds_to_rds(qla_host_t *ha)
1945250661Sdavidcs{
1946250661Sdavidcs        device_t                dev = ha->pci_dev;
1947250661Sdavidcs        q80_rq_map_sds_to_rds_t *map_rings;
1948250661Sdavidcs        q80_rsp_add_rcv_rings_t *map_rings_rsp;
1949250661Sdavidcs        uint32_t                i, err;
1950250661Sdavidcs        qla_hw_t                *hw = &ha->hw;
1951250661Sdavidcs
1952250661Sdavidcs        map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
1953250661Sdavidcs        bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
1954250661Sdavidcs
1955250661Sdavidcs        map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
1956250661Sdavidcs        map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
1957250661Sdavidcs        map_rings->count_version |= Q8_MBX_CMD_VERSION;
1958250661Sdavidcs
1959250661Sdavidcs        map_rings->cntxt_id = hw->rcv_cntxt_id;
1960250661Sdavidcs        map_rings->num_rings = hw->num_sds_rings;
1961250661Sdavidcs
1962250661Sdavidcs        for (i = 0; i < hw->num_sds_rings; i++) {
1963250661Sdavidcs                map_rings->sds_rds[i].sds_ring = i;
1964250661Sdavidcs                map_rings->sds_rds[i].rds_ring = i;
1965250661Sdavidcs        }
1966250661Sdavidcs
1967250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
1968250661Sdavidcs                (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
1969250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
1970250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
1971250661Sdavidcs                return (-1);
1972250661Sdavidcs        }
1973250661Sdavidcs
1974250661Sdavidcs        map_rings_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
1975250661Sdavidcs
1976250661Sdavidcs        err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
1977250661Sdavidcs
1978250661Sdavidcs        if (err) {
1979250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1980250661Sdavidcs                return (-1);
1981250661Sdavidcs        }
1982250661Sdavidcs
1983250661Sdavidcs        return (0);
1984250661Sdavidcs}
1985250661Sdavidcs
1986250661Sdavidcs/*
1987250661Sdavidcs * Name: qla_init_rcv_cntxt
1988250661Sdavidcs * Function: Creates the Receive Context.
1989250661Sdavidcs */
1990250661Sdavidcsstatic int
1991250661Sdavidcsqla_init_rcv_cntxt(qla_host_t *ha)
1992250661Sdavidcs{
1993250661Sdavidcs	q80_rq_rcv_cntxt_t	*rcntxt;
1994250661Sdavidcs	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
1995250661Sdavidcs	q80_stat_desc_t		*sdesc;
1996250661Sdavidcs	int			i, j;
1997250661Sdavidcs        qla_hw_t		*hw = &ha->hw;
1998250661Sdavidcs	device_t		dev;
1999250661Sdavidcs	uint32_t		err;
2000250661Sdavidcs	uint32_t		rcntxt_sds_rings;
2001250661Sdavidcs	uint32_t		rcntxt_rds_rings;
2002250661Sdavidcs
2003250661Sdavidcs	dev = ha->pci_dev;
2004250661Sdavidcs
2005250661Sdavidcs	/*
2006250661Sdavidcs	 * Create Receive Context
2007250661Sdavidcs	 */
2008250661Sdavidcs
2009250661Sdavidcs	for (i = 0; i < hw->num_sds_rings; i++) {
2010250661Sdavidcs		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2011250661Sdavidcs
2012250661Sdavidcs		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2013250661Sdavidcs			sdesc->data[0] = 1ULL;
2014250661Sdavidcs			sdesc->data[1] = 1ULL;
2015250661Sdavidcs		}
2016250661Sdavidcs	}
2017250661Sdavidcs
2018250661Sdavidcs	rcntxt_sds_rings = hw->num_sds_rings;
2019250661Sdavidcs	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2020250661Sdavidcs		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2021250661Sdavidcs
2022250661Sdavidcs	rcntxt_rds_rings = hw->num_rds_rings;
2023250661Sdavidcs
2024250661Sdavidcs	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2025250661Sdavidcs		rcntxt_rds_rings = MAX_RDS_RING_SETS;
2026250661Sdavidcs
2027250661Sdavidcs	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2028250661Sdavidcs	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2029250661Sdavidcs
2030250661Sdavidcs	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2031250661Sdavidcs	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2032250661Sdavidcs	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2033250661Sdavidcs
2034250661Sdavidcs	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2035250661Sdavidcs			Q8_RCV_CNTXT_CAP0_LRO |
2036250661Sdavidcs			Q8_RCV_CNTXT_CAP0_HW_LRO |
2037250661Sdavidcs			Q8_RCV_CNTXT_CAP0_RSS |
2038250661Sdavidcs			Q8_RCV_CNTXT_CAP0_SGL_JUMBO |
2039250661Sdavidcs			Q8_RCV_CNTXT_CAP0_SGL_LRO;
2040250661Sdavidcs
2041250661Sdavidcs	if (ha->hw.num_rds_rings > 1) {
2042250661Sdavidcs		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2043250661Sdavidcs		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2044250661Sdavidcs	} else
2045250661Sdavidcs		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2046250661Sdavidcs
2047250661Sdavidcs	rcntxt->nsds_rings = rcntxt_sds_rings;
2048250661Sdavidcs
2049250661Sdavidcs	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2050250661Sdavidcs
2051250661Sdavidcs	rcntxt->rcv_vpid = 0;
2052250661Sdavidcs
2053250661Sdavidcs	for (i = 0; i <  rcntxt_sds_rings; i++) {
2054250661Sdavidcs		rcntxt->sds[i].paddr =
2055250661Sdavidcs			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2056250661Sdavidcs		rcntxt->sds[i].size =
2057250661Sdavidcs			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2058250661Sdavidcs		if (ha->msix_count == 2) {
2059250661Sdavidcs			rcntxt->sds[i].intr_id =
2060250661Sdavidcs				qla_host_to_le16(hw->intr_id[0]);
2061250661Sdavidcs			rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2062250661Sdavidcs		} else {
2063250661Sdavidcs			rcntxt->sds[i].intr_id =
2064250661Sdavidcs				qla_host_to_le16(hw->intr_id[i]);
2065250661Sdavidcs			rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2066250661Sdavidcs		}
2067250661Sdavidcs	}
2068250661Sdavidcs
2069250661Sdavidcs	for (i = 0; i <  rcntxt_rds_rings; i++) {
2070250661Sdavidcs		rcntxt->rds[i].paddr_std =
2071250661Sdavidcs			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2072250661Sdavidcs		rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2073250661Sdavidcs		rcntxt->rds[i].std_nentries =
2074250661Sdavidcs			qla_host_to_le32(NUM_RX_DESCRIPTORS);
2075250661Sdavidcs	}
2076250661Sdavidcs
2077250661Sdavidcs        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2078250661Sdavidcs		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
2079250661Sdavidcs                ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2080250661Sdavidcs                device_printf(dev, "%s: failed0\n", __func__);
2081250661Sdavidcs                return (-1);
2082250661Sdavidcs        }
2083250661Sdavidcs
2084250661Sdavidcs        rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2085250661Sdavidcs
2086250661Sdavidcs        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2087250661Sdavidcs
2088250661Sdavidcs        if (err) {
2089250661Sdavidcs                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2090250661Sdavidcs                return (-1);
2091250661Sdavidcs        }
2092250661Sdavidcs
2093250661Sdavidcs	for (i = 0; i <  rcntxt_sds_rings; i++) {
2094250661Sdavidcs		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2095250661Sdavidcs	}
2096250661Sdavidcs
2097250661Sdavidcs	for (i = 0; i <  rcntxt_rds_rings; i++) {
2098250661Sdavidcs		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2099250661Sdavidcs	}
2100250661Sdavidcs
2101250661Sdavidcs	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2102250661Sdavidcs
2103250661Sdavidcs	ha->hw.flags.init_rx_cnxt = 1;
2104250661Sdavidcs
2105250661Sdavidcs	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2106250661Sdavidcs		err = qla_add_rcv_rings(ha, MAX_RCNTXT_SDS_RINGS);
2107250661Sdavidcs		if (err)
2108250661Sdavidcs			return -1;
2109250661Sdavidcs	}
2110250661Sdavidcs
2111250661Sdavidcs        if (hw->num_rds_rings > 1) {
2112250661Sdavidcs		err = qla_map_sds_to_rds(ha);
2113250661Sdavidcs		if (err)
2114250661Sdavidcs			return -1;
2115250661Sdavidcs	}
2116250661Sdavidcs
2117250661Sdavidcs	return (0);
2118250661Sdavidcs}
2119250661Sdavidcs
/*
 * Name: qla_add_rcv_rings
 * Function: Adds the SDS (and, for multi-RDS configurations, RDS) rings
 *	that did not fit in the initial CREATE_RX_CNTXT command, starting at
 *	ring index sds_idx, and records the handles the firmware returns.
 * Returns: 0 on success, -1 on failure.
 */
static int
qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx)
{
	device_t		dev = ha->pci_dev;
	q80_rq_add_rcv_rings_t	*add_rcv;
	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
	uint32_t		i,j, err;
	uint8_t			nsds;
        qla_hw_t		*hw = &ha->hw;

	/* Rings left over after the initial context command. */
	nsds = hw->num_sds_rings - MAX_RCNTXT_SDS_RINGS;

	/* The request is built in place in the mailbox buffer. */
	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));

	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
	add_rcv->count_version |= Q8_MBX_CMD_VERSION;

        if (hw->num_rds_rings > 1)
                add_rcv->nrds_sets_rings = nsds | (1 << 5);
        else
                add_rcv->nrds_sets_rings = 0;

	add_rcv->nsds_rings = nsds;
	add_rcv->cntxt_id = hw->rcv_cntxt_id;

	/*
	 * Describe each new SDS ring; j is its global ring index.  With
	 * only two MSI-X vectors every ring shares vector 0 and is told
	 * apart by its interrupt source bit.
	 */
        for (i = 0; i <  nsds; i++) {

		j = i + sds_idx;

                add_rcv->sds[i].paddr =
                        qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);

                add_rcv->sds[i].size =
                        qla_host_to_le32(NUM_STATUS_DESCRIPTORS);

                if (ha->msix_count == 2) {
                        add_rcv->sds[i].intr_id =
                                qla_host_to_le16(hw->intr_id[0]);
                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
                } else {
                        add_rcv->sds[i].intr_id =
                                qla_host_to_le16(hw->intr_id[j]);
                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
                }

        }
	/* Matching RDS rings are only added in multi-RDS configurations. */
        for (i = 0; ((i <  nsds) && (hw->num_rds_rings > 1)); i++) {
                j = i + sds_idx;
                add_rcv->rds[i].paddr_std =
                        qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
                add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
                add_rcv->rds[i].std_nentries =
                        qla_host_to_le32(NUM_RX_DESCRIPTORS);
        }


        if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

	/* Record the firmware-assigned consumer/producer handles. */
	for (i = sds_idx; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_consumer = add_rcv_rsp->sds_cons[(i - sds_idx)];
	}
	/*
	 * NOTE(review): this loop is bounded by num_rds_rings but starts at
	 * sds_idx like the SDS loop above - confirm the two ring counts are
	 * equal whenever this path (num_rds_rings > 1) is taken.
	 */
	for (i = sds_idx; i < hw->num_rds_rings; i++) {
		hw->rds[i].prod_std = add_rcv_rsp->rds[(i - sds_idx)].prod_std;
	}
	return (0);
}
2202250661Sdavidcs
/*
 * Name: qla_del_rcv_cntxt
 * Function: Destroys the Receive Context.
 *
 * Before issuing the destroy mailbox command, all MAC filters that were
 * programmed against this context (multicast, broadcast, unicast) are
 * removed.  Any failure along the way aborts the teardown early, leaving
 * init_rx_cnxt set so a later retry is possible.
 */
static void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	device_t			dev = ha->pci_dev;
	q80_rcv_cntxt_destroy_t		*rcntxt;
	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
	uint32_t			err;
	uint8_t				bcast_mac[6];

	/* Nothing to do if the receive context was never created. */
	if (!ha->hw.flags.init_rx_cnxt)
		return;

	/* Remove all programmed multicast filters first. */
	if (qla_hw_del_all_mcast(ha))
		return;

	/* Deregister the broadcast MAC address, if it was programmed. */
	if (ha->hw.flags.bcast_mac) {

		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

		if (qla_config_mac_addr(ha, bcast_mac, 0))
			return;
		ha->hw.flags.bcast_mac = 0;

	}

	/* Deregister the unicast MAC address, if it was programmed. */
	if (ha->hw.flags.unicast_mac) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
			return;
		ha->hw.flags.unicast_mac = 0;
	}

	/* Build the Destroy Receive Context command in the mailbox area. */
	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));

	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
	/* count_version: request length in 32-bit words plus command version. */
	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
                ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return;
        }
        rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
        }

	/* Context destroy was issued; clear the init flag regardless of err. */
	ha->hw.flags.init_rx_cnxt = 0;
	return;
}
2265250661Sdavidcs
/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
 *
 * Creates one transmit context (one tx ring) for ring index txr_idx via a
 * mailbox command and records the firmware-assigned producer register and
 * context id in the host-side tx context state.
 * Returns 0 on success, -1 on mailbox or firmware-status failure.
 */
static int
qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t		dev;
        qla_hw_t		*hw = &ha->hw;
	q80_rq_tx_cntxt_t	*tcntxt;
	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
	uint32_t		err;
	qla_hw_tx_cntxt_t       *hw_tx_cntxt;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context
	 */
	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));

	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
	/* count_version: request length in 32-bit words plus command version. */
	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	/* Request base firmware handling and LSO capability. */
	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;

	tcntxt->ntx_rings = 1;

	/* Physical addresses of the tx ring and its consumer-index area. */
	tcntxt->tx_ring[0].paddr =
		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
	tcntxt->tx_ring[0].tx_consumer =
		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);

	/* NOTE(review): all tx rings share intr_id[0] here — looks
	 * intentional (tx completions on one vector); confirm. */
	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);


	/* Reset the host-side ring bookkeeping before the context goes live. */
	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;

        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_rq_tx_cntxt_t) >> 2),
                ha->hw.mbox,
		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return -1;
        }

	/* Save the firmware-assigned producer register and context id. */
	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;

	return (0);
}
2332250661Sdavidcs
2333250661Sdavidcs
/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Context.
 *
 * Destroys the transmit context for ring index txr_idx via a mailbox
 * command.  Returns 0 on success, -1 on mailbox or firmware-status failure.
 */
static int
qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t			dev = ha->pci_dev;
	q80_tx_cntxt_destroy_t		*tcntxt;
	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
	uint32_t			err;

	/* Build the Destroy Transmit Context command in the mailbox area. */
	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));

	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
	/* count_version: request length in 32-bit words plus command version. */
	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	/* Identify which transmit context to destroy. */
	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
                ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
        }

	return (0);
}
2372250661Sdavidcsstatic void
2373250661Sdavidcsqla_del_xmt_cntxt(qla_host_t *ha)
2374250661Sdavidcs{
2375250661Sdavidcs	uint32_t i;
2376250661Sdavidcs
2377250661Sdavidcs	if (!ha->hw.flags.init_tx_cnxt)
2378250661Sdavidcs		return;
2379250661Sdavidcs
2380250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2381250661Sdavidcs		if (qla_del_xmt_cntxt_i(ha, i))
2382250661Sdavidcs			break;
2383250661Sdavidcs	}
2384250661Sdavidcs	ha->hw.flags.init_tx_cnxt = 0;
2385250661Sdavidcs}
2386250661Sdavidcs
2387250661Sdavidcsstatic int
2388250661Sdavidcsqla_init_xmt_cntxt(qla_host_t *ha)
2389250661Sdavidcs{
2390250661Sdavidcs	uint32_t i, j;
2391250661Sdavidcs
2392250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2393250661Sdavidcs		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2394250661Sdavidcs			for (j = 0; j < i; j++)
2395250661Sdavidcs				qla_del_xmt_cntxt_i(ha, j);
2396250661Sdavidcs			return (-1);
2397250661Sdavidcs		}
2398250661Sdavidcs	}
2399250661Sdavidcs	ha->hw.flags.init_tx_cnxt = 1;
2400250661Sdavidcs	return (0);
2401250661Sdavidcs}
2402250661Sdavidcs
2403250661Sdavidcsstatic int
2404250661Sdavidcsqla_hw_add_all_mcast(qla_host_t *ha)
2405250661Sdavidcs{
2406250661Sdavidcs	int i, nmcast;
2407250661Sdavidcs
2408250661Sdavidcs	nmcast = ha->hw.nmcast;
2409250661Sdavidcs
2410250661Sdavidcs	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2411250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] != 0) ||
2412250661Sdavidcs			(ha->hw.mcast[i].addr[1] != 0) ||
2413250661Sdavidcs			(ha->hw.mcast[i].addr[2] != 0) ||
2414250661Sdavidcs			(ha->hw.mcast[i].addr[3] != 0) ||
2415250661Sdavidcs			(ha->hw.mcast[i].addr[4] != 0) ||
2416250661Sdavidcs			(ha->hw.mcast[i].addr[5] != 0)) {
2417250661Sdavidcs
2418250661Sdavidcs			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2419250661Sdavidcs                		device_printf(ha->pci_dev, "%s: failed\n",
2420250661Sdavidcs					__func__);
2421250661Sdavidcs				return (-1);
2422250661Sdavidcs			}
2423250661Sdavidcs
2424250661Sdavidcs			nmcast--;
2425250661Sdavidcs		}
2426250661Sdavidcs	}
2427250661Sdavidcs	return 0;
2428250661Sdavidcs}
2429250661Sdavidcs
2430250661Sdavidcsstatic int
2431250661Sdavidcsqla_hw_del_all_mcast(qla_host_t *ha)
2432250661Sdavidcs{
2433250661Sdavidcs	int i, nmcast;
2434250661Sdavidcs
2435250661Sdavidcs	nmcast = ha->hw.nmcast;
2436250661Sdavidcs
2437250661Sdavidcs	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2438250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] != 0) ||
2439250661Sdavidcs			(ha->hw.mcast[i].addr[1] != 0) ||
2440250661Sdavidcs			(ha->hw.mcast[i].addr[2] != 0) ||
2441250661Sdavidcs			(ha->hw.mcast[i].addr[3] != 0) ||
2442250661Sdavidcs			(ha->hw.mcast[i].addr[4] != 0) ||
2443250661Sdavidcs			(ha->hw.mcast[i].addr[5] != 0)) {
2444250661Sdavidcs
2445250661Sdavidcs			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2446250661Sdavidcs				return (-1);
2447250661Sdavidcs
2448250661Sdavidcs			nmcast--;
2449250661Sdavidcs		}
2450250661Sdavidcs	}
2451250661Sdavidcs	return 0;
2452250661Sdavidcs}
2453250661Sdavidcs
2454250661Sdavidcsstatic int
2455250661Sdavidcsqla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2456250661Sdavidcs{
2457250661Sdavidcs	int i;
2458250661Sdavidcs
2459250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2460250661Sdavidcs
2461250661Sdavidcs		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2462250661Sdavidcs			return 0; /* its been already added */
2463250661Sdavidcs	}
2464250661Sdavidcs
2465250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2466250661Sdavidcs
2467250661Sdavidcs		if ((ha->hw.mcast[i].addr[0] == 0) &&
2468250661Sdavidcs			(ha->hw.mcast[i].addr[1] == 0) &&
2469250661Sdavidcs			(ha->hw.mcast[i].addr[2] == 0) &&
2470250661Sdavidcs			(ha->hw.mcast[i].addr[3] == 0) &&
2471250661Sdavidcs			(ha->hw.mcast[i].addr[4] == 0) &&
2472250661Sdavidcs			(ha->hw.mcast[i].addr[5] == 0)) {
2473250661Sdavidcs
2474250661Sdavidcs			if (qla_config_mac_addr(ha, mta, 1))
2475250661Sdavidcs				return (-1);
2476250661Sdavidcs
2477250661Sdavidcs			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2478250661Sdavidcs			ha->hw.nmcast++;
2479250661Sdavidcs
2480250661Sdavidcs			return 0;
2481250661Sdavidcs		}
2482250661Sdavidcs	}
2483250661Sdavidcs	return 0;
2484250661Sdavidcs}
2485250661Sdavidcs
2486250661Sdavidcsstatic int
2487250661Sdavidcsqla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2488250661Sdavidcs{
2489250661Sdavidcs	int i;
2490250661Sdavidcs
2491250661Sdavidcs	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2492250661Sdavidcs		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2493250661Sdavidcs
2494250661Sdavidcs			if (qla_config_mac_addr(ha, mta, 0))
2495250661Sdavidcs				return (-1);
2496250661Sdavidcs
2497250661Sdavidcs			ha->hw.mcast[i].addr[0] = 0;
2498250661Sdavidcs			ha->hw.mcast[i].addr[1] = 0;
2499250661Sdavidcs			ha->hw.mcast[i].addr[2] = 0;
2500250661Sdavidcs			ha->hw.mcast[i].addr[3] = 0;
2501250661Sdavidcs			ha->hw.mcast[i].addr[4] = 0;
2502250661Sdavidcs			ha->hw.mcast[i].addr[5] = 0;
2503250661Sdavidcs
2504250661Sdavidcs			ha->hw.nmcast--;
2505250661Sdavidcs
2506250661Sdavidcs			return 0;
2507250661Sdavidcs		}
2508250661Sdavidcs	}
2509250661Sdavidcs	return 0;
2510250661Sdavidcs}
2511250661Sdavidcs
2512250661Sdavidcs/*
2513250661Sdavidcs * Name: ql_hw_set_multi
2514250661Sdavidcs * Function: Sets the Multicast Addresses provided the host O.S into the
2515250661Sdavidcs *	hardware (for the given interface)
2516250661Sdavidcs */
2517250661Sdavidcsint
2518250661Sdavidcsql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2519250661Sdavidcs	uint32_t add_mac)
2520250661Sdavidcs{
2521250661Sdavidcs	int i;
2522250661Sdavidcs	uint8_t *mta = mcast;
2523250661Sdavidcs	int ret = 0;
2524250661Sdavidcs
2525250661Sdavidcs	for (i = 0; i < mcnt; i++) {
2526250661Sdavidcs		if (add_mac) {
2527250661Sdavidcs			ret = qla_hw_add_mcast(ha, mta);
2528250661Sdavidcs			if (ret)
2529250661Sdavidcs				break;
2530250661Sdavidcs		} else {
2531250661Sdavidcs			ret = qla_hw_del_mcast(ha, mta);
2532250661Sdavidcs			if (ret)
2533250661Sdavidcs				break;
2534250661Sdavidcs		}
2535250661Sdavidcs
2536250661Sdavidcs		mta += Q8_MAC_ADDR_LEN;
2537250661Sdavidcs	}
2538250661Sdavidcs	return (ret);
2539250661Sdavidcs}
2540250661Sdavidcs
/*
 * Name: qla_hw_tx_done_locked
 * Function: Handle Transmit Completions
 *
 * Reclaims completed tx descriptors for ring txr_idx: walks the host
 * completion cursor (txr_comp) up to the hardware consumer index, freeing
 * each transmitted mbuf, then returns the reclaimed count to txr_free.
 * Caller must hold the tx lock.
 */
static void
qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
{
	qla_tx_buf_t *txb;
        qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));

	while (comp_idx != hw_tx_cntxt->txr_comp) {

		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];

		/* Advance the completion cursor, wrapping at ring end. */
		hw_tx_cntxt->txr_comp++;
		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
			hw_tx_cntxt->txr_comp = 0;

		comp_count++;

		/* Unmap and free the transmitted mbuf, if one is attached. */
		if (txb->m_head) {
			ha->ifp->if_opackets++;

			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->m_head = NULL;
		}
	}

	/* Return all reclaimed descriptors to the free pool at once. */
	hw_tx_cntxt->txr_free += comp_count;
	return;
}
2583250661Sdavidcs
2584250661Sdavidcs/*
2585250661Sdavidcs * Name: ql_hw_tx_done
2586250661Sdavidcs * Function: Handle Transmit Completions
2587250661Sdavidcs */
2588250661Sdavidcsvoid
2589250661Sdavidcsql_hw_tx_done(qla_host_t *ha)
2590250661Sdavidcs{
2591250661Sdavidcs	int i;
2592250661Sdavidcs	uint32_t flag = 0;
2593250661Sdavidcs
2594250661Sdavidcs	if (!mtx_trylock(&ha->tx_lock)) {
2595250661Sdavidcs       		QL_DPRINT8(ha, (ha->pci_dev,
2596250661Sdavidcs			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2597250661Sdavidcs		return;
2598250661Sdavidcs	}
2599250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2600250661Sdavidcs		qla_hw_tx_done_locked(ha, i);
2601250661Sdavidcs		if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2602250661Sdavidcs			flag = 1;
2603250661Sdavidcs	}
2604250661Sdavidcs
2605250661Sdavidcs	if (!flag)
2606250661Sdavidcs		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2607250661Sdavidcs
2608250661Sdavidcs	QLA_TX_UNLOCK(ha);
2609250661Sdavidcs	return;
2610250661Sdavidcs}
2611250661Sdavidcs
2612250661Sdavidcsvoid
2613250661Sdavidcsql_update_link_state(qla_host_t *ha)
2614250661Sdavidcs{
2615250661Sdavidcs	uint32_t link_state;
2616250661Sdavidcs	uint32_t prev_link_state;
2617250661Sdavidcs
2618250661Sdavidcs	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2619250661Sdavidcs		ha->hw.link_up = 0;
2620250661Sdavidcs		return;
2621250661Sdavidcs	}
2622250661Sdavidcs	link_state = READ_REG32(ha, Q8_LINK_STATE);
2623250661Sdavidcs
2624250661Sdavidcs	prev_link_state =  ha->hw.link_up;
2625250661Sdavidcs
2626250661Sdavidcs	if (ha->pci_func == 0)
2627250661Sdavidcs		ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
2628250661Sdavidcs	else
2629250661Sdavidcs		ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
2630250661Sdavidcs
2631250661Sdavidcs	if (prev_link_state !=  ha->hw.link_up) {
2632250661Sdavidcs		if (ha->hw.link_up) {
2633250661Sdavidcs			if_link_state_change(ha->ifp, LINK_STATE_UP);
2634250661Sdavidcs		} else {
2635250661Sdavidcs			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
2636250661Sdavidcs		}
2637250661Sdavidcs	}
2638250661Sdavidcs	return;
2639250661Sdavidcs}
2640250661Sdavidcs
2641250661Sdavidcsvoid
2642250661Sdavidcsql_hw_stop_rcv(qla_host_t *ha)
2643250661Sdavidcs{
2644250661Sdavidcs	int i, done, count = 100;
2645250661Sdavidcs
2646250661Sdavidcs	while (count--) {
2647250661Sdavidcs		done = 1;
2648250661Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
2649250661Sdavidcs			if (ha->hw.sds[i].rcv_active)
2650250661Sdavidcs				done = 0;
2651250661Sdavidcs		}
2652250661Sdavidcs		if (done)
2653250661Sdavidcs			break;
2654250661Sdavidcs		else
2655250661Sdavidcs			qla_mdelay(__func__, 10);
2656250661Sdavidcs	}
2657250661Sdavidcs	if (!count)
2658250661Sdavidcs		device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
2659250661Sdavidcs
2660250661Sdavidcs	return;
2661250661Sdavidcs}
2662250661Sdavidcs
2663250661Sdavidcsint
2664250661Sdavidcsql_hw_check_health(qla_host_t *ha)
2665250661Sdavidcs{
2666250661Sdavidcs	uint32_t val;
2667250661Sdavidcs
2668250661Sdavidcs	ha->hw.health_count++;
2669250661Sdavidcs
2670250661Sdavidcs	if (ha->hw.health_count < 1000)
2671250661Sdavidcs		return 0;
2672250661Sdavidcs
2673250661Sdavidcs	ha->hw.health_count = 0;
2674250661Sdavidcs
2675250661Sdavidcs	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
2676250661Sdavidcs
2677250661Sdavidcs	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
2678250661Sdavidcs		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
2679250661Sdavidcs		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
2680250661Sdavidcs			__func__, val);
2681250661Sdavidcs		return -1;
2682250661Sdavidcs	}
2683250661Sdavidcs
2684250661Sdavidcs	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
2685250661Sdavidcs
2686250661Sdavidcs	if ((val != ha->hw.hbeat_value) &&
2687250661Sdavidcs		(!(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE)))) {
2688250661Sdavidcs		ha->hw.hbeat_value = val;
2689250661Sdavidcs		return 0;
2690250661Sdavidcs	}
2691250661Sdavidcs	device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n",
2692250661Sdavidcs		__func__, val);
2693250661Sdavidcs
2694250661Sdavidcs	return -1;
2695250661Sdavidcs}
2696250661Sdavidcs
/*
 * Name: qla_get_minidump_tmplt_size
 * Function: Queries firmware for the size of the minidump template.
 *	On success writes the size (as reported by firmware) to *size and
 *	returns 0; returns -1 on mailbox or firmware-status failure.
 */
static int
qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
{
	uint32_t			err;
	device_t			dev = ha->pci_dev;
	q80_config_md_templ_size_t	*md_size;
	q80_config_md_templ_size_rsp_t	*md_size_rsp;

	/* Build the Get Minidump Template Size command in the mailbox area. */
	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
	bzero(md_size, sizeof(q80_config_md_templ_size_t));

	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
	/* count_version: request length in 32-bit words plus command version. */
	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
	md_size->count_version |= Q8_MBX_CMD_VERSION;

	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {

		device_printf(dev, "%s: failed\n", __func__);

		return (-1);
	}

	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);

        if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return(-1);
        }

	*size = md_size_rsp->templ_size;

	return (0);
}
2734250661Sdavidcs
/*
 * Name: qla_get_minidump_template
 * Function: Asks firmware to DMA the minidump template into the
 *	pre-allocated ha->hw.dma_buf.minidump buffer.
 *	Returns 0 on success, -1 on mailbox or firmware-status failure.
 */
static int
qla_get_minidump_template(qla_host_t *ha)
{
	uint32_t			err;
	device_t			dev = ha->pci_dev;
	q80_config_md_templ_cmd_t	*md_templ;
	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;

	/* Build the Get Minidump Template command in the mailbox area. */
	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));

	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
	/* count_version: request length in 32-bit words plus command version. */
	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
	md_templ->count_version |= Q8_MBX_CMD_VERSION;

	/* Destination DMA buffer for the template (allocated by caller). */
	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
	md_templ->buff_size = ha->hw.dma_buf.minidump.size;

	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
		(sizeof(q80_config_md_templ_cmd_t) >> 2),
		 ha->hw.mbox,
		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {

		device_printf(dev, "%s: failed\n", __func__);

		return (-1);
	}

	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);

}
2775250661Sdavidcs
2776250661Sdavidcsstatic int
2777250661Sdavidcsqla_minidump_init(qla_host_t *ha)
2778250661Sdavidcs{
2779250661Sdavidcs	int		ret;
2780250661Sdavidcs	uint32_t	template_size = 0;
2781250661Sdavidcs	device_t	dev = ha->pci_dev;
2782250661Sdavidcs
2783250661Sdavidcs	/*
2784250661Sdavidcs	 * Get Minidump Template Size
2785250661Sdavidcs 	 */
2786250661Sdavidcs	ret = qla_get_minidump_tmplt_size(ha, &template_size);
2787250661Sdavidcs
2788250661Sdavidcs	if (ret || (template_size == 0)) {
2789250661Sdavidcs		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
2790250661Sdavidcs			template_size);
2791250661Sdavidcs		return (-1);
2792250661Sdavidcs	}
2793250661Sdavidcs
2794250661Sdavidcs	/*
2795250661Sdavidcs	 * Allocate Memory for Minidump Template
2796250661Sdavidcs	 */
2797250661Sdavidcs
2798250661Sdavidcs	ha->hw.dma_buf.minidump.alignment = 8;
2799250661Sdavidcs	ha->hw.dma_buf.minidump.size = template_size;
2800250661Sdavidcs
2801250661Sdavidcs	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
2802250661Sdavidcs
2803250661Sdavidcs		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
2804250661Sdavidcs
2805250661Sdavidcs		return (-1);
2806250661Sdavidcs	}
2807250661Sdavidcs	ha->hw.dma_buf.flags.minidump = 1;
2808250661Sdavidcs
2809250661Sdavidcs	/*
2810250661Sdavidcs	 * Retrieve Minidump Template
2811250661Sdavidcs	 */
2812250661Sdavidcs	ret = qla_get_minidump_template(ha);
2813250661Sdavidcs
2814250661Sdavidcs	if (ret) {
2815250661Sdavidcs		qla_minidump_free(ha);
2816250661Sdavidcs	} else {
2817250661Sdavidcs		ha->hw.mdump_init = 1;
2818250661Sdavidcs	}
2819250661Sdavidcs
2820250661Sdavidcs	return (ret);
2821250661Sdavidcs}
2822250661Sdavidcs
2823250661Sdavidcs
2824250661Sdavidcsstatic void
2825250661Sdavidcsqla_minidump_free(qla_host_t *ha)
2826250661Sdavidcs{
2827250661Sdavidcs	ha->hw.mdump_init = 0;
2828250661Sdavidcs	if (ha->hw.dma_buf.flags.minidump) {
2829250661Sdavidcs		ha->hw.dma_buf.flags.minidump = 0;
2830250661Sdavidcs		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
2831250661Sdavidcs	}
2832250661Sdavidcs	return;
2833250661Sdavidcs}
2834250661Sdavidcs
2835250661Sdavidcsvoid
2836250661Sdavidcsql_minidump(qla_host_t *ha)
2837250661Sdavidcs{
2838250661Sdavidcs	uint32_t delay = 6000;
2839250661Sdavidcs
2840250661Sdavidcs	if (!ha->hw.mdump_init)
2841250661Sdavidcs		return;
2842250661Sdavidcs
2843250661Sdavidcs	if (!ha->hw.mdump_active)
2844250661Sdavidcs		return;
2845250661Sdavidcs
2846250661Sdavidcs	if (ha->hw.mdump_active == 1) {
2847250661Sdavidcs		ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
2848250661Sdavidcs		ha->hw.mdump_start = 1;
2849250661Sdavidcs	}
2850250661Sdavidcs
2851250661Sdavidcs	while (delay-- && ha->hw.mdump_active) {
2852250661Sdavidcs		qla_mdelay(__func__, 100);
2853250661Sdavidcs	}
2854250661Sdavidcs	ha->hw.mdump_start = 0;
2855250661Sdavidcs	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
2856250661Sdavidcs
2857250661Sdavidcs	return;
2858250661Sdavidcs}
2859