1250661Sdavidcs/*
2284982Sdavidcs * Copyright (c) 2013-2016 Qlogic Corporation
3250661Sdavidcs * All rights reserved.
4250661Sdavidcs *
5250661Sdavidcs *  Redistribution and use in source and binary forms, with or without
6250661Sdavidcs *  modification, are permitted provided that the following conditions
7250661Sdavidcs *  are met:
8250661Sdavidcs *
9250661Sdavidcs *  1. Redistributions of source code must retain the above copyright
10250661Sdavidcs *     notice, this list of conditions and the following disclaimer.
11250661Sdavidcs *  2. Redistributions in binary form must reproduce the above copyright
12250661Sdavidcs *     notice, this list of conditions and the following disclaimer in the
13250661Sdavidcs *     documentation and/or other materials provided with the distribution.
14250661Sdavidcs *
15250661Sdavidcs *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16250661Sdavidcs *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17250661Sdavidcs *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18250661Sdavidcs *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19250661Sdavidcs *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20250661Sdavidcs *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21250661Sdavidcs *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22250661Sdavidcs *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23250661Sdavidcs *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24250661Sdavidcs *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25250661Sdavidcs *  POSSIBILITY OF SUCH DAMAGE.
26250661Sdavidcs */
27250661Sdavidcs
28250661Sdavidcs/*
29250661Sdavidcs * File: ql_os.c
30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31250661Sdavidcs */
32250661Sdavidcs
33250661Sdavidcs#include <sys/cdefs.h>
34250661Sdavidcs__FBSDID("$FreeBSD: releng/10.2/sys/dev/qlxgbe/ql_os.c 284982 2015-06-30 20:59:07Z davidcs $");
35250661Sdavidcs
36250661Sdavidcs
37250661Sdavidcs#include "ql_os.h"
38250661Sdavidcs#include "ql_hw.h"
39250661Sdavidcs#include "ql_def.h"
40250661Sdavidcs#include "ql_inline.h"
41250661Sdavidcs#include "ql_ver.h"
42250661Sdavidcs#include "ql_glbl.h"
43250661Sdavidcs#include "ql_dbg.h"
44250661Sdavidcs#include <sys/smp.h>
45250661Sdavidcs
/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

/* Combined id used by qla_pci_probe(): (device id << 16) | vendor id. */
#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */

/* DMA tag / buffer management. */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

/* ifnet setup, sysctl handlers and teardown. */
static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
/* Taskqueue handlers (tx completion, error recovery, async events). */
static void qla_tx_done(void *context, int pending);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qla_start(struct ifnet *ifp);

/* newbus device method table for the "ql" PCI driver. */
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

/* Rx replenish thresholds for standard and jumbo rings (see qla_watchdog). */
#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32


/* Device description / driver version strings, filled in qla_pci_probe(). */
static char dev_str[64];
static char ver_str[64];
125250661Sdavidcs
126250661Sdavidcs/*
127250661Sdavidcs * Name:	qla_pci_probe
128250661Sdavidcs * Function:	Validate the PCI device to be a QLA80XX device
129250661Sdavidcs */
130250661Sdavidcsstatic int
131250661Sdavidcsqla_pci_probe(device_t dev)
132250661Sdavidcs{
133250661Sdavidcs        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
134250661Sdavidcs        case PCI_QLOGIC_ISP8030:
135250661Sdavidcs		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
136250661Sdavidcs			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
137250661Sdavidcs			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
138250661Sdavidcs			QLA_VERSION_BUILD);
139284982Sdavidcs		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
140284982Sdavidcs			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
141284982Sdavidcs			QLA_VERSION_BUILD);
142250661Sdavidcs                device_set_desc(dev, dev_str);
143250661Sdavidcs                break;
144250661Sdavidcs        default:
145250661Sdavidcs                return (ENXIO);
146250661Sdavidcs        }
147250661Sdavidcs
148250661Sdavidcs        if (bootverbose)
149250661Sdavidcs                printf("%s: %s\n ", __func__, dev_str);
150250661Sdavidcs
151250661Sdavidcs        return (BUS_PROBE_DEFAULT);
152250661Sdavidcs}
153250661Sdavidcs
/*
 * Name:	qla_add_sysctls
 * Function:	Registers the per-device sysctl nodes (version strings,
 *		statistics/link-status triggers, debug level and Rx
 *		replenish threshold tunables, LRO/TSO/VLAN counters).
 */
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	/* Driver version string (filled in qla_pci_probe). */
	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	/* Writing 1 to "stats" dumps hardware statistics via ql_get_stats. */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_stats, "I", "Statistics");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

	/* Writing 1 to "link_status" queries the link via ql_hw_link_status. */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

	/* Debug verbosity; 0 disables the QL_DPRINT* paths. */
	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

	/* Read-only 64-bit event counters maintained by the hw layer. */
        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

        return;
}
220250661Sdavidcs
221250661Sdavidcsstatic void
222250661Sdavidcsqla_watchdog(void *arg)
223250661Sdavidcs{
224250661Sdavidcs	qla_host_t *ha = arg;
225250661Sdavidcs	qla_hw_t *hw;
226250661Sdavidcs	struct ifnet *ifp;
227250661Sdavidcs	uint32_t i;
228250661Sdavidcs	qla_hw_tx_cntxt_t *hw_tx_cntxt;
229250661Sdavidcs
230250661Sdavidcs	hw = &ha->hw;
231250661Sdavidcs	ifp = ha->ifp;
232250661Sdavidcs
233250661Sdavidcs        if (ha->flags.qla_watchdog_exit) {
234250661Sdavidcs		ha->qla_watchdog_exited = 1;
235250661Sdavidcs		return;
236250661Sdavidcs	}
237250661Sdavidcs	ha->qla_watchdog_exited = 0;
238250661Sdavidcs
239250661Sdavidcs	if (!ha->flags.qla_watchdog_pause) {
240250661Sdavidcs		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
241250661Sdavidcs			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
242250661Sdavidcs			ha->qla_watchdog_paused = 1;
243250661Sdavidcs			ha->flags.qla_watchdog_pause = 1;
244250661Sdavidcs			ha->qla_initiate_recovery = 0;
245250661Sdavidcs			ha->err_inject = 0;
246250661Sdavidcs			taskqueue_enqueue(ha->err_tq, &ha->err_task);
247250661Sdavidcs		} else {
248284982Sdavidcs
249284982Sdavidcs                        if (ha->async_event) {
250284982Sdavidcs                                ha->async_event = 0;
251284982Sdavidcs                                taskqueue_enqueue(ha->async_event_tq,
252284982Sdavidcs                                        &ha->async_event_task);
253284982Sdavidcs                        }
254284982Sdavidcs
255250661Sdavidcs			for (i = 0; i < ha->hw.num_tx_rings; i++) {
256250661Sdavidcs				hw_tx_cntxt = &hw->tx_cntxt[i];
257250661Sdavidcs				if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
258250661Sdavidcs					hw_tx_cntxt->txr_comp) {
259250661Sdavidcs					taskqueue_enqueue(ha->tx_tq,
260250661Sdavidcs						&ha->tx_task);
261250661Sdavidcs					break;
262250661Sdavidcs				}
263250661Sdavidcs			}
264250661Sdavidcs
265250661Sdavidcs			if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
266250661Sdavidcs				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
267250661Sdavidcs			}
268250661Sdavidcs			ha->qla_watchdog_paused = 0;
269250661Sdavidcs		}
270250661Sdavidcs
271250661Sdavidcs	} else {
272250661Sdavidcs		ha->qla_watchdog_paused = 1;
273250661Sdavidcs	}
274250661Sdavidcs
275250661Sdavidcs	ha->watchdog_ticks = ha->watchdog_ticks++ % 1000;
276250661Sdavidcs	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
277250661Sdavidcs		qla_watchdog, ha);
278250661Sdavidcs}
279250661Sdavidcs
280250661Sdavidcs/*
281250661Sdavidcs * Name:	qla_pci_attach
282250661Sdavidcs * Function:	attaches the device to the operating system
283250661Sdavidcs */
/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system.
 *		Sequence: map BARs -> init locks/sysctls -> init hardware and
 *		read fw version -> size rings -> allocate MSI-X vectors and
 *		set up mailbox + per-SDS-ring interrupts -> allocate DMA
 *		resources -> create ifnet, taskqueues, watchdog callout and
 *		the ioctl cdev. On any failure, jumps to qla_pci_attach_err
 *		which releases everything acquired so far via qla_release().
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

	/*
	 * NOTE(review): ha is still NULL here. If QL_DPRINT2 dereferences its
	 * first argument when debug support is compiled in, this would fault
	 * before the softc is fetched below — confirm against the macro.
	 */
	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	/* Map BAR 0: primary register window. */
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qla_add_sysctls(ha);
	ql_hw_add_sysctls(ha);

	/* Mark locks initialized so qla_release() knows to destroy them. */
	ha->flags.lock_init = 1;

	/* Map BAR 2: secondary register window (may legitimately be absent;
	 * failure here is not checked). */
	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	/* Need one vector per SDS ring plus one for the mailbox. */
	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha,
                ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq);


#ifdef QL_ENABLE_ISCSI_TLV
	/* With iSCSI TLV support, shrink/grow ring counts unless the
	 * partition provides 64+ vectors and 32 receive queues. */
        if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
                ha->hw.num_sds_rings = 15;
                ha->hw.num_tx_rings = 32;
        }
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	ha->hw.num_rds_rings = ha->hw.num_sds_rings;

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	/* Vector/rid 1: mailbox interrupt. */
	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}


	/* Rids 2..N+1: one interrupt per SDS (status/completion) ring. */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}
	}

	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	/* Watchdog starts active but paused; qla_init unpauses it. */
	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;


	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	/* Error-recovery and async-event taskqueues (see qla_watchdog). */
	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create_fast("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	/* qla_release() tolerates partially-initialized state. */
	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}
492250661Sdavidcs
493250661Sdavidcs/*
494250661Sdavidcs * Name:	qla_pci_detach
495250661Sdavidcs * Function:	Unhooks the device from the operating system
496250661Sdavidcs */
497250661Sdavidcsstatic int
498250661Sdavidcsqla_pci_detach(device_t dev)
499250661Sdavidcs{
500250661Sdavidcs	qla_host_t *ha = NULL;
501250661Sdavidcs	struct ifnet *ifp;
502250661Sdavidcs
503250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
504250661Sdavidcs
505250661Sdavidcs        if ((ha = device_get_softc(dev)) == NULL) {
506250661Sdavidcs                device_printf(dev, "cannot get softc\n");
507250661Sdavidcs                return (ENOMEM);
508250661Sdavidcs        }
509250661Sdavidcs
510250661Sdavidcs	ifp = ha->ifp;
511250661Sdavidcs
512250661Sdavidcs	(void)QLA_LOCK(ha, __func__, 0);
513250661Sdavidcs	qla_stop(ha);
514250661Sdavidcs	QLA_UNLOCK(ha, __func__);
515250661Sdavidcs
516250661Sdavidcs	qla_release(ha);
517250661Sdavidcs
518250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
519250661Sdavidcs
520250661Sdavidcs        return (0);
521250661Sdavidcs}
522250661Sdavidcs
523250661Sdavidcs/*
524250661Sdavidcs * SYSCTL Related Callbacks
525250661Sdavidcs */
526250661Sdavidcsstatic int
527250661Sdavidcsqla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
528250661Sdavidcs{
529250661Sdavidcs	int err, ret = 0;
530250661Sdavidcs	qla_host_t *ha;
531250661Sdavidcs
532250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
533250661Sdavidcs
534250661Sdavidcs	if (err || !req->newptr)
535250661Sdavidcs		return (err);
536250661Sdavidcs
537250661Sdavidcs	if (ret == 1) {
538250661Sdavidcs		ha = (qla_host_t *)arg1;
539250661Sdavidcs		ql_get_stats(ha);
540250661Sdavidcs	}
541250661Sdavidcs	return (err);
542250661Sdavidcs}
543250661Sdavidcsstatic int
544250661Sdavidcsqla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
545250661Sdavidcs{
546250661Sdavidcs	int err, ret = 0;
547250661Sdavidcs	qla_host_t *ha;
548250661Sdavidcs
549250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
550250661Sdavidcs
551250661Sdavidcs	if (err || !req->newptr)
552250661Sdavidcs		return (err);
553250661Sdavidcs
554250661Sdavidcs	if (ret == 1) {
555250661Sdavidcs		ha = (qla_host_t *)arg1;
556250661Sdavidcs		ql_hw_link_status(ha);
557250661Sdavidcs	}
558250661Sdavidcs	return (err);
559250661Sdavidcs}
560250661Sdavidcs
561250661Sdavidcs/*
562250661Sdavidcs * Name:	qla_release
563250661Sdavidcs * Function:	Releases the resources allocated for the device
564250661Sdavidcs */
/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device.
 *		Safe to call on a partially-initialized softc (used by both
 *		the attach error path and detach): each step checks that its
 *		resource was actually created. Teardown order matters:
 *		taskqueues are drained before the watchdog is stopped and
 *		before interrupt/DMA resources are torn down.
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	/* Drain and destroy taskqueues so no task runs during teardown. */
        if (ha->async_event_tq) {
                taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	ql_del_cdev(ha);

	/* Ask the watchdog callout to exit and busy-wait (1ms steps) until
	 * it acknowledges via qla_watchdog_exited. */
	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	/* Tear down the mailbox interrupt, then the per-ring interrupts. */
	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}
	}

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

	/* Finally unmap the register BARs. */
        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}
643250661Sdavidcs
644250661Sdavidcs/*
645250661Sdavidcs * DMA Related Functions
646250661Sdavidcs */
647250661Sdavidcs
648250661Sdavidcsstatic void
649250661Sdavidcsqla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
650250661Sdavidcs{
651250661Sdavidcs        *((bus_addr_t *)arg) = 0;
652250661Sdavidcs
653250661Sdavidcs        if (error) {
654250661Sdavidcs                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
655250661Sdavidcs                return;
656250661Sdavidcs	}
657250661Sdavidcs
658250661Sdavidcs        *((bus_addr_t *)arg) = segs[0].ds_addr;
659250661Sdavidcs
660250661Sdavidcs	return;
661250661Sdavidcs}
662250661Sdavidcs
/*
 * ql_alloc_dmabuf: allocates a single-segment DMA buffer described by
 * dma_buf (caller fills in .size and .alignment beforehand).
 * Creates a child tag under ha->parent_tag, allocates coherent zeroed
 * memory, and loads the map to obtain the bus address (stored in
 * dma_buf->dma_addr). Returns 0 on success, non-zero on failure; on
 * failure all partially-acquired resources are released.
 */
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        /* Child tag: one segment, caller-specified alignment/size,
         * 4GB boundary so a segment never crosses a 4GB line. */
        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        /* Synchronous load (BUS_DMA_NOWAIT): qla_dmamap_callback runs
         * before bus_dmamap_load returns and fills b_addr (0 on failure). */
        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return ret;
}
727250661Sdavidcs
728250661Sdavidcsvoid
729250661Sdavidcsql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
730250661Sdavidcs{
731250661Sdavidcs        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
732250661Sdavidcs        bus_dma_tag_destroy(dma_buf->dma_tag);
733250661Sdavidcs}
734250661Sdavidcs
735250661Sdavidcsstatic int
736250661Sdavidcsqla_alloc_parent_dma_tag(qla_host_t *ha)
737250661Sdavidcs{
738250661Sdavidcs	int		ret;
739250661Sdavidcs	device_t	dev;
740250661Sdavidcs
741250661Sdavidcs	dev = ha->pci_dev;
742250661Sdavidcs
743250661Sdavidcs        /*
744250661Sdavidcs         * Allocate parent DMA Tag
745250661Sdavidcs         */
746250661Sdavidcs        ret = bus_dma_tag_create(
747250661Sdavidcs                        bus_get_dma_tag(dev),   /* parent */
748250661Sdavidcs                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
749250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* lowaddr */
750250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* highaddr */
751250661Sdavidcs                        NULL, NULL,             /* filter, filterarg */
752250661Sdavidcs                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
753250661Sdavidcs                        0,                      /* nsegments */
754250661Sdavidcs                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
755250661Sdavidcs                        0,                      /* flags */
756250661Sdavidcs                        NULL, NULL,             /* lockfunc, lockarg */
757250661Sdavidcs                        &ha->parent_tag);
758250661Sdavidcs
759250661Sdavidcs        if (ret) {
760250661Sdavidcs                device_printf(dev, "%s: could not create parent dma tag\n",
761250661Sdavidcs                        __func__);
762250661Sdavidcs		return (-1);
763250661Sdavidcs        }
764250661Sdavidcs
765250661Sdavidcs        ha->flags.parent_tag = 1;
766250661Sdavidcs
767250661Sdavidcs	return (0);
768250661Sdavidcs}
769250661Sdavidcs
770250661Sdavidcsstatic void
771250661Sdavidcsqla_free_parent_dma_tag(qla_host_t *ha)
772250661Sdavidcs{
773250661Sdavidcs        if (ha->flags.parent_tag) {
774250661Sdavidcs                bus_dma_tag_destroy(ha->parent_tag);
775250661Sdavidcs                ha->flags.parent_tag = 0;
776250661Sdavidcs        }
777250661Sdavidcs}
778250661Sdavidcs
779250661Sdavidcs/*
780250661Sdavidcs * Name: qla_init_ifnet
781250661Sdavidcs * Function: Creates the Network Device Interface and Registers it with the O.S
782250661Sdavidcs */
783250661Sdavidcs
784250661Sdavidcsstatic void
785250661Sdavidcsqla_init_ifnet(device_t dev, qla_host_t *ha)
786250661Sdavidcs{
787250661Sdavidcs	struct ifnet *ifp;
788250661Sdavidcs
789250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
790250661Sdavidcs
791250661Sdavidcs	ifp = ha->ifp = if_alloc(IFT_ETHER);
792250661Sdavidcs
793250661Sdavidcs	if (ifp == NULL)
794250661Sdavidcs		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
795250661Sdavidcs
796250661Sdavidcs	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
797250661Sdavidcs
798250661Sdavidcs#if __FreeBSD_version >= 1000000
799250661Sdavidcs	if_initbaudrate(ifp, IF_Gbps(10));
800250661Sdavidcs	ifp->if_capabilities = IFCAP_LINKSTATE;
801250661Sdavidcs#else
802250661Sdavidcs	ifp->if_mtu = ETHERMTU;
803250661Sdavidcs	ifp->if_baudrate = (1 * 1000 * 1000 *1000);
804250661Sdavidcs
805250661Sdavidcs#endif /* #if __FreeBSD_version >= 1000000 */
806250661Sdavidcs
807250661Sdavidcs	ifp->if_init = qla_init;
808250661Sdavidcs	ifp->if_softc = ha;
809250661Sdavidcs	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
810250661Sdavidcs	ifp->if_ioctl = qla_ioctl;
811250661Sdavidcs	ifp->if_start = qla_start;
812250661Sdavidcs
813250661Sdavidcs	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
814250661Sdavidcs	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
815250661Sdavidcs	IFQ_SET_READY(&ifp->if_snd);
816250661Sdavidcs
817250661Sdavidcs	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
818250661Sdavidcs
819250661Sdavidcs	ether_ifattach(ifp, qla_get_mac_addr(ha));
820250661Sdavidcs
821250661Sdavidcs	ifp->if_capabilities = IFCAP_HWCSUM |
822250661Sdavidcs				IFCAP_TSO4 |
823250661Sdavidcs				IFCAP_JUMBO_MTU;
824250661Sdavidcs
825250661Sdavidcs	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
826250661Sdavidcs	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
827250661Sdavidcs
828250661Sdavidcs	ifp->if_capenable = ifp->if_capabilities;
829250661Sdavidcs
830250661Sdavidcs	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
831250661Sdavidcs
832250661Sdavidcs	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
833250661Sdavidcs
834250661Sdavidcs	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
835250661Sdavidcs		NULL);
836250661Sdavidcs	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
837250661Sdavidcs
838250661Sdavidcs	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
839250661Sdavidcs
840250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
841250661Sdavidcs
842250661Sdavidcs	return;
843250661Sdavidcs}
844250661Sdavidcs
845250661Sdavidcsstatic void
846250661Sdavidcsqla_init_locked(qla_host_t *ha)
847250661Sdavidcs{
848250661Sdavidcs	struct ifnet *ifp = ha->ifp;
849250661Sdavidcs
850250661Sdavidcs	qla_stop(ha);
851250661Sdavidcs
852250661Sdavidcs	if (qla_alloc_xmt_bufs(ha) != 0)
853250661Sdavidcs		return;
854250661Sdavidcs
855284982Sdavidcs	qla_confirm_9kb_enable(ha);
856284982Sdavidcs
857250661Sdavidcs	if (qla_alloc_rcv_bufs(ha) != 0)
858250661Sdavidcs		return;
859250661Sdavidcs
860250661Sdavidcs	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
861250661Sdavidcs
862250661Sdavidcs	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
863250661Sdavidcs
864250661Sdavidcs	ha->flags.stop_rcv = 0;
865250661Sdavidcs 	if (ql_init_hw_if(ha) == 0) {
866250661Sdavidcs		ifp = ha->ifp;
867250661Sdavidcs		ifp->if_drv_flags |= IFF_DRV_RUNNING;
868250661Sdavidcs		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
869250661Sdavidcs		ha->flags.qla_watchdog_pause = 0;
870250661Sdavidcs		ha->hw_vlan_tx_frames = 0;
871250661Sdavidcs		ha->tx_tso_frames = 0;
872250661Sdavidcs	}
873250661Sdavidcs
874250661Sdavidcs	return;
875250661Sdavidcs}
876250661Sdavidcs
877250661Sdavidcsstatic void
878250661Sdavidcsqla_init(void *arg)
879250661Sdavidcs{
880250661Sdavidcs	qla_host_t *ha;
881250661Sdavidcs
882250661Sdavidcs	ha = (qla_host_t *)arg;
883250661Sdavidcs
884250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
885250661Sdavidcs
886250661Sdavidcs	(void)QLA_LOCK(ha, __func__, 0);
887250661Sdavidcs	qla_init_locked(ha);
888250661Sdavidcs	QLA_UNLOCK(ha, __func__);
889250661Sdavidcs
890250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
891250661Sdavidcs}
892250661Sdavidcs
893250661Sdavidcsstatic int
894250661Sdavidcsqla_set_multi(qla_host_t *ha, uint32_t add_multi)
895250661Sdavidcs{
896250661Sdavidcs	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
897250661Sdavidcs	struct ifmultiaddr *ifma;
898250661Sdavidcs	int mcnt = 0;
899250661Sdavidcs	struct ifnet *ifp = ha->ifp;
900250661Sdavidcs	int ret = 0;
901250661Sdavidcs
902250661Sdavidcs	if_maddr_rlock(ifp);
903250661Sdavidcs
904250661Sdavidcs	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
905250661Sdavidcs
906250661Sdavidcs		if (ifma->ifma_addr->sa_family != AF_LINK)
907250661Sdavidcs			continue;
908250661Sdavidcs
909250661Sdavidcs		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
910250661Sdavidcs			break;
911250661Sdavidcs
912250661Sdavidcs		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
913250661Sdavidcs			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
914250661Sdavidcs
915250661Sdavidcs		mcnt++;
916250661Sdavidcs	}
917250661Sdavidcs
918250661Sdavidcs	if_maddr_runlock(ifp);
919250661Sdavidcs
920250661Sdavidcs	if (QLA_LOCK(ha, __func__, 1) == 0) {
921250661Sdavidcs		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
922250661Sdavidcs		QLA_UNLOCK(ha, __func__);
923250661Sdavidcs	}
924250661Sdavidcs
925250661Sdavidcs	return (ret);
926250661Sdavidcs}
927250661Sdavidcs
/*
 * Network-stack ioctl handler for the interface.
 *
 * Hardware-affecting commands are serialized under the QLA lock;
 * anything not handled here is passed through to ether_ioctl().
 * Returns 0 on success or an errno value.
 */
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		/*
		 * For IPv4 addresses, bring the interface up (initializing
		 * it first if it is not yet running) and kick off ARP;
		 * everything else goes to the generic ethernet handler.
		 */
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			/* only reprogram the hardware if it is running */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}

			/* pick the rx replenish threshold for the MTU class */
			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;


			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only react to changes in
				 * PROMISC/ALLMULTI relative to the cached
				 * ha->if_flags.
				 */
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			} else {
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			/* remember the flags for the next delta check */
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		/* mask holds the capability bits the caller wants toggled */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
1082250661Sdavidcs
1083250661Sdavidcsstatic int
1084250661Sdavidcsqla_media_change(struct ifnet *ifp)
1085250661Sdavidcs{
1086250661Sdavidcs	qla_host_t *ha;
1087250661Sdavidcs	struct ifmedia *ifm;
1088250661Sdavidcs	int ret = 0;
1089250661Sdavidcs
1090250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1091250661Sdavidcs
1092250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1093250661Sdavidcs
1094250661Sdavidcs	ifm = &ha->media;
1095250661Sdavidcs
1096250661Sdavidcs	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1097250661Sdavidcs		ret = EINVAL;
1098250661Sdavidcs
1099250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1100250661Sdavidcs
1101250661Sdavidcs	return (ret);
1102250661Sdavidcs}
1103250661Sdavidcs
1104250661Sdavidcsstatic void
1105250661Sdavidcsqla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1106250661Sdavidcs{
1107250661Sdavidcs	qla_host_t *ha;
1108250661Sdavidcs
1109250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1110250661Sdavidcs
1111250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1112250661Sdavidcs
1113250661Sdavidcs	ifmr->ifm_status = IFM_AVALID;
1114250661Sdavidcs	ifmr->ifm_active = IFM_ETHER;
1115250661Sdavidcs
1116250661Sdavidcs	ql_update_link_state(ha);
1117250661Sdavidcs	if (ha->hw.link_up) {
1118250661Sdavidcs		ifmr->ifm_status |= IFM_ACTIVE;
1119250661Sdavidcs		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1120250661Sdavidcs	}
1121250661Sdavidcs
1122250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
1123250661Sdavidcs		(ha->hw.link_up ? "link_up" : "link_down")));
1124250661Sdavidcs
1125250661Sdavidcs	return;
1126250661Sdavidcs}
1127250661Sdavidcs
/*
 * if_start entry point: drain the interface send queue onto the
 * hardware transmit path via qla_send().
 *
 * Uses mtx_trylock() on the tx lock so a concurrent transmitter makes
 * this call a no-op instead of blocking; the queued frames are left for
 * the current lock holder.  If qla_send() fails while leaving the mbuf
 * intact, the frame is requeued and IFF_DRV_OACTIVE is set.
 */
static void
qla_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8(ha, (ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	/* bail unless running and not flow-controlled (OACTIVE clear) */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8(ha,
			(ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	/* refresh link state when it is down or the watchdog isn't ticking */
	if (!ha->hw.link_up || !ha->watchdog_ticks)
		ql_update_link_state(ha);

	if (!ha->hw.link_up) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			/* qla_send() freed/NULLed the mbuf: nothing to requeue */
			if (m_head == NULL)
				break;
			QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return;
}
1183250661Sdavidcs
/*
 * Map an mbuf chain for DMA and hand it to the hardware transmit ring.
 *
 * The target tx ring is selected from the mbuf flowid when one is set
 * (with a separate ring subset for iSCSI PDUs when QL_ENABLE_ISCSI_TLV
 * is compiled in).  If the chain has too many segments (EFBIG) it is
 * defragmented once and the DMA load retried.
 *
 * On return, *m_headp is updated when the chain was replaced by
 * m_defrag(), and set to NULL whenever the mbuf was freed here.
 * Returns 0 on success, otherwise an errno/driver status; the caller
 * (qla_start()) uses "non-zero with *m_headp intact" as its requeue
 * signal.
 */
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = ha->txr_idx;
	uint32_t		iscsi_pdu = 0;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */

	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
		/*
		 * iSCSI PDUs are steered to the lower half of the tx
		 * rings; everything else may use any ring.
		 */
		if (qla_iscsi_pdu(ha, m_head) == 0) {
			iscsi_pdu = 1;
			txr_idx = m_head->m_pkthdr.flowid &
					((ha->hw.num_tx_rings >> 1) - 1);
		} else {
			txr_idx = m_head->m_pkthdr.flowid &
					(ha->hw.num_tx_rings - 1);
		}
#else
		txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	}


	/* use the DMA map of the next free descriptor on the chosen ring */
	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		/* too many segments: collapse the chain and retry once */
		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			/* ENOMEM is retryable: keep the mbuf for requeue */
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		/* ENOMEM is retryable: keep the mbuf for requeue */
		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		/* success: remember the mbuf for completion-time release */
		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		/* EINVAL from ql_hw_send() means the frame is unsendable */
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}
1292250661Sdavidcs
/*
 * Quiesce and tear down the running interface: mark it down, pause the
 * watchdog, stop hardware receive, remove the hardware interface, and
 * release all transmit/receive buffers.
 */
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	/*
	 * Ask the watchdog to pause and wait until it acknowledges, so
	 * it cannot touch the hardware while we tear things down.
	 */
	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	/* stop receive before freeing the buffers it would fill */
	ha->flags.stop_rcv = 1;
	ql_hw_stop_rcv(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}
1318250661Sdavidcs
1319250661Sdavidcs/*
1320250661Sdavidcs * Buffer Management Functions for Transmit and Receive Rings
1321250661Sdavidcs */
1322250661Sdavidcsstatic int
1323250661Sdavidcsqla_alloc_xmt_bufs(qla_host_t *ha)
1324250661Sdavidcs{
1325250661Sdavidcs	int ret = 0;
1326250661Sdavidcs	uint32_t i, j;
1327250661Sdavidcs	qla_tx_buf_t *txb;
1328250661Sdavidcs
1329250661Sdavidcs	if (bus_dma_tag_create(NULL,    /* parent */
1330250661Sdavidcs		1, 0,    /* alignment, bounds */
1331250661Sdavidcs		BUS_SPACE_MAXADDR,       /* lowaddr */
1332250661Sdavidcs		BUS_SPACE_MAXADDR,       /* highaddr */
1333250661Sdavidcs		NULL, NULL,      /* filter, filterarg */
1334250661Sdavidcs		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1335250661Sdavidcs		QLA_MAX_SEGMENTS,        /* nsegments */
1336250661Sdavidcs		PAGE_SIZE,        /* maxsegsize */
1337250661Sdavidcs		BUS_DMA_ALLOCNOW,        /* flags */
1338250661Sdavidcs		NULL,    /* lockfunc */
1339250661Sdavidcs		NULL,    /* lockfuncarg */
1340250661Sdavidcs		&ha->tx_tag)) {
1341250661Sdavidcs		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1342250661Sdavidcs			__func__);
1343250661Sdavidcs		return (ENOMEM);
1344250661Sdavidcs	}
1345250661Sdavidcs
1346250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1347250661Sdavidcs		bzero((void *)ha->tx_ring[i].tx_buf,
1348250661Sdavidcs			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1349250661Sdavidcs	}
1350250661Sdavidcs
1351250661Sdavidcs	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1352250661Sdavidcs		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1353250661Sdavidcs
1354250661Sdavidcs			txb = &ha->tx_ring[j].tx_buf[i];
1355250661Sdavidcs
1356250661Sdavidcs			if ((ret = bus_dmamap_create(ha->tx_tag,
1357250661Sdavidcs					BUS_DMA_NOWAIT, &txb->map))) {
1358250661Sdavidcs
1359250661Sdavidcs				ha->err_tx_dmamap_create++;
1360250661Sdavidcs				device_printf(ha->pci_dev,
1361250661Sdavidcs					"%s: bus_dmamap_create failed[%d]\n",
1362250661Sdavidcs					__func__, ret);
1363250661Sdavidcs
1364250661Sdavidcs				qla_free_xmt_bufs(ha);
1365250661Sdavidcs
1366250661Sdavidcs				return (ret);
1367250661Sdavidcs			}
1368250661Sdavidcs		}
1369250661Sdavidcs	}
1370250661Sdavidcs
1371250661Sdavidcs	return 0;
1372250661Sdavidcs}
1373250661Sdavidcs
1374250661Sdavidcs/*
1375250661Sdavidcs * Release mbuf after it sent on the wire
1376250661Sdavidcs */
1377250661Sdavidcsstatic void
1378250661Sdavidcsqla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1379250661Sdavidcs{
1380250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1381250661Sdavidcs
1382250661Sdavidcs	if (txb->m_head && txb->map) {
1383250661Sdavidcs
1384250661Sdavidcs		bus_dmamap_unload(ha->tx_tag, txb->map);
1385250661Sdavidcs
1386250661Sdavidcs		m_freem(txb->m_head);
1387250661Sdavidcs		txb->m_head = NULL;
1388250661Sdavidcs	}
1389250661Sdavidcs
1390250661Sdavidcs	if (txb->map)
1391250661Sdavidcs		bus_dmamap_destroy(ha->tx_tag, txb->map);
1392250661Sdavidcs
1393250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1394250661Sdavidcs}
1395250661Sdavidcs
1396250661Sdavidcsstatic void
1397250661Sdavidcsqla_free_xmt_bufs(qla_host_t *ha)
1398250661Sdavidcs{
1399250661Sdavidcs	int		i, j;
1400250661Sdavidcs
1401250661Sdavidcs	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1402250661Sdavidcs		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1403250661Sdavidcs			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1404250661Sdavidcs	}
1405250661Sdavidcs
1406250661Sdavidcs	if (ha->tx_tag != NULL) {
1407250661Sdavidcs		bus_dma_tag_destroy(ha->tx_tag);
1408250661Sdavidcs		ha->tx_tag = NULL;
1409250661Sdavidcs	}
1410250661Sdavidcs
1411250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1412250661Sdavidcs		bzero((void *)ha->tx_ring[i].tx_buf,
1413250661Sdavidcs			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1414250661Sdavidcs	}
1415250661Sdavidcs	return;
1416250661Sdavidcs}
1417250661Sdavidcs
1418250661Sdavidcs
/*
 * Populate the standard receive rings: create a DMA map for every
 * receive descriptor on every RDS ring, then attach an mbuf to each
 * descriptor and program its physical address into the hardware ring.
 *
 * On map-creation failure, every map created so far (complete earlier
 * rings plus the partial current ring) is destroyed before returning.
 * On mbuf-allocation failure only the current descriptor's map is
 * destroyed here; the caller is expected to release the rest.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	/* pass 1: create a DMA map per descriptor on every ring */
	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				/* destroy maps of all fully-built rings */
				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				/* and of the partial current ring */
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);


	/* pass 2: attach an mbuf to each descriptor and tell the hardware */
	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return 0;

qla_alloc_rcv_std_err:
	return (-1);
}
1493250661Sdavidcs
1494250661Sdavidcsstatic void
1495250661Sdavidcsqla_free_rcv_std(qla_host_t *ha)
1496250661Sdavidcs{
1497250661Sdavidcs	int		i, r;
1498250661Sdavidcs	qla_rx_buf_t	*rxb;
1499250661Sdavidcs
1500250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1501250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1502250661Sdavidcs			rxb = &ha->rx_ring[r].rx_buf[i];
1503250661Sdavidcs			if (rxb->m_head != NULL) {
1504250661Sdavidcs				bus_dmamap_unload(ha->rx_tag, rxb->map);
1505250661Sdavidcs				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1506250661Sdavidcs				m_freem(rxb->m_head);
1507250661Sdavidcs				rxb->m_head = NULL;
1508250661Sdavidcs			}
1509250661Sdavidcs		}
1510250661Sdavidcs	}
1511250661Sdavidcs	return;
1512250661Sdavidcs}
1513250661Sdavidcs
1514250661Sdavidcsstatic int
1515250661Sdavidcsqla_alloc_rcv_bufs(qla_host_t *ha)
1516250661Sdavidcs{
1517250661Sdavidcs	int		i, ret = 0;
1518250661Sdavidcs
1519250661Sdavidcs	if (bus_dma_tag_create(NULL,    /* parent */
1520250661Sdavidcs			1, 0,    /* alignment, bounds */
1521250661Sdavidcs			BUS_SPACE_MAXADDR,       /* lowaddr */
1522250661Sdavidcs			BUS_SPACE_MAXADDR,       /* highaddr */
1523250661Sdavidcs			NULL, NULL,      /* filter, filterarg */
1524250661Sdavidcs			MJUM9BYTES,     /* maxsize */
1525250661Sdavidcs			1,        /* nsegments */
1526250661Sdavidcs			MJUM9BYTES,        /* maxsegsize */
1527250661Sdavidcs			BUS_DMA_ALLOCNOW,        /* flags */
1528250661Sdavidcs			NULL,    /* lockfunc */
1529250661Sdavidcs			NULL,    /* lockfuncarg */
1530250661Sdavidcs			&ha->rx_tag)) {
1531250661Sdavidcs
1532250661Sdavidcs		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1533250661Sdavidcs			__func__);
1534250661Sdavidcs
1535250661Sdavidcs		return (ENOMEM);
1536250661Sdavidcs	}
1537250661Sdavidcs
1538250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1539250661Sdavidcs
1540250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1541250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1542250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1543250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1544250661Sdavidcs	}
1545250661Sdavidcs
1546250661Sdavidcs	ret = qla_alloc_rcv_std(ha);
1547250661Sdavidcs
1548250661Sdavidcs	return (ret);
1549250661Sdavidcs}
1550250661Sdavidcs
1551250661Sdavidcsstatic void
1552250661Sdavidcsqla_free_rcv_bufs(qla_host_t *ha)
1553250661Sdavidcs{
1554250661Sdavidcs	int		i;
1555250661Sdavidcs
1556250661Sdavidcs	qla_free_rcv_std(ha);
1557250661Sdavidcs
1558250661Sdavidcs	if (ha->rx_tag != NULL) {
1559250661Sdavidcs		bus_dma_tag_destroy(ha->rx_tag);
1560250661Sdavidcs		ha->rx_tag = NULL;
1561250661Sdavidcs	}
1562250661Sdavidcs
1563250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1564250661Sdavidcs
1565250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1566250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1567250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1568250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1569250661Sdavidcs	}
1570250661Sdavidcs
1571250661Sdavidcs	return;
1572250661Sdavidcs}
1573250661Sdavidcs
1574250661Sdavidcsint
1575250661Sdavidcsql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1576250661Sdavidcs{
1577250661Sdavidcs	register struct mbuf *mp = nmp;
1578250661Sdavidcs	struct ifnet   		*ifp;
1579250661Sdavidcs	int            		ret = 0;
1580250661Sdavidcs	uint32_t		offset;
1581250661Sdavidcs	bus_dma_segment_t	segs[1];
1582284982Sdavidcs	int			nsegs, mbuf_size;
1583250661Sdavidcs
1584250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1585250661Sdavidcs
1586250661Sdavidcs	ifp = ha->ifp;
1587250661Sdavidcs
1588284982Sdavidcs        if (ha->hw.enable_9kb)
1589284982Sdavidcs                mbuf_size = MJUM9BYTES;
1590284982Sdavidcs        else
1591284982Sdavidcs                mbuf_size = MCLBYTES;
1592284982Sdavidcs
1593250661Sdavidcs	if (mp == NULL) {
1594250661Sdavidcs
1595284982Sdavidcs                if (ha->hw.enable_9kb)
1596284982Sdavidcs                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
1597284982Sdavidcs                else
1598284982Sdavidcs                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1599250661Sdavidcs
1600250661Sdavidcs		if (mp == NULL) {
1601250661Sdavidcs			ha->err_m_getcl++;
1602250661Sdavidcs			ret = ENOBUFS;
1603250661Sdavidcs			device_printf(ha->pci_dev,
1604250661Sdavidcs					"%s: m_getcl failed\n", __func__);
1605250661Sdavidcs			goto exit_ql_get_mbuf;
1606250661Sdavidcs		}
1607284982Sdavidcs		mp->m_len = mp->m_pkthdr.len = mbuf_size;
1608250661Sdavidcs	} else {
1609284982Sdavidcs		mp->m_len = mp->m_pkthdr.len = mbuf_size;
1610250661Sdavidcs		mp->m_data = mp->m_ext.ext_buf;
1611250661Sdavidcs		mp->m_next = NULL;
1612250661Sdavidcs	}
1613250661Sdavidcs
1614250661Sdavidcs	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1615250661Sdavidcs	if (offset) {
1616250661Sdavidcs		offset = 8 - offset;
1617250661Sdavidcs		m_adj(mp, offset);
1618250661Sdavidcs	}
1619250661Sdavidcs
1620250661Sdavidcs	/*
1621250661Sdavidcs	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1622250661Sdavidcs	 * machinery to arrange the memory mapping.
1623250661Sdavidcs	 */
1624250661Sdavidcs	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
1625250661Sdavidcs			mp, segs, &nsegs, BUS_DMA_NOWAIT);
1626250661Sdavidcs	rxb->paddr = segs[0].ds_addr;
1627250661Sdavidcs
1628250661Sdavidcs	if (ret || !rxb->paddr || (nsegs != 1)) {
1629250661Sdavidcs		m_free(mp);
1630250661Sdavidcs		rxb->m_head = NULL;
1631250661Sdavidcs		device_printf(ha->pci_dev,
1632250661Sdavidcs			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
1633250661Sdavidcs			__func__, ret, (long long unsigned int)rxb->paddr,
1634250661Sdavidcs			nsegs);
1635250661Sdavidcs                ret = -1;
1636250661Sdavidcs		goto exit_ql_get_mbuf;
1637250661Sdavidcs	}
1638250661Sdavidcs	rxb->m_head = mp;
1639250661Sdavidcs	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1640250661Sdavidcs
1641250661Sdavidcsexit_ql_get_mbuf:
1642250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1643250661Sdavidcs	return (ret);
1644250661Sdavidcs}
1645250661Sdavidcs
1646250661Sdavidcsstatic void
1647250661Sdavidcsqla_tx_done(void *context, int pending)
1648250661Sdavidcs{
1649250661Sdavidcs	qla_host_t *ha = context;
1650250661Sdavidcs	struct ifnet   *ifp;
1651250661Sdavidcs
1652250661Sdavidcs	ifp = ha->ifp;
1653250661Sdavidcs
1654250661Sdavidcs	if (!ifp)
1655250661Sdavidcs		return;
1656250661Sdavidcs
1657250661Sdavidcs	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1658250661Sdavidcs		QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1659250661Sdavidcs		return;
1660250661Sdavidcs	}
1661250661Sdavidcs	ql_hw_tx_done(ha);
1662250661Sdavidcs
1663250661Sdavidcs	qla_start(ha->ifp);
1664250661Sdavidcs}
1665250661Sdavidcs
1666250661Sdavidcsstatic void
1667250661Sdavidcsqla_get_peer(qla_host_t *ha)
1668250661Sdavidcs{
1669250661Sdavidcs	device_t *peers;
1670250661Sdavidcs	int count, i, slot;
1671250661Sdavidcs	int my_slot = pci_get_slot(ha->pci_dev);
1672250661Sdavidcs
1673250661Sdavidcs	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
1674250661Sdavidcs		return;
1675250661Sdavidcs
1676250661Sdavidcs	for (i = 0; i < count; i++) {
1677250661Sdavidcs		slot = pci_get_slot(peers[i]);
1678250661Sdavidcs
1679250661Sdavidcs		if ((slot >= 0) && (slot == my_slot) &&
1680250661Sdavidcs			(pci_get_device(peers[i]) ==
1681250661Sdavidcs				pci_get_device(ha->pci_dev))) {
1682250661Sdavidcs			if (ha->pci_dev != peers[i])
1683250661Sdavidcs				ha->peer_dev = peers[i];
1684250661Sdavidcs		}
1685250661Sdavidcs	}
1686250661Sdavidcs}
1687250661Sdavidcs
1688250661Sdavidcsstatic void
1689250661Sdavidcsqla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
1690250661Sdavidcs{
1691250661Sdavidcs	qla_host_t *ha_peer;
1692250661Sdavidcs
1693250661Sdavidcs	if (ha->peer_dev) {
1694250661Sdavidcs        	if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
1695250661Sdavidcs
1696250661Sdavidcs			ha_peer->msg_from_peer = msg_to_peer;
1697250661Sdavidcs		}
1698250661Sdavidcs	}
1699250661Sdavidcs}
1700250661Sdavidcs
/*
 * qla_error_recovery
 *	Taskqueue handler that recovers the adapter after a fatal error:
 *	stop receive, coordinate a chip reset with the peer PCI function,
 *	re-initialize the hardware and re-allocate the xmt/rcv buffers,
 *	then bring the interface back up.
 *	NOTE(review): statement order and the lock/unlock windows here are
 *	load-bearing — the peer handshake below runs unlocked on purpose.
 */
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;	/* up to ~10s waiting for peer ACK */
	struct ifnet *ifp = ha->ifp;

        (void)QLA_LOCK(ha, __func__, 0);

	/* abort any in-progress minidump wait, then let it drain */
	ha->hw.imd_compl = 1;
	qla_mdelay(__func__, 300);

        ha->flags.stop_rcv = 1;

        ql_hw_stop_rcv(ha);

	/* mark the interface down before releasing the lock */
        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

        QLA_UNLOCK(ha, __func__);

	/* even PCI function leads the reset; odd function follows */
	if ((ha->pci_func & 0x1) == 0) {

		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			/* poll (100ms steps) until the peer acknowledges */
			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

		/* capture firmware state for post-mortem before reset */
		ql_minidump(ha);

		(void) ql_init_hw(ha);
        	qla_free_xmt_bufs(ha);
	        qla_free_rcv_bufs(ha);

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		/* wait for the leader to finish its reset sequence */
		while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);
        	qla_free_xmt_bufs(ha);
	        qla_free_rcv_bufs(ha);
	}
        (void)QLA_LOCK(ha, __func__, 0);

	/* rebuild buffers; on failure leave the interface down */
	if (qla_alloc_xmt_bufs(ha) != 0) {
        	QLA_UNLOCK(ha, __func__);
                return;
	}
	/* re-negotiate 9K receive buffer support with the firmware */
	qla_confirm_9kb_enable(ha);

        if (qla_alloc_rcv_bufs(ha) != 0) {
        	QLA_UNLOCK(ha, __func__);
                return;
	}

        ha->flags.stop_rcv = 0;
        if (ql_init_hw_if(ha) == 0) {
                ifp = ha->ifp;
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* resume the watchdog now that the interface is back */
                ha->flags.qla_watchdog_pause = 0;
        }

        QLA_UNLOCK(ha, __func__);
}
1782250661Sdavidcs
1783284982Sdavidcsstatic void
1784284982Sdavidcsqla_async_event(void *context, int pending)
1785284982Sdavidcs{
1786284982Sdavidcs        qla_host_t *ha = context;
1787284982Sdavidcs
1788284982Sdavidcs        (void)QLA_LOCK(ha, __func__, 0);
1789284982Sdavidcs        qla_hw_async_event(ha);
1790284982Sdavidcs        QLA_UNLOCK(ha, __func__);
1791284982Sdavidcs}
1792284982Sdavidcs
1793