/* ql_os.c — FreeBSD SVN revision 250661 */
1250661Sdavidcs/*
2250661Sdavidcs * Copyright (c) 2013-2014 Qlogic Corporation
3250661Sdavidcs * All rights reserved.
4250661Sdavidcs *
5250661Sdavidcs *  Redistribution and use in source and binary forms, with or without
6250661Sdavidcs *  modification, are permitted provided that the following conditions
7250661Sdavidcs *  are met:
8250661Sdavidcs *
9250661Sdavidcs *  1. Redistributions of source code must retain the above copyright
10250661Sdavidcs *     notice, this list of conditions and the following disclaimer.
11250661Sdavidcs *  2. Redistributions in binary form must reproduce the above copyright
12250661Sdavidcs *     notice, this list of conditions and the following disclaimer in the
13250661Sdavidcs *     documentation and/or other materials provided with the distribution.
14250661Sdavidcs *
15250661Sdavidcs *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16250661Sdavidcs *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17250661Sdavidcs *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18250661Sdavidcs *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19250661Sdavidcs *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20250661Sdavidcs *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21250661Sdavidcs *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22250661Sdavidcs *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23250661Sdavidcs *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24250661Sdavidcs *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25250661Sdavidcs *  POSSIBILITY OF SUCH DAMAGE.
26250661Sdavidcs */
27250661Sdavidcs
28250661Sdavidcs/*
29250661Sdavidcs * File: ql_os.c
30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31250661Sdavidcs */
32250661Sdavidcs
33250661Sdavidcs#include <sys/cdefs.h>
34250661Sdavidcs__FBSDID("$FreeBSD: head/sys/dev/qlxgbe/ql_os.c 250661 2013-05-15 17:03:09Z davidcs $");
35250661Sdavidcs
36250661Sdavidcs
37250661Sdavidcs#include "ql_os.h"
38250661Sdavidcs#include "ql_hw.h"
39250661Sdavidcs#include "ql_def.h"
40250661Sdavidcs#include "ql_inline.h"
41250661Sdavidcs#include "ql_ver.h"
42250661Sdavidcs#include "ql_glbl.h"
43250661Sdavidcs#include "ql_dbg.h"
44250661Sdavidcs#include <sys/smp.h>
45250661Sdavidcs
46250661Sdavidcs/*
47250661Sdavidcs * Some PCI Configuration Space Related Defines
48250661Sdavidcs */
49250661Sdavidcs
50250661Sdavidcs#ifndef PCI_VENDOR_QLOGIC
51250661Sdavidcs#define PCI_VENDOR_QLOGIC	0x1077
52250661Sdavidcs#endif
53250661Sdavidcs
54250661Sdavidcs#ifndef PCI_PRODUCT_QLOGIC_ISP8030
55250661Sdavidcs#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
56250661Sdavidcs#endif
57250661Sdavidcs
58250661Sdavidcs#define PCI_QLOGIC_ISP8030 \
59250661Sdavidcs	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
60250661Sdavidcs
61250661Sdavidcs/*
62250661Sdavidcs * static functions
63250661Sdavidcs */
64250661Sdavidcsstatic int qla_alloc_parent_dma_tag(qla_host_t *ha);
65250661Sdavidcsstatic void qla_free_parent_dma_tag(qla_host_t *ha);
66250661Sdavidcsstatic int qla_alloc_xmt_bufs(qla_host_t *ha);
67250661Sdavidcsstatic void qla_free_xmt_bufs(qla_host_t *ha);
68250661Sdavidcsstatic int qla_alloc_rcv_bufs(qla_host_t *ha);
69250661Sdavidcsstatic void qla_free_rcv_bufs(qla_host_t *ha);
70250661Sdavidcsstatic void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
71250661Sdavidcs
72250661Sdavidcsstatic void qla_init_ifnet(device_t dev, qla_host_t *ha);
73250661Sdavidcsstatic int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
74250661Sdavidcsstatic int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
75250661Sdavidcsstatic void qla_release(qla_host_t *ha);
76250661Sdavidcsstatic void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
77250661Sdavidcs		int error);
78250661Sdavidcsstatic void qla_stop(qla_host_t *ha);
79250661Sdavidcsstatic int qla_send(qla_host_t *ha, struct mbuf **m_headp);
80250661Sdavidcsstatic void qla_tx_done(void *context, int pending);
81250661Sdavidcsstatic void qla_get_peer(qla_host_t *ha);
82250661Sdavidcsstatic void qla_error_recovery(void *context, int pending);
83250661Sdavidcs
84250661Sdavidcs/*
85250661Sdavidcs * Hooks to the Operating Systems
86250661Sdavidcs */
87250661Sdavidcsstatic int qla_pci_probe (device_t);
88250661Sdavidcsstatic int qla_pci_attach (device_t);
89250661Sdavidcsstatic int qla_pci_detach (device_t);
90250661Sdavidcs
91250661Sdavidcsstatic void qla_init(void *arg);
92250661Sdavidcsstatic int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
93250661Sdavidcsstatic int qla_media_change(struct ifnet *ifp);
94250661Sdavidcsstatic void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
95250661Sdavidcsstatic void qla_start(struct ifnet *ifp);
96250661Sdavidcs
97250661Sdavidcsstatic device_method_t qla_pci_methods[] = {
98250661Sdavidcs	/* Device interface */
99250661Sdavidcs	DEVMETHOD(device_probe, qla_pci_probe),
100250661Sdavidcs	DEVMETHOD(device_attach, qla_pci_attach),
101250661Sdavidcs	DEVMETHOD(device_detach, qla_pci_detach),
102250661Sdavidcs	{ 0, 0 }
103250661Sdavidcs};
104250661Sdavidcs
105250661Sdavidcsstatic driver_t qla_pci_driver = {
106250661Sdavidcs	"ql", qla_pci_methods, sizeof (qla_host_t),
107250661Sdavidcs};
108250661Sdavidcs
109250661Sdavidcsstatic devclass_t qla83xx_devclass;
110250661Sdavidcs
111250661SdavidcsDRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);
112250661Sdavidcs
113250661SdavidcsMODULE_DEPEND(qla83xx, pci, 1, 1, 1);
114250661SdavidcsMODULE_DEPEND(qla83xx, ether, 1, 1, 1);
115250661Sdavidcs
116250661SdavidcsMALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");
117250661Sdavidcs
118250661Sdavidcs#define QL_STD_REPLENISH_THRES		0
119250661Sdavidcs#define QL_JUMBO_REPLENISH_THRES	32
120250661Sdavidcs
121250661Sdavidcs
122250661Sdavidcsstatic char dev_str[64];
123250661Sdavidcs
124250661Sdavidcs/*
125250661Sdavidcs * Name:	qla_pci_probe
126250661Sdavidcs * Function:	Validate the PCI device to be a QLA80XX device
127250661Sdavidcs */
128250661Sdavidcsstatic int
129250661Sdavidcsqla_pci_probe(device_t dev)
130250661Sdavidcs{
131250661Sdavidcs        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
132250661Sdavidcs        case PCI_QLOGIC_ISP8030:
133250661Sdavidcs		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
134250661Sdavidcs			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
135250661Sdavidcs			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
136250661Sdavidcs			QLA_VERSION_BUILD);
137250661Sdavidcs                device_set_desc(dev, dev_str);
138250661Sdavidcs                break;
139250661Sdavidcs        default:
140250661Sdavidcs                return (ENXIO);
141250661Sdavidcs        }
142250661Sdavidcs
143250661Sdavidcs        if (bootverbose)
144250661Sdavidcs                printf("%s: %s\n ", __func__, dev_str);
145250661Sdavidcs
146250661Sdavidcs        return (BUS_PROBE_DEFAULT);
147250661Sdavidcs}
148250661Sdavidcs
149250661Sdavidcsstatic void
150250661Sdavidcsqla_add_sysctls(qla_host_t *ha)
151250661Sdavidcs{
152250661Sdavidcs        device_t dev = ha->pci_dev;
153250661Sdavidcs
154250661Sdavidcs        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
155250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
156250661Sdavidcs                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
157250661Sdavidcs                (void *)ha, 0,
158250661Sdavidcs                qla_sysctl_get_stats, "I", "Statistics");
159250661Sdavidcs
160250661Sdavidcs        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
161250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
162250661Sdavidcs                OID_AUTO, "fw_version", CTLFLAG_RD,
163250661Sdavidcs                &ha->fw_ver_str, 0, "firmware version");
164250661Sdavidcs
165250661Sdavidcs        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
166250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
167250661Sdavidcs                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
168250661Sdavidcs                (void *)ha, 0,
169250661Sdavidcs                qla_sysctl_get_link_status, "I", "Link Status");
170250661Sdavidcs
171250661Sdavidcs	ha->dbg_level = 0;
172250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
173250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
174250661Sdavidcs                OID_AUTO, "debug", CTLFLAG_RW,
175250661Sdavidcs                &ha->dbg_level, ha->dbg_level, "Debug Level");
176250661Sdavidcs
177250661Sdavidcs	ha->std_replenish = QL_STD_REPLENISH_THRES;
178250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
179250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
180250661Sdavidcs                OID_AUTO, "std_replenish", CTLFLAG_RW,
181250661Sdavidcs                &ha->std_replenish, ha->std_replenish,
182250661Sdavidcs                "Threshold for Replenishing Standard Frames");
183250661Sdavidcs
184250661Sdavidcs        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
185250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
186250661Sdavidcs                OID_AUTO, "ipv4_lro",
187250661Sdavidcs                CTLFLAG_RD, &ha->ipv4_lro,
188250661Sdavidcs                "number of ipv4 lro completions");
189250661Sdavidcs
190250661Sdavidcs        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
191250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
192250661Sdavidcs                OID_AUTO, "ipv6_lro",
193250661Sdavidcs                CTLFLAG_RD, &ha->ipv6_lro,
194250661Sdavidcs                "number of ipv6 lro completions");
195250661Sdavidcs
196250661Sdavidcs	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
197250661Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
198250661Sdavidcs		OID_AUTO, "tx_tso_frames",
199250661Sdavidcs		CTLFLAG_RD, &ha->tx_tso_frames,
200250661Sdavidcs		"number of Tx TSO Frames");
201250661Sdavidcs
202250661Sdavidcs	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
203250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
204250661Sdavidcs		OID_AUTO, "hw_vlan_tx_frames",
205250661Sdavidcs		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
206250661Sdavidcs		"number of Tx VLAN Frames");
207250661Sdavidcs
208250661Sdavidcs        return;
209250661Sdavidcs}
210250661Sdavidcs
211250661Sdavidcsstatic void
212250661Sdavidcsqla_watchdog(void *arg)
213250661Sdavidcs{
214250661Sdavidcs	qla_host_t *ha = arg;
215250661Sdavidcs	qla_hw_t *hw;
216250661Sdavidcs	struct ifnet *ifp;
217250661Sdavidcs	uint32_t i;
218250661Sdavidcs	qla_hw_tx_cntxt_t *hw_tx_cntxt;
219250661Sdavidcs
220250661Sdavidcs	hw = &ha->hw;
221250661Sdavidcs	ifp = ha->ifp;
222250661Sdavidcs
223250661Sdavidcs        if (ha->flags.qla_watchdog_exit) {
224250661Sdavidcs		ha->qla_watchdog_exited = 1;
225250661Sdavidcs		return;
226250661Sdavidcs	}
227250661Sdavidcs	ha->qla_watchdog_exited = 0;
228250661Sdavidcs
229250661Sdavidcs	if (!ha->flags.qla_watchdog_pause) {
230250661Sdavidcs		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
231250661Sdavidcs			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
232250661Sdavidcs			ha->qla_watchdog_paused = 1;
233250661Sdavidcs			ha->flags.qla_watchdog_pause = 1;
234250661Sdavidcs			ha->qla_initiate_recovery = 0;
235250661Sdavidcs			ha->err_inject = 0;
236250661Sdavidcs			taskqueue_enqueue(ha->err_tq, &ha->err_task);
237250661Sdavidcs		} else {
238250661Sdavidcs			for (i = 0; i < ha->hw.num_tx_rings; i++) {
239250661Sdavidcs				hw_tx_cntxt = &hw->tx_cntxt[i];
240250661Sdavidcs				if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
241250661Sdavidcs					hw_tx_cntxt->txr_comp) {
242250661Sdavidcs					taskqueue_enqueue(ha->tx_tq,
243250661Sdavidcs						&ha->tx_task);
244250661Sdavidcs					break;
245250661Sdavidcs				}
246250661Sdavidcs			}
247250661Sdavidcs
248250661Sdavidcs			if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
249250661Sdavidcs				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
250250661Sdavidcs			}
251250661Sdavidcs			ha->qla_watchdog_paused = 0;
252250661Sdavidcs		}
253250661Sdavidcs
254250661Sdavidcs	} else {
255250661Sdavidcs		ha->qla_watchdog_paused = 1;
256250661Sdavidcs	}
257250661Sdavidcs
258250661Sdavidcs	ha->watchdog_ticks = ha->watchdog_ticks++ % 1000;
259250661Sdavidcs	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
260250661Sdavidcs		qla_watchdog, ha);
261250661Sdavidcs}
262250661Sdavidcs
263250661Sdavidcs/*
264250661Sdavidcs * Name:	qla_pci_attach
265250661Sdavidcs * Function:	attaches the device to the operating system
266250661Sdavidcs */
267250661Sdavidcsstatic int
268250661Sdavidcsqla_pci_attach(device_t dev)
269250661Sdavidcs{
270250661Sdavidcs	qla_host_t *ha = NULL;
271250661Sdavidcs	uint32_t rsrc_len;
272250661Sdavidcs	int i;
273250661Sdavidcs
274250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
275250661Sdavidcs
276250661Sdavidcs        if ((ha = device_get_softc(dev)) == NULL) {
277250661Sdavidcs                device_printf(dev, "cannot get softc\n");
278250661Sdavidcs                return (ENOMEM);
279250661Sdavidcs        }
280250661Sdavidcs
281250661Sdavidcs        memset(ha, 0, sizeof (qla_host_t));
282250661Sdavidcs
283250661Sdavidcs        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
284250661Sdavidcs                device_printf(dev, "device is not ISP8030\n");
285250661Sdavidcs                return (ENXIO);
286250661Sdavidcs	}
287250661Sdavidcs
288250661Sdavidcs        ha->pci_func = pci_get_function(dev);
289250661Sdavidcs
290250661Sdavidcs        ha->pci_dev = dev;
291250661Sdavidcs
292250661Sdavidcs	pci_enable_busmaster(dev);
293250661Sdavidcs
294250661Sdavidcs	ha->reg_rid = PCIR_BAR(0);
295250661Sdavidcs	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
296250661Sdavidcs				RF_ACTIVE);
297250661Sdavidcs
298250661Sdavidcs        if (ha->pci_reg == NULL) {
299250661Sdavidcs                device_printf(dev, "unable to map any ports\n");
300250661Sdavidcs                goto qla_pci_attach_err;
301250661Sdavidcs        }
302250661Sdavidcs
303250661Sdavidcs	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
304250661Sdavidcs					ha->reg_rid);
305250661Sdavidcs
306250661Sdavidcs	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
307250661Sdavidcs
308250661Sdavidcs	mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
309250661Sdavidcs
310250661Sdavidcs	qla_add_sysctls(ha);
311250661Sdavidcs	ql_hw_add_sysctls(ha);
312250661Sdavidcs
313250661Sdavidcs	ha->flags.lock_init = 1;
314250661Sdavidcs
315250661Sdavidcs	ha->reg_rid1 = PCIR_BAR(2);
316250661Sdavidcs	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
317250661Sdavidcs			&ha->reg_rid1, RF_ACTIVE);
318250661Sdavidcs
319250661Sdavidcs	ha->msix_count = pci_msix_count(dev);
320250661Sdavidcs
321250661Sdavidcs	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
322250661Sdavidcs		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
323250661Sdavidcs			ha->msix_count);
324250661Sdavidcs		goto qla_pci_attach_err;
325250661Sdavidcs	}
326250661Sdavidcs
327250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
328250661Sdavidcs		" msix_count 0x%x pci_reg %p\n", __func__, ha,
329250661Sdavidcs		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));
330250661Sdavidcs
331250661Sdavidcs	ha->msix_count = ha->hw.num_sds_rings + 1;
332250661Sdavidcs
333250661Sdavidcs	if (pci_alloc_msix(dev, &ha->msix_count)) {
334250661Sdavidcs		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
335250661Sdavidcs			ha->msix_count);
336250661Sdavidcs		ha->msix_count = 0;
337250661Sdavidcs		goto qla_pci_attach_err;
338250661Sdavidcs	}
339250661Sdavidcs
340250661Sdavidcs	ha->mbx_irq_rid = 1;
341250661Sdavidcs	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
342250661Sdavidcs				&ha->mbx_irq_rid,
343250661Sdavidcs				(RF_ACTIVE | RF_SHAREABLE));
344250661Sdavidcs	if (ha->mbx_irq == NULL) {
345250661Sdavidcs		device_printf(dev, "could not allocate mbx interrupt\n");
346250661Sdavidcs		goto qla_pci_attach_err;
347250661Sdavidcs	}
348250661Sdavidcs	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
349250661Sdavidcs		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
350250661Sdavidcs		device_printf(dev, "could not setup mbx interrupt\n");
351250661Sdavidcs		goto qla_pci_attach_err;
352250661Sdavidcs	}
353250661Sdavidcs
354250661Sdavidcs
355250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
356250661Sdavidcs		ha->irq_vec[i].sds_idx = i;
357250661Sdavidcs                ha->irq_vec[i].ha = ha;
358250661Sdavidcs                ha->irq_vec[i].irq_rid = 2 + i;
359250661Sdavidcs
360250661Sdavidcs		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
361250661Sdavidcs				&ha->irq_vec[i].irq_rid,
362250661Sdavidcs				(RF_ACTIVE | RF_SHAREABLE));
363250661Sdavidcs
364250661Sdavidcs		if (ha->irq_vec[i].irq == NULL) {
365250661Sdavidcs			device_printf(dev, "could not allocate interrupt\n");
366250661Sdavidcs			goto qla_pci_attach_err;
367250661Sdavidcs		}
368250661Sdavidcs		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
369250661Sdavidcs			(INTR_TYPE_NET | INTR_MPSAFE),
370250661Sdavidcs			NULL, ql_isr, &ha->irq_vec[i],
371250661Sdavidcs			&ha->irq_vec[i].handle)) {
372250661Sdavidcs			device_printf(dev, "could not setup interrupt\n");
373250661Sdavidcs			goto qla_pci_attach_err;
374250661Sdavidcs		}
375250661Sdavidcs	}
376250661Sdavidcs
377250661Sdavidcs	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
378250661Sdavidcs		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);
379250661Sdavidcs
380250661Sdavidcs	/* initialize hardware */
381250661Sdavidcs	if (ql_init_hw(ha)) {
382250661Sdavidcs		device_printf(dev, "%s: ql_init_hw failed\n", __func__);
383250661Sdavidcs		goto qla_pci_attach_err;
384250661Sdavidcs	}
385250661Sdavidcs
386250661Sdavidcs	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
387250661Sdavidcs		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
388250661Sdavidcs		ha->fw_ver_build);
389250661Sdavidcs        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
390250661Sdavidcs                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
391250661Sdavidcs                        ha->fw_ver_build);
392250661Sdavidcs
393250661Sdavidcs	ql_read_mac_addr(ha);
394250661Sdavidcs
395250661Sdavidcs	/* allocate parent dma tag */
396250661Sdavidcs	if (qla_alloc_parent_dma_tag(ha)) {
397250661Sdavidcs		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
398250661Sdavidcs			__func__);
399250661Sdavidcs		goto qla_pci_attach_err;
400250661Sdavidcs	}
401250661Sdavidcs
402250661Sdavidcs	/* alloc all dma buffers */
403250661Sdavidcs	if (ql_alloc_dma(ha)) {
404250661Sdavidcs		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
405250661Sdavidcs		goto qla_pci_attach_err;
406250661Sdavidcs	}
407250661Sdavidcs	qla_get_peer(ha);
408250661Sdavidcs
409250661Sdavidcs	/* create the o.s ethernet interface */
410250661Sdavidcs	qla_init_ifnet(dev, ha);
411250661Sdavidcs
412250661Sdavidcs	ha->flags.qla_watchdog_active = 1;
413250661Sdavidcs	ha->flags.qla_watchdog_pause = 1;
414250661Sdavidcs
415250661Sdavidcs
416250661Sdavidcs	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
417250661Sdavidcs	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
418250661Sdavidcs			taskqueue_thread_enqueue, &ha->tx_tq);
419250661Sdavidcs	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
420250661Sdavidcs		device_get_nameunit(ha->pci_dev));
421250661Sdavidcs
422250661Sdavidcs	callout_init(&ha->tx_callout, TRUE);
423250661Sdavidcs	ha->flags.qla_callout_init = 1;
424250661Sdavidcs
425250661Sdavidcs	/* create ioctl device interface */
426250661Sdavidcs	if (ql_make_cdev(ha)) {
427250661Sdavidcs		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
428250661Sdavidcs		goto qla_pci_attach_err;
429250661Sdavidcs	}
430250661Sdavidcs
431250661Sdavidcs	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
432250661Sdavidcs		qla_watchdog, ha);
433250661Sdavidcs
434250661Sdavidcs	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
435250661Sdavidcs	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
436250661Sdavidcs			taskqueue_thread_enqueue, &ha->err_tq);
437250661Sdavidcs	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
438250661Sdavidcs		device_get_nameunit(ha->pci_dev));
439250661Sdavidcs
440250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
441250661Sdavidcs        return (0);
442250661Sdavidcs
443250661Sdavidcsqla_pci_attach_err:
444250661Sdavidcs
445250661Sdavidcs	qla_release(ha);
446250661Sdavidcs
447250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
448250661Sdavidcs        return (ENXIO);
449250661Sdavidcs}
450250661Sdavidcs
451250661Sdavidcs/*
452250661Sdavidcs * Name:	qla_pci_detach
453250661Sdavidcs * Function:	Unhooks the device from the operating system
454250661Sdavidcs */
455250661Sdavidcsstatic int
456250661Sdavidcsqla_pci_detach(device_t dev)
457250661Sdavidcs{
458250661Sdavidcs	qla_host_t *ha = NULL;
459250661Sdavidcs	struct ifnet *ifp;
460250661Sdavidcs
461250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
462250661Sdavidcs
463250661Sdavidcs        if ((ha = device_get_softc(dev)) == NULL) {
464250661Sdavidcs                device_printf(dev, "cannot get softc\n");
465250661Sdavidcs                return (ENOMEM);
466250661Sdavidcs        }
467250661Sdavidcs
468250661Sdavidcs	ifp = ha->ifp;
469250661Sdavidcs
470250661Sdavidcs	(void)QLA_LOCK(ha, __func__, 0);
471250661Sdavidcs	qla_stop(ha);
472250661Sdavidcs	QLA_UNLOCK(ha, __func__);
473250661Sdavidcs
474250661Sdavidcs	qla_release(ha);
475250661Sdavidcs
476250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
477250661Sdavidcs
478250661Sdavidcs        return (0);
479250661Sdavidcs}
480250661Sdavidcs
481250661Sdavidcs/*
482250661Sdavidcs * SYSCTL Related Callbacks
483250661Sdavidcs */
484250661Sdavidcsstatic int
485250661Sdavidcsqla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
486250661Sdavidcs{
487250661Sdavidcs	int err, ret = 0;
488250661Sdavidcs	qla_host_t *ha;
489250661Sdavidcs
490250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
491250661Sdavidcs
492250661Sdavidcs	if (err || !req->newptr)
493250661Sdavidcs		return (err);
494250661Sdavidcs
495250661Sdavidcs	if (ret == 1) {
496250661Sdavidcs		ha = (qla_host_t *)arg1;
497250661Sdavidcs		ql_get_stats(ha);
498250661Sdavidcs	}
499250661Sdavidcs	return (err);
500250661Sdavidcs}
501250661Sdavidcsstatic int
502250661Sdavidcsqla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
503250661Sdavidcs{
504250661Sdavidcs	int err, ret = 0;
505250661Sdavidcs	qla_host_t *ha;
506250661Sdavidcs
507250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
508250661Sdavidcs
509250661Sdavidcs	if (err || !req->newptr)
510250661Sdavidcs		return (err);
511250661Sdavidcs
512250661Sdavidcs	if (ret == 1) {
513250661Sdavidcs		ha = (qla_host_t *)arg1;
514250661Sdavidcs		ql_hw_link_status(ha);
515250661Sdavidcs	}
516250661Sdavidcs	return (err);
517250661Sdavidcs}
518250661Sdavidcs
519250661Sdavidcs/*
520250661Sdavidcs * Name:	qla_release
521250661Sdavidcs * Function:	Releases the resources allocated for the device
522250661Sdavidcs */
523250661Sdavidcsstatic void
524250661Sdavidcsqla_release(qla_host_t *ha)
525250661Sdavidcs{
526250661Sdavidcs	device_t dev;
527250661Sdavidcs	int i;
528250661Sdavidcs
529250661Sdavidcs	dev = ha->pci_dev;
530250661Sdavidcs
531250661Sdavidcs	if (ha->err_tq) {
532250661Sdavidcs		taskqueue_drain(ha->err_tq, &ha->err_task);
533250661Sdavidcs		taskqueue_free(ha->err_tq);
534250661Sdavidcs	}
535250661Sdavidcs
536250661Sdavidcs	if (ha->tx_tq) {
537250661Sdavidcs		taskqueue_drain(ha->tx_tq, &ha->tx_task);
538250661Sdavidcs		taskqueue_free(ha->tx_tq);
539250661Sdavidcs	}
540250661Sdavidcs
541250661Sdavidcs	ql_del_cdev(ha);
542250661Sdavidcs
543250661Sdavidcs	if (ha->flags.qla_watchdog_active) {
544250661Sdavidcs		ha->flags.qla_watchdog_exit = 1;
545250661Sdavidcs
546250661Sdavidcs		while (ha->qla_watchdog_exited == 0)
547250661Sdavidcs			qla_mdelay(__func__, 1);
548250661Sdavidcs	}
549250661Sdavidcs
550250661Sdavidcs	if (ha->flags.qla_callout_init)
551250661Sdavidcs		callout_stop(&ha->tx_callout);
552250661Sdavidcs
553250661Sdavidcs	if (ha->ifp != NULL)
554250661Sdavidcs		ether_ifdetach(ha->ifp);
555250661Sdavidcs
556250661Sdavidcs	ql_free_dma(ha);
557250661Sdavidcs	qla_free_parent_dma_tag(ha);
558250661Sdavidcs
559250661Sdavidcs	if (ha->mbx_handle)
560250661Sdavidcs		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);
561250661Sdavidcs
562250661Sdavidcs	if (ha->mbx_irq)
563250661Sdavidcs		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
564250661Sdavidcs				ha->mbx_irq);
565250661Sdavidcs
566250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
567250661Sdavidcs
568250661Sdavidcs		if (ha->irq_vec[i].handle) {
569250661Sdavidcs			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
570250661Sdavidcs					ha->irq_vec[i].handle);
571250661Sdavidcs		}
572250661Sdavidcs
573250661Sdavidcs		if (ha->irq_vec[i].irq) {
574250661Sdavidcs			(void)bus_release_resource(dev, SYS_RES_IRQ,
575250661Sdavidcs				ha->irq_vec[i].irq_rid,
576250661Sdavidcs				ha->irq_vec[i].irq);
577250661Sdavidcs		}
578250661Sdavidcs	}
579250661Sdavidcs
580250661Sdavidcs	if (ha->msix_count)
581250661Sdavidcs		pci_release_msi(dev);
582250661Sdavidcs
583250661Sdavidcs	if (ha->flags.lock_init) {
584250661Sdavidcs		mtx_destroy(&ha->tx_lock);
585250661Sdavidcs		mtx_destroy(&ha->hw_lock);
586250661Sdavidcs	}
587250661Sdavidcs
588250661Sdavidcs        if (ha->pci_reg)
589250661Sdavidcs                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
590250661Sdavidcs				ha->pci_reg);
591250661Sdavidcs
592250661Sdavidcs        if (ha->pci_reg1)
593250661Sdavidcs                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
594250661Sdavidcs				ha->pci_reg1);
595250661Sdavidcs}
596250661Sdavidcs
597250661Sdavidcs/*
598250661Sdavidcs * DMA Related Functions
599250661Sdavidcs */
600250661Sdavidcs
601250661Sdavidcsstatic void
602250661Sdavidcsqla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
603250661Sdavidcs{
604250661Sdavidcs        *((bus_addr_t *)arg) = 0;
605250661Sdavidcs
606250661Sdavidcs        if (error) {
607250661Sdavidcs                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
608250661Sdavidcs                return;
609250661Sdavidcs	}
610250661Sdavidcs
611250661Sdavidcs        *((bus_addr_t *)arg) = segs[0].ds_addr;
612250661Sdavidcs
613250661Sdavidcs	return;
614250661Sdavidcs}
615250661Sdavidcs
616250661Sdavidcsint
617250661Sdavidcsql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
618250661Sdavidcs{
619250661Sdavidcs        int             ret = 0;
620250661Sdavidcs        device_t        dev;
621250661Sdavidcs        bus_addr_t      b_addr;
622250661Sdavidcs
623250661Sdavidcs        dev = ha->pci_dev;
624250661Sdavidcs
625250661Sdavidcs        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
626250661Sdavidcs
627250661Sdavidcs        ret = bus_dma_tag_create(
628250661Sdavidcs                        ha->parent_tag,/* parent */
629250661Sdavidcs                        dma_buf->alignment,
630250661Sdavidcs                        ((bus_size_t)(1ULL << 32)),/* boundary */
631250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* lowaddr */
632250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* highaddr */
633250661Sdavidcs                        NULL, NULL,             /* filter, filterarg */
634250661Sdavidcs                        dma_buf->size,          /* maxsize */
635250661Sdavidcs                        1,                      /* nsegments */
636250661Sdavidcs                        dma_buf->size,          /* maxsegsize */
637250661Sdavidcs                        0,                      /* flags */
638250661Sdavidcs                        NULL, NULL,             /* lockfunc, lockarg */
639250661Sdavidcs                        &dma_buf->dma_tag);
640250661Sdavidcs
641250661Sdavidcs        if (ret) {
642250661Sdavidcs                device_printf(dev, "%s: could not create dma tag\n", __func__);
643250661Sdavidcs                goto ql_alloc_dmabuf_exit;
644250661Sdavidcs        }
645250661Sdavidcs        ret = bus_dmamem_alloc(dma_buf->dma_tag,
646250661Sdavidcs                        (void **)&dma_buf->dma_b,
647250661Sdavidcs                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
648250661Sdavidcs                        &dma_buf->dma_map);
649250661Sdavidcs        if (ret) {
650250661Sdavidcs                bus_dma_tag_destroy(dma_buf->dma_tag);
651250661Sdavidcs                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
652250661Sdavidcs                goto ql_alloc_dmabuf_exit;
653250661Sdavidcs        }
654250661Sdavidcs
655250661Sdavidcs        ret = bus_dmamap_load(dma_buf->dma_tag,
656250661Sdavidcs                        dma_buf->dma_map,
657250661Sdavidcs                        dma_buf->dma_b,
658250661Sdavidcs                        dma_buf->size,
659250661Sdavidcs                        qla_dmamap_callback,
660250661Sdavidcs                        &b_addr, BUS_DMA_NOWAIT);
661250661Sdavidcs
662250661Sdavidcs        if (ret || !b_addr) {
663250661Sdavidcs                bus_dma_tag_destroy(dma_buf->dma_tag);
664250661Sdavidcs                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
665250661Sdavidcs                        dma_buf->dma_map);
666250661Sdavidcs                ret = -1;
667250661Sdavidcs                goto ql_alloc_dmabuf_exit;
668250661Sdavidcs        }
669250661Sdavidcs
670250661Sdavidcs        dma_buf->dma_addr = b_addr;
671250661Sdavidcs
672250661Sdavidcsql_alloc_dmabuf_exit:
673250661Sdavidcs        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
674250661Sdavidcs                __func__, ret, (void *)dma_buf->dma_tag,
675250661Sdavidcs                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
676250661Sdavidcs		dma_buf->size));
677250661Sdavidcs
678250661Sdavidcs        return ret;
679250661Sdavidcs}
680250661Sdavidcs
681250661Sdavidcsvoid
682250661Sdavidcsql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
683250661Sdavidcs{
684250661Sdavidcs        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
685250661Sdavidcs        bus_dma_tag_destroy(dma_buf->dma_tag);
686250661Sdavidcs}
687250661Sdavidcs
688250661Sdavidcsstatic int
689250661Sdavidcsqla_alloc_parent_dma_tag(qla_host_t *ha)
690250661Sdavidcs{
691250661Sdavidcs	int		ret;
692250661Sdavidcs	device_t	dev;
693250661Sdavidcs
694250661Sdavidcs	dev = ha->pci_dev;
695250661Sdavidcs
696250661Sdavidcs        /*
697250661Sdavidcs         * Allocate parent DMA Tag
698250661Sdavidcs         */
699250661Sdavidcs        ret = bus_dma_tag_create(
700250661Sdavidcs                        bus_get_dma_tag(dev),   /* parent */
701250661Sdavidcs                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
702250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* lowaddr */
703250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* highaddr */
704250661Sdavidcs                        NULL, NULL,             /* filter, filterarg */
705250661Sdavidcs                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
706250661Sdavidcs                        0,                      /* nsegments */
707250661Sdavidcs                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
708250661Sdavidcs                        0,                      /* flags */
709250661Sdavidcs                        NULL, NULL,             /* lockfunc, lockarg */
710250661Sdavidcs                        &ha->parent_tag);
711250661Sdavidcs
712250661Sdavidcs        if (ret) {
713250661Sdavidcs                device_printf(dev, "%s: could not create parent dma tag\n",
714250661Sdavidcs                        __func__);
715250661Sdavidcs		return (-1);
716250661Sdavidcs        }
717250661Sdavidcs
718250661Sdavidcs        ha->flags.parent_tag = 1;
719250661Sdavidcs
720250661Sdavidcs	return (0);
721250661Sdavidcs}
722250661Sdavidcs
723250661Sdavidcsstatic void
724250661Sdavidcsqla_free_parent_dma_tag(qla_host_t *ha)
725250661Sdavidcs{
726250661Sdavidcs        if (ha->flags.parent_tag) {
727250661Sdavidcs                bus_dma_tag_destroy(ha->parent_tag);
728250661Sdavidcs                ha->flags.parent_tag = 0;
729250661Sdavidcs        }
730250661Sdavidcs}
731250661Sdavidcs
732250661Sdavidcs/*
733250661Sdavidcs * Name: qla_init_ifnet
734250661Sdavidcs * Function: Creates the Network Device Interface and Registers it with the O.S
735250661Sdavidcs */
736250661Sdavidcs
737250661Sdavidcsstatic void
738250661Sdavidcsqla_init_ifnet(device_t dev, qla_host_t *ha)
739250661Sdavidcs{
740250661Sdavidcs	struct ifnet *ifp;
741250661Sdavidcs
742250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
743250661Sdavidcs
744250661Sdavidcs	ifp = ha->ifp = if_alloc(IFT_ETHER);
745250661Sdavidcs
746250661Sdavidcs	if (ifp == NULL)
747250661Sdavidcs		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
748250661Sdavidcs
749250661Sdavidcs	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
750250661Sdavidcs
751250661Sdavidcs#if __FreeBSD_version >= 1000000
752250661Sdavidcs	if_initbaudrate(ifp, IF_Gbps(10));
753250661Sdavidcs	ifp->if_capabilities = IFCAP_LINKSTATE;
754250661Sdavidcs#else
755250661Sdavidcs	ifp->if_mtu = ETHERMTU;
756250661Sdavidcs	ifp->if_baudrate = (1 * 1000 * 1000 *1000);
757250661Sdavidcs
758250661Sdavidcs#endif /* #if __FreeBSD_version >= 1000000 */
759250661Sdavidcs
760250661Sdavidcs	ifp->if_init = qla_init;
761250661Sdavidcs	ifp->if_softc = ha;
762250661Sdavidcs	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
763250661Sdavidcs	ifp->if_ioctl = qla_ioctl;
764250661Sdavidcs	ifp->if_start = qla_start;
765250661Sdavidcs
766250661Sdavidcs	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
767250661Sdavidcs	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
768250661Sdavidcs	IFQ_SET_READY(&ifp->if_snd);
769250661Sdavidcs
770250661Sdavidcs	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
771250661Sdavidcs
772250661Sdavidcs	ether_ifattach(ifp, qla_get_mac_addr(ha));
773250661Sdavidcs
774250661Sdavidcs	ifp->if_capabilities = IFCAP_HWCSUM |
775250661Sdavidcs				IFCAP_TSO4 |
776250661Sdavidcs				IFCAP_JUMBO_MTU;
777250661Sdavidcs
778250661Sdavidcs	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
779250661Sdavidcs	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
780250661Sdavidcs
781250661Sdavidcs	ifp->if_capenable = ifp->if_capabilities;
782250661Sdavidcs
783250661Sdavidcs	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
784250661Sdavidcs
785250661Sdavidcs	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
786250661Sdavidcs
787250661Sdavidcs	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
788250661Sdavidcs		NULL);
789250661Sdavidcs	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
790250661Sdavidcs
791250661Sdavidcs	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
792250661Sdavidcs
793250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
794250661Sdavidcs
795250661Sdavidcs	return;
796250661Sdavidcs}
797250661Sdavidcs
798250661Sdavidcsstatic void
799250661Sdavidcsqla_init_locked(qla_host_t *ha)
800250661Sdavidcs{
801250661Sdavidcs	struct ifnet *ifp = ha->ifp;
802250661Sdavidcs
803250661Sdavidcs	qla_stop(ha);
804250661Sdavidcs
805250661Sdavidcs	if (qla_alloc_xmt_bufs(ha) != 0)
806250661Sdavidcs		return;
807250661Sdavidcs
808250661Sdavidcs	if (qla_alloc_rcv_bufs(ha) != 0)
809250661Sdavidcs		return;
810250661Sdavidcs
811250661Sdavidcs	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
812250661Sdavidcs
813250661Sdavidcs	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
814250661Sdavidcs
815250661Sdavidcs	ha->flags.stop_rcv = 0;
816250661Sdavidcs 	if (ql_init_hw_if(ha) == 0) {
817250661Sdavidcs		ifp = ha->ifp;
818250661Sdavidcs		ifp->if_drv_flags |= IFF_DRV_RUNNING;
819250661Sdavidcs		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
820250661Sdavidcs		ha->flags.qla_watchdog_pause = 0;
821250661Sdavidcs		ha->hw_vlan_tx_frames = 0;
822250661Sdavidcs		ha->tx_tso_frames = 0;
823250661Sdavidcs	}
824250661Sdavidcs
825250661Sdavidcs	return;
826250661Sdavidcs}
827250661Sdavidcs
828250661Sdavidcsstatic void
829250661Sdavidcsqla_init(void *arg)
830250661Sdavidcs{
831250661Sdavidcs	qla_host_t *ha;
832250661Sdavidcs
833250661Sdavidcs	ha = (qla_host_t *)arg;
834250661Sdavidcs
835250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
836250661Sdavidcs
837250661Sdavidcs	(void)QLA_LOCK(ha, __func__, 0);
838250661Sdavidcs	qla_init_locked(ha);
839250661Sdavidcs	QLA_UNLOCK(ha, __func__);
840250661Sdavidcs
841250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
842250661Sdavidcs}
843250661Sdavidcs
844250661Sdavidcsstatic int
845250661Sdavidcsqla_set_multi(qla_host_t *ha, uint32_t add_multi)
846250661Sdavidcs{
847250661Sdavidcs	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
848250661Sdavidcs	struct ifmultiaddr *ifma;
849250661Sdavidcs	int mcnt = 0;
850250661Sdavidcs	struct ifnet *ifp = ha->ifp;
851250661Sdavidcs	int ret = 0;
852250661Sdavidcs
853250661Sdavidcs	if_maddr_rlock(ifp);
854250661Sdavidcs
855250661Sdavidcs	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
856250661Sdavidcs
857250661Sdavidcs		if (ifma->ifma_addr->sa_family != AF_LINK)
858250661Sdavidcs			continue;
859250661Sdavidcs
860250661Sdavidcs		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
861250661Sdavidcs			break;
862250661Sdavidcs
863250661Sdavidcs		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
864250661Sdavidcs			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);
865250661Sdavidcs
866250661Sdavidcs		mcnt++;
867250661Sdavidcs	}
868250661Sdavidcs
869250661Sdavidcs	if_maddr_runlock(ifp);
870250661Sdavidcs
871250661Sdavidcs	if (QLA_LOCK(ha, __func__, 1) == 0) {
872250661Sdavidcs		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
873250661Sdavidcs		QLA_UNLOCK(ha, __func__);
874250661Sdavidcs	}
875250661Sdavidcs
876250661Sdavidcs	return (ret);
877250661Sdavidcs}
878250661Sdavidcs
879250661Sdavidcsstatic int
880250661Sdavidcsqla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
881250661Sdavidcs{
882250661Sdavidcs	int ret = 0;
883250661Sdavidcs	struct ifreq *ifr = (struct ifreq *)data;
884250661Sdavidcs	struct ifaddr *ifa = (struct ifaddr *)data;
885250661Sdavidcs	qla_host_t *ha;
886250661Sdavidcs
887250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
888250661Sdavidcs
889250661Sdavidcs	switch (cmd) {
890250661Sdavidcs	case SIOCSIFADDR:
891250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
892250661Sdavidcs			__func__, cmd));
893250661Sdavidcs
894250661Sdavidcs		if (ifa->ifa_addr->sa_family == AF_INET) {
895250661Sdavidcs			ifp->if_flags |= IFF_UP;
896250661Sdavidcs			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
897250661Sdavidcs				(void)QLA_LOCK(ha, __func__, 0);
898250661Sdavidcs				qla_init_locked(ha);
899250661Sdavidcs				QLA_UNLOCK(ha, __func__);
900250661Sdavidcs			}
901250661Sdavidcs			QL_DPRINT4(ha, (ha->pci_dev,
902250661Sdavidcs				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
903250661Sdavidcs				__func__, cmd,
904250661Sdavidcs				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));
905250661Sdavidcs
906250661Sdavidcs			arp_ifinit(ifp, ifa);
907250661Sdavidcs		} else {
908250661Sdavidcs			ether_ioctl(ifp, cmd, data);
909250661Sdavidcs		}
910250661Sdavidcs		break;
911250661Sdavidcs
912250661Sdavidcs	case SIOCSIFMTU:
913250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
914250661Sdavidcs			__func__, cmd));
915250661Sdavidcs
916250661Sdavidcs		if (ifr->ifr_mtu > QLA_MAX_MTU) {
917250661Sdavidcs			ret = EINVAL;
918250661Sdavidcs		} else {
919250661Sdavidcs			(void) QLA_LOCK(ha, __func__, 0);
920250661Sdavidcs			ifp->if_mtu = ifr->ifr_mtu;
921250661Sdavidcs			ha->max_frame_size =
922250661Sdavidcs				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
923250661Sdavidcs			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
924250661Sdavidcs				ret = ql_set_max_mtu(ha, ha->max_frame_size,
925250661Sdavidcs					ha->hw.rcv_cntxt_id);
926250661Sdavidcs			}
927250661Sdavidcs
928250661Sdavidcs			if (ifp->if_mtu > ETHERMTU)
929250661Sdavidcs				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
930250661Sdavidcs			else
931250661Sdavidcs				ha->std_replenish = QL_STD_REPLENISH_THRES;
932250661Sdavidcs
933250661Sdavidcs
934250661Sdavidcs			QLA_UNLOCK(ha, __func__);
935250661Sdavidcs
936250661Sdavidcs			if (ret)
937250661Sdavidcs				ret = EINVAL;
938250661Sdavidcs		}
939250661Sdavidcs
940250661Sdavidcs		break;
941250661Sdavidcs
942250661Sdavidcs	case SIOCSIFFLAGS:
943250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
944250661Sdavidcs			__func__, cmd));
945250661Sdavidcs
946250661Sdavidcs		(void)QLA_LOCK(ha, __func__, 0);
947250661Sdavidcs
948250661Sdavidcs		if (ifp->if_flags & IFF_UP) {
949250661Sdavidcs			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
950250661Sdavidcs				if ((ifp->if_flags ^ ha->if_flags) &
951250661Sdavidcs					IFF_PROMISC) {
952250661Sdavidcs					ret = ql_set_promisc(ha);
953250661Sdavidcs				} else if ((ifp->if_flags ^ ha->if_flags) &
954250661Sdavidcs					IFF_ALLMULTI) {
955250661Sdavidcs					ret = ql_set_allmulti(ha);
956250661Sdavidcs				}
957250661Sdavidcs			} else {
958250661Sdavidcs				qla_init_locked(ha);
959250661Sdavidcs				ha->max_frame_size = ifp->if_mtu +
960250661Sdavidcs					ETHER_HDR_LEN + ETHER_CRC_LEN;
961250661Sdavidcs				ret = ql_set_max_mtu(ha, ha->max_frame_size,
962250661Sdavidcs					ha->hw.rcv_cntxt_id);
963250661Sdavidcs			}
964250661Sdavidcs		} else {
965250661Sdavidcs			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
966250661Sdavidcs				qla_stop(ha);
967250661Sdavidcs			ha->if_flags = ifp->if_flags;
968250661Sdavidcs		}
969250661Sdavidcs
970250661Sdavidcs		QLA_UNLOCK(ha, __func__);
971250661Sdavidcs		break;
972250661Sdavidcs
973250661Sdavidcs	case SIOCADDMULTI:
974250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev,
975250661Sdavidcs			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));
976250661Sdavidcs
977250661Sdavidcs		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
978250661Sdavidcs			if (qla_set_multi(ha, 1))
979250661Sdavidcs				ret = EINVAL;
980250661Sdavidcs		}
981250661Sdavidcs		break;
982250661Sdavidcs
983250661Sdavidcs	case SIOCDELMULTI:
984250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev,
985250661Sdavidcs			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));
986250661Sdavidcs
987250661Sdavidcs		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
988250661Sdavidcs			if (qla_set_multi(ha, 0))
989250661Sdavidcs				ret = EINVAL;
990250661Sdavidcs		}
991250661Sdavidcs		break;
992250661Sdavidcs
993250661Sdavidcs	case SIOCSIFMEDIA:
994250661Sdavidcs	case SIOCGIFMEDIA:
995250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev,
996250661Sdavidcs			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
997250661Sdavidcs			__func__, cmd));
998250661Sdavidcs		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
999250661Sdavidcs		break;
1000250661Sdavidcs
1001250661Sdavidcs	case SIOCSIFCAP:
1002250661Sdavidcs	{
1003250661Sdavidcs		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1004250661Sdavidcs
1005250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
1006250661Sdavidcs			__func__, cmd));
1007250661Sdavidcs
1008250661Sdavidcs		if (mask & IFCAP_HWCSUM)
1009250661Sdavidcs			ifp->if_capenable ^= IFCAP_HWCSUM;
1010250661Sdavidcs		if (mask & IFCAP_TSO4)
1011250661Sdavidcs			ifp->if_capenable ^= IFCAP_TSO4;
1012250661Sdavidcs		if (mask & IFCAP_VLAN_HWTAGGING)
1013250661Sdavidcs			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1014250661Sdavidcs		if (mask & IFCAP_VLAN_HWTSO)
1015250661Sdavidcs			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1016250661Sdavidcs
1017250661Sdavidcs		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1018250661Sdavidcs			qla_init(ha);
1019250661Sdavidcs
1020250661Sdavidcs		VLAN_CAPABILITIES(ifp);
1021250661Sdavidcs		break;
1022250661Sdavidcs	}
1023250661Sdavidcs
1024250661Sdavidcs	default:
1025250661Sdavidcs		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
1026250661Sdavidcs			__func__, cmd));
1027250661Sdavidcs		ret = ether_ioctl(ifp, cmd, data);
1028250661Sdavidcs		break;
1029250661Sdavidcs	}
1030250661Sdavidcs
1031250661Sdavidcs	return (ret);
1032250661Sdavidcs}
1033250661Sdavidcs
1034250661Sdavidcsstatic int
1035250661Sdavidcsqla_media_change(struct ifnet *ifp)
1036250661Sdavidcs{
1037250661Sdavidcs	qla_host_t *ha;
1038250661Sdavidcs	struct ifmedia *ifm;
1039250661Sdavidcs	int ret = 0;
1040250661Sdavidcs
1041250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1042250661Sdavidcs
1043250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1044250661Sdavidcs
1045250661Sdavidcs	ifm = &ha->media;
1046250661Sdavidcs
1047250661Sdavidcs	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1048250661Sdavidcs		ret = EINVAL;
1049250661Sdavidcs
1050250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1051250661Sdavidcs
1052250661Sdavidcs	return (ret);
1053250661Sdavidcs}
1054250661Sdavidcs
1055250661Sdavidcsstatic void
1056250661Sdavidcsqla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1057250661Sdavidcs{
1058250661Sdavidcs	qla_host_t *ha;
1059250661Sdavidcs
1060250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1061250661Sdavidcs
1062250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1063250661Sdavidcs
1064250661Sdavidcs	ifmr->ifm_status = IFM_AVALID;
1065250661Sdavidcs	ifmr->ifm_active = IFM_ETHER;
1066250661Sdavidcs
1067250661Sdavidcs	ql_update_link_state(ha);
1068250661Sdavidcs	if (ha->hw.link_up) {
1069250661Sdavidcs		ifmr->ifm_status |= IFM_ACTIVE;
1070250661Sdavidcs		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1071250661Sdavidcs	}
1072250661Sdavidcs
1073250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
1074250661Sdavidcs		(ha->hw.link_up ? "link_up" : "link_down")));
1075250661Sdavidcs
1076250661Sdavidcs	return;
1077250661Sdavidcs}
1078250661Sdavidcs
1079250661Sdavidcsstatic void
1080250661Sdavidcsqla_start(struct ifnet *ifp)
1081250661Sdavidcs{
1082250661Sdavidcs	struct mbuf    *m_head;
1083250661Sdavidcs	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
1084250661Sdavidcs
1085250661Sdavidcs	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
1086250661Sdavidcs
1087250661Sdavidcs	if (!mtx_trylock(&ha->tx_lock)) {
1088250661Sdavidcs		QL_DPRINT8(ha, (ha->pci_dev,
1089250661Sdavidcs			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
1090250661Sdavidcs		return;
1091250661Sdavidcs	}
1092250661Sdavidcs
1093250661Sdavidcs	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1094250661Sdavidcs		IFF_DRV_RUNNING) {
1095250661Sdavidcs		QL_DPRINT8(ha,
1096250661Sdavidcs			(ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1097250661Sdavidcs		QLA_TX_UNLOCK(ha);
1098250661Sdavidcs		return;
1099250661Sdavidcs	}
1100250661Sdavidcs
1101250661Sdavidcs	if (!ha->watchdog_ticks)
1102250661Sdavidcs		ql_update_link_state(ha);
1103250661Sdavidcs
1104250661Sdavidcs	if (!ha->hw.link_up) {
1105250661Sdavidcs		QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
1106250661Sdavidcs		QLA_TX_UNLOCK(ha);
1107250661Sdavidcs		return;
1108250661Sdavidcs	}
1109250661Sdavidcs
1110250661Sdavidcs	while (ifp->if_snd.ifq_head != NULL) {
1111250661Sdavidcs		IF_DEQUEUE(&ifp->if_snd, m_head);
1112250661Sdavidcs
1113250661Sdavidcs		if (m_head == NULL) {
1114250661Sdavidcs			QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
1115250661Sdavidcs				__func__));
1116250661Sdavidcs			break;
1117250661Sdavidcs		}
1118250661Sdavidcs
1119250661Sdavidcs		if (qla_send(ha, &m_head)) {
1120250661Sdavidcs			if (m_head == NULL)
1121250661Sdavidcs				break;
1122250661Sdavidcs			QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
1123250661Sdavidcs			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1124250661Sdavidcs			IF_PREPEND(&ifp->if_snd, m_head);
1125250661Sdavidcs			break;
1126250661Sdavidcs		}
1127250661Sdavidcs		/* Send a copy of the frame to the BPF listener */
1128250661Sdavidcs		ETHER_BPF_MTAP(ifp, m_head);
1129250661Sdavidcs	}
1130250661Sdavidcs	QLA_TX_UNLOCK(ha);
1131250661Sdavidcs	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
1132250661Sdavidcs	return;
1133250661Sdavidcs}
1134250661Sdavidcs
1135250661Sdavidcsstatic int
1136250661Sdavidcsqla_send(qla_host_t *ha, struct mbuf **m_headp)
1137250661Sdavidcs{
1138250661Sdavidcs	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
1139250661Sdavidcs	bus_dmamap_t		map;
1140250661Sdavidcs	int			nsegs;
1141250661Sdavidcs	int			ret = -1;
1142250661Sdavidcs	uint32_t		tx_idx;
1143250661Sdavidcs	struct mbuf		*m_head = *m_headp;
1144250661Sdavidcs	uint32_t		txr_idx = ha->txr_idx;
1145250661Sdavidcs
1146250661Sdavidcs	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
1147250661Sdavidcs
1148250661Sdavidcs	if (m_head->m_flags & M_FLOWID)
1149250661Sdavidcs		txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
1150250661Sdavidcs
1151250661Sdavidcs	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
1152250661Sdavidcs	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
1153250661Sdavidcs
1154250661Sdavidcs	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
1155250661Sdavidcs			BUS_DMA_NOWAIT);
1156250661Sdavidcs
1157250661Sdavidcs	if (ret == EFBIG) {
1158250661Sdavidcs
1159250661Sdavidcs		struct mbuf *m;
1160250661Sdavidcs
1161250661Sdavidcs		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
1162250661Sdavidcs			m_head->m_pkthdr.len));
1163250661Sdavidcs
1164250661Sdavidcs		m = m_defrag(m_head, M_NOWAIT);
1165250661Sdavidcs		if (m == NULL) {
1166250661Sdavidcs			ha->err_tx_defrag++;
1167250661Sdavidcs			m_freem(m_head);
1168250661Sdavidcs			*m_headp = NULL;
1169250661Sdavidcs			device_printf(ha->pci_dev,
1170250661Sdavidcs				"%s: m_defrag() = NULL [%d]\n",
1171250661Sdavidcs				__func__, ret);
1172250661Sdavidcs			return (ENOBUFS);
1173250661Sdavidcs		}
1174250661Sdavidcs		m_head = m;
1175250661Sdavidcs		*m_headp = m_head;
1176250661Sdavidcs
1177250661Sdavidcs		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
1178250661Sdavidcs					segs, &nsegs, BUS_DMA_NOWAIT))) {
1179250661Sdavidcs
1180250661Sdavidcs			ha->err_tx_dmamap_load++;
1181250661Sdavidcs
1182250661Sdavidcs			device_printf(ha->pci_dev,
1183250661Sdavidcs				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
1184250661Sdavidcs				__func__, ret, m_head->m_pkthdr.len);
1185250661Sdavidcs
1186250661Sdavidcs			if (ret != ENOMEM) {
1187250661Sdavidcs				m_freem(m_head);
1188250661Sdavidcs				*m_headp = NULL;
1189250661Sdavidcs			}
1190250661Sdavidcs			return (ret);
1191250661Sdavidcs		}
1192250661Sdavidcs
1193250661Sdavidcs	} else if (ret) {
1194250661Sdavidcs
1195250661Sdavidcs		ha->err_tx_dmamap_load++;
1196250661Sdavidcs
1197250661Sdavidcs		device_printf(ha->pci_dev,
1198250661Sdavidcs			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
1199250661Sdavidcs			__func__, ret, m_head->m_pkthdr.len);
1200250661Sdavidcs
1201250661Sdavidcs		if (ret != ENOMEM) {
1202250661Sdavidcs			m_freem(m_head);
1203250661Sdavidcs			*m_headp = NULL;
1204250661Sdavidcs		}
1205250661Sdavidcs		return (ret);
1206250661Sdavidcs	}
1207250661Sdavidcs
1208250661Sdavidcs	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));
1209250661Sdavidcs
1210250661Sdavidcs	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
1211250661Sdavidcs
1212250661Sdavidcs        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {
1213250661Sdavidcs
1214250661Sdavidcs		ha->tx_ring[txr_idx].count++;
1215250661Sdavidcs		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
1216250661Sdavidcs	} else {
1217250661Sdavidcs		if (ret == EINVAL) {
1218250661Sdavidcs			if (m_head)
1219250661Sdavidcs				m_freem(m_head);
1220250661Sdavidcs			*m_headp = NULL;
1221250661Sdavidcs		}
1222250661Sdavidcs	}
1223250661Sdavidcs
1224250661Sdavidcs	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
1225250661Sdavidcs	return (ret);
1226250661Sdavidcs}
1227250661Sdavidcs
1228250661Sdavidcsstatic void
1229250661Sdavidcsqla_stop(qla_host_t *ha)
1230250661Sdavidcs{
1231250661Sdavidcs	struct ifnet *ifp = ha->ifp;
1232250661Sdavidcs	device_t	dev;
1233250661Sdavidcs
1234250661Sdavidcs	dev = ha->pci_dev;
1235250661Sdavidcs
1236250661Sdavidcs	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
1237250661Sdavidcs
1238250661Sdavidcs	ha->flags.qla_watchdog_pause = 1;
1239250661Sdavidcs
1240250661Sdavidcs	while (!ha->qla_watchdog_paused)
1241250661Sdavidcs		qla_mdelay(__func__, 1);
1242250661Sdavidcs
1243250661Sdavidcs	ha->flags.stop_rcv = 1;
1244250661Sdavidcs	ql_hw_stop_rcv(ha);
1245250661Sdavidcs
1246250661Sdavidcs	ql_del_hw_if(ha);
1247250661Sdavidcs
1248250661Sdavidcs	qla_free_xmt_bufs(ha);
1249250661Sdavidcs	qla_free_rcv_bufs(ha);
1250250661Sdavidcs
1251250661Sdavidcs	return;
1252250661Sdavidcs}
1253250661Sdavidcs
1254250661Sdavidcs/*
1255250661Sdavidcs * Buffer Management Functions for Transmit and Receive Rings
1256250661Sdavidcs */
1257250661Sdavidcsstatic int
1258250661Sdavidcsqla_alloc_xmt_bufs(qla_host_t *ha)
1259250661Sdavidcs{
1260250661Sdavidcs	int ret = 0;
1261250661Sdavidcs	uint32_t i, j;
1262250661Sdavidcs	qla_tx_buf_t *txb;
1263250661Sdavidcs
1264250661Sdavidcs	if (bus_dma_tag_create(NULL,    /* parent */
1265250661Sdavidcs		1, 0,    /* alignment, bounds */
1266250661Sdavidcs		BUS_SPACE_MAXADDR,       /* lowaddr */
1267250661Sdavidcs		BUS_SPACE_MAXADDR,       /* highaddr */
1268250661Sdavidcs		NULL, NULL,      /* filter, filterarg */
1269250661Sdavidcs		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1270250661Sdavidcs		QLA_MAX_SEGMENTS,        /* nsegments */
1271250661Sdavidcs		PAGE_SIZE,        /* maxsegsize */
1272250661Sdavidcs		BUS_DMA_ALLOCNOW,        /* flags */
1273250661Sdavidcs		NULL,    /* lockfunc */
1274250661Sdavidcs		NULL,    /* lockfuncarg */
1275250661Sdavidcs		&ha->tx_tag)) {
1276250661Sdavidcs		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1277250661Sdavidcs			__func__);
1278250661Sdavidcs		return (ENOMEM);
1279250661Sdavidcs	}
1280250661Sdavidcs
1281250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1282250661Sdavidcs		bzero((void *)ha->tx_ring[i].tx_buf,
1283250661Sdavidcs			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1284250661Sdavidcs	}
1285250661Sdavidcs
1286250661Sdavidcs	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1287250661Sdavidcs		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1288250661Sdavidcs
1289250661Sdavidcs			txb = &ha->tx_ring[j].tx_buf[i];
1290250661Sdavidcs
1291250661Sdavidcs			if ((ret = bus_dmamap_create(ha->tx_tag,
1292250661Sdavidcs					BUS_DMA_NOWAIT, &txb->map))) {
1293250661Sdavidcs
1294250661Sdavidcs				ha->err_tx_dmamap_create++;
1295250661Sdavidcs				device_printf(ha->pci_dev,
1296250661Sdavidcs					"%s: bus_dmamap_create failed[%d]\n",
1297250661Sdavidcs					__func__, ret);
1298250661Sdavidcs
1299250661Sdavidcs				qla_free_xmt_bufs(ha);
1300250661Sdavidcs
1301250661Sdavidcs				return (ret);
1302250661Sdavidcs			}
1303250661Sdavidcs		}
1304250661Sdavidcs	}
1305250661Sdavidcs
1306250661Sdavidcs	return 0;
1307250661Sdavidcs}
1308250661Sdavidcs
1309250661Sdavidcs/*
1310250661Sdavidcs * Release mbuf after it sent on the wire
1311250661Sdavidcs */
1312250661Sdavidcsstatic void
1313250661Sdavidcsqla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1314250661Sdavidcs{
1315250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1316250661Sdavidcs
1317250661Sdavidcs	if (txb->m_head && txb->map) {
1318250661Sdavidcs
1319250661Sdavidcs		bus_dmamap_unload(ha->tx_tag, txb->map);
1320250661Sdavidcs
1321250661Sdavidcs		m_freem(txb->m_head);
1322250661Sdavidcs		txb->m_head = NULL;
1323250661Sdavidcs	}
1324250661Sdavidcs
1325250661Sdavidcs	if (txb->map)
1326250661Sdavidcs		bus_dmamap_destroy(ha->tx_tag, txb->map);
1327250661Sdavidcs
1328250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1329250661Sdavidcs}
1330250661Sdavidcs
1331250661Sdavidcsstatic void
1332250661Sdavidcsqla_free_xmt_bufs(qla_host_t *ha)
1333250661Sdavidcs{
1334250661Sdavidcs	int		i, j;
1335250661Sdavidcs
1336250661Sdavidcs	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1337250661Sdavidcs		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1338250661Sdavidcs			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1339250661Sdavidcs	}
1340250661Sdavidcs
1341250661Sdavidcs	if (ha->tx_tag != NULL) {
1342250661Sdavidcs		bus_dma_tag_destroy(ha->tx_tag);
1343250661Sdavidcs		ha->tx_tag = NULL;
1344250661Sdavidcs	}
1345250661Sdavidcs
1346250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1347250661Sdavidcs		bzero((void *)ha->tx_ring[i].tx_buf,
1348250661Sdavidcs			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1349250661Sdavidcs	}
1350250661Sdavidcs	return;
1351250661Sdavidcs}
1352250661Sdavidcs
1353250661Sdavidcs
1354250661Sdavidcsstatic int
1355250661Sdavidcsqla_alloc_rcv_std(qla_host_t *ha)
1356250661Sdavidcs{
1357250661Sdavidcs	int		i, j, k, r, ret = 0;
1358250661Sdavidcs	qla_rx_buf_t	*rxb;
1359250661Sdavidcs	qla_rx_ring_t	*rx_ring;
1360250661Sdavidcs
1361250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1362250661Sdavidcs
1363250661Sdavidcs		rx_ring = &ha->rx_ring[r];
1364250661Sdavidcs
1365250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1366250661Sdavidcs
1367250661Sdavidcs			rxb = &rx_ring->rx_buf[i];
1368250661Sdavidcs
1369250661Sdavidcs			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1370250661Sdavidcs					&rxb->map);
1371250661Sdavidcs
1372250661Sdavidcs			if (ret) {
1373250661Sdavidcs				device_printf(ha->pci_dev,
1374250661Sdavidcs					"%s: dmamap[%d, %d] failed\n",
1375250661Sdavidcs					__func__, r, i);
1376250661Sdavidcs
1377250661Sdavidcs				for (k = 0; k < r; k++) {
1378250661Sdavidcs					for (j = 0; j < NUM_RX_DESCRIPTORS;
1379250661Sdavidcs						j++) {
1380250661Sdavidcs						rxb = &ha->rx_ring[k].rx_buf[j];
1381250661Sdavidcs						bus_dmamap_destroy(ha->rx_tag,
1382250661Sdavidcs							rxb->map);
1383250661Sdavidcs					}
1384250661Sdavidcs				}
1385250661Sdavidcs
1386250661Sdavidcs				for (j = 0; j < i; j++) {
1387250661Sdavidcs					bus_dmamap_destroy(ha->rx_tag,
1388250661Sdavidcs						rx_ring->rx_buf[j].map);
1389250661Sdavidcs				}
1390250661Sdavidcs				goto qla_alloc_rcv_std_err;
1391250661Sdavidcs			}
1392250661Sdavidcs		}
1393250661Sdavidcs	}
1394250661Sdavidcs
1395250661Sdavidcs	qla_init_hw_rcv_descriptors(ha);
1396250661Sdavidcs
1397250661Sdavidcs
1398250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1399250661Sdavidcs
1400250661Sdavidcs		rx_ring = &ha->rx_ring[r];
1401250661Sdavidcs
1402250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1403250661Sdavidcs			rxb = &rx_ring->rx_buf[i];
1404250661Sdavidcs			rxb->handle = i;
1405250661Sdavidcs			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1406250661Sdavidcs				/*
1407250661Sdavidcs			 	 * set the physical address in the
1408250661Sdavidcs				 * corresponding descriptor entry in the
1409250661Sdavidcs				 * receive ring/queue for the hba
1410250661Sdavidcs				 */
1411250661Sdavidcs				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1412250661Sdavidcs					rxb->paddr,
1413250661Sdavidcs					(rxb->m_head)->m_pkthdr.len);
1414250661Sdavidcs			} else {
1415250661Sdavidcs				device_printf(ha->pci_dev,
1416250661Sdavidcs					"%s: ql_get_mbuf [%d, %d] failed\n",
1417250661Sdavidcs					__func__, r, i);
1418250661Sdavidcs				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1419250661Sdavidcs				goto qla_alloc_rcv_std_err;
1420250661Sdavidcs			}
1421250661Sdavidcs		}
1422250661Sdavidcs	}
1423250661Sdavidcs	return 0;
1424250661Sdavidcs
1425250661Sdavidcsqla_alloc_rcv_std_err:
1426250661Sdavidcs	return (-1);
1427250661Sdavidcs}
1428250661Sdavidcs
1429250661Sdavidcsstatic void
1430250661Sdavidcsqla_free_rcv_std(qla_host_t *ha)
1431250661Sdavidcs{
1432250661Sdavidcs	int		i, r;
1433250661Sdavidcs	qla_rx_buf_t	*rxb;
1434250661Sdavidcs
1435250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1436250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1437250661Sdavidcs			rxb = &ha->rx_ring[r].rx_buf[i];
1438250661Sdavidcs			if (rxb->m_head != NULL) {
1439250661Sdavidcs				bus_dmamap_unload(ha->rx_tag, rxb->map);
1440250661Sdavidcs				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1441250661Sdavidcs				m_freem(rxb->m_head);
1442250661Sdavidcs				rxb->m_head = NULL;
1443250661Sdavidcs			}
1444250661Sdavidcs		}
1445250661Sdavidcs	}
1446250661Sdavidcs	return;
1447250661Sdavidcs}
1448250661Sdavidcs
1449250661Sdavidcsstatic int
1450250661Sdavidcsqla_alloc_rcv_bufs(qla_host_t *ha)
1451250661Sdavidcs{
1452250661Sdavidcs	int		i, ret = 0;
1453250661Sdavidcs
1454250661Sdavidcs	if (bus_dma_tag_create(NULL,    /* parent */
1455250661Sdavidcs			1, 0,    /* alignment, bounds */
1456250661Sdavidcs			BUS_SPACE_MAXADDR,       /* lowaddr */
1457250661Sdavidcs			BUS_SPACE_MAXADDR,       /* highaddr */
1458250661Sdavidcs			NULL, NULL,      /* filter, filterarg */
1459250661Sdavidcs			MJUM9BYTES,     /* maxsize */
1460250661Sdavidcs			1,        /* nsegments */
1461250661Sdavidcs			MJUM9BYTES,        /* maxsegsize */
1462250661Sdavidcs			BUS_DMA_ALLOCNOW,        /* flags */
1463250661Sdavidcs			NULL,    /* lockfunc */
1464250661Sdavidcs			NULL,    /* lockfuncarg */
1465250661Sdavidcs			&ha->rx_tag)) {
1466250661Sdavidcs
1467250661Sdavidcs		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1468250661Sdavidcs			__func__);
1469250661Sdavidcs
1470250661Sdavidcs		return (ENOMEM);
1471250661Sdavidcs	}
1472250661Sdavidcs
1473250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1474250661Sdavidcs
1475250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1476250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1477250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1478250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1479250661Sdavidcs	}
1480250661Sdavidcs
1481250661Sdavidcs	ret = qla_alloc_rcv_std(ha);
1482250661Sdavidcs
1483250661Sdavidcs	return (ret);
1484250661Sdavidcs}
1485250661Sdavidcs
1486250661Sdavidcsstatic void
1487250661Sdavidcsqla_free_rcv_bufs(qla_host_t *ha)
1488250661Sdavidcs{
1489250661Sdavidcs	int		i;
1490250661Sdavidcs
1491250661Sdavidcs	qla_free_rcv_std(ha);
1492250661Sdavidcs
1493250661Sdavidcs	if (ha->rx_tag != NULL) {
1494250661Sdavidcs		bus_dma_tag_destroy(ha->rx_tag);
1495250661Sdavidcs		ha->rx_tag = NULL;
1496250661Sdavidcs	}
1497250661Sdavidcs
1498250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1499250661Sdavidcs
1500250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1501250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1502250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1503250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1504250661Sdavidcs	}
1505250661Sdavidcs
1506250661Sdavidcs	return;
1507250661Sdavidcs}
1508250661Sdavidcs
1509250661Sdavidcsint
1510250661Sdavidcsql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1511250661Sdavidcs{
1512250661Sdavidcs	register struct mbuf *mp = nmp;
1513250661Sdavidcs	struct ifnet   		*ifp;
1514250661Sdavidcs	int            		ret = 0;
1515250661Sdavidcs	uint32_t		offset;
1516250661Sdavidcs	bus_dma_segment_t	segs[1];
1517250661Sdavidcs	int			nsegs;
1518250661Sdavidcs
1519250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1520250661Sdavidcs
1521250661Sdavidcs	ifp = ha->ifp;
1522250661Sdavidcs
1523250661Sdavidcs	if (mp == NULL) {
1524250661Sdavidcs
1525250661Sdavidcs		mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1526250661Sdavidcs
1527250661Sdavidcs		if (mp == NULL) {
1528250661Sdavidcs			ha->err_m_getcl++;
1529250661Sdavidcs			ret = ENOBUFS;
1530250661Sdavidcs			device_printf(ha->pci_dev,
1531250661Sdavidcs					"%s: m_getcl failed\n", __func__);
1532250661Sdavidcs			goto exit_ql_get_mbuf;
1533250661Sdavidcs		}
1534250661Sdavidcs		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1535250661Sdavidcs	} else {
1536250661Sdavidcs		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1537250661Sdavidcs		mp->m_data = mp->m_ext.ext_buf;
1538250661Sdavidcs		mp->m_next = NULL;
1539250661Sdavidcs	}
1540250661Sdavidcs
1541250661Sdavidcs	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1542250661Sdavidcs	if (offset) {
1543250661Sdavidcs		offset = 8 - offset;
1544250661Sdavidcs		m_adj(mp, offset);
1545250661Sdavidcs	}
1546250661Sdavidcs
1547250661Sdavidcs	/*
1548250661Sdavidcs	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1549250661Sdavidcs	 * machinery to arrange the memory mapping.
1550250661Sdavidcs	 */
1551250661Sdavidcs	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
1552250661Sdavidcs			mp, segs, &nsegs, BUS_DMA_NOWAIT);
1553250661Sdavidcs	rxb->paddr = segs[0].ds_addr;
1554250661Sdavidcs
1555250661Sdavidcs	if (ret || !rxb->paddr || (nsegs != 1)) {
1556250661Sdavidcs		m_free(mp);
1557250661Sdavidcs		rxb->m_head = NULL;
1558250661Sdavidcs		device_printf(ha->pci_dev,
1559250661Sdavidcs			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
1560250661Sdavidcs			__func__, ret, (long long unsigned int)rxb->paddr,
1561250661Sdavidcs			nsegs);
1562250661Sdavidcs                ret = -1;
1563250661Sdavidcs		goto exit_ql_get_mbuf;
1564250661Sdavidcs	}
1565250661Sdavidcs	rxb->m_head = mp;
1566250661Sdavidcs	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1567250661Sdavidcs
1568250661Sdavidcsexit_ql_get_mbuf:
1569250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1570250661Sdavidcs	return (ret);
1571250661Sdavidcs}
1572250661Sdavidcs
1573250661Sdavidcsstatic void
1574250661Sdavidcsqla_tx_done(void *context, int pending)
1575250661Sdavidcs{
1576250661Sdavidcs	qla_host_t *ha = context;
1577250661Sdavidcs	struct ifnet   *ifp;
1578250661Sdavidcs
1579250661Sdavidcs	ifp = ha->ifp;
1580250661Sdavidcs
1581250661Sdavidcs	if (!ifp)
1582250661Sdavidcs		return;
1583250661Sdavidcs
1584250661Sdavidcs	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1585250661Sdavidcs		QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1586250661Sdavidcs		return;
1587250661Sdavidcs	}
1588250661Sdavidcs	ql_hw_tx_done(ha);
1589250661Sdavidcs
1590250661Sdavidcs	qla_start(ha->ifp);
1591250661Sdavidcs}
1592250661Sdavidcs
1593250661Sdavidcsstatic void
1594250661Sdavidcsqla_get_peer(qla_host_t *ha)
1595250661Sdavidcs{
1596250661Sdavidcs	device_t *peers;
1597250661Sdavidcs	int count, i, slot;
1598250661Sdavidcs	int my_slot = pci_get_slot(ha->pci_dev);
1599250661Sdavidcs
1600250661Sdavidcs	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
1601250661Sdavidcs		return;
1602250661Sdavidcs
1603250661Sdavidcs	for (i = 0; i < count; i++) {
1604250661Sdavidcs		slot = pci_get_slot(peers[i]);
1605250661Sdavidcs
1606250661Sdavidcs		if ((slot >= 0) && (slot == my_slot) &&
1607250661Sdavidcs			(pci_get_device(peers[i]) ==
1608250661Sdavidcs				pci_get_device(ha->pci_dev))) {
1609250661Sdavidcs			if (ha->pci_dev != peers[i])
1610250661Sdavidcs				ha->peer_dev = peers[i];
1611250661Sdavidcs		}
1612250661Sdavidcs	}
1613250661Sdavidcs}
1614250661Sdavidcs
1615250661Sdavidcsstatic void
1616250661Sdavidcsqla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
1617250661Sdavidcs{
1618250661Sdavidcs	qla_host_t *ha_peer;
1619250661Sdavidcs
1620250661Sdavidcs	if (ha->peer_dev) {
1621250661Sdavidcs        	if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
1622250661Sdavidcs
1623250661Sdavidcs			ha_peer->msg_from_peer = msg_to_peer;
1624250661Sdavidcs		}
1625250661Sdavidcs	}
1626250661Sdavidcs}
1627250661Sdavidcs
/*
 * qla_error_recovery
 *	Taskqueue handler that performs full adapter error recovery.  The
 *	two PCI functions of the adapter coordinate through the softc
 *	msg_from_peer field (see qla_send_msg_to_peer): the even-numbered
 *	function drives the reset, and each side waits for the other's
 *	QL_PEER_MSG_ACK before re-initializing the hardware.
 */
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	/* Loop bound for the peer-ACK waits below; with qla_mdelay()
	 * presumably delaying in milliseconds this is ~10s — TODO confirm. */
	uint32_t msecs_100 = 100;
	struct ifnet *ifp = ha->ifp;

	/* Quiesce the interface: stop receive processing and clear the
	 * RUNNING/OACTIVE flags under the adapter lock. */
        (void)QLA_LOCK(ha, __func__, 0);

        ha->flags.stop_rcv = 1;

        ql_hw_stop_rcv(ha);

        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

        QLA_UNLOCK(ha, __func__);

	/* Capture a firmware minidump for post-mortem analysis. */
	ql_minidump(ha);

	if ((ha->pci_func & 0x1) == 0) {

		/* Even function: initiate the reset handshake (unless the
		 * peer already signalled us) and wait for the peer's ACK. */
		if (!ha->msg_from_peer)
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
			qla_mdelay(__func__, 100);

		ha->msg_from_peer = 0;

		/* Re-initialize the hardware, then drop the old DMA buffers
		 * before telling the peer we are done. */
		(void) ql_init_hw(ha);
        	qla_free_xmt_bufs(ha);
	        qla_free_rcv_bufs(ha);

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		/* Odd function: either acknowledge a reset the peer already
		 * requested, or request one ourselves, then wait for ACK. */
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);
        	qla_free_xmt_bufs(ha);
	        qla_free_rcv_bufs(ha);
	}
	/* Re-allocate DMA buffers and bring the interface back up; bail out
	 * (leaving the interface down) if either allocation fails. */
        (void)QLA_LOCK(ha, __func__, 0);

	if (qla_alloc_xmt_bufs(ha) != 0) {
        	QLA_UNLOCK(ha, __func__);
                return;
	}

        if (qla_alloc_rcv_bufs(ha) != 0) {
        	QLA_UNLOCK(ha, __func__);
                return;
	}

        ha->flags.stop_rcv = 0;
        if (ql_init_hw_if(ha) == 0) {
                ifp = ha->ifp;
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                ha->flags.qla_watchdog_pause = 0;
        }

        QLA_UNLOCK(ha, __func__);
}
1703250661Sdavidcs
1704