1331722Seadler/*
2284741Sdavidcs * Copyright (c) 2013-2016 Qlogic Corporation
3250661Sdavidcs * All rights reserved.
4250661Sdavidcs *
5250661Sdavidcs *  Redistribution and use in source and binary forms, with or without
6250661Sdavidcs *  modification, are permitted provided that the following conditions
7250661Sdavidcs *  are met:
8250661Sdavidcs *
9250661Sdavidcs *  1. Redistributions of source code must retain the above copyright
10250661Sdavidcs *     notice, this list of conditions and the following disclaimer.
11250661Sdavidcs *  2. Redistributions in binary form must reproduce the above copyright
12250661Sdavidcs *     notice, this list of conditions and the following disclaimer in the
13250661Sdavidcs *     documentation and/or other materials provided with the distribution.
14250661Sdavidcs *
15250661Sdavidcs *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16250661Sdavidcs *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17250661Sdavidcs *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18250661Sdavidcs *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19250661Sdavidcs *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20250661Sdavidcs *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21250661Sdavidcs *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22250661Sdavidcs *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23250661Sdavidcs *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24250661Sdavidcs *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25250661Sdavidcs *  POSSIBILITY OF SUCH DAMAGE.
26250661Sdavidcs */
27250661Sdavidcs
28250661Sdavidcs/*
29250661Sdavidcs * File: ql_os.c
30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31250661Sdavidcs */
32250661Sdavidcs
33250661Sdavidcs#include <sys/cdefs.h>
34250661Sdavidcs__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_os.c 332052 2018-04-04 23:53:29Z davidcs $");
35250661Sdavidcs
36250661Sdavidcs
37250661Sdavidcs#include "ql_os.h"
38250661Sdavidcs#include "ql_hw.h"
39250661Sdavidcs#include "ql_def.h"
40250661Sdavidcs#include "ql_inline.h"
41250661Sdavidcs#include "ql_ver.h"
42250661Sdavidcs#include "ql_glbl.h"
43250661Sdavidcs#include "ql_dbg.h"
44250661Sdavidcs#include <sys/smp.h>
45250661Sdavidcs
/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

/*
 * Combined (device-id << 16 | vendor-id) value; matched against the
 * same composition of pci_get_device()/pci_get_vendor() in qla_pci_probe().
 */
#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
60250661Sdavidcs
/*
 * static functions
 */

/* DMA tag and Tx/Rx buffer allocation/teardown helpers */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

/* ifnet setup, sysctl handler, teardown and deferred-work task entry points */
static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
		uint32_t iscsi_pdu);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

/* multi-queue transmit path (one buf_ring + taskqueue per Tx ring) */
static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);
104313070Sdavidcs
/* newbus method table: only probe/attach/detach are implemented */
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

/* driver registration: softc is the per-device qla_host_t */
static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

/* default rx replenish thresholds for standard and jumbo rings */
#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32


/*
 * Device/driver description strings; filled in once by qla_pci_probe()
 * and exported read-only via sysctl / device description.
 */
static char dev_str[64];
static char ver_str[64];
132250661Sdavidcs
133250661Sdavidcs/*
134250661Sdavidcs * Name:	qla_pci_probe
135250661Sdavidcs * Function:	Validate the PCI device to be a QLA80XX device
136250661Sdavidcs */
137250661Sdavidcsstatic int
138250661Sdavidcsqla_pci_probe(device_t dev)
139250661Sdavidcs{
140250661Sdavidcs        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
141250661Sdavidcs        case PCI_QLOGIC_ISP8030:
142250661Sdavidcs		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
143250661Sdavidcs			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
144250661Sdavidcs			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
145250661Sdavidcs			QLA_VERSION_BUILD);
146284741Sdavidcs		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
147284741Sdavidcs			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
148284741Sdavidcs			QLA_VERSION_BUILD);
149250661Sdavidcs                device_set_desc(dev, dev_str);
150250661Sdavidcs                break;
151250661Sdavidcs        default:
152250661Sdavidcs                return (ENXIO);
153250661Sdavidcs        }
154250661Sdavidcs
155250661Sdavidcs        if (bootverbose)
156250661Sdavidcs                printf("%s: %s\n ", __func__, dev_str);
157250661Sdavidcs
158250661Sdavidcs        return (BUS_PROBE_DEFAULT);
159250661Sdavidcs}
160250661Sdavidcs
161250661Sdavidcsstatic void
162250661Sdavidcsqla_add_sysctls(qla_host_t *ha)
163250661Sdavidcs{
164250661Sdavidcs        device_t dev = ha->pci_dev;
165250661Sdavidcs
166284741Sdavidcs	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
167284741Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
168284741Sdavidcs		OID_AUTO, "version", CTLFLAG_RD,
169284741Sdavidcs		ver_str, 0, "Driver Version");
170284741Sdavidcs
171250661Sdavidcs        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
172250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
173250661Sdavidcs                OID_AUTO, "fw_version", CTLFLAG_RD,
174273377Shselasky                ha->fw_ver_str, 0, "firmware version");
175250661Sdavidcs
176250661Sdavidcs        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
177250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
178250661Sdavidcs                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
179250661Sdavidcs                (void *)ha, 0,
180250661Sdavidcs                qla_sysctl_get_link_status, "I", "Link Status");
181250661Sdavidcs
182250661Sdavidcs	ha->dbg_level = 0;
183250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
184250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
185250661Sdavidcs                OID_AUTO, "debug", CTLFLAG_RW,
186250661Sdavidcs                &ha->dbg_level, ha->dbg_level, "Debug Level");
187250661Sdavidcs
188324027Sdavidcs	ha->enable_minidump = 1;
189324027Sdavidcs	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
190324027Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
191324027Sdavidcs		OID_AUTO, "enable_minidump", CTLFLAG_RW,
192324027Sdavidcs		&ha->enable_minidump, ha->enable_minidump,
193330555Sdavidcs		"Minidump retrival prior to error recovery "
194330555Sdavidcs		"is enabled only when this is set");
195324027Sdavidcs
196330555Sdavidcs	ha->enable_driverstate_dump = 1;
197330555Sdavidcs	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
198330555Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
199330555Sdavidcs		OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
200330555Sdavidcs		&ha->enable_driverstate_dump, ha->enable_driverstate_dump,
201330555Sdavidcs		"Driver State retrival prior to error recovery "
202330555Sdavidcs		"is enabled only when this is set");
203330555Sdavidcs
204330555Sdavidcs	ha->enable_error_recovery = 1;
205330555Sdavidcs	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
206330555Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
207330555Sdavidcs		OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
208330555Sdavidcs		&ha->enable_error_recovery, ha->enable_error_recovery,
209330555Sdavidcs		"when set error recovery is enabled on fatal errors "
210330555Sdavidcs		"otherwise the port is turned offline");
211330555Sdavidcs
212330555Sdavidcs	ha->ms_delay_after_init = 1000;
213330555Sdavidcs	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
214330555Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
215330555Sdavidcs		OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
216330555Sdavidcs		&ha->ms_delay_after_init, ha->ms_delay_after_init,
217330555Sdavidcs		"millisecond delay after hw_init");
218330555Sdavidcs
219250661Sdavidcs	ha->std_replenish = QL_STD_REPLENISH_THRES;
220250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
221250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
222250661Sdavidcs                OID_AUTO, "std_replenish", CTLFLAG_RW,
223250661Sdavidcs                &ha->std_replenish, ha->std_replenish,
224250661Sdavidcs                "Threshold for Replenishing Standard Frames");
225250661Sdavidcs
226250661Sdavidcs        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
227250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
228250661Sdavidcs                OID_AUTO, "ipv4_lro",
229250661Sdavidcs                CTLFLAG_RD, &ha->ipv4_lro,
230250661Sdavidcs                "number of ipv4 lro completions");
231250661Sdavidcs
232250661Sdavidcs        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
233250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
234250661Sdavidcs                OID_AUTO, "ipv6_lro",
235250661Sdavidcs                CTLFLAG_RD, &ha->ipv6_lro,
236250661Sdavidcs                "number of ipv6 lro completions");
237250661Sdavidcs
238250661Sdavidcs	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
239250661Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
240250661Sdavidcs		OID_AUTO, "tx_tso_frames",
241250661Sdavidcs		CTLFLAG_RD, &ha->tx_tso_frames,
242250661Sdavidcs		"number of Tx TSO Frames");
243250661Sdavidcs
244250661Sdavidcs	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
245250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
246250661Sdavidcs		OID_AUTO, "hw_vlan_tx_frames",
247250661Sdavidcs		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
248250661Sdavidcs		"number of Tx VLAN Frames");
249250661Sdavidcs
250322972Sdavidcs	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
251322972Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
252322972Sdavidcs		OID_AUTO, "hw_lock_failed",
253322972Sdavidcs		CTLFLAG_RD, &ha->hw_lock_failed,
254322972Sdavidcs		"number of hw_lock failures");
255322972Sdavidcs
256250661Sdavidcs        return;
257250661Sdavidcs}
258250661Sdavidcs
259250661Sdavidcsstatic void
260250661Sdavidcsqla_watchdog(void *arg)
261250661Sdavidcs{
262250661Sdavidcs	qla_host_t *ha = arg;
263250661Sdavidcs	qla_hw_t *hw;
264250661Sdavidcs	struct ifnet *ifp;
265250661Sdavidcs
266250661Sdavidcs	hw = &ha->hw;
267250661Sdavidcs	ifp = ha->ifp;
268250661Sdavidcs
269322972Sdavidcs        if (ha->qla_watchdog_exit) {
270250661Sdavidcs		ha->qla_watchdog_exited = 1;
271250661Sdavidcs		return;
272250661Sdavidcs	}
273250661Sdavidcs	ha->qla_watchdog_exited = 0;
274250661Sdavidcs
275322972Sdavidcs	if (!ha->qla_watchdog_pause) {
276330555Sdavidcs                if (!ha->offline &&
277330555Sdavidcs                        (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
278330555Sdavidcs                        (ha->msg_from_peer == QL_PEER_MSG_RESET))) {
279284741Sdavidcs
280330555Sdavidcs	        	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
281330555Sdavidcs			ql_update_link_state(ha);
282330555Sdavidcs
283330555Sdavidcs			if (ha->enable_error_recovery) {
284322972Sdavidcs				ha->qla_watchdog_paused = 1;
285322972Sdavidcs				ha->qla_watchdog_pause = 1;
286322972Sdavidcs				ha->err_inject = 0;
287322972Sdavidcs				device_printf(ha->pci_dev,
288322972Sdavidcs					"%s: taskqueue_enqueue(err_task) \n",
289322972Sdavidcs					__func__);
290322972Sdavidcs				taskqueue_enqueue(ha->err_tq, &ha->err_task);
291330555Sdavidcs			} else {
292330555Sdavidcs				if (ifp != NULL)
293330555Sdavidcs					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
294330555Sdavidcs				ha->offline = 1;
295322972Sdavidcs			}
296330555Sdavidcs			return;
297322972Sdavidcs
298330555Sdavidcs		} else {
299330555Sdavidcs			if (ha->qla_interface_up) {
300322972Sdavidcs
301330555Sdavidcs				ha->watchdog_ticks++;
302322972Sdavidcs
303330555Sdavidcs				if (ha->watchdog_ticks > 1000)
304330555Sdavidcs					ha->watchdog_ticks = 0;
305322972Sdavidcs
306330555Sdavidcs				if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
307330555Sdavidcs					taskqueue_enqueue(ha->stats_tq,
308330555Sdavidcs						&ha->stats_task);
309330555Sdavidcs				}
310322972Sdavidcs
311330555Sdavidcs				if (ha->async_event) {
312330555Sdavidcs					taskqueue_enqueue(ha->async_event_tq,
313330555Sdavidcs						&ha->async_event_task);
314330555Sdavidcs				}
315284741Sdavidcs
316250661Sdavidcs			}
317250661Sdavidcs			ha->qla_watchdog_paused = 0;
318250661Sdavidcs		}
319250661Sdavidcs	} else {
320250661Sdavidcs		ha->qla_watchdog_paused = 1;
321250661Sdavidcs	}
322250661Sdavidcs
323250661Sdavidcs	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
324250661Sdavidcs		qla_watchdog, ha);
325250661Sdavidcs}
326250661Sdavidcs
/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 *
 * Sequence: map BARs, init locks/sysctls, size the rings from the
 * available MSI-X vectors and NIC partition info, init hardware,
 * set up interrupts and per-ring Tx resources, allocate DMA buffers,
 * create the ifnet, the ioctl cdev, the watchdog callout and the
 * error/async/stats taskqueues.  On any failure jumps to
 * qla_pci_attach_err which releases everything acquired so far.
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

	/* only the low bit of the PCI function number is kept */
        ha->pci_func = pci_get_function(dev) & 0x1;

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	/* map BAR0 (device register window) */
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	qla_add_sysctls(ha);

	/* start from the maximum ring counts; trimmed below */
	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	/*
	 * Map BAR2.
	 * NOTE(review): ha->pci_reg1 is not checked for NULL here,
	 * unlike pci_reg above — confirm whether BAR2 is optional.
	 */
	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < 1 ) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	/* one vector is reserved for the mailbox interrupt */
	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		ha->hw.num_sds_rings = ha->msix_count - 1;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq);

	/* cap sds rings at 15 on small-vector / non-32-queue partitions */
        if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
		if (ha->hw.num_sds_rings > 15) {
                	ha->hw.num_sds_rings = 15;
		}
        }

	/* rds and tx ring counts track the (possibly trimmed) sds count */
	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
	ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
	/* second set of tx rings for iSCSI TLV traffic */
	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	ql_hw_add_sysctls(ha);

	/* sds rings + 1 mailbox vector */
	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	/* vector/rid 1: mailbox interrupt */
	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	/* rids 2..N: one interrupt per sds ring, plus the matching Tx ring */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;

	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	ql_alloc_drvr_state_buffer(ha);
	ql_alloc_sp_log_buffer(ha);
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	/* arm the watchdog */
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	/* single-threaded taskqueues for error recovery, async events, stats */
	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
        ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->stats_tq);
        taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	/* releases everything acquired above; locks destroyed last */
	qla_release(ha);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}
571250661Sdavidcs
/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 *
 * Marks the interface down, stops the hardware under the driver lock,
 * releases all resources via qla_release(), then destroys the locks.
 * Assumes attach completed successfully (ha->ifp is dereferenced
 * without a NULL check).
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;


        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	/* NOTE(review): QLA_LOCK return value ignored; -1 appears to mean
	 * "wait indefinitely" — confirm against the macro definition */
	QLA_LOCK(ha, __func__, -1, 0);

	/* flag consulted elsewhere to short-circuit work during detach */
	ha->qla_detach_active = 1;
	qla_stop(ha);

	qla_release(ha);

	QLA_UNLOCK(ha, __func__);

	/* locks are destroyed only after all users have been torn down */
	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}
611250661Sdavidcs
612250661Sdavidcs/*
613250661Sdavidcs * SYSCTL Related Callbacks
614250661Sdavidcs */
615250661Sdavidcsstatic int
616250661Sdavidcsqla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
617250661Sdavidcs{
618250661Sdavidcs	int err, ret = 0;
619250661Sdavidcs	qla_host_t *ha;
620250661Sdavidcs
621250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
622250661Sdavidcs
623250661Sdavidcs	if (err || !req->newptr)
624250661Sdavidcs		return (err);
625250661Sdavidcs
626250661Sdavidcs	if (ret == 1) {
627250661Sdavidcs		ha = (qla_host_t *)arg1;
628250661Sdavidcs		ql_hw_link_status(ha);
629250661Sdavidcs	}
630250661Sdavidcs	return (err);
631250661Sdavidcs}
632250661Sdavidcs
/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 *
 * Teardown order matters: taskqueues are drained and freed first so no
 * deferred work can touch resources freed later; then the watchdog is
 * stopped via a flag handshake, the ifnet detached, DMA memory freed,
 * interrupts torn down and finally the MSI-X vectors and BARs released.
 * Safe to call from the attach error path with partially-initialized
 * state (every step is guarded by a non-NULL / non-zero check).
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

        if (ha->async_event_tq) {
                taskqueue_drain_all(ha->async_event_tq);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain_all(ha->err_tq);
		taskqueue_free(ha->err_tq);
	}

	if (ha->stats_tq) {
		taskqueue_drain_all(ha->stats_tq);
		taskqueue_free(ha->stats_tq);
	}

	ql_del_cdev(ha);

	/* handshake with qla_watchdog(): request exit, wait for its ack */
	if (ha->flags.qla_watchdog_active) {
		ha->qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_drvr_state_buffer(ha);
	ql_free_sp_log_buffer(ha);
	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	/* mailbox interrupt teardown */
	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	/* per-ring interrupt and Tx buf_ring teardown */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);

	return;
}
717250661Sdavidcs
718250661Sdavidcs/*
719250661Sdavidcs * DMA Related Functions
720250661Sdavidcs */
721250661Sdavidcs
722250661Sdavidcsstatic void
723250661Sdavidcsqla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
724250661Sdavidcs{
725250661Sdavidcs        *((bus_addr_t *)arg) = 0;
726250661Sdavidcs
727250661Sdavidcs        if (error) {
728250661Sdavidcs                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
729250661Sdavidcs                return;
730250661Sdavidcs	}
731250661Sdavidcs
732250661Sdavidcs        *((bus_addr_t *)arg) = segs[0].ds_addr;
733250661Sdavidcs
734250661Sdavidcs	return;
735250661Sdavidcs}
736250661Sdavidcs
737250661Sdavidcsint
738250661Sdavidcsql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
739250661Sdavidcs{
740250661Sdavidcs        int             ret = 0;
741250661Sdavidcs        device_t        dev;
742250661Sdavidcs        bus_addr_t      b_addr;
743250661Sdavidcs
744250661Sdavidcs        dev = ha->pci_dev;
745250661Sdavidcs
746250661Sdavidcs        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
747250661Sdavidcs
748250661Sdavidcs        ret = bus_dma_tag_create(
749250661Sdavidcs                        ha->parent_tag,/* parent */
750250661Sdavidcs                        dma_buf->alignment,
751250661Sdavidcs                        ((bus_size_t)(1ULL << 32)),/* boundary */
752250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* lowaddr */
753250661Sdavidcs                        BUS_SPACE_MAXADDR,      /* highaddr */
754250661Sdavidcs                        NULL, NULL,             /* filter, filterarg */
755250661Sdavidcs                        dma_buf->size,          /* maxsize */
756250661Sdavidcs                        1,                      /* nsegments */
757250661Sdavidcs                        dma_buf->size,          /* maxsegsize */
758250661Sdavidcs                        0,                      /* flags */
759250661Sdavidcs                        NULL, NULL,             /* lockfunc, lockarg */
760250661Sdavidcs                        &dma_buf->dma_tag);
761250661Sdavidcs
762250661Sdavidcs        if (ret) {
763250661Sdavidcs                device_printf(dev, "%s: could not create dma tag\n", __func__);
764250661Sdavidcs                goto ql_alloc_dmabuf_exit;
765250661Sdavidcs        }
766250661Sdavidcs        ret = bus_dmamem_alloc(dma_buf->dma_tag,
767250661Sdavidcs                        (void **)&dma_buf->dma_b,
768250661Sdavidcs                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
769250661Sdavidcs                        &dma_buf->dma_map);
770250661Sdavidcs        if (ret) {
771250661Sdavidcs                bus_dma_tag_destroy(dma_buf->dma_tag);
772250661Sdavidcs                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
773250661Sdavidcs                goto ql_alloc_dmabuf_exit;
774250661Sdavidcs        }
775250661Sdavidcs
776250661Sdavidcs        ret = bus_dmamap_load(dma_buf->dma_tag,
777250661Sdavidcs                        dma_buf->dma_map,
778250661Sdavidcs                        dma_buf->dma_b,
779250661Sdavidcs                        dma_buf->size,
780250661Sdavidcs                        qla_dmamap_callback,
781250661Sdavidcs                        &b_addr, BUS_DMA_NOWAIT);
782250661Sdavidcs
783250661Sdavidcs        if (ret || !b_addr) {
784250661Sdavidcs                bus_dma_tag_destroy(dma_buf->dma_tag);
785250661Sdavidcs                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
786250661Sdavidcs                        dma_buf->dma_map);
787250661Sdavidcs                ret = -1;
788250661Sdavidcs                goto ql_alloc_dmabuf_exit;
789250661Sdavidcs        }
790250661Sdavidcs
791250661Sdavidcs        dma_buf->dma_addr = b_addr;
792250661Sdavidcs
793250661Sdavidcsql_alloc_dmabuf_exit:
794250661Sdavidcs        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
795250661Sdavidcs                __func__, ret, (void *)dma_buf->dma_tag,
796250661Sdavidcs                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
797250661Sdavidcs		dma_buf->size));
798250661Sdavidcs
799250661Sdavidcs        return ret;
800250661Sdavidcs}
801250661Sdavidcs
/*
 * Name: ql_free_dmabuf
 * Function: Releases a DMA buffer allocated via ql_alloc_dmabuf().
 *	The order matters: unload the map, free the DMA memory back to
 *	the tag, then destroy the tag (the reverse of allocation).
 */
void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}
809250661Sdavidcs
/*
 * Name: qla_alloc_parent_dma_tag
 * Function: Creates the parent bus_dma tag from which all of the driver's
 *	per-buffer tags are derived (see ql_alloc_dmabuf()). Restricts all
 *	children to 32-bit addressable, 4GB-boundary-safe DMA.
 * Returns: 0 on success, -1 if the tag could not be created. On success
 *	ha->flags.parent_tag is set so qla_free_parent_dma_tag() knows to
 *	destroy the tag.
 */
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}
844250661Sdavidcs
845250661Sdavidcsstatic void
846250661Sdavidcsqla_free_parent_dma_tag(qla_host_t *ha)
847250661Sdavidcs{
848250661Sdavidcs        if (ha->flags.parent_tag) {
849250661Sdavidcs                bus_dma_tag_destroy(ha->parent_tag);
850250661Sdavidcs                ha->flags.parent_tag = 0;
851250661Sdavidcs        }
852250661Sdavidcs}
853250661Sdavidcs
854250661Sdavidcs/*
855250661Sdavidcs * Name: qla_init_ifnet
856250661Sdavidcs * Function: Creates the Network Device Interface and Registers it with the O.S
857250661Sdavidcs */
858250661Sdavidcs
static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* 10GbE link; starts at the standard Ethernet MTU. */
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;
	ifp->if_mtu = ETHERMTU;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;

	/* Multiqueue transmit entry points (no if_start/legacy path). */
	ifp->if_transmit = qla_transmit;
	ifp->if_qflush = qla_qflush;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	/*
	 * Offload capabilities advertised after ether_ifattach();
	 * everything advertised is enabled by default.
	 */
	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_TSO6 |
				IFCAP_JUMBO_MTU |
				IFCAP_VLAN_HWTAGGING |
				IFCAP_VLAN_MTU |
				IFCAP_VLAN_HWTSO |
				IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Register fixed media (device optics + full duplex) and auto. */
	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}
918250661Sdavidcs
/*
 * Name: qla_init_locked
 * Function: (Re)initializes the interface: stops it, reallocates transmit
 *	and receive buffers, reprograms the MAC address and reinitializes
 *	the hardware interface. Caller must hold the driver lock (QLA_LOCK).
 *
 * NOTE(review): the early-return error paths below leave the function
 * before qla_watchdog_pause is cleared and before IFF_DRV_RUNNING is set,
 * so a buffer-allocation failure leaves the interface down with the
 * watchdog still paused — confirm this is the intended failure behavior.
 */
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	/* Record this init event in the driver's slow-path event log. */
	ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	/* Pick up a possibly administratively-changed MAC address. */
	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;

	/* Allow receive processing before bringing up the hw interface. */
	ha->stop_rcv = 0;
 	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->qla_interface_up = 1;
		ql_update_link_state(ha);
	} else {
		/* Optionally freeze the slow-path log on if-start failure. */
		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
			ha->hw.sp_log_stop = -1;
	}

	/* Let the watchdog run again regardless of hw-if init outcome. */
	ha->qla_watchdog_pause = 0;

	return;
}
958250661Sdavidcs
959250661Sdavidcsstatic void
960250661Sdavidcsqla_init(void *arg)
961250661Sdavidcs{
962250661Sdavidcs	qla_host_t *ha;
963250661Sdavidcs
964250661Sdavidcs	ha = (qla_host_t *)arg;
965250661Sdavidcs
966250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
967250661Sdavidcs
968322972Sdavidcs	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
969322972Sdavidcs		return;
970322972Sdavidcs
971250661Sdavidcs	qla_init_locked(ha);
972250661Sdavidcs
973322972Sdavidcs	QLA_UNLOCK(ha, __func__);
974322972Sdavidcs
975250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
976250661Sdavidcs}
977250661Sdavidcs
/*
 * Name: qla_set_multi
 * Function: Snapshots the interface's multicast address list (capped at
 *	Q8_MAX_NUM_MULTICAST_ADDRS entries) and programs it into the
 *	hardware. When add_multi is 0 (SIOCDELMULTI) the hardware list is
 *	first cleared, then reprogrammed from the snapshot.
 * Returns: 0 on success, -1 if the driver lock could not be taken,
 *	otherwise the hardware call's error.
 */
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	/* Copy the list under the maddr lock; program hw after dropping it. */
	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		/* Silently truncate past the hardware limit. */
		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
		QLA_LOCK_NO_SLEEP) != 0)
		return (-1);

	ql_sp_log(ha, 12, 4, ifp->if_drv_flags,
		(ifp->if_drv_flags & IFF_DRV_RUNNING),
		add_multi, (uint32_t)mcnt, 0);

	/* Only touch the hardware while the interface is running. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

		if (!add_multi) {
			ret = qla_hw_del_all_mcast(ha);

			if (ret)
				device_printf(ha->pci_dev,
					"%s: qla_hw_del_all_mcast() failed\n",
				__func__);
		}

		if (!ret)
			ret = ql_hw_set_multi(ha, mta, mcnt, 1);

	}

	QLA_UNLOCK(ha, __func__);

	return (ret);
}
1033250661Sdavidcs
/*
 * Name: qla_ioctl
 * Function: ifnet ioctl handler. Address/MTU/flags/capability changes that
 *	touch the hardware are serialized via QLA_LOCK (non-sleeping, with
 *	a default timeout); a lock failure aborts the operation with the
 *	lock's error code. While the adapter is offline or a recovery has
 *	been initiated, all ioctls return 0 without doing anything.
 */
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;
	/* Silently accept (no-op) while offline or recovering. */
	if (ha->offline || ha->qla_initiate_recovery)
		return (ret);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {

			ret = QLA_LOCK(ha, __func__,
					QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);
			if (ret)
				break;

			ifp->if_flags |= IFF_UP;

			ql_sp_log(ha, 8, 3, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);

			/* Bring the interface up if it isn't running yet. */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				qla_init_locked(ha);
			}

			QLA_UNLOCK(ha, __func__);
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			/* Non-IPv4 addresses follow the generic path. */
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			ql_sp_log(ha, 9, 4, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				ha->max_frame_size, ifp->if_mtu, 0);

			/* Reinitialize so the new frame size takes effect. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				qla_init_locked(ha);
			}

			/* Jumbo MTUs use a deeper rx replenish threshold. */
			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;


			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

		if (ret)
			break;

		ql_sp_log(ha, 10, 4, ifp->if_drv_flags,
			(ifp->if_drv_flags & IFF_DRV_RUNNING),
			ha->if_flags, ifp->if_flags, 0);

		if (ifp->if_flags & IFF_UP) {

			ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
			qla_init_locked(ha);

			/* Apply promisc/allmulti deltas vs. cached flags. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (qla_set_multi(ha, 1))
			ret = EINVAL;
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (qla_set_multi(ha, 0))
			ret = EINVAL;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		/* Toggle each supported capability requested to change. */
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		/* A running interface is reinitialized to apply the change. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ql_sp_log(ha, 11, 4, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				mask, ifp->if_capenable, 0);

			qla_init_locked(ha);

			QLA_UNLOCK(ha, __func__);

		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
1228250661Sdavidcs
1229250661Sdavidcsstatic int
1230250661Sdavidcsqla_media_change(struct ifnet *ifp)
1231250661Sdavidcs{
1232250661Sdavidcs	qla_host_t *ha;
1233250661Sdavidcs	struct ifmedia *ifm;
1234250661Sdavidcs	int ret = 0;
1235250661Sdavidcs
1236250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1237250661Sdavidcs
1238250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1239250661Sdavidcs
1240250661Sdavidcs	ifm = &ha->media;
1241250661Sdavidcs
1242250661Sdavidcs	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1243250661Sdavidcs		ret = EINVAL;
1244250661Sdavidcs
1245250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1246250661Sdavidcs
1247250661Sdavidcs	return (ret);
1248250661Sdavidcs}
1249250661Sdavidcs
1250250661Sdavidcsstatic void
1251250661Sdavidcsqla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1252250661Sdavidcs{
1253250661Sdavidcs	qla_host_t *ha;
1254250661Sdavidcs
1255250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1256250661Sdavidcs
1257250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1258250661Sdavidcs
1259250661Sdavidcs	ifmr->ifm_status = IFM_AVALID;
1260250661Sdavidcs	ifmr->ifm_active = IFM_ETHER;
1261250661Sdavidcs
1262250661Sdavidcs	ql_update_link_state(ha);
1263250661Sdavidcs	if (ha->hw.link_up) {
1264250661Sdavidcs		ifmr->ifm_status |= IFM_ACTIVE;
1265250661Sdavidcs		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1266250661Sdavidcs	}
1267250661Sdavidcs
1268250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
1269250661Sdavidcs		(ha->hw.link_up ? "link_up" : "link_down")));
1270250661Sdavidcs
1271250661Sdavidcs	return;
1272250661Sdavidcs}
1273250661Sdavidcs
1274250661Sdavidcs
/*
 * Name: qla_send
 * Function: DMA-maps the mbuf chain *m_headp and hands it to the hardware
 *	for transmit on ring txr_idx (iscsi_pdu selects the iSCSI TLV path
 *	in ql_hw_send()). On EFBIG the chain is defragmented once and the
 *	mapping retried.
 * Returns: 0 on success (the mbuf is now owned by the tx ring and freed at
 *	completion time); non-zero on error. On errors that free the mbuf,
 *	*m_headp is set to NULL; on ENOMEM the caller keeps ownership and
 *	may retry.
 */
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
	uint32_t iscsi_pdu)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;

	/*
	 * A non-NULL mbuf in the next tx slot means ring bookkeeping is
	 * corrupt (or error injection is active): drop the frame and
	 * initiate adapter recovery.
	 */
	if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) ||
		(QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))){
		QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\
			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));

		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head);

		if (m_head)
			m_freem(m_head);
		*m_headp = NULL;
		QL_INITIATE_RECOVERY(ha);
		return (ret);
	}

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		/* Too many segments: collapse the chain and retry once. */
		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			/* ENOMEM is retryable: leave the mbuf with caller. */
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		/* ENOMEM is retryable: leave the mbuf with caller. */
		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		/* Success: stash the mbuf for freeing at tx completion. */
		ha->tx_ring[txr_idx].count++;
		if (iscsi_pdu)
			ha->tx_ring[txr_idx].iscsi_pkt_count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		bus_dmamap_unload(ha->tx_tag, map);
		/* EINVAL means the frame can never be sent: free it. */
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}
1385250661Sdavidcs
/*
 * Name: qla_alloc_tx_br
 * Function: Initializes the per-fastpath transmit mutex and allocates the
 *	buf_ring used to stage outbound mbufs for this tx queue.
 * Returns: 0 on success, -ENOMEM if the buf_ring allocation fails.
 *	Note the negative-errno convention here (unlike most of this file).
 *	On failure the mutex is left initialized; qla_free_tx_br() checks
 *	mtx_initialized() and will destroy it.
 */
static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

        fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
                                   M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL) {
            QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
                " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
            return (-ENOMEM);
        }
        return 0;
}
1403313070Sdavidcs
1404250661Sdavidcsstatic void
1405313070Sdavidcsqla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
1406313070Sdavidcs{
1407313070Sdavidcs        struct mbuf *mp;
1408313070Sdavidcs        struct ifnet *ifp = ha->ifp;
1409313070Sdavidcs
1410313070Sdavidcs        if (mtx_initialized(&fp->tx_mtx)) {
1411313070Sdavidcs
1412313070Sdavidcs                if (fp->tx_br != NULL) {
1413313070Sdavidcs
1414313070Sdavidcs                        mtx_lock(&fp->tx_mtx);
1415313070Sdavidcs
1416313070Sdavidcs                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1417313070Sdavidcs                                m_freem(mp);
1418313070Sdavidcs                        }
1419313070Sdavidcs
1420313070Sdavidcs                        mtx_unlock(&fp->tx_mtx);
1421313070Sdavidcs
1422313070Sdavidcs                        buf_ring_free(fp->tx_br, M_DEVBUF);
1423313070Sdavidcs                        fp->tx_br = NULL;
1424313070Sdavidcs                }
1425313070Sdavidcs                mtx_destroy(&fp->tx_mtx);
1426313070Sdavidcs        }
1427313070Sdavidcs        return;
1428313070Sdavidcs}
1429313070Sdavidcs
1430313070Sdavidcsstatic void
1431313070Sdavidcsqla_fp_taskqueue(void *context, int pending)
1432313070Sdavidcs{
1433313070Sdavidcs        qla_tx_fp_t *fp;
1434313070Sdavidcs        qla_host_t *ha;
1435313070Sdavidcs        struct ifnet *ifp;
1436332052Sdavidcs        struct mbuf  *mp = NULL;
1437332052Sdavidcs        int ret = 0;
1438313070Sdavidcs	uint32_t txr_idx;
1439313070Sdavidcs	uint32_t iscsi_pdu = 0;
1440322972Sdavidcs	uint32_t rx_pkts_left = -1;
1441313070Sdavidcs
1442313070Sdavidcs        fp = context;
1443313070Sdavidcs
1444313070Sdavidcs        if (fp == NULL)
1445313070Sdavidcs                return;
1446313070Sdavidcs
1447313070Sdavidcs        ha = (qla_host_t *)fp->ha;
1448313070Sdavidcs
1449313070Sdavidcs        ifp = ha->ifp;
1450313070Sdavidcs
1451313070Sdavidcs	txr_idx = fp->txr_idx;
1452313070Sdavidcs
1453313070Sdavidcs        mtx_lock(&fp->tx_mtx);
1454313070Sdavidcs
1455322972Sdavidcs        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
1456313070Sdavidcs                mtx_unlock(&fp->tx_mtx);
1457313070Sdavidcs                goto qla_fp_taskqueue_exit;
1458313070Sdavidcs        }
1459313070Sdavidcs
1460324029Sdavidcs	while (rx_pkts_left && !ha->stop_rcv &&
1461332052Sdavidcs		(ifp->if_drv_flags & IFF_DRV_RUNNING) && ha->hw.link_up) {
1462322972Sdavidcs		rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);
1463313070Sdavidcs
1464313070Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV
1465322972Sdavidcs		ql_hw_tx_done_locked(ha, fp->txr_idx);
1466322972Sdavidcs		ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
1467313070Sdavidcs#else
1468322972Sdavidcs		ql_hw_tx_done_locked(ha, fp->txr_idx);
1469313070Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1470313070Sdavidcs
1471322972Sdavidcs		mp = drbr_peek(ifp, fp->tx_br);
1472313070Sdavidcs
1473322972Sdavidcs        	while (mp != NULL) {
1474313070Sdavidcs
1475322972Sdavidcs			if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
1476313070Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV
1477322972Sdavidcs				if (ql_iscsi_pdu(ha, mp) == 0) {
1478322972Sdavidcs					txr_idx = txr_idx +
1479322972Sdavidcs						(ha->hw.num_tx_rings >> 1);
1480322972Sdavidcs					iscsi_pdu = 1;
1481322972Sdavidcs				} else {
1482322972Sdavidcs					iscsi_pdu = 0;
1483322972Sdavidcs					txr_idx = fp->txr_idx;
1484322972Sdavidcs				}
1485322972Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
1486313070Sdavidcs			}
1487313070Sdavidcs
1488322972Sdavidcs			ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);
1489313070Sdavidcs
1490322972Sdavidcs			if (ret) {
1491322972Sdavidcs				if (mp != NULL)
1492322972Sdavidcs					drbr_putback(ifp, fp->tx_br, mp);
1493322972Sdavidcs				else {
1494322972Sdavidcs					drbr_advance(ifp, fp->tx_br);
1495322972Sdavidcs				}
1496313070Sdavidcs
1497322972Sdavidcs				mtx_unlock(&fp->tx_mtx);
1498313070Sdavidcs
1499322972Sdavidcs				goto qla_fp_taskqueue_exit0;
1500322972Sdavidcs			} else {
1501322972Sdavidcs				drbr_advance(ifp, fp->tx_br);
1502322972Sdavidcs			}
1503313070Sdavidcs
1504324029Sdavidcs			/* Send a copy of the frame to the BPF listener */
1505324029Sdavidcs			ETHER_BPF_MTAP(ifp, mp);
1506332052Sdavidcs
1507332052Sdavidcs			if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
1508332052Sdavidcs				(!ha->hw.link_up))
1509324029Sdavidcs				break;
1510324029Sdavidcs
1511322972Sdavidcs			mp = drbr_peek(ifp, fp->tx_br);
1512322972Sdavidcs		}
1513322972Sdavidcs	}
1514313070Sdavidcs        mtx_unlock(&fp->tx_mtx);
1515313070Sdavidcs
1516332052Sdavidcs	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1517332052Sdavidcs		goto qla_fp_taskqueue_exit;
1518332052Sdavidcs
1519313070Sdavidcsqla_fp_taskqueue_exit0:
1520313070Sdavidcs
1521313070Sdavidcs	if (rx_pkts_left || ((mp != NULL) && ret)) {
1522313070Sdavidcs		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1523313070Sdavidcs	} else {
1524322972Sdavidcs		if (!ha->stop_rcv) {
1525313070Sdavidcs			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
1526313070Sdavidcs		}
1527313070Sdavidcs	}
1528313070Sdavidcs
1529313070Sdavidcsqla_fp_taskqueue_exit:
1530313070Sdavidcs
1531313070Sdavidcs        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1532313070Sdavidcs        return;
1533313070Sdavidcs}
1534313070Sdavidcs
1535313070Sdavidcsstatic int
1536313070Sdavidcsqla_create_fp_taskqueues(qla_host_t *ha)
1537313070Sdavidcs{
1538313070Sdavidcs        int     i;
1539313070Sdavidcs        uint8_t tq_name[32];
1540313070Sdavidcs
1541313070Sdavidcs        for (i = 0; i < ha->hw.num_sds_rings; i++) {
1542313070Sdavidcs
1543313070Sdavidcs                qla_tx_fp_t *fp = &ha->tx_fp[i];
1544313070Sdavidcs
1545313070Sdavidcs                bzero(tq_name, sizeof (tq_name));
1546313070Sdavidcs                snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);
1547313070Sdavidcs
1548313070Sdavidcs                TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);
1549313070Sdavidcs
1550313070Sdavidcs                fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
1551313070Sdavidcs                                        taskqueue_thread_enqueue,
1552313070Sdavidcs                                        &fp->fp_taskqueue);
1553313070Sdavidcs
1554313070Sdavidcs                if (fp->fp_taskqueue == NULL)
1555313070Sdavidcs                        return (-1);
1556313070Sdavidcs
1557313070Sdavidcs                taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
1558313070Sdavidcs                        tq_name);
1559313070Sdavidcs
1560313070Sdavidcs                QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
1561313070Sdavidcs                        fp->fp_taskqueue));
1562313070Sdavidcs        }
1563313070Sdavidcs
1564313070Sdavidcs        return (0);
1565313070Sdavidcs}
1566313070Sdavidcs
1567313070Sdavidcsstatic void
1568313070Sdavidcsqla_destroy_fp_taskqueues(qla_host_t *ha)
1569313070Sdavidcs{
1570313070Sdavidcs        int     i;
1571313070Sdavidcs
1572313070Sdavidcs        for (i = 0; i < ha->hw.num_sds_rings; i++) {
1573313070Sdavidcs
1574313070Sdavidcs                qla_tx_fp_t *fp = &ha->tx_fp[i];
1575313070Sdavidcs
1576313070Sdavidcs                if (fp->fp_taskqueue != NULL) {
1577330555Sdavidcs                        taskqueue_drain_all(fp->fp_taskqueue);
1578313070Sdavidcs                        taskqueue_free(fp->fp_taskqueue);
1579313070Sdavidcs                        fp->fp_taskqueue = NULL;
1580313070Sdavidcs                }
1581313070Sdavidcs        }
1582313070Sdavidcs        return;
1583313070Sdavidcs}
1584313070Sdavidcs
1585313070Sdavidcsstatic void
1586313070Sdavidcsqla_drain_fp_taskqueues(qla_host_t *ha)
1587313070Sdavidcs{
1588313070Sdavidcs        int     i;
1589313070Sdavidcs
1590313070Sdavidcs        for (i = 0; i < ha->hw.num_sds_rings; i++) {
1591313070Sdavidcs                qla_tx_fp_t *fp = &ha->tx_fp[i];
1592313070Sdavidcs
1593313070Sdavidcs                if (fp->fp_taskqueue != NULL) {
1594330555Sdavidcs                        taskqueue_drain_all(fp->fp_taskqueue);
1595313070Sdavidcs                }
1596313070Sdavidcs        }
1597313070Sdavidcs        return;
1598313070Sdavidcs}
1599313070Sdavidcs
1600313070Sdavidcsstatic int
1601313070Sdavidcsqla_transmit(struct ifnet *ifp, struct mbuf  *mp)
1602313070Sdavidcs{
1603313070Sdavidcs	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
1604313070Sdavidcs        qla_tx_fp_t *fp;
1605313070Sdavidcs        int rss_id = 0;
1606313070Sdavidcs        int ret = 0;
1607313070Sdavidcs
1608313070Sdavidcs        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1609313070Sdavidcs
1610313070Sdavidcs#if __FreeBSD_version >= 1100000
1611313070Sdavidcs        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
1612313070Sdavidcs#else
1613313070Sdavidcs        if (mp->m_flags & M_FLOWID)
1614313070Sdavidcs#endif
1615313070Sdavidcs                rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
1616313070Sdavidcs                                        ha->hw.num_sds_rings;
1617313070Sdavidcs        fp = &ha->tx_fp[rss_id];
1618313070Sdavidcs
1619313070Sdavidcs        if (fp->tx_br == NULL) {
1620313070Sdavidcs                ret = EINVAL;
1621313070Sdavidcs                goto qla_transmit_exit;
1622313070Sdavidcs        }
1623313070Sdavidcs
1624313070Sdavidcs        if (mp != NULL) {
1625313070Sdavidcs                ret = drbr_enqueue(ifp, fp->tx_br, mp);
1626313070Sdavidcs        }
1627313070Sdavidcs
1628313070Sdavidcs        if (fp->fp_taskqueue != NULL)
1629313070Sdavidcs                taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
1630313070Sdavidcs
1631313070Sdavidcs        ret = 0;
1632313070Sdavidcs
1633313070Sdavidcsqla_transmit_exit:
1634313070Sdavidcs
1635313070Sdavidcs        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
1636313070Sdavidcs        return ret;
1637313070Sdavidcs}
1638313070Sdavidcs
1639313070Sdavidcsstatic void
1640313070Sdavidcsqla_qflush(struct ifnet *ifp)
1641313070Sdavidcs{
1642313070Sdavidcs        int                     i;
1643313070Sdavidcs        qla_tx_fp_t		*fp;
1644313070Sdavidcs        struct mbuf             *mp;
1645313070Sdavidcs        qla_host_t              *ha;
1646313070Sdavidcs
1647313070Sdavidcs        ha = (qla_host_t *)ifp->if_softc;
1648313070Sdavidcs
1649313070Sdavidcs        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1650313070Sdavidcs
1651313070Sdavidcs        for (i = 0; i < ha->hw.num_sds_rings; i++) {
1652313070Sdavidcs
1653313070Sdavidcs                fp = &ha->tx_fp[i];
1654313070Sdavidcs
1655313070Sdavidcs                if (fp == NULL)
1656313070Sdavidcs                        continue;
1657313070Sdavidcs
1658313070Sdavidcs                if (fp->tx_br) {
1659313070Sdavidcs                        mtx_lock(&fp->tx_mtx);
1660313070Sdavidcs
1661313070Sdavidcs                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
1662313070Sdavidcs                                m_freem(mp);
1663313070Sdavidcs                        }
1664313070Sdavidcs                        mtx_unlock(&fp->tx_mtx);
1665313070Sdavidcs                }
1666313070Sdavidcs        }
1667313070Sdavidcs        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1668313070Sdavidcs
1669313070Sdavidcs        return;
1670313070Sdavidcs}
1671313070Sdavidcs
1672313070Sdavidcsstatic void
1673250661Sdavidcsqla_stop(qla_host_t *ha)
1674250661Sdavidcs{
1675250661Sdavidcs	struct ifnet *ifp = ha->ifp;
1676250661Sdavidcs	device_t	dev;
1677313070Sdavidcs	int i = 0;
1678250661Sdavidcs
1679330555Sdavidcs	ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);
1680330555Sdavidcs
1681250661Sdavidcs	dev = ha->pci_dev;
1682250661Sdavidcs
1683322972Sdavidcs	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1684322972Sdavidcs	ha->qla_watchdog_pause = 1;
1685250661Sdavidcs
1686313070Sdavidcs        for (i = 0; i < ha->hw.num_sds_rings; i++) {
1687313070Sdavidcs        	qla_tx_fp_t *fp;
1688313070Sdavidcs
1689313070Sdavidcs		fp = &ha->tx_fp[i];
1690313070Sdavidcs
1691313070Sdavidcs                if (fp == NULL)
1692313070Sdavidcs                        continue;
1693313070Sdavidcs
1694313070Sdavidcs		if (fp->tx_br != NULL) {
1695313070Sdavidcs                        mtx_lock(&fp->tx_mtx);
1696313070Sdavidcs                        mtx_unlock(&fp->tx_mtx);
1697313070Sdavidcs		}
1698313070Sdavidcs	}
1699313070Sdavidcs
1700322972Sdavidcs	while (!ha->qla_watchdog_paused)
1701250661Sdavidcs		qla_mdelay(__func__, 1);
1702250661Sdavidcs
1703322972Sdavidcs	ha->qla_interface_up = 0;
1704305487Sdavidcs
1705313070Sdavidcs	qla_drain_fp_taskqueues(ha);
1706313070Sdavidcs
1707250661Sdavidcs	ql_del_hw_if(ha);
1708250661Sdavidcs
1709250661Sdavidcs	qla_free_xmt_bufs(ha);
1710250661Sdavidcs	qla_free_rcv_bufs(ha);
1711250661Sdavidcs
1712250661Sdavidcs	return;
1713250661Sdavidcs}
1714250661Sdavidcs
1715250661Sdavidcs/*
1716250661Sdavidcs * Buffer Management Functions for Transmit and Receive Rings
1717250661Sdavidcs */
1718250661Sdavidcsstatic int
1719250661Sdavidcsqla_alloc_xmt_bufs(qla_host_t *ha)
1720250661Sdavidcs{
1721250661Sdavidcs	int ret = 0;
1722250661Sdavidcs	uint32_t i, j;
1723250661Sdavidcs	qla_tx_buf_t *txb;
1724250661Sdavidcs
1725250661Sdavidcs	if (bus_dma_tag_create(NULL,    /* parent */
1726250661Sdavidcs		1, 0,    /* alignment, bounds */
1727250661Sdavidcs		BUS_SPACE_MAXADDR,       /* lowaddr */
1728250661Sdavidcs		BUS_SPACE_MAXADDR,       /* highaddr */
1729250661Sdavidcs		NULL, NULL,      /* filter, filterarg */
1730250661Sdavidcs		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
1731250661Sdavidcs		QLA_MAX_SEGMENTS,        /* nsegments */
1732250661Sdavidcs		PAGE_SIZE,        /* maxsegsize */
1733250661Sdavidcs		BUS_DMA_ALLOCNOW,        /* flags */
1734250661Sdavidcs		NULL,    /* lockfunc */
1735250661Sdavidcs		NULL,    /* lockfuncarg */
1736250661Sdavidcs		&ha->tx_tag)) {
1737250661Sdavidcs		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
1738250661Sdavidcs			__func__);
1739250661Sdavidcs		return (ENOMEM);
1740250661Sdavidcs	}
1741250661Sdavidcs
1742250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1743250661Sdavidcs		bzero((void *)ha->tx_ring[i].tx_buf,
1744250661Sdavidcs			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1745250661Sdavidcs	}
1746250661Sdavidcs
1747250661Sdavidcs	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1748250661Sdavidcs		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
1749250661Sdavidcs
1750250661Sdavidcs			txb = &ha->tx_ring[j].tx_buf[i];
1751250661Sdavidcs
1752250661Sdavidcs			if ((ret = bus_dmamap_create(ha->tx_tag,
1753250661Sdavidcs					BUS_DMA_NOWAIT, &txb->map))) {
1754250661Sdavidcs
1755250661Sdavidcs				ha->err_tx_dmamap_create++;
1756250661Sdavidcs				device_printf(ha->pci_dev,
1757250661Sdavidcs					"%s: bus_dmamap_create failed[%d]\n",
1758250661Sdavidcs					__func__, ret);
1759250661Sdavidcs
1760250661Sdavidcs				qla_free_xmt_bufs(ha);
1761250661Sdavidcs
1762250661Sdavidcs				return (ret);
1763250661Sdavidcs			}
1764250661Sdavidcs		}
1765250661Sdavidcs	}
1766250661Sdavidcs
1767250661Sdavidcs	return 0;
1768250661Sdavidcs}
1769250661Sdavidcs
1770250661Sdavidcs/*
1771250661Sdavidcs * Release mbuf after it sent on the wire
1772250661Sdavidcs */
1773250661Sdavidcsstatic void
1774250661Sdavidcsqla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1775250661Sdavidcs{
1776250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1777250661Sdavidcs
1778324029Sdavidcs	if (txb->m_head) {
1779324029Sdavidcs		bus_dmamap_sync(ha->tx_tag, txb->map,
1780324029Sdavidcs			BUS_DMASYNC_POSTWRITE);
1781250661Sdavidcs
1782250661Sdavidcs		bus_dmamap_unload(ha->tx_tag, txb->map);
1783250661Sdavidcs
1784250661Sdavidcs		m_freem(txb->m_head);
1785250661Sdavidcs		txb->m_head = NULL;
1786324029Sdavidcs
1787324029Sdavidcs		bus_dmamap_destroy(ha->tx_tag, txb->map);
1788324029Sdavidcs		txb->map = NULL;
1789250661Sdavidcs	}
1790250661Sdavidcs
1791324029Sdavidcs	if (txb->map) {
1792324029Sdavidcs		bus_dmamap_unload(ha->tx_tag, txb->map);
1793250661Sdavidcs		bus_dmamap_destroy(ha->tx_tag, txb->map);
1794324029Sdavidcs		txb->map = NULL;
1795324029Sdavidcs	}
1796250661Sdavidcs
1797250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1798250661Sdavidcs}
1799250661Sdavidcs
1800250661Sdavidcsstatic void
1801250661Sdavidcsqla_free_xmt_bufs(qla_host_t *ha)
1802250661Sdavidcs{
1803250661Sdavidcs	int		i, j;
1804250661Sdavidcs
1805250661Sdavidcs	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1806250661Sdavidcs		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1807250661Sdavidcs			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1808250661Sdavidcs	}
1809250661Sdavidcs
1810250661Sdavidcs	if (ha->tx_tag != NULL) {
1811250661Sdavidcs		bus_dma_tag_destroy(ha->tx_tag);
1812250661Sdavidcs		ha->tx_tag = NULL;
1813250661Sdavidcs	}
1814250661Sdavidcs
1815250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1816250661Sdavidcs		bzero((void *)ha->tx_ring[i].tx_buf,
1817250661Sdavidcs			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1818250661Sdavidcs	}
1819250661Sdavidcs	return;
1820250661Sdavidcs}
1821250661Sdavidcs
1822250661Sdavidcs
1823250661Sdavidcsstatic int
1824250661Sdavidcsqla_alloc_rcv_std(qla_host_t *ha)
1825250661Sdavidcs{
1826250661Sdavidcs	int		i, j, k, r, ret = 0;
1827250661Sdavidcs	qla_rx_buf_t	*rxb;
1828250661Sdavidcs	qla_rx_ring_t	*rx_ring;
1829250661Sdavidcs
1830250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1831250661Sdavidcs
1832250661Sdavidcs		rx_ring = &ha->rx_ring[r];
1833250661Sdavidcs
1834250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1835250661Sdavidcs
1836250661Sdavidcs			rxb = &rx_ring->rx_buf[i];
1837250661Sdavidcs
1838250661Sdavidcs			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1839250661Sdavidcs					&rxb->map);
1840250661Sdavidcs
1841250661Sdavidcs			if (ret) {
1842250661Sdavidcs				device_printf(ha->pci_dev,
1843250661Sdavidcs					"%s: dmamap[%d, %d] failed\n",
1844250661Sdavidcs					__func__, r, i);
1845250661Sdavidcs
1846250661Sdavidcs				for (k = 0; k < r; k++) {
1847250661Sdavidcs					for (j = 0; j < NUM_RX_DESCRIPTORS;
1848250661Sdavidcs						j++) {
1849250661Sdavidcs						rxb = &ha->rx_ring[k].rx_buf[j];
1850250661Sdavidcs						bus_dmamap_destroy(ha->rx_tag,
1851250661Sdavidcs							rxb->map);
1852250661Sdavidcs					}
1853250661Sdavidcs				}
1854250661Sdavidcs
1855250661Sdavidcs				for (j = 0; j < i; j++) {
1856250661Sdavidcs					bus_dmamap_destroy(ha->rx_tag,
1857250661Sdavidcs						rx_ring->rx_buf[j].map);
1858250661Sdavidcs				}
1859250661Sdavidcs				goto qla_alloc_rcv_std_err;
1860250661Sdavidcs			}
1861250661Sdavidcs		}
1862250661Sdavidcs	}
1863250661Sdavidcs
1864250661Sdavidcs	qla_init_hw_rcv_descriptors(ha);
1865250661Sdavidcs
1866250661Sdavidcs
1867250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1868250661Sdavidcs
1869250661Sdavidcs		rx_ring = &ha->rx_ring[r];
1870250661Sdavidcs
1871250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1872250661Sdavidcs			rxb = &rx_ring->rx_buf[i];
1873250661Sdavidcs			rxb->handle = i;
1874250661Sdavidcs			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1875250661Sdavidcs				/*
1876250661Sdavidcs			 	 * set the physical address in the
1877250661Sdavidcs				 * corresponding descriptor entry in the
1878250661Sdavidcs				 * receive ring/queue for the hba
1879250661Sdavidcs				 */
1880250661Sdavidcs				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1881250661Sdavidcs					rxb->paddr,
1882250661Sdavidcs					(rxb->m_head)->m_pkthdr.len);
1883250661Sdavidcs			} else {
1884250661Sdavidcs				device_printf(ha->pci_dev,
1885250661Sdavidcs					"%s: ql_get_mbuf [%d, %d] failed\n",
1886250661Sdavidcs					__func__, r, i);
1887250661Sdavidcs				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1888250661Sdavidcs				goto qla_alloc_rcv_std_err;
1889250661Sdavidcs			}
1890250661Sdavidcs		}
1891250661Sdavidcs	}
1892250661Sdavidcs	return 0;
1893250661Sdavidcs
1894250661Sdavidcsqla_alloc_rcv_std_err:
1895250661Sdavidcs	return (-1);
1896250661Sdavidcs}
1897250661Sdavidcs
1898250661Sdavidcsstatic void
1899250661Sdavidcsqla_free_rcv_std(qla_host_t *ha)
1900250661Sdavidcs{
1901250661Sdavidcs	int		i, r;
1902250661Sdavidcs	qla_rx_buf_t	*rxb;
1903250661Sdavidcs
1904250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1905250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1906250661Sdavidcs			rxb = &ha->rx_ring[r].rx_buf[i];
1907250661Sdavidcs			if (rxb->m_head != NULL) {
1908250661Sdavidcs				bus_dmamap_unload(ha->rx_tag, rxb->map);
1909250661Sdavidcs				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1910250661Sdavidcs				m_freem(rxb->m_head);
1911250661Sdavidcs				rxb->m_head = NULL;
1912250661Sdavidcs			}
1913250661Sdavidcs		}
1914250661Sdavidcs	}
1915250661Sdavidcs	return;
1916250661Sdavidcs}
1917250661Sdavidcs
1918250661Sdavidcsstatic int
1919250661Sdavidcsqla_alloc_rcv_bufs(qla_host_t *ha)
1920250661Sdavidcs{
1921250661Sdavidcs	int		i, ret = 0;
1922250661Sdavidcs
1923250661Sdavidcs	if (bus_dma_tag_create(NULL,    /* parent */
1924250661Sdavidcs			1, 0,    /* alignment, bounds */
1925250661Sdavidcs			BUS_SPACE_MAXADDR,       /* lowaddr */
1926250661Sdavidcs			BUS_SPACE_MAXADDR,       /* highaddr */
1927250661Sdavidcs			NULL, NULL,      /* filter, filterarg */
1928250661Sdavidcs			MJUM9BYTES,     /* maxsize */
1929250661Sdavidcs			1,        /* nsegments */
1930250661Sdavidcs			MJUM9BYTES,        /* maxsegsize */
1931250661Sdavidcs			BUS_DMA_ALLOCNOW,        /* flags */
1932250661Sdavidcs			NULL,    /* lockfunc */
1933250661Sdavidcs			NULL,    /* lockfuncarg */
1934250661Sdavidcs			&ha->rx_tag)) {
1935250661Sdavidcs
1936250661Sdavidcs		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1937250661Sdavidcs			__func__);
1938250661Sdavidcs
1939250661Sdavidcs		return (ENOMEM);
1940250661Sdavidcs	}
1941250661Sdavidcs
1942250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1943250661Sdavidcs
1944250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1945250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1946250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1947250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1948250661Sdavidcs	}
1949250661Sdavidcs
1950250661Sdavidcs	ret = qla_alloc_rcv_std(ha);
1951250661Sdavidcs
1952250661Sdavidcs	return (ret);
1953250661Sdavidcs}
1954250661Sdavidcs
1955250661Sdavidcsstatic void
1956250661Sdavidcsqla_free_rcv_bufs(qla_host_t *ha)
1957250661Sdavidcs{
1958250661Sdavidcs	int		i;
1959250661Sdavidcs
1960250661Sdavidcs	qla_free_rcv_std(ha);
1961250661Sdavidcs
1962250661Sdavidcs	if (ha->rx_tag != NULL) {
1963250661Sdavidcs		bus_dma_tag_destroy(ha->rx_tag);
1964250661Sdavidcs		ha->rx_tag = NULL;
1965250661Sdavidcs	}
1966250661Sdavidcs
1967250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1968250661Sdavidcs
1969250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1970250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1971250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1972250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1973250661Sdavidcs	}
1974250661Sdavidcs
1975250661Sdavidcs	return;
1976250661Sdavidcs}
1977250661Sdavidcs
1978250661Sdavidcsint
1979250661Sdavidcsql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1980250661Sdavidcs{
1981331643Sdim	struct mbuf *mp = nmp;
1982250661Sdavidcs	struct ifnet   		*ifp;
1983250661Sdavidcs	int            		ret = 0;
1984250661Sdavidcs	uint32_t		offset;
1985250661Sdavidcs	bus_dma_segment_t	segs[1];
1986284741Sdavidcs	int			nsegs, mbuf_size;
1987250661Sdavidcs
1988250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1989250661Sdavidcs
1990250661Sdavidcs	ifp = ha->ifp;
1991250661Sdavidcs
1992284741Sdavidcs        if (ha->hw.enable_9kb)
1993284741Sdavidcs                mbuf_size = MJUM9BYTES;
1994284741Sdavidcs        else
1995284741Sdavidcs                mbuf_size = MCLBYTES;
1996284741Sdavidcs
1997250661Sdavidcs	if (mp == NULL) {
1998250661Sdavidcs
1999305488Sdavidcs		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
2000305488Sdavidcs			return(-1);
2001305488Sdavidcs
2002284741Sdavidcs                if (ha->hw.enable_9kb)
2003284741Sdavidcs                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
2004284741Sdavidcs                else
2005284741Sdavidcs                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2006250661Sdavidcs
2007250661Sdavidcs		if (mp == NULL) {
2008250661Sdavidcs			ha->err_m_getcl++;
2009250661Sdavidcs			ret = ENOBUFS;
2010250661Sdavidcs			device_printf(ha->pci_dev,
2011250661Sdavidcs					"%s: m_getcl failed\n", __func__);
2012250661Sdavidcs			goto exit_ql_get_mbuf;
2013250661Sdavidcs		}
2014284741Sdavidcs		mp->m_len = mp->m_pkthdr.len = mbuf_size;
2015250661Sdavidcs	} else {
2016284741Sdavidcs		mp->m_len = mp->m_pkthdr.len = mbuf_size;
2017250661Sdavidcs		mp->m_data = mp->m_ext.ext_buf;
2018250661Sdavidcs		mp->m_next = NULL;
2019250661Sdavidcs	}
2020250661Sdavidcs
2021250661Sdavidcs	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
2022250661Sdavidcs	if (offset) {
2023250661Sdavidcs		offset = 8 - offset;
2024250661Sdavidcs		m_adj(mp, offset);
2025250661Sdavidcs	}
2026250661Sdavidcs
2027250661Sdavidcs	/*
2028250661Sdavidcs	 * Using memory from the mbuf cluster pool, invoke the bus_dma
2029250661Sdavidcs	 * machinery to arrange the memory mapping.
2030250661Sdavidcs	 */
2031250661Sdavidcs	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
2032250661Sdavidcs			mp, segs, &nsegs, BUS_DMA_NOWAIT);
2033250661Sdavidcs	rxb->paddr = segs[0].ds_addr;
2034250661Sdavidcs
2035250661Sdavidcs	if (ret || !rxb->paddr || (nsegs != 1)) {
2036250661Sdavidcs		m_free(mp);
2037250661Sdavidcs		rxb->m_head = NULL;
2038250661Sdavidcs		device_printf(ha->pci_dev,
2039250661Sdavidcs			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
2040250661Sdavidcs			__func__, ret, (long long unsigned int)rxb->paddr,
2041250661Sdavidcs			nsegs);
2042250661Sdavidcs                ret = -1;
2043250661Sdavidcs		goto exit_ql_get_mbuf;
2044250661Sdavidcs	}
2045250661Sdavidcs	rxb->m_head = mp;
2046250661Sdavidcs	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
2047250661Sdavidcs
2048250661Sdavidcsexit_ql_get_mbuf:
2049250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
2050250661Sdavidcs	return (ret);
2051250661Sdavidcs}
2052250661Sdavidcs
2053250661Sdavidcs
2054250661Sdavidcsstatic void
2055250661Sdavidcsqla_get_peer(qla_host_t *ha)
2056250661Sdavidcs{
2057250661Sdavidcs	device_t *peers;
2058250661Sdavidcs	int count, i, slot;
2059250661Sdavidcs	int my_slot = pci_get_slot(ha->pci_dev);
2060250661Sdavidcs
2061250661Sdavidcs	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
2062250661Sdavidcs		return;
2063250661Sdavidcs
2064250661Sdavidcs	for (i = 0; i < count; i++) {
2065250661Sdavidcs		slot = pci_get_slot(peers[i]);
2066250661Sdavidcs
2067250661Sdavidcs		if ((slot >= 0) && (slot == my_slot) &&
2068250661Sdavidcs			(pci_get_device(peers[i]) ==
2069250661Sdavidcs				pci_get_device(ha->pci_dev))) {
2070250661Sdavidcs			if (ha->pci_dev != peers[i])
2071250661Sdavidcs				ha->peer_dev = peers[i];
2072250661Sdavidcs		}
2073250661Sdavidcs	}
2074250661Sdavidcs}
2075250661Sdavidcs
2076250661Sdavidcsstatic void
2077250661Sdavidcsqla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
2078250661Sdavidcs{
2079250661Sdavidcs	qla_host_t *ha_peer;
2080250661Sdavidcs
2081250661Sdavidcs	if (ha->peer_dev) {
2082250661Sdavidcs        	if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
2083250661Sdavidcs
2084250661Sdavidcs			ha_peer->msg_from_peer = msg_to_peer;
2085250661Sdavidcs		}
2086250661Sdavidcs	}
2087250661Sdavidcs}
2088250661Sdavidcs
2089330555Sdavidcsvoid
2090330555Sdavidcsqla_set_error_recovery(qla_host_t *ha)
2091330555Sdavidcs{
2092330555Sdavidcs	struct ifnet *ifp = ha->ifp;
2093330555Sdavidcs
2094330555Sdavidcs	if (!cold && ha->enable_error_recovery) {
2095330555Sdavidcs		if (ifp)
2096330555Sdavidcs			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2097330555Sdavidcs		ha->qla_initiate_recovery = 1;
2098330555Sdavidcs	} else
2099330555Sdavidcs		ha->offline = 1;
2100330555Sdavidcs	return;
2101330555Sdavidcs}
2102330555Sdavidcs
2103250661Sdavidcsstatic void
2104250661Sdavidcsqla_error_recovery(void *context, int pending)
2105250661Sdavidcs{
2106250661Sdavidcs	qla_host_t *ha = context;
2107330555Sdavidcs	uint32_t msecs_100 = 400;
2108250661Sdavidcs	struct ifnet *ifp = ha->ifp;
2109313070Sdavidcs	int i = 0;
2110250661Sdavidcs
2111330555Sdavidcs	device_printf(ha->pci_dev, "%s: enter\n", __func__);
2112322972Sdavidcs	ha->hw.imd_compl = 1;
2113250661Sdavidcs
2114330555Sdavidcs	taskqueue_drain_all(ha->stats_tq);
2115330555Sdavidcs	taskqueue_drain_all(ha->async_event_tq);
2116330555Sdavidcs
2117322972Sdavidcs	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2118322972Sdavidcs		return;
2119305487Sdavidcs
2120330555Sdavidcs	device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
2121330555Sdavidcs		__func__, qla_get_usec_timestamp());
2122321495Sdavidcs
2123322972Sdavidcs	if (ha->qla_interface_up) {
2124322972Sdavidcs
2125313070Sdavidcs		qla_mdelay(__func__, 300);
2126284741Sdavidcs
2127330555Sdavidcs	        //ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2128313070Sdavidcs
2129313070Sdavidcs		for (i = 0; i < ha->hw.num_sds_rings; i++) {
2130313070Sdavidcs	        	qla_tx_fp_t *fp;
2131313070Sdavidcs
2132313070Sdavidcs			fp = &ha->tx_fp[i];
2133313070Sdavidcs
2134313070Sdavidcs			if (fp == NULL)
2135313070Sdavidcs				continue;
2136313070Sdavidcs
2137313070Sdavidcs			if (fp->tx_br != NULL) {
2138313070Sdavidcs				mtx_lock(&fp->tx_mtx);
2139313070Sdavidcs				mtx_unlock(&fp->tx_mtx);
2140313070Sdavidcs			}
2141313070Sdavidcs		}
2142305487Sdavidcs	}
2143250661Sdavidcs
2144318306Sdavidcs	qla_drain_fp_taskqueues(ha);
2145318306Sdavidcs
2146250661Sdavidcs	if ((ha->pci_func & 0x1) == 0) {
2147250661Sdavidcs
2148252580Sdavidcs		if (!ha->msg_from_peer) {
2149250661Sdavidcs			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2150250661Sdavidcs
2151252580Sdavidcs			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
2152252580Sdavidcs				msecs_100--)
2153252580Sdavidcs				qla_mdelay(__func__, 100);
2154252580Sdavidcs		}
2155250661Sdavidcs
2156250661Sdavidcs		ha->msg_from_peer = 0;
2157250661Sdavidcs
2158324027Sdavidcs		if (ha->enable_minidump)
2159324027Sdavidcs			ql_minidump(ha);
2160255003Sdavidcs
2161330555Sdavidcs		if (ha->enable_driverstate_dump)
2162330555Sdavidcs			ql_capture_drvr_state(ha);
2163305487Sdavidcs
2164330555Sdavidcs		if (ql_init_hw(ha)) {
2165330555Sdavidcs			device_printf(ha->pci_dev,
2166330555Sdavidcs				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2167330555Sdavidcs				__func__, qla_get_usec_timestamp());
2168330555Sdavidcs			ha->offline = 1;
2169330555Sdavidcs			goto qla_error_recovery_exit;
2170330555Sdavidcs		}
2171330555Sdavidcs
2172322972Sdavidcs		if (ha->qla_interface_up) {
2173317990Sdavidcs			qla_free_xmt_bufs(ha);
2174317990Sdavidcs			qla_free_rcv_bufs(ha);
2175305487Sdavidcs		}
2176250661Sdavidcs
2177330555Sdavidcs		if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2178330555Sdavidcs			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2179250661Sdavidcs
2180250661Sdavidcs	} else {
2181250661Sdavidcs		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
2182250661Sdavidcs
2183250661Sdavidcs			ha->msg_from_peer = 0;
2184250661Sdavidcs
2185330555Sdavidcs			if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2186330555Sdavidcs				qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2187250661Sdavidcs		} else {
2188250661Sdavidcs			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2189250661Sdavidcs		}
2190250661Sdavidcs
2191250661Sdavidcs		while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
2192250661Sdavidcs			qla_mdelay(__func__, 100);
2193250661Sdavidcs		ha->msg_from_peer = 0;
2194250661Sdavidcs
2195330555Sdavidcs		if (ha->enable_driverstate_dump)
2196330555Sdavidcs			ql_capture_drvr_state(ha);
2197305487Sdavidcs
2198330555Sdavidcs		if (msecs_100 == 0) {
2199330555Sdavidcs			device_printf(ha->pci_dev,
2200330555Sdavidcs				"%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
2201330555Sdavidcs				__func__, qla_get_usec_timestamp());
2202330555Sdavidcs			ha->offline = 1;
2203330555Sdavidcs			goto qla_error_recovery_exit;
2204330555Sdavidcs		}
2205317990Sdavidcs
2206330555Sdavidcs		if (ql_init_hw(ha)) {
2207330555Sdavidcs			device_printf(ha->pci_dev,
2208330555Sdavidcs				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2209330555Sdavidcs				__func__, qla_get_usec_timestamp());
2210330555Sdavidcs			ha->offline = 1;
2211330555Sdavidcs			goto qla_error_recovery_exit;
2212330555Sdavidcs		}
2213330555Sdavidcs
2214322972Sdavidcs		if (ha->qla_interface_up) {
2215317990Sdavidcs			qla_free_xmt_bufs(ha);
2216317990Sdavidcs			qla_free_rcv_bufs(ha);
2217317990Sdavidcs		}
2218250661Sdavidcs	}
2219305487Sdavidcs
2220330555Sdavidcs	qla_mdelay(__func__, ha->ms_delay_after_init);
2221330555Sdavidcs
2222330555Sdavidcs	*((uint32_t *)&ha->hw.flags) = 0;
2223330555Sdavidcs	ha->qla_initiate_recovery = 0;
2224330555Sdavidcs
2225322972Sdavidcs	if (ha->qla_interface_up) {
2226250661Sdavidcs
2227317990Sdavidcs		if (qla_alloc_xmt_bufs(ha) != 0) {
2228330555Sdavidcs			ha->offline = 1;
2229322972Sdavidcs			goto qla_error_recovery_exit;
2230317990Sdavidcs		}
2231330555Sdavidcs
2232317990Sdavidcs		qla_confirm_9kb_enable(ha);
2233250661Sdavidcs
2234317990Sdavidcs		if (qla_alloc_rcv_bufs(ha) != 0) {
2235330555Sdavidcs			ha->offline = 1;
2236322972Sdavidcs			goto qla_error_recovery_exit;
2237317990Sdavidcs		}
2238317990Sdavidcs
2239322972Sdavidcs		ha->stop_rcv = 0;
2240317990Sdavidcs
2241317990Sdavidcs		if (ql_init_hw_if(ha) == 0) {
2242317990Sdavidcs			ifp = ha->ifp;
2243317990Sdavidcs			ifp->if_drv_flags |= IFF_DRV_RUNNING;
2244322972Sdavidcs			ha->qla_watchdog_pause = 0;
2245330555Sdavidcs			ql_update_link_state(ha);
2246330555Sdavidcs		} else {
2247330555Sdavidcs			ha->offline = 1;
2248330555Sdavidcs
2249330555Sdavidcs			if (ha->hw.sp_log_stop_events &
2250330555Sdavidcs				Q8_SP_LOG_STOP_IF_START_FAILURE)
2251330555Sdavidcs				ha->hw.sp_log_stop = -1;
2252317990Sdavidcs		}
2253330555Sdavidcs	} else {
2254322972Sdavidcs		ha->qla_watchdog_pause = 0;
2255330555Sdavidcs	}
2256250661Sdavidcs
2257322972Sdavidcsqla_error_recovery_exit:
2258322972Sdavidcs
2259330555Sdavidcs	if (ha->offline ) {
2260330555Sdavidcs		device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
2261330555Sdavidcs			__func__, qla_get_usec_timestamp());
2262330555Sdavidcs		if (ha->hw.sp_log_stop_events &
2263330555Sdavidcs			Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
2264330555Sdavidcs			ha->hw.sp_log_stop = -1;
2265330555Sdavidcs	}
2266322972Sdavidcs
2267330555Sdavidcs
2268322972Sdavidcs        QLA_UNLOCK(ha, __func__);
2269322972Sdavidcs
2270330555Sdavidcs	if (!ha->offline)
2271330555Sdavidcs		callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
2272330555Sdavidcs			qla_watchdog, ha);
2273330555Sdavidcs
2274330555Sdavidcs	device_printf(ha->pci_dev,
2275330555Sdavidcs		"%s: ts_usecs = %ld exit\n",
2276330555Sdavidcs		__func__, qla_get_usec_timestamp());
2277322972Sdavidcs	return;
2278250661Sdavidcs}
2279250661Sdavidcs
2280284741Sdavidcsstatic void
2281284741Sdavidcsqla_async_event(void *context, int pending)
2282284741Sdavidcs{
2283284741Sdavidcs        qla_host_t *ha = context;
2284284741Sdavidcs
2285322972Sdavidcs	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2286322972Sdavidcs		return;
2287322972Sdavidcs
2288322972Sdavidcs	if (ha->async_event) {
2289322972Sdavidcs		ha->async_event = 0;
2290322972Sdavidcs        	qla_hw_async_event(ha);
2291322972Sdavidcs	}
2292322972Sdavidcs
2293322972Sdavidcs	QLA_UNLOCK(ha, __func__);
2294322972Sdavidcs
2295322972Sdavidcs	return;
2296284741Sdavidcs}
2297284741Sdavidcs
2298322972Sdavidcsstatic void
2299322972Sdavidcsqla_stats(void *context, int pending)
2300322972Sdavidcs{
2301322972Sdavidcs        qla_host_t *ha;
2302322972Sdavidcs
2303322972Sdavidcs        ha = context;
2304322972Sdavidcs
2305322972Sdavidcs	ql_get_stats(ha);
2306330555Sdavidcs
2307322972Sdavidcs	return;
2308322972Sdavidcs}
2309322972Sdavidcs
2310