1250661Sdavidcs/*
2284982Sdavidcs * Copyright (c) 2013-2016 Qlogic Corporation
3250661Sdavidcs * All rights reserved.
4250661Sdavidcs *
5250661Sdavidcs *  Redistribution and use in source and binary forms, with or without
6250661Sdavidcs *  modification, are permitted provided that the following conditions
7250661Sdavidcs *  are met:
8250661Sdavidcs *
9250661Sdavidcs *  1. Redistributions of source code must retain the above copyright
10250661Sdavidcs *     notice, this list of conditions and the following disclaimer.
11250661Sdavidcs *  2. Redistributions in binary form must reproduce the above copyright
12250661Sdavidcs *     notice, this list of conditions and the following disclaimer in the
13250661Sdavidcs *     documentation and/or other materials provided with the distribution.
14250661Sdavidcs *
15250661Sdavidcs *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17250661Sdavidcs *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18250661Sdavidcs *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19250661Sdavidcs *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20250661Sdavidcs *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21250661Sdavidcs *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22250661Sdavidcs *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23250661Sdavidcs *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24250661Sdavidcs *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25250661Sdavidcs *  POSSIBILITY OF SUCH DAMAGE.
26250661Sdavidcs */
27250661Sdavidcs
28250661Sdavidcs/*
29250661Sdavidcs * File: ql_os.c
30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31250661Sdavidcs */
32250661Sdavidcs
33250661Sdavidcs#include <sys/cdefs.h>
34250661Sdavidcs__FBSDID("$FreeBSD: releng/10.3/sys/dev/qlxgbe/ql_os.c 292615 2015-12-22 19:34:21Z davidcs $");
35250661Sdavidcs
36250661Sdavidcs
37250661Sdavidcs#include "ql_os.h"
38250661Sdavidcs#include "ql_hw.h"
39250661Sdavidcs#include "ql_def.h"
40250661Sdavidcs#include "ql_inline.h"
41250661Sdavidcs#include "ql_ver.h"
42250661Sdavidcs#include "ql_glbl.h"
43250661Sdavidcs#include "ql_dbg.h"
44250661Sdavidcs#include <sys/smp.h>
45250661Sdavidcs
46250661Sdavidcs/*
47250661Sdavidcs * Some PCI Configuration Space Related Defines
48250661Sdavidcs */
49250661Sdavidcs
50250661Sdavidcs#ifndef PCI_VENDOR_QLOGIC
51250661Sdavidcs#define PCI_VENDOR_QLOGIC	0x1077
52250661Sdavidcs#endif
53250661Sdavidcs
54250661Sdavidcs#ifndef PCI_PRODUCT_QLOGIC_ISP8030
55250661Sdavidcs#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
56250661Sdavidcs#endif
57250661Sdavidcs
58250661Sdavidcs#define PCI_QLOGIC_ISP8030 \
59250661Sdavidcs	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
60250661Sdavidcs
61250661Sdavidcs/*
62250661Sdavidcs * static functions
63250661Sdavidcs */
64250661Sdavidcsstatic int qla_alloc_parent_dma_tag(qla_host_t *ha);
65250661Sdavidcsstatic void qla_free_parent_dma_tag(qla_host_t *ha);
66250661Sdavidcsstatic int qla_alloc_xmt_bufs(qla_host_t *ha);
67250661Sdavidcsstatic void qla_free_xmt_bufs(qla_host_t *ha);
68250661Sdavidcsstatic int qla_alloc_rcv_bufs(qla_host_t *ha);
69250661Sdavidcsstatic void qla_free_rcv_bufs(qla_host_t *ha);
70250661Sdavidcsstatic void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);
71250661Sdavidcs
72250661Sdavidcsstatic void qla_init_ifnet(device_t dev, qla_host_t *ha);
73250661Sdavidcsstatic int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
74250661Sdavidcsstatic int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
75250661Sdavidcsstatic void qla_release(qla_host_t *ha);
76250661Sdavidcsstatic void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
77250661Sdavidcs		int error);
78250661Sdavidcsstatic void qla_stop(qla_host_t *ha);
79250661Sdavidcsstatic int qla_send(qla_host_t *ha, struct mbuf **m_headp);
80250661Sdavidcsstatic void qla_tx_done(void *context, int pending);
81250661Sdavidcsstatic void qla_get_peer(qla_host_t *ha);
82250661Sdavidcsstatic void qla_error_recovery(void *context, int pending);
83284982Sdavidcsstatic void qla_async_event(void *context, int pending);
84250661Sdavidcs
85250661Sdavidcs/*
86250661Sdavidcs * Hooks to the Operating Systems
87250661Sdavidcs */
88250661Sdavidcsstatic int qla_pci_probe (device_t);
89250661Sdavidcsstatic int qla_pci_attach (device_t);
90250661Sdavidcsstatic int qla_pci_detach (device_t);
91250661Sdavidcs
92250661Sdavidcsstatic void qla_init(void *arg);
93250661Sdavidcsstatic int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
94250661Sdavidcsstatic int qla_media_change(struct ifnet *ifp);
95250661Sdavidcsstatic void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
96250661Sdavidcsstatic void qla_start(struct ifnet *ifp);
97250661Sdavidcs
/* Bus/device method dispatch table for the "ql" newbus driver. */
static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

/* Driver description: name "ql", methods above, softc is a qla_host_t. */
static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

/* Rx ring replenish thresholds for standard and jumbo frames. */
#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32


/* Device-description and driver-version strings, filled in at probe time. */
static char dev_str[64];
static char ver_str[64];
125250661Sdavidcs
126250661Sdavidcs/*
127250661Sdavidcs * Name:	qla_pci_probe
128250661Sdavidcs * Function:	Validate the PCI device to be a QLA80XX device
129250661Sdavidcs */
130250661Sdavidcsstatic int
131250661Sdavidcsqla_pci_probe(device_t dev)
132250661Sdavidcs{
133250661Sdavidcs        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
134250661Sdavidcs        case PCI_QLOGIC_ISP8030:
135250661Sdavidcs		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
136250661Sdavidcs			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
137250661Sdavidcs			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
138250661Sdavidcs			QLA_VERSION_BUILD);
139284982Sdavidcs		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
140284982Sdavidcs			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
141284982Sdavidcs			QLA_VERSION_BUILD);
142250661Sdavidcs                device_set_desc(dev, dev_str);
143250661Sdavidcs                break;
144250661Sdavidcs        default:
145250661Sdavidcs                return (ENXIO);
146250661Sdavidcs        }
147250661Sdavidcs
148250661Sdavidcs        if (bootverbose)
149250661Sdavidcs                printf("%s: %s\n ", __func__, dev_str);
150250661Sdavidcs
151250661Sdavidcs        return (BUS_PROBE_DEFAULT);
152250661Sdavidcs}
153250661Sdavidcs
154250661Sdavidcsstatic void
155250661Sdavidcsqla_add_sysctls(qla_host_t *ha)
156250661Sdavidcs{
157250661Sdavidcs        device_t dev = ha->pci_dev;
158250661Sdavidcs
159284982Sdavidcs	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
160284982Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
161284982Sdavidcs		OID_AUTO, "version", CTLFLAG_RD,
162284982Sdavidcs		ver_str, 0, "Driver Version");
163284982Sdavidcs
164250661Sdavidcs        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
165250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
166250661Sdavidcs                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
167250661Sdavidcs                (void *)ha, 0,
168250661Sdavidcs                qla_sysctl_get_stats, "I", "Statistics");
169250661Sdavidcs
170250661Sdavidcs        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
171250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
172250661Sdavidcs                OID_AUTO, "fw_version", CTLFLAG_RD,
173273736Shselasky                ha->fw_ver_str, 0, "firmware version");
174250661Sdavidcs
175250661Sdavidcs        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
176250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
177250661Sdavidcs                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
178250661Sdavidcs                (void *)ha, 0,
179250661Sdavidcs                qla_sysctl_get_link_status, "I", "Link Status");
180250661Sdavidcs
181250661Sdavidcs	ha->dbg_level = 0;
182250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
183250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
184250661Sdavidcs                OID_AUTO, "debug", CTLFLAG_RW,
185250661Sdavidcs                &ha->dbg_level, ha->dbg_level, "Debug Level");
186250661Sdavidcs
187250661Sdavidcs	ha->std_replenish = QL_STD_REPLENISH_THRES;
188250661Sdavidcs        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
189250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
190250661Sdavidcs                OID_AUTO, "std_replenish", CTLFLAG_RW,
191250661Sdavidcs                &ha->std_replenish, ha->std_replenish,
192250661Sdavidcs                "Threshold for Replenishing Standard Frames");
193250661Sdavidcs
194250661Sdavidcs        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
195250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
196250661Sdavidcs                OID_AUTO, "ipv4_lro",
197250661Sdavidcs                CTLFLAG_RD, &ha->ipv4_lro,
198250661Sdavidcs                "number of ipv4 lro completions");
199250661Sdavidcs
200250661Sdavidcs        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
201250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
202250661Sdavidcs                OID_AUTO, "ipv6_lro",
203250661Sdavidcs                CTLFLAG_RD, &ha->ipv6_lro,
204250661Sdavidcs                "number of ipv6 lro completions");
205250661Sdavidcs
206250661Sdavidcs	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
207250661Sdavidcs		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
208250661Sdavidcs		OID_AUTO, "tx_tso_frames",
209250661Sdavidcs		CTLFLAG_RD, &ha->tx_tso_frames,
210250661Sdavidcs		"number of Tx TSO Frames");
211250661Sdavidcs
212250661Sdavidcs	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
213250661Sdavidcs                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
214250661Sdavidcs		OID_AUTO, "hw_vlan_tx_frames",
215250661Sdavidcs		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
216250661Sdavidcs		"number of Tx VLAN Frames");
217250661Sdavidcs
218250661Sdavidcs        return;
219250661Sdavidcs}
220250661Sdavidcs
221250661Sdavidcsstatic void
222250661Sdavidcsqla_watchdog(void *arg)
223250661Sdavidcs{
224250661Sdavidcs	qla_host_t *ha = arg;
225250661Sdavidcs	qla_hw_t *hw;
226250661Sdavidcs	struct ifnet *ifp;
227250661Sdavidcs	uint32_t i;
228250661Sdavidcs	qla_hw_tx_cntxt_t *hw_tx_cntxt;
229250661Sdavidcs
230250661Sdavidcs	hw = &ha->hw;
231250661Sdavidcs	ifp = ha->ifp;
232250661Sdavidcs
233250661Sdavidcs        if (ha->flags.qla_watchdog_exit) {
234250661Sdavidcs		ha->qla_watchdog_exited = 1;
235250661Sdavidcs		return;
236250661Sdavidcs	}
237250661Sdavidcs	ha->qla_watchdog_exited = 0;
238250661Sdavidcs
239250661Sdavidcs	if (!ha->flags.qla_watchdog_pause) {
240250661Sdavidcs		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
241250661Sdavidcs			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
242250661Sdavidcs			ha->qla_watchdog_paused = 1;
243250661Sdavidcs			ha->flags.qla_watchdog_pause = 1;
244250661Sdavidcs			ha->qla_initiate_recovery = 0;
245250661Sdavidcs			ha->err_inject = 0;
246250661Sdavidcs			taskqueue_enqueue(ha->err_tq, &ha->err_task);
247250661Sdavidcs		} else {
248284982Sdavidcs
249284982Sdavidcs                        if (ha->async_event) {
250284982Sdavidcs                                ha->async_event = 0;
251284982Sdavidcs                                taskqueue_enqueue(ha->async_event_tq,
252284982Sdavidcs                                        &ha->async_event_task);
253284982Sdavidcs                        }
254284982Sdavidcs
255250661Sdavidcs			for (i = 0; i < ha->hw.num_tx_rings; i++) {
256250661Sdavidcs				hw_tx_cntxt = &hw->tx_cntxt[i];
257250661Sdavidcs				if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) !=
258250661Sdavidcs					hw_tx_cntxt->txr_comp) {
259250661Sdavidcs					taskqueue_enqueue(ha->tx_tq,
260250661Sdavidcs						&ha->tx_task);
261250661Sdavidcs					break;
262250661Sdavidcs				}
263250661Sdavidcs			}
264250661Sdavidcs
265250661Sdavidcs			if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
266250661Sdavidcs				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
267250661Sdavidcs			}
268250661Sdavidcs			ha->qla_watchdog_paused = 0;
269250661Sdavidcs		}
270250661Sdavidcs
271250661Sdavidcs	} else {
272250661Sdavidcs		ha->qla_watchdog_paused = 1;
273250661Sdavidcs	}
274250661Sdavidcs
275250661Sdavidcs	ha->watchdog_ticks = ha->watchdog_ticks++ % 1000;
276250661Sdavidcs	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
277250661Sdavidcs		qla_watchdog, ha);
278250661Sdavidcs}
279250661Sdavidcs
280250661Sdavidcs/*
281250661Sdavidcs * Name:	qla_pci_attach
282250661Sdavidcs * Function:	attaches the device to the operating system
283250661Sdavidcs */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	/* Map BAR0: the adapter register window. */
	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qla_add_sysctls(ha);
	ql_hw_add_sysctls(ha);

	/* Both mutexes exist from here on; qla_release() keys off this. */
	ha->flags.lock_init = 1;

	/* Map BAR2 (second register window). */
	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	/*
	 * Need one MSI-X vector per status ring plus one for the mailbox.
	 * NOTE(review): hw.num_sds_rings is zero at this point (softc was
	 * memset above and ql_init_hw runs later) — confirm the intended
	 * ordering of this check against ql_init_hw().
	 */
	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p num_rcvq = %d\n", __func__, ha,
                ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, num_rcvq);


#ifdef QL_ENABLE_ISCSI_TLV
	/* Reserve vectors/rings for iSCSI TLV when the partition is small. */
        if ((ha->msix_count  < 64) || (num_rcvq != 32)) {
                ha->hw.num_sds_rings = 15;
                ha->hw.num_tx_rings = 32;
        }
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	ha->hw.num_rds_rings = ha->hw.num_sds_rings;

	/* One vector per status ring + one mailbox vector. */
	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	/* Vector/rid 1 is dedicated to mailbox completions. */
	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}


	/* Per-status-ring interrupt vectors start at rid 2. */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}
	}

	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	/* Watchdog starts paused; qla_init() unpauses it. */
	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;


	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	/* Arm the watchdog callout. */
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	/* Error-recovery and async-event task queues. */
	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create_fast("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	/* qla_release() tolerates partially-initialized state. */
	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}
490250661Sdavidcs
491250661Sdavidcs/*
492250661Sdavidcs * Name:	qla_pci_detach
493250661Sdavidcs * Function:	Unhooks the device from the operating system
494250661Sdavidcs */
495250661Sdavidcsstatic int
496250661Sdavidcsqla_pci_detach(device_t dev)
497250661Sdavidcs{
498250661Sdavidcs	qla_host_t *ha = NULL;
499250661Sdavidcs	struct ifnet *ifp;
500250661Sdavidcs
501250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
502250661Sdavidcs
503250661Sdavidcs        if ((ha = device_get_softc(dev)) == NULL) {
504250661Sdavidcs                device_printf(dev, "cannot get softc\n");
505250661Sdavidcs                return (ENOMEM);
506250661Sdavidcs        }
507250661Sdavidcs
508250661Sdavidcs	ifp = ha->ifp;
509250661Sdavidcs
510250661Sdavidcs	(void)QLA_LOCK(ha, __func__, 0);
511250661Sdavidcs	qla_stop(ha);
512250661Sdavidcs	QLA_UNLOCK(ha, __func__);
513250661Sdavidcs
514250661Sdavidcs	qla_release(ha);
515250661Sdavidcs
516250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
517250661Sdavidcs
518250661Sdavidcs        return (0);
519250661Sdavidcs}
520250661Sdavidcs
521250661Sdavidcs/*
522250661Sdavidcs * SYSCTL Related Callbacks
523250661Sdavidcs */
524250661Sdavidcsstatic int
525250661Sdavidcsqla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
526250661Sdavidcs{
527250661Sdavidcs	int err, ret = 0;
528250661Sdavidcs	qla_host_t *ha;
529250661Sdavidcs
530250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
531250661Sdavidcs
532250661Sdavidcs	if (err || !req->newptr)
533250661Sdavidcs		return (err);
534250661Sdavidcs
535250661Sdavidcs	if (ret == 1) {
536250661Sdavidcs		ha = (qla_host_t *)arg1;
537250661Sdavidcs		ql_get_stats(ha);
538250661Sdavidcs	}
539250661Sdavidcs	return (err);
540250661Sdavidcs}
541250661Sdavidcsstatic int
542250661Sdavidcsqla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
543250661Sdavidcs{
544250661Sdavidcs	int err, ret = 0;
545250661Sdavidcs	qla_host_t *ha;
546250661Sdavidcs
547250661Sdavidcs	err = sysctl_handle_int(oidp, &ret, 0, req);
548250661Sdavidcs
549250661Sdavidcs	if (err || !req->newptr)
550250661Sdavidcs		return (err);
551250661Sdavidcs
552250661Sdavidcs	if (ret == 1) {
553250661Sdavidcs		ha = (qla_host_t *)arg1;
554250661Sdavidcs		ql_hw_link_status(ha);
555250661Sdavidcs	}
556250661Sdavidcs	return (err);
557250661Sdavidcs}
558250661Sdavidcs
559250661Sdavidcs/*
560250661Sdavidcs * Name:	qla_release
561250661Sdavidcs * Function:	Releases the resources allocated for the device
562250661Sdavidcs */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	/*
	 * Teardown proceeds roughly in reverse order of qla_pci_attach();
	 * every step is guarded so this is safe on a partially-initialized
	 * softc (attach error path).  Drain task queues first so no task
	 * runs against resources freed below.
	 */
        if (ha->async_event_tq) {
                taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	ql_del_cdev(ha);

	/* Ask the watchdog callout to stop and spin until it acknowledges. */
	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	/* Tear down the mailbox interrupt, then its IRQ resource. */
	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	/* Tear down and release the per-status-ring MSI-X vectors. */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}
	}

	if (ha->msix_count)
		pci_release_msi(dev);

	/* lock_init is set only after both mutexes were initialized. */
	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}
641250661Sdavidcs
642250661Sdavidcs/*
643250661Sdavidcs * DMA Related Functions
644250661Sdavidcs */
645250661Sdavidcs
646250661Sdavidcsstatic void
647250661Sdavidcsqla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
648250661Sdavidcs{
649250661Sdavidcs        *((bus_addr_t *)arg) = 0;
650250661Sdavidcs
651250661Sdavidcs        if (error) {
652250661Sdavidcs                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
653250661Sdavidcs                return;
654250661Sdavidcs	}
655250661Sdavidcs
656250661Sdavidcs        *((bus_addr_t *)arg) = segs[0].ds_addr;
657250661Sdavidcs
658250661Sdavidcs	return;
659250661Sdavidcs}
660250661Sdavidcs
/*
 * ql_alloc_dmabuf
 *	Allocates a DMA-able buffer described by dma_buf.  The caller fills
 *	in dma_buf->size and dma_buf->alignment; on success the tag, map,
 *	kernel virtual address (dma_b) and bus address (dma_addr) are set.
 *	Returns 0 on success, non-zero on failure.  Release with
 *	ql_free_dmabuf().
 */
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        /* Per-buffer tag: single segment, 4GB boundary, zero-filled memory. */
        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        /*
         * With BUS_DMA_NOWAIT the callback runs synchronously;
         * qla_dmamap_callback() stores the bus address into b_addr
         * (0 on callback-reported failure).
         */
        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                /*
                 * NOTE(review): the tag is destroyed before the memory it
                 * owns is freed — bus_dma(9) suggests freeing memory first;
                 * confirm whether this ordering is intentional.
                 */
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return ret;
}
725250661Sdavidcs
726250661Sdavidcsvoid
727250661Sdavidcsql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
728250661Sdavidcs{
729250661Sdavidcs        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
730250661Sdavidcs        bus_dma_tag_destroy(dma_buf->dma_tag);
731250661Sdavidcs}
732250661Sdavidcs
/*
 * Create the parent busdma tag that per-buffer tags (see the dma-buf
 * allocation path, which passes ha->parent_tag as parent) derive from.
 *
 * Returns 0 on success, -1 on failure.  On success ha->flags.parent_tag
 * is set so qla_free_parent_dma_tag() knows there is a tag to destroy.
 */
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),   /* parent */
			1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,      /* lowaddr */
			BUS_SPACE_MAXADDR,      /* highaddr */
			NULL, NULL,             /* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,                      /* nsegments; NOTE(review):
						 * 0 looks odd — child tags
						 * supply their own nsegments,
						 * but confirm against
						 * bus_dma(9) expectations */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,                      /* flags */
			NULL, NULL,             /* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	/* remember that teardown must destroy this tag */
	ha->flags.parent_tag = 1;

	return (0);
}
767250661Sdavidcs
768250661Sdavidcsstatic void
769250661Sdavidcsqla_free_parent_dma_tag(qla_host_t *ha)
770250661Sdavidcs{
771250661Sdavidcs        if (ha->flags.parent_tag) {
772250661Sdavidcs                bus_dma_tag_destroy(ha->parent_tag);
773250661Sdavidcs                ha->flags.parent_tag = 0;
774250661Sdavidcs        }
775250661Sdavidcs}
776250661Sdavidcs
777250661Sdavidcs/*
778250661Sdavidcs * Name: qla_init_ifnet
779250661Sdavidcs * Function: Creates the Network Device Interface and Registers it with the O.S
780250661Sdavidcs */
781250661Sdavidcs
782250661Sdavidcsstatic void
783250661Sdavidcsqla_init_ifnet(device_t dev, qla_host_t *ha)
784250661Sdavidcs{
785250661Sdavidcs	struct ifnet *ifp;
786250661Sdavidcs
787250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
788250661Sdavidcs
789250661Sdavidcs	ifp = ha->ifp = if_alloc(IFT_ETHER);
790250661Sdavidcs
791250661Sdavidcs	if (ifp == NULL)
792250661Sdavidcs		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
793250661Sdavidcs
794250661Sdavidcs	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
795250661Sdavidcs
796250661Sdavidcs#if __FreeBSD_version >= 1000000
797250661Sdavidcs	if_initbaudrate(ifp, IF_Gbps(10));
798250661Sdavidcs	ifp->if_capabilities = IFCAP_LINKSTATE;
799250661Sdavidcs#else
800250661Sdavidcs	ifp->if_mtu = ETHERMTU;
801250661Sdavidcs	ifp->if_baudrate = (1 * 1000 * 1000 *1000);
802250661Sdavidcs
803250661Sdavidcs#endif /* #if __FreeBSD_version >= 1000000 */
804250661Sdavidcs
805250661Sdavidcs	ifp->if_init = qla_init;
806250661Sdavidcs	ifp->if_softc = ha;
807250661Sdavidcs	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
808250661Sdavidcs	ifp->if_ioctl = qla_ioctl;
809250661Sdavidcs	ifp->if_start = qla_start;
810250661Sdavidcs
811250661Sdavidcs	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
812250661Sdavidcs	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
813250661Sdavidcs	IFQ_SET_READY(&ifp->if_snd);
814250661Sdavidcs
815250661Sdavidcs	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
816250661Sdavidcs
817250661Sdavidcs	ether_ifattach(ifp, qla_get_mac_addr(ha));
818250661Sdavidcs
819250661Sdavidcs	ifp->if_capabilities = IFCAP_HWCSUM |
820250661Sdavidcs				IFCAP_TSO4 |
821250661Sdavidcs				IFCAP_JUMBO_MTU;
822250661Sdavidcs
823250661Sdavidcs	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
824250661Sdavidcs	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
825250661Sdavidcs
826250661Sdavidcs	ifp->if_capenable = ifp->if_capabilities;
827250661Sdavidcs
828250661Sdavidcs	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
829250661Sdavidcs
830250661Sdavidcs	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);
831250661Sdavidcs
832250661Sdavidcs	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
833250661Sdavidcs		NULL);
834250661Sdavidcs	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
835250661Sdavidcs
836250661Sdavidcs	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
837250661Sdavidcs
838250661Sdavidcs	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));
839250661Sdavidcs
840250661Sdavidcs	return;
841250661Sdavidcs}
842250661Sdavidcs
843250661Sdavidcsstatic void
844250661Sdavidcsqla_init_locked(qla_host_t *ha)
845250661Sdavidcs{
846250661Sdavidcs	struct ifnet *ifp = ha->ifp;
847250661Sdavidcs
848250661Sdavidcs	qla_stop(ha);
849250661Sdavidcs
850250661Sdavidcs	if (qla_alloc_xmt_bufs(ha) != 0)
851250661Sdavidcs		return;
852250661Sdavidcs
853284982Sdavidcs	qla_confirm_9kb_enable(ha);
854284982Sdavidcs
855250661Sdavidcs	if (qla_alloc_rcv_bufs(ha) != 0)
856250661Sdavidcs		return;
857250661Sdavidcs
858250661Sdavidcs	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);
859250661Sdavidcs
860250661Sdavidcs	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
861250661Sdavidcs
862250661Sdavidcs	ha->flags.stop_rcv = 0;
863250661Sdavidcs 	if (ql_init_hw_if(ha) == 0) {
864250661Sdavidcs		ifp = ha->ifp;
865250661Sdavidcs		ifp->if_drv_flags |= IFF_DRV_RUNNING;
866250661Sdavidcs		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
867250661Sdavidcs		ha->flags.qla_watchdog_pause = 0;
868250661Sdavidcs		ha->hw_vlan_tx_frames = 0;
869250661Sdavidcs		ha->tx_tso_frames = 0;
870250661Sdavidcs	}
871250661Sdavidcs
872250661Sdavidcs	return;
873250661Sdavidcs}
874250661Sdavidcs
875250661Sdavidcsstatic void
876250661Sdavidcsqla_init(void *arg)
877250661Sdavidcs{
878250661Sdavidcs	qla_host_t *ha;
879250661Sdavidcs
880250661Sdavidcs	ha = (qla_host_t *)arg;
881250661Sdavidcs
882250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
883250661Sdavidcs
884250661Sdavidcs	(void)QLA_LOCK(ha, __func__, 0);
885250661Sdavidcs	qla_init_locked(ha);
886250661Sdavidcs	QLA_UNLOCK(ha, __func__);
887250661Sdavidcs
888250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
889250661Sdavidcs}
890250661Sdavidcs
/*
 * Snapshot the interface's link-layer multicast list into a flat table
 * and hand it to the hardware.
 *
 * add_multi is passed through to ql_hw_set_multi(); presumably non-zero
 * means "add these addresses" and zero means "delete" — confirm against
 * ql_hw_set_multi().  Returns ql_hw_set_multi()'s status, or 0 if the
 * host lock could not be taken (the hw update is silently skipped).
 */
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	/* walk the multicast list under the if_maddr read lock */
	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		/* only link-layer entries are programmed into the hw */
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		/* hardware table is bounded; excess entries are dropped */
		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	/*
	 * NOTE(review): the third QLA_LOCK argument (1 here vs 0 in the
	 * init/ioctl paths) appears to be a trylock/no-sleep flag —
	 * confirm against the QLA_LOCK definition.
	 */
	if (QLA_LOCK(ha, __func__, 1) == 0) {
		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return (ret);
}
925250661Sdavidcs
/*
 * ioctl handler for the interface.  Handles address assignment, MTU
 * changes, interface flags, multicast updates, media queries, and
 * capability toggles; everything else falls through to ether_ioctl().
 */
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			/* bring the interface up before announcing via ARP */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			/* only reprogram the hw if it is currently running */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}

			/* jumbo MTUs use a different rx replenish threshold */
			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;


			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* running: only react to promisc/allmulti
				 * transitions (XOR against cached flags) */
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			} else {
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			/*
			 * NOTE(review): ha->if_flags is only refreshed on
			 * the down path here, not after promisc/allmulti
			 * changes above — confirm this is intentional.
			 */
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		/* mask holds only the capability bits being toggled */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}
1080250661Sdavidcs
1081250661Sdavidcsstatic int
1082250661Sdavidcsqla_media_change(struct ifnet *ifp)
1083250661Sdavidcs{
1084250661Sdavidcs	qla_host_t *ha;
1085250661Sdavidcs	struct ifmedia *ifm;
1086250661Sdavidcs	int ret = 0;
1087250661Sdavidcs
1088250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1089250661Sdavidcs
1090250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1091250661Sdavidcs
1092250661Sdavidcs	ifm = &ha->media;
1093250661Sdavidcs
1094250661Sdavidcs	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1095250661Sdavidcs		ret = EINVAL;
1096250661Sdavidcs
1097250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1098250661Sdavidcs
1099250661Sdavidcs	return (ret);
1100250661Sdavidcs}
1101250661Sdavidcs
1102250661Sdavidcsstatic void
1103250661Sdavidcsqla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1104250661Sdavidcs{
1105250661Sdavidcs	qla_host_t *ha;
1106250661Sdavidcs
1107250661Sdavidcs	ha = (qla_host_t *)ifp->if_softc;
1108250661Sdavidcs
1109250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1110250661Sdavidcs
1111250661Sdavidcs	ifmr->ifm_status = IFM_AVALID;
1112250661Sdavidcs	ifmr->ifm_active = IFM_ETHER;
1113250661Sdavidcs
1114250661Sdavidcs	ql_update_link_state(ha);
1115250661Sdavidcs	if (ha->hw.link_up) {
1116250661Sdavidcs		ifmr->ifm_status |= IFM_ACTIVE;
1117250661Sdavidcs		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
1118250661Sdavidcs	}
1119250661Sdavidcs
1120250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
1121250661Sdavidcs		(ha->hw.link_up ? "link_up" : "link_down")));
1122250661Sdavidcs
1123250661Sdavidcs	return;
1124250661Sdavidcs}
1125250661Sdavidcs
/*
 * if_start entry point: drain the interface send queue, handing each
 * mbuf chain to qla_send().  Uses a trylock on the tx lock so a caller
 * never blocks here; if the lock is busy, another thread is already
 * draining the queue.
 */
static void
qla_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	/* non-blocking: bail if someone else holds the tx lock */
	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8(ha, (ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	/* must be RUNNING and not OACTIVE to transmit */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8(ha,
			(ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	/* refresh link state when it looks down or the watchdog is idle */
	if (!ha->hw.link_up || !ha->watchdog_ticks)
		ql_update_link_state(ha);

	if (!ha->hw.link_up) {
		QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			/* qla_send() freed the mbuf on hard errors */
			if (m_head == NULL)
				break;
			/* transient failure: requeue and mark OACTIVE */
			QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return;
}
1181250661Sdavidcs
/*
 * DMA-map one mbuf chain and hand it to the hardware transmit path.
 *
 * Called with the tx lock held (see qla_start()).  Returns 0 on
 * success.  On failure, *m_headp is set to NULL whenever this routine
 * has freed the chain (defrag failure, non-ENOMEM map failure, or
 * EINVAL from ql_hw_send()); otherwise the caller still owns it and
 * may requeue.
 */
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = ha->txr_idx;
	uint32_t		iscsi_pdu = 0;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */

	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
		/*
		 * With iSCSI TLV support, iSCSI PDUs are steered to the
		 * upper half of the tx rings; everything else hashes
		 * across all rings.
		 */
		if (qla_iscsi_pdu(ha, m_head) == 0) {
			iscsi_pdu = 1;
			txr_idx = m_head->m_pkthdr.flowid &
					((ha->hw.num_tx_rings >> 1) - 1);
		} else {
			txr_idx = m_head->m_pkthdr.flowid &
					(ha->hw.num_tx_rings - 1);
		}
#else
		/* spread flows across tx rings (num_tx_rings power of 2) */
		txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	}


	/* grab the dmamap of the next free descriptor on that ring */
	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		/* too many segments: defragment the chain and retry once */
		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			/* ENOMEM is transient: leave the chain for requeue */
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		/* ENOMEM is transient: leave the chain for requeue */
		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	/* flush CPU writes before the device reads the buffers */
	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		/* hw owns the mbuf now; remember it for tx completion */
		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}
1290250661Sdavidcs
/*
 * Quiesce and tear down the running interface: clear the driver-running
 * flags, pause the watchdog (spinning until it acknowledges), stop
 * hardware receive, delete the hardware interface, then free the tx/rx
 * buffers.  The ordering here matters — buffers must not be freed while
 * the watchdog or the hardware can still touch them.
 */
static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	/* ask the watchdog to pause and wait until it has */
	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	/* stop the receive path before releasing its buffers */
	ha->flags.stop_rcv = 1;
	ql_hw_stop_rcv(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}
1316250661Sdavidcs
1317250661Sdavidcs/*
1318250661Sdavidcs * Buffer Management Functions for Transmit and Receive Rings
1319250661Sdavidcs */
/*
 * Create the transmit dma tag and one dmamap per tx descriptor on every
 * tx ring.  Returns 0 on success or an errno; on partial failure all
 * state created so far is released via qla_free_xmt_bufs() (the
 * up-front bzero of every ring guarantees untouched maps are NULL so
 * that cleanup is safe).
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	/* NULL parent: derive from the platform's root dma tag */
	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	/*
	 * Zero all rings first so a mid-loop failure below leaves every
	 * not-yet-created map NULL for qla_free_xmt_bufs().
	 */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				/* releases maps created so far + the tag */
				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}
1371250661Sdavidcs
1372250661Sdavidcs/*
1373250661Sdavidcs * Release mbuf after it sent on the wire
1374250661Sdavidcs */
1375250661Sdavidcsstatic void
1376250661Sdavidcsqla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
1377250661Sdavidcs{
1378250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1379250661Sdavidcs
1380250661Sdavidcs	if (txb->m_head && txb->map) {
1381250661Sdavidcs
1382250661Sdavidcs		bus_dmamap_unload(ha->tx_tag, txb->map);
1383250661Sdavidcs
1384250661Sdavidcs		m_freem(txb->m_head);
1385250661Sdavidcs		txb->m_head = NULL;
1386250661Sdavidcs	}
1387250661Sdavidcs
1388250661Sdavidcs	if (txb->map)
1389250661Sdavidcs		bus_dmamap_destroy(ha->tx_tag, txb->map);
1390250661Sdavidcs
1391250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
1392250661Sdavidcs}
1393250661Sdavidcs
1394250661Sdavidcsstatic void
1395250661Sdavidcsqla_free_xmt_bufs(qla_host_t *ha)
1396250661Sdavidcs{
1397250661Sdavidcs	int		i, j;
1398250661Sdavidcs
1399250661Sdavidcs	for (j = 0; j < ha->hw.num_tx_rings; j++) {
1400250661Sdavidcs		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
1401250661Sdavidcs			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
1402250661Sdavidcs	}
1403250661Sdavidcs
1404250661Sdavidcs	if (ha->tx_tag != NULL) {
1405250661Sdavidcs		bus_dma_tag_destroy(ha->tx_tag);
1406250661Sdavidcs		ha->tx_tag = NULL;
1407250661Sdavidcs	}
1408250661Sdavidcs
1409250661Sdavidcs	for (i = 0; i < ha->hw.num_tx_rings; i++) {
1410250661Sdavidcs		bzero((void *)ha->tx_ring[i].tx_buf,
1411250661Sdavidcs			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
1412250661Sdavidcs	}
1413250661Sdavidcs	return;
1414250661Sdavidcs}
1415250661Sdavidcs
1416250661Sdavidcs
1417250661Sdavidcsstatic int
1418250661Sdavidcsqla_alloc_rcv_std(qla_host_t *ha)
1419250661Sdavidcs{
1420250661Sdavidcs	int		i, j, k, r, ret = 0;
1421250661Sdavidcs	qla_rx_buf_t	*rxb;
1422250661Sdavidcs	qla_rx_ring_t	*rx_ring;
1423250661Sdavidcs
1424250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1425250661Sdavidcs
1426250661Sdavidcs		rx_ring = &ha->rx_ring[r];
1427250661Sdavidcs
1428250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1429250661Sdavidcs
1430250661Sdavidcs			rxb = &rx_ring->rx_buf[i];
1431250661Sdavidcs
1432250661Sdavidcs			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1433250661Sdavidcs					&rxb->map);
1434250661Sdavidcs
1435250661Sdavidcs			if (ret) {
1436250661Sdavidcs				device_printf(ha->pci_dev,
1437250661Sdavidcs					"%s: dmamap[%d, %d] failed\n",
1438250661Sdavidcs					__func__, r, i);
1439250661Sdavidcs
1440250661Sdavidcs				for (k = 0; k < r; k++) {
1441250661Sdavidcs					for (j = 0; j < NUM_RX_DESCRIPTORS;
1442250661Sdavidcs						j++) {
1443250661Sdavidcs						rxb = &ha->rx_ring[k].rx_buf[j];
1444250661Sdavidcs						bus_dmamap_destroy(ha->rx_tag,
1445250661Sdavidcs							rxb->map);
1446250661Sdavidcs					}
1447250661Sdavidcs				}
1448250661Sdavidcs
1449250661Sdavidcs				for (j = 0; j < i; j++) {
1450250661Sdavidcs					bus_dmamap_destroy(ha->rx_tag,
1451250661Sdavidcs						rx_ring->rx_buf[j].map);
1452250661Sdavidcs				}
1453250661Sdavidcs				goto qla_alloc_rcv_std_err;
1454250661Sdavidcs			}
1455250661Sdavidcs		}
1456250661Sdavidcs	}
1457250661Sdavidcs
1458250661Sdavidcs	qla_init_hw_rcv_descriptors(ha);
1459250661Sdavidcs
1460250661Sdavidcs
1461250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1462250661Sdavidcs
1463250661Sdavidcs		rx_ring = &ha->rx_ring[r];
1464250661Sdavidcs
1465250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1466250661Sdavidcs			rxb = &rx_ring->rx_buf[i];
1467250661Sdavidcs			rxb->handle = i;
1468250661Sdavidcs			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1469250661Sdavidcs				/*
1470250661Sdavidcs			 	 * set the physical address in the
1471250661Sdavidcs				 * corresponding descriptor entry in the
1472250661Sdavidcs				 * receive ring/queue for the hba
1473250661Sdavidcs				 */
1474250661Sdavidcs				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1475250661Sdavidcs					rxb->paddr,
1476250661Sdavidcs					(rxb->m_head)->m_pkthdr.len);
1477250661Sdavidcs			} else {
1478250661Sdavidcs				device_printf(ha->pci_dev,
1479250661Sdavidcs					"%s: ql_get_mbuf [%d, %d] failed\n",
1480250661Sdavidcs					__func__, r, i);
1481250661Sdavidcs				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1482250661Sdavidcs				goto qla_alloc_rcv_std_err;
1483250661Sdavidcs			}
1484250661Sdavidcs		}
1485250661Sdavidcs	}
1486250661Sdavidcs	return 0;
1487250661Sdavidcs
1488250661Sdavidcsqla_alloc_rcv_std_err:
1489250661Sdavidcs	return (-1);
1490250661Sdavidcs}
1491250661Sdavidcs
1492250661Sdavidcsstatic void
1493250661Sdavidcsqla_free_rcv_std(qla_host_t *ha)
1494250661Sdavidcs{
1495250661Sdavidcs	int		i, r;
1496250661Sdavidcs	qla_rx_buf_t	*rxb;
1497250661Sdavidcs
1498250661Sdavidcs	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1499250661Sdavidcs		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1500250661Sdavidcs			rxb = &ha->rx_ring[r].rx_buf[i];
1501250661Sdavidcs			if (rxb->m_head != NULL) {
1502250661Sdavidcs				bus_dmamap_unload(ha->rx_tag, rxb->map);
1503250661Sdavidcs				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1504250661Sdavidcs				m_freem(rxb->m_head);
1505250661Sdavidcs				rxb->m_head = NULL;
1506250661Sdavidcs			}
1507250661Sdavidcs		}
1508250661Sdavidcs	}
1509250661Sdavidcs	return;
1510250661Sdavidcs}
1511250661Sdavidcs
1512250661Sdavidcsstatic int
1513250661Sdavidcsqla_alloc_rcv_bufs(qla_host_t *ha)
1514250661Sdavidcs{
1515250661Sdavidcs	int		i, ret = 0;
1516250661Sdavidcs
1517250661Sdavidcs	if (bus_dma_tag_create(NULL,    /* parent */
1518250661Sdavidcs			1, 0,    /* alignment, bounds */
1519250661Sdavidcs			BUS_SPACE_MAXADDR,       /* lowaddr */
1520250661Sdavidcs			BUS_SPACE_MAXADDR,       /* highaddr */
1521250661Sdavidcs			NULL, NULL,      /* filter, filterarg */
1522250661Sdavidcs			MJUM9BYTES,     /* maxsize */
1523250661Sdavidcs			1,        /* nsegments */
1524250661Sdavidcs			MJUM9BYTES,        /* maxsegsize */
1525250661Sdavidcs			BUS_DMA_ALLOCNOW,        /* flags */
1526250661Sdavidcs			NULL,    /* lockfunc */
1527250661Sdavidcs			NULL,    /* lockfuncarg */
1528250661Sdavidcs			&ha->rx_tag)) {
1529250661Sdavidcs
1530250661Sdavidcs		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1531250661Sdavidcs			__func__);
1532250661Sdavidcs
1533250661Sdavidcs		return (ENOMEM);
1534250661Sdavidcs	}
1535250661Sdavidcs
1536250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1537250661Sdavidcs
1538250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1539250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1540250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1541250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1542250661Sdavidcs	}
1543250661Sdavidcs
1544250661Sdavidcs	ret = qla_alloc_rcv_std(ha);
1545250661Sdavidcs
1546250661Sdavidcs	return (ret);
1547250661Sdavidcs}
1548250661Sdavidcs
1549250661Sdavidcsstatic void
1550250661Sdavidcsqla_free_rcv_bufs(qla_host_t *ha)
1551250661Sdavidcs{
1552250661Sdavidcs	int		i;
1553250661Sdavidcs
1554250661Sdavidcs	qla_free_rcv_std(ha);
1555250661Sdavidcs
1556250661Sdavidcs	if (ha->rx_tag != NULL) {
1557250661Sdavidcs		bus_dma_tag_destroy(ha->rx_tag);
1558250661Sdavidcs		ha->rx_tag = NULL;
1559250661Sdavidcs	}
1560250661Sdavidcs
1561250661Sdavidcs	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1562250661Sdavidcs
1563250661Sdavidcs	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1564250661Sdavidcs		ha->hw.sds[i].sdsr_next = 0;
1565250661Sdavidcs		ha->hw.sds[i].rxb_free = NULL;
1566250661Sdavidcs		ha->hw.sds[i].rx_free = 0;
1567250661Sdavidcs	}
1568250661Sdavidcs
1569250661Sdavidcs	return;
1570250661Sdavidcs}
1571250661Sdavidcs
1572250661Sdavidcsint
1573250661Sdavidcsql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1574250661Sdavidcs{
1575250661Sdavidcs	register struct mbuf *mp = nmp;
1576250661Sdavidcs	struct ifnet   		*ifp;
1577250661Sdavidcs	int            		ret = 0;
1578250661Sdavidcs	uint32_t		offset;
1579250661Sdavidcs	bus_dma_segment_t	segs[1];
1580284982Sdavidcs	int			nsegs, mbuf_size;
1581250661Sdavidcs
1582250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1583250661Sdavidcs
1584250661Sdavidcs	ifp = ha->ifp;
1585250661Sdavidcs
1586284982Sdavidcs        if (ha->hw.enable_9kb)
1587284982Sdavidcs                mbuf_size = MJUM9BYTES;
1588284982Sdavidcs        else
1589284982Sdavidcs                mbuf_size = MCLBYTES;
1590284982Sdavidcs
1591250661Sdavidcs	if (mp == NULL) {
1592250661Sdavidcs
1593284982Sdavidcs                if (ha->hw.enable_9kb)
1594284982Sdavidcs                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
1595284982Sdavidcs                else
1596284982Sdavidcs                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1597250661Sdavidcs
1598250661Sdavidcs		if (mp == NULL) {
1599250661Sdavidcs			ha->err_m_getcl++;
1600250661Sdavidcs			ret = ENOBUFS;
1601250661Sdavidcs			device_printf(ha->pci_dev,
1602250661Sdavidcs					"%s: m_getcl failed\n", __func__);
1603250661Sdavidcs			goto exit_ql_get_mbuf;
1604250661Sdavidcs		}
1605284982Sdavidcs		mp->m_len = mp->m_pkthdr.len = mbuf_size;
1606250661Sdavidcs	} else {
1607284982Sdavidcs		mp->m_len = mp->m_pkthdr.len = mbuf_size;
1608250661Sdavidcs		mp->m_data = mp->m_ext.ext_buf;
1609250661Sdavidcs		mp->m_next = NULL;
1610250661Sdavidcs	}
1611250661Sdavidcs
1612250661Sdavidcs	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1613250661Sdavidcs	if (offset) {
1614250661Sdavidcs		offset = 8 - offset;
1615250661Sdavidcs		m_adj(mp, offset);
1616250661Sdavidcs	}
1617250661Sdavidcs
1618250661Sdavidcs	/*
1619250661Sdavidcs	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1620250661Sdavidcs	 * machinery to arrange the memory mapping.
1621250661Sdavidcs	 */
1622250661Sdavidcs	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
1623250661Sdavidcs			mp, segs, &nsegs, BUS_DMA_NOWAIT);
1624250661Sdavidcs	rxb->paddr = segs[0].ds_addr;
1625250661Sdavidcs
1626250661Sdavidcs	if (ret || !rxb->paddr || (nsegs != 1)) {
1627250661Sdavidcs		m_free(mp);
1628250661Sdavidcs		rxb->m_head = NULL;
1629250661Sdavidcs		device_printf(ha->pci_dev,
1630250661Sdavidcs			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
1631250661Sdavidcs			__func__, ret, (long long unsigned int)rxb->paddr,
1632250661Sdavidcs			nsegs);
1633250661Sdavidcs                ret = -1;
1634250661Sdavidcs		goto exit_ql_get_mbuf;
1635250661Sdavidcs	}
1636250661Sdavidcs	rxb->m_head = mp;
1637250661Sdavidcs	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
1638250661Sdavidcs
1639250661Sdavidcsexit_ql_get_mbuf:
1640250661Sdavidcs	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
1641250661Sdavidcs	return (ret);
1642250661Sdavidcs}
1643250661Sdavidcs
1644250661Sdavidcsstatic void
1645250661Sdavidcsqla_tx_done(void *context, int pending)
1646250661Sdavidcs{
1647250661Sdavidcs	qla_host_t *ha = context;
1648250661Sdavidcs	struct ifnet   *ifp;
1649250661Sdavidcs
1650250661Sdavidcs	ifp = ha->ifp;
1651250661Sdavidcs
1652250661Sdavidcs	if (!ifp)
1653250661Sdavidcs		return;
1654250661Sdavidcs
1655250661Sdavidcs	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1656250661Sdavidcs		QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
1657250661Sdavidcs		return;
1658250661Sdavidcs	}
1659250661Sdavidcs	ql_hw_tx_done(ha);
1660250661Sdavidcs
1661250661Sdavidcs	qla_start(ha->ifp);
1662250661Sdavidcs}
1663250661Sdavidcs
1664250661Sdavidcsstatic void
1665250661Sdavidcsqla_get_peer(qla_host_t *ha)
1666250661Sdavidcs{
1667250661Sdavidcs	device_t *peers;
1668250661Sdavidcs	int count, i, slot;
1669250661Sdavidcs	int my_slot = pci_get_slot(ha->pci_dev);
1670250661Sdavidcs
1671250661Sdavidcs	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
1672250661Sdavidcs		return;
1673250661Sdavidcs
1674250661Sdavidcs	for (i = 0; i < count; i++) {
1675250661Sdavidcs		slot = pci_get_slot(peers[i]);
1676250661Sdavidcs
1677250661Sdavidcs		if ((slot >= 0) && (slot == my_slot) &&
1678250661Sdavidcs			(pci_get_device(peers[i]) ==
1679250661Sdavidcs				pci_get_device(ha->pci_dev))) {
1680250661Sdavidcs			if (ha->pci_dev != peers[i])
1681250661Sdavidcs				ha->peer_dev = peers[i];
1682250661Sdavidcs		}
1683250661Sdavidcs	}
1684250661Sdavidcs}
1685250661Sdavidcs
1686250661Sdavidcsstatic void
1687250661Sdavidcsqla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
1688250661Sdavidcs{
1689250661Sdavidcs	qla_host_t *ha_peer;
1690250661Sdavidcs
1691250661Sdavidcs	if (ha->peer_dev) {
1692250661Sdavidcs        	if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
1693250661Sdavidcs
1694250661Sdavidcs			ha_peer->msg_from_peer = msg_to_peer;
1695250661Sdavidcs		}
1696250661Sdavidcs	}
1697250661Sdavidcs}
1698250661Sdavidcs
/*
 * qla_error_recovery
 *	Taskqueue handler that recovers the adapter after a fatal hardware
 *	error.  Both PCI functions of the adapter must coordinate: the even
 *	function (pci_func & 1 == 0) acts as the initiator — it takes the
 *	minidump and re-initializes the hardware — while the odd function
 *	follows via the msg_from_peer mailbox (QL_PEER_MSG_RESET /
 *	QL_PEER_MSG_ACK, see qla_send_msg_to_peer()).  Afterwards each
 *	function rebuilds its tx/rx buffers and restarts its interface.
 */
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;	/* peer-ACK wait budget: 100 x 100ms */
	struct ifnet *ifp = ha->ifp;

        (void)QLA_LOCK(ha, __func__, 0);

	/* Abort any in-progress minidump wait, then let it drain. */
	ha->hw.imd_compl = 1;
	qla_mdelay(__func__, 300);

        ha->flags.stop_rcv = 1;

        ql_hw_stop_rcv(ha);

	/* Mark the interface down before dropping the lock. */
        ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

        QLA_UNLOCK(ha, __func__);

	if ((ha->pci_func & 0x1) == 0) {
		/* Even function: initiate the reset handshake. */

		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			/* Wait up to ~10s for the peer to acknowledge. */
			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

		/* Capture firmware state before re-initializing. */
		ql_minidump(ha);

		(void) ql_init_hw(ha);
        	qla_free_xmt_bufs(ha);
	        qla_free_rcv_bufs(ha);

		/* Tell the peer the hardware has been re-initialized. */
		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		/* Odd function: respond to (or prod) the initiator. */
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		/* Wait for the initiator's ACK that hw init is done. */
		while ((ha->msg_from_peer != QL_PEER_MSG_ACK)  && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);
        	qla_free_xmt_bufs(ha);
	        qla_free_rcv_bufs(ha);
	}
        (void)QLA_LOCK(ha, __func__, 0);

	/* Rebuild dma buffers; bail (interface stays down) on failure. */
	if (qla_alloc_xmt_bufs(ha) != 0) {
        	QLA_UNLOCK(ha, __func__);
                return;
	}
	qla_confirm_9kb_enable(ha);

        if (qla_alloc_rcv_bufs(ha) != 0) {
        	QLA_UNLOCK(ha, __func__);
                return;
	}

        ha->flags.stop_rcv = 0;
        if (ql_init_hw_if(ha) == 0) {
		/* Interface is back up; resume the watchdog. */
                ifp = ha->ifp;
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                ha->flags.qla_watchdog_pause = 0;
        }

        QLA_UNLOCK(ha, __func__);
}
1780250661Sdavidcs
1781284982Sdavidcsstatic void
1782284982Sdavidcsqla_async_event(void *context, int pending)
1783284982Sdavidcs{
1784284982Sdavidcs        qla_host_t *ha = context;
1785284982Sdavidcs
1786284982Sdavidcs        (void)QLA_LOCK(ha, __func__, 0);
1787284982Sdavidcs        qla_hw_async_event(ha);
1788284982Sdavidcs        QLA_UNLOCK(ha, __func__);
1789284982Sdavidcs}
1790284982Sdavidcs
1791