/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_os.c 331722 2018-03-29 02:50:57Z eadler $");


#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
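
/*
 * For reference: with the values above, PCI_QLOGIC_ISP8030 evaluates to
 * (0x8030 << 16) | 0x1077 == 0x80301077, and qla_pci_probe() builds the
 * same 32-bit key from the live device as
 * (pci_get_device(dev) << 16) | pci_get_vendor(dev).
 */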

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
		uint32_t iscsi_pdu);

/*
 * Hooks to the Operating System
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32


static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qla_pci_probe
 * Function:	Validate the PCI device to be a QLA83XX device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->enable_minidump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_minidump", CTLFLAG_RW,
		&ha->enable_minidump, ha->enable_minidump,
		"Minidump retrieval prior to error recovery "
		"is enabled only when this is set");

	ha->enable_driverstate_dump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
		&ha->enable_driverstate_dump, ha->enable_driverstate_dump,
		"Driver State retrieval prior to error recovery "
		"is enabled only when this is set");

	ha->enable_error_recovery = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
		&ha->enable_error_recovery, ha->enable_error_recovery,
		"when set, error recovery is enabled on fatal errors; "
		"otherwise the port is taken offline");

	ha->ms_delay_after_init = 1000;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
		&ha->ms_delay_after_init, ha->ms_delay_after_init,
		"millisecond delay after hw_init");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_lock_failed",
		CTLFLAG_RD, &ha->hw_lock_failed,
		"number of hw_lock failures");

        return;
}
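
/*
 * Illustrative only (not in the original source): assuming the first
 * instance attaches as ql0, the nodes added above appear under dev.ql.0
 * and can be exercised from userland, e.g.:
 *
 *	sysctl dev.ql.0.version
 *	sysctl dev.ql.0.debug=1
 *	sysctl dev.ql.0.enable_error_recovery=0
 */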

static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

        if (ha->qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->qla_watchdog_pause) {
                if (!ha->offline &&
                        (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
                        (ha->msg_from_peer == QL_PEER_MSG_RESET))) {

	        	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ql_update_link_state(ha);

			if (ha->enable_error_recovery) {
				ha->qla_watchdog_paused = 1;
				ha->qla_watchdog_pause = 1;
				ha->err_inject = 0;
				device_printf(ha->pci_dev,
					"%s: taskqueue_enqueue(err_task) \n",
					__func__);
				taskqueue_enqueue(ha->err_tq, &ha->err_task);
			} else {
				if (ifp != NULL)
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ha->offline = 1;
			}
			return;

		} else {
			if (ha->qla_interface_up) {

				ha->watchdog_ticks++;

				if (ha->watchdog_ticks > 1000)
					ha->watchdog_ticks = 0;

				if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
					taskqueue_enqueue(ha->stats_tq,
						&ha->stats_task);
				}

				if (ha->async_event) {
					taskqueue_enqueue(ha->async_event_tq,
						&ha->async_event_task);
				}

			}
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}
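
/*
 * Note (not in the original source): qla_watchdog() above re-arms itself
 * through callout_reset() every QLA_WATCHDOG_CALLOUT_TICKS and stops
 * rescheduling only once qla_watchdog_exit is set; qla_release() relies
 * on this by waiting until qla_watchdog_exited becomes non-zero before
 * tearing the rest of the driver down.
 */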

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev) & 0x1;

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	qla_add_sysctls(ha);

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < 1) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		ha->hw.num_sds_rings = ha->msix_count - 1;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq);

        if ((ha->msix_count < 64) || (num_rcvq != 32)) {
		if (ha->hw.num_sds_rings > 15) {
			ha->hw.num_sds_rings = 15;
		}
        }

	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
	ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	ql_hw_add_sysctls(ha);

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;

	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	ql_alloc_drvr_state_buffer(ha);
	ql_alloc_sp_log_buffer(ha);
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
        ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->stats_tq);
        taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;


        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	QLA_LOCK(ha, __func__, -1, 0);

	ha->qla_detach_active = 1;
	qla_stop(ha);

	qla_release(ha);

	QLA_UNLOCK(ha, __func__);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}
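
/*
 * Illustrative only (not in the original source): the handler above is
 * write-triggered. Reading the node just returns 0; writing 1 refreshes
 * the hardware link state, e.g. (assuming unit 0):
 *
 *	sysctl dev.ql.0.link_status=1
 */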

/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

        if (ha->async_event_tq) {
                taskqueue_drain_all(ha->async_event_tq);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain_all(ha->err_tq);
		taskqueue_free(ha->err_tq);
	}

	if (ha->stats_tq) {
		taskqueue_drain_all(ha->stats_tq);
		taskqueue_free(ha->stats_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_drvr_state_buffer(ha);
	ql_free_sp_log_buffer(ha);
	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);

	return;
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return ret;
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}
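
/*
 * Illustrative usage sketch, not part of the original source (hence
 * compiled out): a caller fills in the qla_dma_t alignment and size
 * fields before ql_alloc_dmabuf(), then uses dma_b (kernel virtual
 * address) and dma_addr (bus address). The function name below is
 * hypothetical.
 */
#if 0
static int
qla_example_alloc_ring(qla_host_t *ha, qla_dma_t *dma, uint32_t size)
{
	dma->alignment = 8;	/* device-required alignment, power of 2 */
	dma->size = size;	/* allocated as one contiguous segment */

	if (ql_alloc_dmabuf(ha, dma))
		return (ENOMEM);

	/*
	 * dma->dma_b arrives zeroed (BUS_DMA_ZERO); program dma->dma_addr
	 * into the device, then tear down with the paired free routine.
	 */
	ql_free_dmabuf(ha, dma);
	return (0);
}
#endif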

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and registers it with the OS
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;
	ifp->if_mtu = ETHERMTU;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;

	ifp->if_transmit = qla_transmit;
	ifp->if_qflush = qla_qflush;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_TSO6 |
				IFCAP_JUMBO_MTU |
				IFCAP_VLAN_HWTAGGING |
				IFCAP_VLAN_MTU |
				IFCAP_VLAN_HWTSO |
				IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}

static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;

	ha->stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->qla_interface_up = 1;
		ql_update_link_state(ha);
	} else {
		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
			ha->hw.sp_log_stop = -1;
	}

	ha->qla_watchdog_pause = 0;

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	qla_init_locked(ha);

	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
		QLA_LOCK_NO_SLEEP) != 0)
		return (-1);

	ql_sp_log(ha, 12, 4, ifp->if_drv_flags,
		(ifp->if_drv_flags & IFF_DRV_RUNNING),
		add_multi, (uint32_t)mcnt, 0);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

		if (!add_multi) {
			ret = qla_hw_del_all_mcast(ha);

			if (ret)
				device_printf(ha->pci_dev,
					"%s: qla_hw_del_all_mcast() failed\n",
				__func__);
		}

		if (!ret)
			ret = ql_hw_set_multi(ha, mta, mcnt, 1);

	}

	QLA_UNLOCK(ha, __func__);

	return (ret);
}

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;
	if (ha->offline || ha->qla_initiate_recovery)
		return (ret);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {

			ret = QLA_LOCK(ha, __func__,
					QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);
			if (ret)
				break;

			ifp->if_flags |= IFF_UP;

			ql_sp_log(ha, 8, 3, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);

			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				qla_init_locked(ha);
			}

			QLA_UNLOCK(ha, __func__);
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			ql_sp_log(ha, 9, 4, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				ha->max_frame_size, ifp->if_mtu, 0);

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				qla_init_locked(ha);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;


			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

		if (ret)
			break;

		ql_sp_log(ha, 10, 4, ifp->if_drv_flags,
			(ifp->if_drv_flags & IFF_DRV_RUNNING),
			ha->if_flags, ifp->if_flags, 0);

		if (ifp->if_flags & IFF_UP) {

			ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
			qla_init_locked(ha);

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (qla_set_multi(ha, 1))
			ret = EINVAL;
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (qla_set_multi(ha, 0))
			ret = EINVAL;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ql_sp_log(ha, 11, 4, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				mask, ifp->if_capenable, 0);

			qla_init_locked(ha);

			QLA_UNLOCK(ha, __func__);

		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}


static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
	uint32_t iscsi_pdu)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;

	if (NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) {
		QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\
			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
		if (m_head)
			m_freem(m_head);
		*m_headp = NULL;
		return (ret);
	}

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		if (iscsi_pdu)
			ha->tx_ring[txr_idx].iscsi_pkt_count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		bus_dmamap_unload(ha->tx_tag, map);
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

        fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
                                   M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL) {
            QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
                " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
            return (-ENOMEM);
        }
        return 0;
}

static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        struct mbuf *mp;
        struct ifnet *ifp = ha->ifp;

        if (mtx_initialized(&fp->tx_mtx)) {

                if (fp->tx_br != NULL) {

                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }

                        mtx_unlock(&fp->tx_mtx);

                        buf_ring_free(fp->tx_br, M_DEVBUF);
                        fp->tx_br = NULL;
                }
                mtx_destroy(&fp->tx_mtx);
        }
        return;
}

static void
qla_fp_taskqueue(void *context, int pending)
{
        qla_tx_fp_t *fp;
        qla_host_t *ha;
        struct ifnet *ifp;
        struct mbuf  *mp;
        int ret;
	uint32_t txr_idx;
	uint32_t iscsi_pdu = 0;
	uint32_t rx_pkts_left = -1;

        fp = context;

        if (fp == NULL)
                return;

        ha = (qla_host_t *)fp->ha;

        ifp = ha->ifp;

	txr_idx = fp->txr_idx;

        mtx_lock(&fp->tx_mtx);

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
                mtx_unlock(&fp->tx_mtx);
                goto qla_fp_taskqueue_exit;
        }

	while (rx_pkts_left && !ha->stop_rcv &&
		(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
		ql_hw_tx_done_locked(ha, fp->txr_idx);
		ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
#else
		ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

		mp = drbr_peek(ifp, fp->tx_br);

		while (mp != NULL) {

			if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
				if (ql_iscsi_pdu(ha, mp) == 0) {
					txr_idx = txr_idx +
						(ha->hw.num_tx_rings >> 1);
					iscsi_pdu = 1;
				} else {
					iscsi_pdu = 0;
					txr_idx = fp->txr_idx;
				}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
			}

			ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

			if (ret) {
				if (mp != NULL)
					drbr_putback(ifp, fp->tx_br, mp);
				else {
					drbr_advance(ifp, fp->tx_br);
				}

				mtx_unlock(&fp->tx_mtx);

				goto qla_fp_taskqueue_exit0;
			} else {
				drbr_advance(ifp, fp->tx_br);
			}

			/* Send a copy of the frame to the BPF listener */
			ETHER_BPF_MTAP(ifp, mp);
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				break;

			mp = drbr_peek(ifp, fp->tx_br);
		}
	}
        mtx_unlock(&fp->tx_mtx);

qla_fp_taskqueue_exit0:

	if (rx_pkts_left || ((mp != NULL) && ret)) {
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
	} else {
		if (!ha->stop_rcv) {
			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
		}
	}

qla_fp_taskqueue_exit:

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
        return;
}

static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
        int     i;
        uint8_t tq_name[32];

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                qla_tx_fp_t *fp = &ha->tx_fp[i];

                bzero(tq_name, sizeof (tq_name));
                snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

                TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

                fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                                        taskqueue_thread_enqueue,
                                        &fp->fp_taskqueue);

                if (fp->fp_taskqueue == NULL)
                        return (-1);

                taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
                        tq_name);

                QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
                        fp->fp_taskqueue));
        }

        return (0);
}

static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain_all(fp->fp_taskqueue);
                        taskqueue_free(fp->fp_taskqueue);
                        fp->fp_taskqueue = NULL;
                }
        }
        return;
}

static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain_all(fp->fp_taskqueue);
                }
        }
        return;
}

static int
qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
{
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
        qla_tx_fp_t *fp;
        int rss_id = 0;
        int ret = 0;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

#if __FreeBSD_version >= 1100000
        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
#else
        if (mp->m_flags & M_FLOWID)
#endif
                rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
                                        ha->hw.num_sds_rings;
        fp = &ha->tx_fp[rss_id];

        if (fp->tx_br == NULL) {
                ret = EINVAL;
                goto qla_transmit_exit;
        }

        if (mp != NULL) {
                ret = drbr_enqueue(ifp, fp->tx_br, mp);
        }

        if (fp->fp_taskqueue != NULL)
                taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

        ret = 0;

qla_transmit_exit:

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
        return ret;
}
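
/*
 * Note (not in the original source): qla_transmit() above selects the
 * fastpath as (m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % num_sds_rings,
 * so every packet of a given flow lands on the same tx_fp buf_ring; the
 * per-fp taskqueue then drains that ring through qla_send().
 */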

static void
qla_qflush(struct ifnet *ifp)
{
        int                     i;
        qla_tx_fp_t		*fp;
        struct mbuf             *mp;
        qla_host_t              *ha;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                fp = &ha->tx_fp[i];

                if (fp == NULL)
                        continue;

                if (fp->tx_br) {
                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }
                        mtx_unlock(&fp->tx_mtx);
                }
        }
        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

        return;
}

static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;
	int i = 0;

	ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ha->qla_watchdog_pause = 1;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
        	qla_tx_fp_t *fp;

		fp = &ha->tx_fp[i];

                if (fp == NULL)
                        continue;

		if (fp->tx_br != NULL) {
                        mtx_lock(&fp->tx_mtx);
                        mtx_unlock(&fp->tx_mtx);
		}
	}

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->qla_interface_up = 0;

	qla_drain_fp_taskqueues(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}

/*
 * Release mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		bus_dmamap_sync(ha->tx_tag, txb->map,
			BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;

		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	if (txb->map) {
		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}


1811static int
1812qla_alloc_rcv_std(qla_host_t *ha)
1813{
1814	int		i, j, k, r, ret = 0;
1815	qla_rx_buf_t	*rxb;
1816	qla_rx_ring_t	*rx_ring;
1817
1818	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1819
1820		rx_ring = &ha->rx_ring[r];
1821
1822		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1823
1824			rxb = &rx_ring->rx_buf[i];
1825
1826			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
1827					&rxb->map);
1828
1829			if (ret) {
1830				device_printf(ha->pci_dev,
1831					"%s: dmamap[%d, %d] failed\n",
1832					__func__, r, i);
1833
1834				for (k = 0; k < r; k++) {
1835					for (j = 0; j < NUM_RX_DESCRIPTORS;
1836						j++) {
1837						rxb = &ha->rx_ring[k].rx_buf[j];
1838						bus_dmamap_destroy(ha->rx_tag,
1839							rxb->map);
1840					}
1841				}
1842
1843				for (j = 0; j < i; j++) {
1844					bus_dmamap_destroy(ha->rx_tag,
1845						rx_ring->rx_buf[j].map);
1846				}
1847				goto qla_alloc_rcv_std_err;
1848			}
1849		}
1850	}
1851
1852	qla_init_hw_rcv_descriptors(ha);
1853
1854
1855	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1856
1857		rx_ring = &ha->rx_ring[r];
1858
1859		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1860			rxb = &rx_ring->rx_buf[i];
1861			rxb->handle = i;
1862			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
1863				/*
1864			 	 * set the physical address in the
1865				 * corresponding descriptor entry in the
1866				 * receive ring/queue for the hba
1867				 */
1868				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
1869					rxb->paddr,
1870					(rxb->m_head)->m_pkthdr.len);
1871			} else {
1872				device_printf(ha->pci_dev,
1873					"%s: ql_get_mbuf [%d, %d] failed\n",
1874					__func__, r, i);
1875				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1876				goto qla_alloc_rcv_std_err;
1877			}
1878		}
1879	}
1880	return 0;
1881
1882qla_alloc_rcv_std_err:
1883	return (-1);
1884}
1885
1886static void
1887qla_free_rcv_std(qla_host_t *ha)
1888{
1889	int		i, r;
1890	qla_rx_buf_t	*rxb;
1891
1892	for (r = 0; r < ha->hw.num_rds_rings; r++) {
1893		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
1894			rxb = &ha->rx_ring[r].rx_buf[i];
1895			if (rxb->m_head != NULL) {
1896				bus_dmamap_unload(ha->rx_tag, rxb->map);
1897				bus_dmamap_destroy(ha->rx_tag, rxb->map);
1898				m_freem(rxb->m_head);
1899				rxb->m_head = NULL;
1900			}
1901		}
1902	}
1903	return;
1904}
1905
1906static int
1907qla_alloc_rcv_bufs(qla_host_t *ha)
1908{
1909	int		i, ret = 0;
1910
1911	if (bus_dma_tag_create(NULL,    /* parent */
1912			1, 0,    /* alignment, bounds */
1913			BUS_SPACE_MAXADDR,       /* lowaddr */
1914			BUS_SPACE_MAXADDR,       /* highaddr */
1915			NULL, NULL,      /* filter, filterarg */
1916			MJUM9BYTES,     /* maxsize */
1917			1,        /* nsegments */
1918			MJUM9BYTES,        /* maxsegsize */
1919			BUS_DMA_ALLOCNOW,        /* flags */
1920			NULL,    /* lockfunc */
1921			NULL,    /* lockfuncarg */
1922			&ha->rx_tag)) {
1923
1924		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
1925			__func__);
1926
1927		return (ENOMEM);
1928	}
1929
1930	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1931
1932	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1933		ha->hw.sds[i].sdsr_next = 0;
1934		ha->hw.sds[i].rxb_free = NULL;
1935		ha->hw.sds[i].rx_free = 0;
1936	}
1937
1938	ret = qla_alloc_rcv_std(ha);
1939
1940	return (ret);
1941}
1942
1943static void
1944qla_free_rcv_bufs(qla_host_t *ha)
1945{
1946	int		i;
1947
1948	qla_free_rcv_std(ha);
1949
1950	if (ha->rx_tag != NULL) {
1951		bus_dma_tag_destroy(ha->rx_tag);
1952		ha->rx_tag = NULL;
1953	}
1954
1955	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));
1956
1957	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1958		ha->hw.sds[i].sdsr_next = 0;
1959		ha->hw.sds[i].rxb_free = NULL;
1960		ha->hw.sds[i].rx_free = 0;
1961	}
1962
1963	return;
1964}
1965
1966int
1967ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1968{
1969	struct mbuf *mp = nmp;
1970	struct ifnet   		*ifp;
1971	int            		ret = 0;
1972	uint32_t		offset;
1973	bus_dma_segment_t	segs[1];
1974	int			nsegs, mbuf_size;
1975
1976	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1977
1978	ifp = ha->ifp;
1979
1980        if (ha->hw.enable_9kb)
1981                mbuf_size = MJUM9BYTES;
1982        else
1983                mbuf_size = MCLBYTES;
1984
1985	if (mp == NULL) {
1986
1987		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
1988			return(-1);
1989
1990                if (ha->hw.enable_9kb)
1991                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
1992                else
1993                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1994
1995		if (mp == NULL) {
1996			ha->err_m_getcl++;
1997			ret = ENOBUFS;
1998			device_printf(ha->pci_dev,
1999					"%s: m_getcl failed\n", __func__);
2000			goto exit_ql_get_mbuf;
2001		}
2002		mp->m_len = mp->m_pkthdr.len = mbuf_size;
2003	} else {
2004		mp->m_len = mp->m_pkthdr.len = mbuf_size;
2005		mp->m_data = mp->m_ext.ext_buf;
2006		mp->m_next = NULL;
2007	}
2008
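	/*
	 * Advance m_data to the next 8-byte boundary; the receive
	 * descriptors appear to require 8-byte aligned buffer
	 * addresses, hence this adjustment.
	 */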
2009	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
2010	if (offset) {
2011		offset = 8 - offset;
2012		m_adj(mp, offset);
2013	}
2014
2015	/*
2016	 * Using memory from the mbuf cluster pool, invoke the bus_dma
2017	 * machinery to arrange the memory mapping.
2018	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);

	/* segs[] is valid only when the load succeeds */
	rxb->paddr = (ret == 0) ? segs[0].ds_addr : 0;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (unsigned long long)rxb->paddr,
			nsegs);
		ret = -1;
		goto exit_ql_get_mbuf;
	}
2033	rxb->m_head = mp;
2034	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
2035
2036exit_ql_get_mbuf:
2037	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
2038	return (ret);
2039}
2040
2041
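/*
 * qla_get_peer
 *	Walk the devices on our PCI bus looking for one in the same
 *	slot with the same device id but a different device_t,
 *	presumably the other function of the same dual-port adapter,
 *	and remember it for the recovery handshake.
 */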
2042static void
2043qla_get_peer(qla_host_t *ha)
2044{
2045	device_t *peers;
2046	int count, i, slot;
2047	int my_slot = pci_get_slot(ha->pci_dev);
2048
2049	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
2050		return;
2051
2052	for (i = 0; i < count; i++) {
2053		slot = pci_get_slot(peers[i]);
2054
2055		if ((slot >= 0) && (slot == my_slot) &&
2056			(pci_get_device(peers[i]) ==
2057				pci_get_device(ha->pci_dev))) {
2058			if (ha->pci_dev != peers[i])
2059				ha->peer_dev = peers[i];
2060		}
2061	}
2062}
2063
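/*
 * qla_send_msg_to_peer
 *	Post a message to the peer function by writing directly into
 *	its softc; the peer discovers it by polling msg_from_peer.
 */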
2064static void
2065qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
2066{
2067	qla_host_t *ha_peer;
2068
	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL)
			ha_peer->msg_from_peer = msg_to_peer;
	}
}
2076
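/*
 * qla_set_error_recovery
 *	Mark the interface not-running and arm the recovery task, or
 *	take the port offline outright when recovery is disabled or the
 *	system is still cold-booting.
 */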
2077void
2078qla_set_error_recovery(qla_host_t *ha)
2079{
2080	struct ifnet *ifp = ha->ifp;
2081
	if (!cold && ha->enable_error_recovery) {
		if (ifp)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ha->qla_initiate_recovery = 1;
	} else {
		ha->offline = 1;
	}
2088	return;
2089}
2090
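/*
 * qla_error_recovery
 *	Taskqueue handler that reinitializes the adapter after a fatal
 *	error. The two PCI functions coordinate through the
 *	QL_PEER_MSG_RESET/QL_PEER_MSG_ACK handshake, which appears to
 *	serialize the reset of the shared hardware between the ports.
 */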
2091static void
2092qla_error_recovery(void *context, int pending)
2093{
2094	qla_host_t *ha = context;
	uint32_t msecs_100 = 400;	/* up to 400 x 100ms waits for peer ACK */
2096	struct ifnet *ifp = ha->ifp;
2097	int i = 0;
2098
2099	device_printf(ha->pci_dev, "%s: enter\n", __func__);
2100	ha->hw.imd_compl = 1;
2101
2102	taskqueue_drain_all(ha->stats_tq);
2103	taskqueue_drain_all(ha->async_event_tq);
2104
2105	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2106		return;
2107
2108	device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
2109		__func__, qla_get_usec_timestamp());
2110
2111	if (ha->qla_interface_up) {
2112
2113		qla_mdelay(__func__, 300);
2114
2117		for (i = 0; i < ha->hw.num_sds_rings; i++) {
2118	        	qla_tx_fp_t *fp;
2119
2120			fp = &ha->tx_fp[i];
2121
2122			if (fp == NULL)
2123				continue;
2124
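			/*
			 * Taking and releasing tx_mtx acts as a
			 * barrier: any transmit that currently holds
			 * the lock finishes before recovery proceeds.
			 */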
2125			if (fp->tx_br != NULL) {
2126				mtx_lock(&fp->tx_mtx);
2127				mtx_unlock(&fp->tx_mtx);
2128			}
2129		}
2130	}
2131
2132	qla_drain_fp_taskqueues(ha);
2133
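	/*
	 * The even numbered PCI function leads the recovery: it
	 * notifies the peer, takes the dumps and reinitializes the
	 * hardware before acknowledging; the odd function mirrors the
	 * same handshake from the other side.
	 */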
2134	if ((ha->pci_func & 0x1) == 0) {
2135
2136		if (!ha->msg_from_peer) {
2137			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2138
2139			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
2140				msecs_100--)
2141				qla_mdelay(__func__, 100);
2142		}
2143
2144		ha->msg_from_peer = 0;
2145
2146		if (ha->enable_minidump)
2147			ql_minidump(ha);
2148
2149		if (ha->enable_driverstate_dump)
2150			ql_capture_drvr_state(ha);
2151
2152		if (ql_init_hw(ha)) {
2153			device_printf(ha->pci_dev,
2154				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2155				__func__, qla_get_usec_timestamp());
2156			ha->offline = 1;
2157			goto qla_error_recovery_exit;
2158		}
2159
2160		if (ha->qla_interface_up) {
2161			qla_free_xmt_bufs(ha);
2162			qla_free_rcv_bufs(ha);
2163		}
2164
2165		if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2166			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2167
2168	} else {
2169		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
2170
2171			ha->msg_from_peer = 0;
2172
2173			if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2174				qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2175		} else {
2176			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2177		}
2178
		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
			qla_mdelay(__func__, 100);

		if (ha->enable_driverstate_dump)
			ql_capture_drvr_state(ha);

		/*
		 * Test for the ACK itself rather than the counter: the
		 * unsigned counter wraps past zero on timeout, so a
		 * comparison against zero would never detect a failure.
		 */
		if (ha->msg_from_peer != QL_PEER_MSG_ACK) {
			ha->msg_from_peer = 0;
			device_printf(ha->pci_dev,
				"%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
				__func__, qla_get_usec_timestamp());
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}
		ha->msg_from_peer = 0;
2193
2194		if (ql_init_hw(ha)) {
2195			device_printf(ha->pci_dev,
2196				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2197				__func__, qla_get_usec_timestamp());
2198			ha->offline = 1;
2199			goto qla_error_recovery_exit;
2200		}
2201
2202		if (ha->qla_interface_up) {
2203			qla_free_xmt_bufs(ha);
2204			qla_free_rcv_bufs(ha);
2205		}
2206	}
2207
2208	qla_mdelay(__func__, ha->ms_delay_after_init);
2209
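	/*
	 * ha->hw.flags is cleared wholesale through a uint32_t alias;
	 * this assumes the flags bitfield occupies exactly 32 bits.
	 */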
2210	*((uint32_t *)&ha->hw.flags) = 0;
2211	ha->qla_initiate_recovery = 0;
2212
2213	if (ha->qla_interface_up) {
2214
2215		if (qla_alloc_xmt_bufs(ha) != 0) {
2216			ha->offline = 1;
2217			goto qla_error_recovery_exit;
2218		}
2219
2220		qla_confirm_9kb_enable(ha);
2221
2222		if (qla_alloc_rcv_bufs(ha) != 0) {
2223			ha->offline = 1;
2224			goto qla_error_recovery_exit;
2225		}
2226
2227		ha->stop_rcv = 0;
2228
2229		if (ql_init_hw_if(ha) == 0) {
2230			ifp = ha->ifp;
2231			ifp->if_drv_flags |= IFF_DRV_RUNNING;
2232			ha->qla_watchdog_pause = 0;
2233			ql_update_link_state(ha);
2234		} else {
2235			ha->offline = 1;
2236
2237			if (ha->hw.sp_log_stop_events &
2238				Q8_SP_LOG_STOP_IF_START_FAILURE)
2239				ha->hw.sp_log_stop = -1;
2240		}
2241	} else {
2242		ha->qla_watchdog_pause = 0;
2243	}
2244
2245qla_error_recovery_exit:
2246
	if (ha->offline) {
2248		device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
2249			__func__, qla_get_usec_timestamp());
2250		if (ha->hw.sp_log_stop_events &
2251			Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
2252			ha->hw.sp_log_stop = -1;
2253	}
2254
	QLA_UNLOCK(ha, __func__);
2257
2258	if (!ha->offline)
2259		callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
2260			qla_watchdog, ha);
2261
2262	device_printf(ha->pci_dev,
2263		"%s: ts_usecs = %ld exit\n",
2264		__func__, qla_get_usec_timestamp());
2265	return;
2266}
2267
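/*
 * qla_async_event
 *	Taskqueue handler: under the driver lock, consume any pending
 *	asynchronous event notification and pass it down to
 *	qla_hw_async_event().
 */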
2268static void
2269qla_async_event(void *context, int pending)
2270{
	qla_host_t *ha = context;
2272
2273	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2274		return;
2275
2276	if (ha->async_event) {
2277		ha->async_event = 0;
		qla_hw_async_event(ha);
2279	}
2280
2281	QLA_UNLOCK(ha, __func__);
2282
2283	return;
2284}
2285
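/*
 * qla_stats
 *	Taskqueue handler that refreshes the adapter statistics via
 *	ql_get_stats().
 */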
2286static void
2287qla_stats(void *context, int pending)
2288{
	qla_host_t *ha = context;
2292
2293	ql_get_stats(ha);
2294
2295	return;
2296}
2297
2298