/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_os.c 317990 2017-05-08 22:37:37Z davidcs $");


#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
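
/*
 * For reference: the probe key packs the PCI device id into the upper
 * 16 bits and the vendor id into the lower 16 bits, so
 * PCI_QLOGIC_ISP8030 evaluates to (0x8030 << 16) | 0x1077 ==
 * 0x80301077, matching the value synthesized in qla_pci_probe() below.
 */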

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
		uint32_t iscsi_pdu);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32


static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qla_pci_probe
 * Function:	Validate the PCI device to be an ISP 83xx device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_stats, "I", "Statistics");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

        return;
}
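
/*
 * The nodes above hang off the device's sysctl tree. Example usage from
 * userland (a sketch; the unit number "0" is hypothetical):
 *
 *	sysctl dev.ql.0.version
 *	sysctl dev.ql.0.debug=1
 *	sysctl dev.ql.0.stats=1		(writing 1 triggers ql_get_stats())
 *	sysctl dev.ql.0.link_status=1	(writing 1 triggers ql_hw_link_status())
 */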

static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;
	uint32_t i;

	hw = &ha->hw;
	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {
		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {
			ha->qla_watchdog_paused = 1;
			ha->flags.qla_watchdog_pause = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			device_printf(ha->pci_dev,
				"%s: taskqueue_enqueue(err_task)\n", __func__);
			taskqueue_enqueue(ha->err_tq, &ha->err_task);
		} else if (ha->flags.qla_interface_up) {

                        if (ha->async_event) {
                                ha->async_event = 0;
                                taskqueue_enqueue(ha->async_event_tq,
                                        &ha->async_event_task);
                        }

			for (i = 0; i < ha->hw.num_sds_rings; i++) {
				qla_tx_fp_t *fp = &ha->tx_fp[i];

				if (fp->fp_taskqueue != NULL)
					taskqueue_enqueue(fp->fp_taskqueue,
						&fp->fp_task);
			}

			ha->qla_watchdog_paused = 0;
		} else {
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}
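
/*
 * The watchdog re-arms itself via callout_reset() every
 * QLA_WATCHDOG_CALLOUT_TICKS, so it keeps running until
 * flags.qla_watchdog_exit is set in qla_release(); watchdog_ticks is
 * just a heartbeat counter that wraps modulo 1000.
 */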

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev) & 0x1;

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qla_add_sysctls(ha);
	ql_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq);


#ifdef QL_ENABLE_ISCSI_TLV
        if ((ha->msix_count < 64) || (num_rcvq != 32)) {
                ha->hw.num_sds_rings = 15;
                ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
        }
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	ha->hw.num_rds_rings = ha->hw.num_sds_rings;

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;

	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}
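
/*
 * Interrupt layout established in qla_pci_attach(): MSI-X rid 1 is
 * dedicated to mailbox completions (ql_mbx_isr), and rids 2 through
 * (num_sds_rings + 1) service the status/receive rings via ql_isr();
 * hence the requirement that msix_count >= num_sds_rings + 1.
 */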

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	QLA_LOCK(ha);
	qla_stop(ha);
	QLA_UNLOCK(ha);

	qla_release(ha);

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_get_stats(ha);
	}
	return (err);
}

static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}
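
/*
 * Both handlers above follow the usual write-to-trigger sysctl pattern:
 * sysctl_handle_int() copies the user-supplied value into "ret", plain
 * reads return immediately (req->newptr == NULL), and only a written
 * value of 1 kicks the corresponding hardware query.
 */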

/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

        if (ha->async_event_tq) {
                taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,         /* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return (ret);
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}
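
/*
 * Typical ql_alloc_dmabuf()/ql_free_dmabuf() call sequence (a sketch;
 * the caller supplies size and alignment):
 *
 *	qla_dma_t dma;
 *
 *	dma.alignment = 8;
 *	dma.size = nbytes;
 *	if (ql_alloc_dmabuf(ha, &dma) == 0) {
 *		... use dma.dma_b (kernel VA) and dma.dma_addr (bus addr) ...
 *		ql_free_dmabuf(ha, &dma);
 *	}
 */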

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1, ((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;
	ifp->if_mtu = ETHERMTU;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;

	ifp->if_transmit = qla_transmit;
	ifp->if_qflush = qla_qflush;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU |
				IFCAP_VLAN_HWTAGGING |
				IFCAP_VLAN_MTU |
				IFCAP_VLAN_HWTSO |
				IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}
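
/*
 * Note that every advertised capability (HWCSUM, TSO4, LRO, ...) is
 * also enabled by default above (if_capenable = if_capabilities); the
 * SIOCSIFCAP case in qla_ioctl() lets userland toggle them later, e.g.
 * "ifconfig ql0 -lro" (interface name hypothetical).
 */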

static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

	ha->flags.stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->flags.qla_interface_up = 1;
	}

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	QLA_LOCK(ha);
	qla_init_locked(ha);
	QLA_UNLOCK(ha);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	QLA_LOCK(ha);
	ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
	QLA_UNLOCK(ha);

	return (ret);
}
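
/*
 * The mta[] scratch array above holds at most
 * Q8_MAX_NUM_MULTICAST_ADDRS link-level addresses of Q8_MAC_ADDR_LEN
 * bytes each, packed back to back; addresses beyond that limit are
 * silently dropped by the loop before the table is handed to
 * ql_hw_set_multi().
 */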

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				QLA_LOCK(ha);
				qla_init_locked(ha);
				QLA_UNLOCK(ha);
			}
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			QLA_LOCK(ha);

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;

			QLA_UNLOCK(ha);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		QLA_LOCK(ha);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			} else {
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = ql_set_max_mtu(ha, ha->max_frame_size,
					ha->hw.rcv_cntxt_id);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 1))
				ret = EINVAL;
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (qla_set_multi(ha, 0))
				ret = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}

static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
	uint32_t iscsi_pdu)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}
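
/*
 * qla_send() retries a failed DMA mapping exactly once: on EFBIG the
 * mbuf chain is collapsed with m_defrag() and remapped. On any mapping
 * failure other than ENOMEM the mbuf is consumed and *m_headp is set
 * to NULL; on ENOMEM the mbuf survives so the caller may requeue it.
 */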

static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

        fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
                                   M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL) {
            QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
                " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
            return (ENOMEM);
        }
        return (0);
}

static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        struct mbuf *mp;
        struct ifnet *ifp = ha->ifp;

        if (mtx_initialized(&fp->tx_mtx)) {

                if (fp->tx_br != NULL) {

                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }

                        mtx_unlock(&fp->tx_mtx);

                        buf_ring_free(fp->tx_br, M_DEVBUF);
                        fp->tx_br = NULL;
                }
                mtx_destroy(&fp->tx_mtx);
        }
        return;
}

static void
qla_fp_taskqueue(void *context, int pending)
{
        qla_tx_fp_t *fp;
        qla_host_t *ha;
        struct ifnet *ifp;
        struct mbuf  *mp;
        int ret = 0;
	uint32_t txr_idx;
	uint32_t iscsi_pdu = 0;
	uint32_t rx_pkts_left;

        fp = context;

        if (fp == NULL)
                return;

        ha = (qla_host_t *)fp->ha;

        ifp = ha->ifp;

	txr_idx = fp->txr_idx;

        mtx_lock(&fp->tx_mtx);

        if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
                mtx_unlock(&fp->tx_mtx);
                goto qla_fp_taskqueue_exit;
        }

	rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
	ql_hw_tx_done_locked(ha, fp->txr_idx);
	ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
	txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1);
#else
	ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

        mp = drbr_peek(ifp, fp->tx_br);

        while (mp != NULL) {

		/* classify each packet independently */
		iscsi_pdu = 0;

		if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
			if (ql_iscsi_pdu(ha, mp) == 0) {
				iscsi_pdu = 1;
			}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
		}

		ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

                if (ret) {
                        if (mp != NULL)
                                drbr_putback(ifp, fp->tx_br, mp);
                        else {
                                drbr_advance(ifp, fp->tx_br);
                        }

                        mtx_unlock(&fp->tx_mtx);

                        goto qla_fp_taskqueue_exit0;
                } else {
                        drbr_advance(ifp, fp->tx_br);
                }

                mp = drbr_peek(ifp, fp->tx_br);
        }

        mtx_unlock(&fp->tx_mtx);

qla_fp_taskqueue_exit0:

	if (rx_pkts_left || ((mp != NULL) && ret)) {
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
	} else {
		if (!ha->flags.stop_rcv) {
			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
		}
	}

qla_fp_taskqueue_exit:

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
        return;
}

static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
        int     i;
        char    tq_name[32];

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                qla_tx_fp_t *fp = &ha->tx_fp[i];

                bzero(tq_name, sizeof (tq_name));
                snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

                TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

                fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                                        taskqueue_thread_enqueue,
                                        &fp->fp_taskqueue);

                if (fp->fp_taskqueue == NULL)
                        return (-1);

                taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
                        tq_name);

                QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
                        fp->fp_taskqueue));
        }

        return (0);
}

static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                        taskqueue_free(fp->fp_taskqueue);
                        fp->fp_taskqueue = NULL;
                }
        }
        return;
}

static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
                }
        }
        return;
}

static int
qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
{
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
        qla_tx_fp_t *fp;
        int rss_id = 0;
        int ret = 0;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

#if __FreeBSD_version >= 1100000
        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
#else
        if (mp->m_flags & M_FLOWID)
#endif
                rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
                                        ha->hw.num_sds_rings;
        fp = &ha->tx_fp[rss_id];

        if (fp->tx_br == NULL) {
                ret = EINVAL;
                goto qla_transmit_exit;
        }

        if (mp != NULL) {
                ret = drbr_enqueue(ifp, fp->tx_br, mp);
        }

        if (fp->fp_taskqueue != NULL)
                taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

qla_transmit_exit:

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
        return (ret);
}
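
/*
 * Ring selection sketch: a packet whose flowid hashes to, say, 0x1234
 * is queued on ring (0x1234 & Q8_RSS_IND_TBL_MAX_IDX) % num_sds_rings,
 * so all packets of one flow are serialized through a single fp
 * buf_ring and its taskqueue.
 */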

static void
qla_qflush(struct ifnet *ifp)
{
        int                     i;
        qla_tx_fp_t		*fp;
        struct mbuf             *mp;
        qla_host_t              *ha;

        ha = (qla_host_t *)ifp->if_softc;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                fp = &ha->tx_fp[i];

                if (fp == NULL)
                        continue;

                if (fp->tx_br) {
                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }
                        mtx_unlock(&fp->tx_mtx);
                }
        }
        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

        return;
}

static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;
	int i = 0;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
        	qla_tx_fp_t *fp;

		fp = &ha->tx_fp[i];

                if (fp == NULL)
                        continue;

		if (fp->tx_br != NULL) {
                        mtx_lock(&fp->tx_mtx);
                        mtx_unlock(&fp->tx_mtx);
		}
	}

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->flags.qla_interface_up = 0;

	qla_drain_fp_taskqueues(ha);

	ql_hw_stop_rcv(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return (0);
}

/*
 * Release the mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head && txb->map) {

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	if (txb->map)
		bus_dmamap_destroy(ha->tx_tag, txb->map);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}


static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);


	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return (0);

qla_alloc_rcv_std_err:
	return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &ha->rx_ring[r].rx_buf[i];
			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
				rxb->m_head = NULL;
			}
		}
	}
	return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, ret = 0;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}

int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	register struct mbuf *mp = nmp;
	struct ifnet   		*ifp;
	int            		ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs, mbuf_size;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

        if (ha->hw.enable_9kb)
                mbuf_size = MJUM9BYTES;
        else
                mbuf_size = MCLBYTES;

	if (mp == NULL) {

		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
			return (-1);

                if (ha->hw.enable_9kb)
                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
                else
                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
	} else {
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
                ret = -1;
		goto exit_ql_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_ql_get_mbuf:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}
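
/*
 * Alignment example for the m_adj() above: for a cluster whose buffer
 * starts at an address ending in 0x5, offset = 8 - (0x5 & 7) = 3, so
 * the payload (and hence rxb->paddr handed to the firmware) is always
 * 8-byte aligned.
 */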

static void
qla_get_peer(qla_host_t *ha)
{
	device_t *peers;
	int count, i, slot;
	int my_slot = pci_get_slot(ha->pci_dev);

	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
		return;

	for (i = 0; i < count; i++) {
		slot = pci_get_slot(peers[i]);

		if ((slot >= 0) && (slot == my_slot) &&
			(pci_get_device(peers[i]) ==
				pci_get_device(ha->pci_dev))) {
			if (ha->pci_dev != peers[i])
				ha->peer_dev = peers[i];
		}
	}

	/* device_get_children() allocates the list; release it */
	free(peers, M_TEMP);
}
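
/*
 * The "peer" located above is the other PCI function of the same
 * dual-port adapter: same slot, same device id, different softc.
 * qla_error_recovery() uses it to hand-shake resets between the two
 * ports through the msg_from_peer field.
 */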

static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
	qla_host_t *ha_peer;

	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {

			ha_peer->msg_from_peer = msg_to_peer;
		}
	}
}

static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 100;
	struct ifnet *ifp = ha->ifp;
	int i = 0;

        QLA_LOCK(ha);

	if (ha->flags.qla_interface_up) {

		ha->hw.imd_compl = 1;
		qla_mdelay(__func__, 300);

		ql_hw_stop_rcv(ha);

		ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			qla_tx_fp_t *fp;

			fp = &ha->tx_fp[i];

			if (fp == NULL)
				continue;

			if (fp->tx_br != NULL) {
				mtx_lock(&fp->tx_mtx);
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

        QLA_UNLOCK(ha);

	if ((ha->pci_func & 0x1) == 0) {

		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100--)
				qla_mdelay(__func__, 100);
		}

		ha->msg_from_peer = 0;

        	QLA_LOCK(ha);

		ql_minidump(ha);

        	QLA_UNLOCK(ha);

		(void) ql_init_hw(ha);

        	QLA_LOCK(ha);

		if (ha->flags.qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}

        	QLA_UNLOCK(ha);

		qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--)
			qla_mdelay(__func__, 100);
		ha->msg_from_peer = 0;

		(void) ql_init_hw(ha);

        	QLA_LOCK(ha);

		if (ha->flags.qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}

        	QLA_UNLOCK(ha);
	}

        QLA_LOCK(ha);

	if (ha->flags.qla_interface_up) {

		if (qla_alloc_xmt_bufs(ha) != 0) {
			QLA_UNLOCK(ha);
			return;
		}
		qla_confirm_9kb_enable(ha);

		if (qla_alloc_rcv_bufs(ha) != 0) {
			QLA_UNLOCK(ha);
			return;
		}

		ha->flags.stop_rcv = 0;

		if (ql_init_hw_if(ha) == 0) {
			ifp = ha->ifp;
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			ha->flags.qla_watchdog_pause = 0;
		}
	} else
		ha->flags.qla_watchdog_pause = 0;

        QLA_UNLOCK(ha);
}
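
/*
 * Recovery hand-shake summary: the even-numbered PCI function drives
 * the reset (sends QL_PEER_MSG_RESET, waits up to 100 x 100ms for
 * QL_PEER_MSG_ACK, takes the minidump and reinitializes the hardware)
 * while the odd function acknowledges and reinitializes its own half;
 * both sides then rebuild their Tx/Rx buffers under QLA_LOCK above.
 */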

static void
qla_async_event(void *context, int pending)
{
        qla_host_t *ha = context;

        QLA_LOCK(ha);
        qla_hw_async_event(ha);
        QLA_UNLOCK(ha);
}
