/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
		uint32_t iscsi_pdu);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qla_media_change(if_t ifp);
static void qla_media_status(if_t ifp, struct ifmediareq *ifmr);

static int qla_transmit(if_t ifp, struct mbuf *mp);
static void qla_qflush(if_t ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32

static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qla_pci_probe
 * Function:	Validate the PCI device to be a QLA83XX device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

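/*
 * Name:	qla_add_sysctls
 * Function:	Registers the driver's sysctl nodes (driver/firmware version
 *		strings, debug level, error recovery knobs and various Rx/Tx
 *		statistics counters) under the device's sysctl tree.
 */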
static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "link_status", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    (void *)ha, 0, qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->enable_minidump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_minidump", CTLFLAG_RW,
		&ha->enable_minidump, ha->enable_minidump,
		"Minidump retrieval prior to error recovery "
		"is enabled only when this is set");

	ha->enable_driverstate_dump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
		&ha->enable_driverstate_dump, ha->enable_driverstate_dump,
		"Driver State retrieval prior to error recovery "
		"is enabled only when this is set");

	ha->enable_error_recovery = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
		&ha->enable_error_recovery, ha->enable_error_recovery,
		"when set, error recovery is enabled on fatal errors; "
		"otherwise the port is taken offline");

	ha->ms_delay_after_init = 1000;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
		&ha->ms_delay_after_init, ha->ms_delay_after_init,
		"millisecond delay after hw_init");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_lock_failed",
		CTLFLAG_RD, &ha->hw_lock_failed,
		"number of hw_lock failures");

        return;
}

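/*
 * Name:	qla_watchdog
 * Function:	Periodic callout which monitors adapter health; on a fatal
 *		condition it either queues the error recovery task or marks
 *		the port offline, and otherwise kicks off the statistics and
 *		asynchronous event tasks as needed.
 */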
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	if_t ifp;

	ifp = ha->ifp;

	if (ha->qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->qla_watchdog_pause) {
                if (!ha->offline &&
                        (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
                        (ha->msg_from_peer == QL_PEER_MSG_RESET))) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ql_update_link_state(ha);

			if (ha->enable_error_recovery) {
				ha->qla_watchdog_paused = 1;
				ha->qla_watchdog_pause = 1;
				ha->err_inject = 0;
				device_printf(ha->pci_dev,
					"%s: taskqueue_enqueue(err_task)\n",
					__func__);
				taskqueue_enqueue(ha->err_tq, &ha->err_task);
			} else {
				if (ifp != NULL)
					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ha->offline = 1;
			}
			return;

		} else {
			if (ha->qla_interface_up) {
				ha->watchdog_ticks++;

				if (ha->watchdog_ticks > 1000)
					ha->watchdog_ticks = 0;

				if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
					taskqueue_enqueue(ha->stats_tq,
						&ha->stats_task);
				}

				if (ha->async_event) {
					taskqueue_enqueue(ha->async_event_tq,
						&ha->async_event_task);
				}
			}
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev) & 0x1;

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	qla_add_sysctls(ha);

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < 1) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		ha->hw.num_sds_rings = ha->msix_count - 1;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq);

        if ((ha->msix_count < 64) || (num_rcvq != 32)) {
		if (ha->hw.num_sds_rings > 15) {
                	ha->hw.num_sds_rings = 15;
		}
        }

	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
	ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	ql_hw_add_sysctls(ha);

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;

	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	ql_alloc_drvr_state_buffer(ha);
	ql_alloc_sp_log_buffer(ha);
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
        ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->async_event_tq);
        taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
                device_get_nameunit(ha->pci_dev));

        TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
        ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->stats_tq);
        taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
        return (0);

qla_pci_attach_err:

	qla_release(ha);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	if_t ifp;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	QLA_LOCK(ha, __func__, -1, 0);

	ha->qla_detach_active = 1;
	qla_stop(ha);

	qla_release(ha);

	QLA_UNLOCK(ha, __func__);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * SYSCTL Related Callbacks
 */
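/*
 * Name:	qla_sysctl_get_link_status
 * Function:	Sysctl handler; writing 1 to the "link_status" node triggers
 *		a query of the current hardware link state.
 */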
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}

/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

        if (ha->async_event_tq) {
                taskqueue_drain_all(ha->async_event_tq);
                taskqueue_free(ha->async_event_tq);
        }

	if (ha->err_tq) {
		taskqueue_drain_all(ha->err_tq);
		taskqueue_free(ha->err_tq);
	}

	if (ha->stats_tq) {
		taskqueue_drain_all(ha->stats_tq);
		taskqueue_free(ha->stats_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_drvr_state_buffer(ha);
	ql_free_sp_log_buffer(ha);
	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);

	return;
}

/*
 * DMA Related Functions
 */

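/*
 * Name:	qla_dmamap_callback
 * Function:	bus_dmamap_load callback; stores the physical address of the
 *		first (and only) DMA segment into the caller-supplied
 *		argument, or 0 on error.
 */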
static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

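/*
 * Name:	ql_alloc_dmabuf
 * Function:	Creates a DMA tag for the buffer, allocates DMA-able memory
 *		and loads the map to obtain the buffer's physical address.
 */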
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto ql_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qla_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto ql_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
        QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return ret;
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

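/*
 * Name:	qla_alloc_parent_dma_tag
 * Function:	Creates the parent DMA tag from which the driver's
 *		per-buffer DMA tags are derived.
 */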
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S.
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	if_t ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	if_setbaudrate(ifp, IF_Gbps(10));
	if_setcapabilities(ifp, IFCAP_LINKSTATE);
	if_setmtu(ifp, ETHERMTU);

	if_setinitfn(ifp, qla_init);
	if_setsoftc(ifp, ha);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, qla_ioctl);

	if_settransmitfn(ifp, qla_transmit);
	if_setqflushfn(ifp, qla_qflush);

	if_setsendqlen(ifp, qla_get_ifq_snd_maxlen(ha));
	if_setsendqready(ifp);

	ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_TSO6 |
				IFCAP_JUMBO_MTU |
				IFCAP_VLAN_HWTAGGING |
				IFCAP_VLAN_MTU |
				IFCAP_VLAN_HWTSO |
				IFCAP_LRO, 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}

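/*
 * Name:	qla_init_locked
 * Function:	(Re)initializes the interface with the hardware lock held:
 *		stops the port, reallocates Tx/Rx buffers, programs the MAC
 *		address and brings the hardware interface back up.
 */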
static void
qla_init_locked(qla_host_t *ha)
{
	if_t ifp = ha->ifp;

	ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(if_getlladdr(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_TSO);
	if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);

	ha->stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->qla_interface_up = 1;
		ql_update_link_state(ha);
	} else {
		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
			ha->hw.sp_log_stop = -1;
	}

	ha->qla_watchdog_pause = 0;

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	qla_init_locked(ha);

	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static u_int
qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	uint8_t *mta = arg;

	if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
		return (0);

	bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

	return (1);
}

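/*
 * Name:	qla_set_multi
 * Function:	Collects the interface's multicast address list and programs
 *		it into the hardware; when add_multi is 0, all previously
 *		programmed multicast addresses are deleted first.
 */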
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	int mcnt = 0;
	if_t ifp = ha->ifp;
	int ret = 0;

	mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta);

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
		QLA_LOCK_NO_SLEEP) != 0)
		return (-1);

	ql_sp_log(ha, 12, 4, if_getdrvflags(ifp),
		(if_getdrvflags(ifp) & IFF_DRV_RUNNING),
		add_multi, (uint32_t)mcnt, 0);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		if (!add_multi) {
			ret = qla_hw_del_all_mcast(ha);

			if (ret)
				device_printf(ha->pci_dev,
					"%s: qla_hw_del_all_mcast() failed\n",
					__func__);
		}

		if (!ret)
			ret = ql_hw_set_multi(ha, mta, mcnt, 1);
	}

	QLA_UNLOCK(ha, __func__);

	return (ret);
}

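/*
 * Name:	qla_ioctl
 * Function:	Handles interface ioctls (address, MTU, flags, multicast,
 *		media and capability changes), reinitializing the hardware
 *		where the request requires it.
 */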
static int
qla_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);
	if (ha->offline || ha->qla_initiate_recovery)
		return (ret);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ret = QLA_LOCK(ha, __func__,
					QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);
			if (ret)
				break;

			if_setflagbits(ifp, IFF_UP, 0);

			ql_sp_log(ha, 8, 3, if_getdrvflags(ifp),
				(if_getdrvflags(ifp) & IFF_DRV_RUNNING),
				ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);

			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
				qla_init_locked(ha);
			}

			QLA_UNLOCK(ha, __func__);
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
			break;
		}
#endif
		ether_ioctl(ifp, cmd, data);
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			if_setmtu(ifp, ifr->ifr_mtu);
			ha->max_frame_size =
				if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;

			ql_sp_log(ha, 9, 4, if_getdrvflags(ifp),
				(if_getdrvflags(ifp) & IFF_DRV_RUNNING),
				ha->max_frame_size, if_getmtu(ifp), 0);

			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				qla_init_locked(ha);
			}

			if (if_getmtu(ifp) > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;

			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

		if (ret)
			break;

		ql_sp_log(ha, 10, 4, if_getdrvflags(ifp),
			(if_getdrvflags(ifp) & IFF_DRV_RUNNING),
			ha->if_flags, if_getflags(ifp), 0);

		if (if_getflags(ifp) & IFF_UP) {
			ha->max_frame_size = if_getmtu(ifp) +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
			qla_init_locked(ha);

			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((if_getflags(ifp) ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = if_getflags(ifp);
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (qla_set_multi(ha, 1))
			ret = EINVAL;
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (qla_set_multi(ha, 0))
			ret = EINVAL;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			if_togglecapenable(ifp, IFCAP_HWCSUM);
		if (mask & IFCAP_TSO4)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_TSO6)
			if_togglecapenable(ifp, IFCAP_TSO6);
		if (mask & IFCAP_VLAN_HWTAGGING)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
		if (mask & IFCAP_VLAN_HWTSO)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if (mask & IFCAP_LRO)
			if_togglecapenable(ifp, IFCAP_LRO);

		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ql_sp_log(ha, 11, 4, if_getdrvflags(ifp),
				(if_getdrvflags(ifp) & IFF_DRV_RUNNING),
				mask, if_getcapenable(ifp), 0);

			qla_init_locked(ha);

			QLA_UNLOCK(ha, __func__);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(if_t ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)if_getsoftc(ifp);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}

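/*
 * Name:	qla_send
 * Function:	Maps an mbuf chain for DMA (defragmenting once on EFBIG) and
 *		hands it to the hardware for transmission on the given Tx
 *		ring; the mbuf is recorded so it can be freed on completion.
 */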
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
	uint32_t iscsi_pdu)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;

	if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) ||
		(QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))) {
		QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\
			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));

		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head);

		if (m_head)
			m_freem(m_head);
		*m_headp = NULL;
		QL_INITIATE_RECOVERY(ha);
		return (ret);
	}

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {
		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {
			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		if (iscsi_pdu)
			ha->tx_ring[txr_idx].iscsi_pkt_count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		bus_dmamap_unload(ha->tx_tag, map);
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

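/*
 * Name:	qla_alloc_tx_br
 * Function:	Initializes the per-fastpath Tx mutex and allocates the
 *		buf_ring used to stage outbound mbufs for that Tx ring.
 */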
static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

        fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
                                   M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL) {
            QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
                "fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
            return (-ENOMEM);
        }
        return 0;
}

static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        struct mbuf *mp;
        if_t ifp = ha->ifp;

        if (mtx_initialized(&fp->tx_mtx)) {
                if (fp->tx_br != NULL) {
                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }

                        mtx_unlock(&fp->tx_mtx);

                        buf_ring_free(fp->tx_br, M_DEVBUF);
                        fp->tx_br = NULL;
                }
                mtx_destroy(&fp->tx_mtx);
        }
        return;
}

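/*
 * Name:	qla_fp_taskqueue
 * Function:	Per-fastpath task: processes receive completions, reaps
 *		transmit completions and drains the Tx buf_ring, re-enqueueing
 *		itself while work remains and re-enabling interrupts when done.
 */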
static void
qla_fp_taskqueue(void *context, int pending)
{
        qla_tx_fp_t *fp;
        qla_host_t *ha;
        if_t ifp;
        struct mbuf *mp = NULL;
        int ret = 0;
	uint32_t txr_idx;
	uint32_t iscsi_pdu = 0;
	uint32_t rx_pkts_left = -1;

        fp = context;

        if (fp == NULL)
                return;

        ha = (qla_host_t *)fp->ha;

        ifp = ha->ifp;

	txr_idx = fp->txr_idx;

        mtx_lock(&fp->tx_mtx);

        if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
                mtx_unlock(&fp->tx_mtx);
                goto qla_fp_taskqueue_exit;
        }

	while (rx_pkts_left && !ha->stop_rcv &&
		(if_getdrvflags(ifp) & IFF_DRV_RUNNING) && ha->hw.link_up) {
		rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
		ql_hw_tx_done_locked(ha, fp->txr_idx);
		ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
#else
		ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

		mp = drbr_peek(ifp, fp->tx_br);

		while (mp != NULL) {
			if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
				if (ql_iscsi_pdu(ha, mp) == 0) {
					txr_idx = txr_idx +
						(ha->hw.num_tx_rings >> 1);
					iscsi_pdu = 1;
				} else {
					iscsi_pdu = 0;
					txr_idx = fp->txr_idx;
				}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
			}

			ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

			if (ret) {
				if (mp != NULL)
					drbr_putback(ifp, fp->tx_br, mp);
				else {
					drbr_advance(ifp, fp->tx_br);
				}

				mtx_unlock(&fp->tx_mtx);

				goto qla_fp_taskqueue_exit0;
			} else {
				drbr_advance(ifp, fp->tx_br);
			}

			/* Send a copy of the frame to the BPF listener */
			ETHER_BPF_MTAP(ifp, mp);

			if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
				(!ha->hw.link_up))
				break;

			mp = drbr_peek(ifp, fp->tx_br);
		}
	}
        mtx_unlock(&fp->tx_mtx);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto qla_fp_taskqueue_exit;

qla_fp_taskqueue_exit0:

	if (rx_pkts_left || ((mp != NULL) && ret)) {
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
	} else {
		if (!ha->stop_rcv) {
			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
		}
	}

qla_fp_taskqueue_exit:

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
        return;
}

static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
        int     i;
        uint8_t tq_name[32];

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                qla_tx_fp_t *fp = &ha->tx_fp[i];

                bzero(tq_name, sizeof (tq_name));
                snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

                NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

                fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                                        taskqueue_thread_enqueue,
                                        &fp->fp_taskqueue);

                if (fp->fp_taskqueue == NULL)
                        return (-1);

                taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
                        tq_name);

                QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
                        fp->fp_taskqueue));
        }

        return (0);
}

static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain_all(fp->fp_taskqueue);
                        taskqueue_free(fp->fp_taskqueue);
                        fp->fp_taskqueue = NULL;
                }
        }
        return;
}

static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
        int     i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                qla_tx_fp_t *fp = &ha->tx_fp[i];

                if (fp->fp_taskqueue != NULL) {
                        taskqueue_drain_all(fp->fp_taskqueue);
                }
        }
        return;
}

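/*
 * Name:	qla_transmit
 * Function:	if_transmit entry point; selects a fastpath from the mbuf's
 *		RSS hash, enqueues the mbuf on that fastpath's buf_ring and
 *		schedules the fastpath task to drain it.
 */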
static int
qla_transmit(if_t ifp, struct mbuf *mp)
{
	qla_host_t *ha = (qla_host_t *)if_getsoftc(ifp);
        qla_tx_fp_t *fp;
        int rss_id = 0;
        int ret = 0;

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

        if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
                rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
                                        ha->hw.num_sds_rings;
        fp = &ha->tx_fp[rss_id];

        if (fp->tx_br == NULL) {
                ret = EINVAL;
                goto qla_transmit_exit;
        }

        if (mp != NULL) {
                ret = drbr_enqueue(ifp, fp->tx_br, mp);
        }

        if (fp->fp_taskqueue != NULL)
                taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

        ret = 0;

qla_transmit_exit:

        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
        return ret;
}

static void
qla_qflush(if_t ifp)
{
        int                     i;
        qla_tx_fp_t		*fp;
        struct mbuf             *mp;
        qla_host_t              *ha;

        ha = (qla_host_t *)if_getsoftc(ifp);

        QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                fp = &ha->tx_fp[i];

                if (fp == NULL)
                        continue;

                if (fp->tx_br) {
                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }
                        mtx_unlock(&fp->tx_mtx);
                }
        }
        QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

        return;
}

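/*
 * Name:	qla_stop
 * Function:	Quiesces the interface: pauses the watchdog, waits for the
 *		fastpath tasks to drain, tears down the hardware interface
 *		and releases the transmit and receive buffers.
 */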
static void
qla_stop(qla_host_t *ha)
{
	if_t ifp = ha->ifp;
	int i = 0;

	ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ha->qla_watchdog_pause = 1;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
        	qla_tx_fp_t *fp;

		fp = &ha->tx_fp[i];

                if (fp == NULL)
                        continue;

		if (fp->tx_br != NULL) {
                        mtx_lock(&fp->tx_mtx);
                        mtx_unlock(&fp->tx_mtx);
		}
	}

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->qla_interface_up = 0;

	qla_drain_fp_taskqueues(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
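/*
 * Name:	qla_alloc_xmt_bufs
 * Function:	Creates the transmit DMA tag and a DMA map for every
 *		transmit descriptor on every Tx ring.
 */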
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {
			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {
				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}

/*
 * Release mbuf after it is sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		bus_dmamap_sync(ha->tx_tag, txb->map,
			BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;

		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	if (txb->map) {
		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}

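/*
 * Name:	qla_alloc_rcv_std
 * Function:	Creates a DMA map for every receive descriptor, then
 *		allocates an mbuf for each descriptor and programs its
 *		physical address into the receive ring.
 */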
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return 0;

qla_alloc_rcv_std_err:
	return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &ha->rx_ring[r].rx_buf[i];
			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
				rxb->m_head = NULL;
			}
		}
	}
	return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, ret = 0;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {
		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}

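/*
 * Name:	ql_get_mbuf
 * Function:	Allocates (or recycles) a receive mbuf cluster, aligns its
 *		data pointer to an 8 byte boundary and loads it into the
 *		receive buffer's DMA map.
 */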
1935int
1936ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
1937{
1938	register struct mbuf *mp = nmp;
1939	int            		ret = 0;
1940	uint32_t		offset;
1941	bus_dma_segment_t	segs[1];
1942	int			nsegs, mbuf_size;
1943
1944	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));
1945
1946        if (ha->hw.enable_9kb)
1947                mbuf_size = MJUM9BYTES;
1948        else
1949                mbuf_size = MCLBYTES;
1950
1951	if (mp == NULL) {
1952		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
1953			return(-1);
1954
1955                if (ha->hw.enable_9kb)
1956                        mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
1957                else
1958                        mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1959
1960		if (mp == NULL) {
1961			ha->err_m_getcl++;
1962			ret = ENOBUFS;
1963			device_printf(ha->pci_dev,
1964					"%s: m_getcl failed\n", __func__);
1965			goto exit_ql_get_mbuf;
1966		}
1967		mp->m_len = mp->m_pkthdr.len = mbuf_size;
1968	} else {
1969		mp->m_len = mp->m_pkthdr.len = mbuf_size;
1970		mp->m_data = mp->m_ext.ext_buf;
1971		mp->m_next = NULL;
1972	}
1973
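	/*
	 * Align the start of the buffer to an 8-byte boundary; the
	 * receive descriptors presumably require 8-byte aligned buffer
	 * addresses.
	 */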
1974	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
1975	if (offset) {
1976		offset = 8 - offset;
1977		m_adj(mp, offset);
1978	}
1979
1980	/*
1981	 * Using memory from the mbuf cluster pool, invoke the bus_dma
1982	 * machinery to arrange the memory mapping.
1983	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = (ret == 0) ? segs[0].ds_addr : 0;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_freem(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
		ret = -1;
		goto exit_ql_get_mbuf;
	}
1998	rxb->m_head = mp;
1999	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
2000
2001exit_ql_get_mbuf:
2002	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
2003	return (ret);
2004}
2005
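/*
 * Locate the peer PCI function: scan the parent bus for another device
 * in the same slot with the same device ID, i.e. the other function of
 * this dual-function adapter.
 */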
2006static void
2007qla_get_peer(qla_host_t *ha)
2008{
2009	device_t *peers;
2010	int count, i, slot;
2011	int my_slot = pci_get_slot(ha->pci_dev);
2012
2013	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
2014		return;
2015
2016	for (i = 0; i < count; i++) {
2017		slot = pci_get_slot(peers[i]);
2018
2019		if ((slot >= 0) && (slot == my_slot) &&
2020			(pci_get_device(peers[i]) ==
2021				pci_get_device(ha->pci_dev))) {
2022			if (ha->pci_dev != peers[i])
2023				ha->peer_dev = peers[i];
2024		}
2025	}
2026}
2027
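/*
 * Post a message to the peer function by writing it directly into the
 * peer's softc; both functions are driven by this same driver instance.
 */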
2028static void
2029qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
2030{
2031	qla_host_t *ha_peer;
2032
2033	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
2035			ha_peer->msg_from_peer = msg_to_peer;
2036		}
2037	}
2038}
2039
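/*
 * Flag the port for error recovery: mark the interface not-running and
 * set qla_initiate_recovery so the watchdog path can schedule the
 * recovery task. During early boot (cold), or when recovery is
 * disabled, the port is simply marked offline.
 */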
2040void
2041qla_set_error_recovery(qla_host_t *ha)
2042{
2043	if_t ifp = ha->ifp;
2044
2045	if (!cold && ha->enable_error_recovery) {
2046		if (ifp)
2047			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2048		ha->qla_initiate_recovery = 1;
	} else {
		ha->offline = 1;
	}
2051	return;
2052}
2053
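/*
 * Error recovery task. Quiesces the interface, coordinates a chip
 * reset with the peer function via the QL_PEER_MSG_RESET/ACK
 * handshake, re-initializes the hardware and re-allocates the
 * transmit/receive buffers before bringing the interface back up.
 */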
2054static void
2055qla_error_recovery(void *context, int pending)
2056{
2057	qla_host_t *ha = context;
	uint32_t msecs_100 = 400;	/* in 100ms units: up to 40s peer-ACK wait */
2059	if_t ifp = ha->ifp;
2060	int i = 0;
2061
2062	device_printf(ha->pci_dev, "%s: enter\n", __func__);
2063	ha->hw.imd_compl = 1;
2064
2065	taskqueue_drain_all(ha->stats_tq);
2066	taskqueue_drain_all(ha->async_event_tq);
2067
2068	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2069		return;
2070
2071	device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
2072		__func__, qla_get_usec_timestamp());
2073
2074	if (ha->qla_interface_up) {
		qla_mdelay(__func__, 300);

2079		for (i = 0; i < ha->hw.num_sds_rings; i++) {
2080	        	qla_tx_fp_t *fp;
2081
2082			fp = &ha->tx_fp[i];
2083
2084			if (fp == NULL)
2085				continue;
2086
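			/*
			 * Acquire and release each fastpath tx mutex so
			 * that any transmit thread still inside its
			 * critical section drains out first.
			 */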
2087			if (fp->tx_br != NULL) {
2088				mtx_lock(&fp->tx_mtx);
2089				mtx_unlock(&fp->tx_mtx);
2090			}
2091		}
2092	}
2093
2094	qla_drain_fp_taskqueues(ha);
2095
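	/*
	 * The even-numbered PCI function leads the reset: it signals
	 * QL_PEER_MSG_RESET and waits up to 40 seconds for the odd
	 * function to acknowledge before re-initializing the hardware.
	 */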
2096	if ((ha->pci_func & 0x1) == 0) {
2097		if (!ha->msg_from_peer) {
2098			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2099
			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100) {
				qla_mdelay(__func__, 100);
				msecs_100--;
			}
2103		}
2104
2105		ha->msg_from_peer = 0;
2106
2107		if (ha->enable_minidump)
2108			ql_minidump(ha);
2109
2110		if (ha->enable_driverstate_dump)
2111			ql_capture_drvr_state(ha);
2112
2113		if (ql_init_hw(ha)) {
2114			device_printf(ha->pci_dev,
2115				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2116				__func__, qla_get_usec_timestamp());
2117			ha->offline = 1;
2118			goto qla_error_recovery_exit;
2119		}
2120
2121		if (ha->qla_interface_up) {
2122			qla_free_xmt_bufs(ha);
2123			qla_free_rcv_bufs(ha);
2124		}
2125
2126		if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2127			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2128
2129	} else {
2130		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {
2131			ha->msg_from_peer = 0;
2132
2133			if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
2134				qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
2135		} else {
2136			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
2137		}
2138
		/*
		 * Wait for the peer's ACK. Decrement msecs_100 inside the
		 * loop body so it cannot underflow; a value of zero after
		 * the loop reliably indicates a timeout (checked below).
		 */
		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100) {
			qla_mdelay(__func__, 100);
			msecs_100--;
		}
		ha->msg_from_peer = 0;
2142
2143		if (ha->enable_driverstate_dump)
2144			ql_capture_drvr_state(ha);
2145
2146		if (msecs_100 == 0) {
2147			device_printf(ha->pci_dev,
2148				"%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
2149				__func__, qla_get_usec_timestamp());
2150			ha->offline = 1;
2151			goto qla_error_recovery_exit;
2152		}
2153
2154		if (ql_init_hw(ha)) {
2155			device_printf(ha->pci_dev,
2156				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
2157				__func__, qla_get_usec_timestamp());
2158			ha->offline = 1;
2159			goto qla_error_recovery_exit;
2160		}
2161
2162		if (ha->qla_interface_up) {
2163			qla_free_xmt_bufs(ha);
2164			qla_free_rcv_bufs(ha);
2165		}
2166	}
2167
2168	qla_mdelay(__func__, ha->ms_delay_after_init);
2169
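	/*
	 * Clear all hardware flag bits in a single store; hw.flags is
	 * accessed here as a raw uint32_t.
	 */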
2170	*((uint32_t *)&ha->hw.flags) = 0;
2171	ha->qla_initiate_recovery = 0;
2172
2173	if (ha->qla_interface_up) {
2174		if (qla_alloc_xmt_bufs(ha) != 0) {
2175			ha->offline = 1;
2176			goto qla_error_recovery_exit;
2177		}
2178
2179		qla_confirm_9kb_enable(ha);
2180
2181		if (qla_alloc_rcv_bufs(ha) != 0) {
2182			ha->offline = 1;
2183			goto qla_error_recovery_exit;
2184		}
2185
2186		ha->stop_rcv = 0;
2187
2188		if (ql_init_hw_if(ha) == 0) {
2189			ifp = ha->ifp;
2190			if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2191			ha->qla_watchdog_pause = 0;
2192			ql_update_link_state(ha);
2193		} else {
2194			ha->offline = 1;
2195
2196			if (ha->hw.sp_log_stop_events &
2197				Q8_SP_LOG_STOP_IF_START_FAILURE)
2198				ha->hw.sp_log_stop = -1;
2199		}
2200	} else {
2201		ha->qla_watchdog_pause = 0;
2202	}
2203
2204qla_error_recovery_exit:
2205
	if (ha->offline) {
2207		device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
2208			__func__, qla_get_usec_timestamp());
2209		if (ha->hw.sp_log_stop_events &
2210			Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
2211			ha->hw.sp_log_stop = -1;
2212	}
2213
	QLA_UNLOCK(ha, __func__);
2215
2216	if (!ha->offline)
2217		callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
2218			qla_watchdog, ha);
2219
2220	device_printf(ha->pci_dev,
2221		"%s: ts_usecs = %ld exit\n",
2222		__func__, qla_get_usec_timestamp());
2223	return;
2224}
2225
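/*
 * Task handler for asynchronous hardware events, run from
 * ha->async_event_tq.
 */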
2226static void
2227qla_async_event(void *context, int pending)
2228{
	qla_host_t *ha = context;
2230
2231	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
2232		return;
2233
2234	if (ha->async_event) {
2235		ha->async_event = 0;
		qla_hw_async_event(ha);
2237	}
2238
2239	QLA_UNLOCK(ha, __func__);
2240
2241	return;
2242}
2243
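/*
 * Statistics task: fetch the latest hardware statistics.
 */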
2244static void
2245qla_stats(void *context, int pending)
2246{
	qla_host_t *ha = context;
2250
2251	ql_get_stats(ha);
2252
2253	return;
2254}
2255