/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_os.c 332052 2018-04-04 23:53:29Z davidcs $");


#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8030
#define PCI_PRODUCT_QLOGIC_ISP8030	0x8030
#endif

#define PCI_QLOGIC_ISP8030 \
	((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC)
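
/*
 * qla_pci_probe() matches against this combined id word, built as
 * (device id << 16) | vendor id from PCI config space.
 */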

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);
static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static void qla_get_peer(qla_host_t *ha);
static void qla_error_recovery(void *context, int pending);
static void qla_async_event(void *context, int pending);
static void qla_stats(void *context, int pending);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
		uint32_t iscsi_pdu);

/*
 * Hooks to the Operating System
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static int qla_transmit(struct ifnet *ifp, struct mbuf  *mp);
static void qla_qflush(struct ifnet *ifp);
static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp);
static int qla_create_fp_taskqueues(qla_host_t *ha);
static void qla_destroy_fp_taskqueues(qla_host_t *ha);
static void qla_drain_fp_taskqueues(qla_host_t *ha);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla83xx_devclass;

DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0);

MODULE_DEPEND(qla83xx, pci, 1, 1, 1);
MODULE_DEPEND(qla83xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver");

#define QL_STD_REPLENISH_THRES		0
#define QL_JUMBO_REPLENISH_THRES	32
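
/*
 * Receive buffer replenish thresholds: std_replenish is switched to the
 * jumbo threshold when the MTU is raised above ETHERMTU (see the
 * SIOCSIFMTU handling in qla_ioctl()).
 */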


static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qla_pci_probe
 * Function:	Validate that the PCI device is an ISP 83xx device
 */
static int
qla_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP8030:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

        SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "fw_version", CTLFLAG_RD,
                ha->fw_ver_str, 0, "firmware version");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->enable_minidump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_minidump", CTLFLAG_RW,
		&ha->enable_minidump, ha->enable_minidump,
		"Minidump retrieval prior to error recovery "
		"is enabled only when this is set");

	ha->enable_driverstate_dump = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW,
		&ha->enable_driverstate_dump, ha->enable_driverstate_dump,
		"Driver State retrieval prior to error recovery "
		"is enabled only when this is set");

	ha->enable_error_recovery = 1;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "enable_error_recovery", CTLFLAG_RW,
		&ha->enable_error_recovery, ha->enable_error_recovery,
		"When set, error recovery is enabled on fatal errors; "
		"otherwise the port is taken offline");

	ha->ms_delay_after_init = 1000;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ms_delay_after_init", CTLFLAG_RW,
		&ha->ms_delay_after_init, ha->ms_delay_after_init,
		"millisecond delay after hw_init");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "std_replenish", CTLFLAG_RW,
                &ha->std_replenish, ha->std_replenish,
                "Threshold for Replenishing Standard Frames");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv4_lro",
                CTLFLAG_RD, &ha->ipv4_lro,
                "number of ipv4 lro completions");

        SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "ipv6_lro",
                CTLFLAG_RD, &ha->ipv6_lro,
                "number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_lock_failed",
		CTLFLAG_RD, &ha->hw_lock_failed,
		"number of hw_lock failures");

        return;
}

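/*
 * Name:	qla_watchdog
 * Function:	Periodic callout that monitors adapter health. On a fatal
 *		condition (or a reset request from the peer function) it
 *		schedules qla_error_recovery() on the error taskqueue;
 *		otherwise it kicks the stats and async-event tasks as needed.
 *		The qla_watchdog_pause/paused and _exit/_exited flag pairs
 *		form a simple handshake with qla_stop() and qla_release().
 */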
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

	if (ha->qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->qla_watchdog_pause) {
		if (!ha->offline &&
			(ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET))) {

			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ql_update_link_state(ha);

			if (ha->enable_error_recovery) {
				ha->qla_watchdog_paused = 1;
				ha->qla_watchdog_pause = 1;
				ha->err_inject = 0;
				device_printf(ha->pci_dev,
					"%s: taskqueue_enqueue(err_task)\n",
					__func__);
				taskqueue_enqueue(ha->err_tq, &ha->err_task);
			} else {
				if (ifp != NULL)
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ha->offline = 1;
			}
			return;

		} else {
			if (ha->qla_interface_up) {

				ha->watchdog_ticks++;

				if (ha->watchdog_ticks > 1000)
					ha->watchdog_ticks = 0;

				if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
					taskqueue_enqueue(ha->stats_tq,
						&ha->stats_task);
				}

				if (ha->async_event) {
					taskqueue_enqueue(ha->async_event_tq,
						&ha->async_event_task);
				}

			}
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name:	qla_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
                device_printf(dev, "device is not ISP8030\n");
                return (ENXIO);
        }

        ha->pci_func = pci_get_function(dev) & 0x1;

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qla_pci_attach_err;
        }

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	qla_add_sysctls(ha);

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < 1) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		ha->hw.num_sds_rings = ha->msix_count - 1;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

        /* initialize hardware */
        if (ql_init_hw(ha)) {
                device_printf(dev, "%s: ql_init_hw failed\n", __func__);
                goto qla_pci_attach_err;
        }

        device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
                ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                ha->fw_ver_build);
        snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
                        ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
                        ha->fw_ver_build);

        if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
                device_printf(dev, "%s: qla_get_nic_partition failed\n",
                        __func__);
                goto qla_pci_attach_err;
        }
        device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
                " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq);

        if ((ha->msix_count < 64) || (num_rcvq != 32)) {
		if (ha->hw.num_sds_rings > 15) {
			ha->hw.num_sds_rings = 15;
		}
        }

	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
	ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
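
	/*
	 * With QL_ENABLE_ISCSI_TLV the transmit rings are doubled; the upper
	 * half (txr_idx + num_tx_rings/2) is used for iSCSI PDUs (see
	 * qla_fp_taskqueue()).
	 */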

	ql_hw_add_sysctls(ha);

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;

	printf("%s: mp_ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	ql_alloc_drvr_state_buffer(ha);
	ql_alloc_sp_log_buffer(ha);
	/* create the O.S. Ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
	ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->async_event_tq);
	taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
	ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->stats_tq);
	taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
		device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
	return (0);

qla_pci_attach_err:

	qla_release(ha);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
	return (ENXIO);
}

/*
 * Name:	qla_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	QLA_LOCK(ha, __func__, -1, 0);

	ha->qla_detach_active = 1;
	qla_stop(ha);

	qla_release(ha);

	QLA_UNLOCK(ha, __func__);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
		mtx_destroy(&ha->sp_log_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return (0);
}

/*
 * SYSCTL Related Callbacks
 */
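
/*
 * Writing 1 to the "link_status" sysctl forces a link state query to the
 * hardware; e.g. (assuming unit 0): sysctl dev.ql.0.link_status=1
 */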
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}

/*
 * Name:	qla_release
 * Function:	Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->async_event_tq) {
		taskqueue_drain_all(ha->async_event_tq);
		taskqueue_free(ha->async_event_tq);
	}

	if (ha->err_tq) {
		taskqueue_drain_all(ha->err_tq);
		taskqueue_free(ha->err_tq);
	}

	if (ha->stats_tq) {
		taskqueue_drain_all(ha->stats_tq);
		taskqueue_free(ha->stats_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_drvr_state_buffer(ha);
	ql_free_sp_log_buffer(ha);
	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->pci_reg1)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);

	return;
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	*((bus_addr_t *)arg) = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	*((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

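/*
 * Name:	ql_alloc_dmabuf
 * Function:	Allocates a single physically contiguous DMA buffer of
 *		dma_buf->size bytes (with dma_buf->alignment) and records
 *		its bus address in dma_buf->dma_addr.
 */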
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int		ret = 0;
	device_t	dev;
	bus_addr_t	b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ret = bus_dma_tag_create(
			ha->parent_tag,		/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
			(void **)&dma_buf->dma_b,
			(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
			&dma_buf->dma_map);
	if (ret) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
			dma_buf->dma_map,
			dma_buf->dma_b,
			dma_buf->size,
			qla_dmamap_callback,
			&b_addr, BUS_DMA_NOWAIT);

	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto ql_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
	QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
		__func__, ret, (void *)dma_buf->dma_tag,
		(void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return ret;
}

void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1, ((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	if (ha->flags.parent_tag) {
		bus_dma_tag_destroy(ha->parent_tag);
		ha->flags.parent_tag = 0;
	}
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S.
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_capabilities = IFCAP_LINKSTATE;
	ifp->if_mtu = ETHERMTU;

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;

	ifp->if_transmit = qla_transmit;
	ifp->if_qflush = qla_qflush;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_TSO6 |
				IFCAP_JUMBO_MTU |
				IFCAP_VLAN_HWTAGGING |
				IFCAP_VLAN_MTU |
				IFCAP_VLAN_HWTSO |
				IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}

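/*
 * Name:	qla_init_locked
 * Function:	(Re)initializes the interface: stops the port, reallocates
 *		transmit and receive buffers, and brings the hardware
 *		interface back up. Must be called with the QLA lock held.
 */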
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0);

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;

	ha->stop_rcv = 0;
	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->qla_interface_up = 1;
		ql_update_link_state(ha);
	} else {
		if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE)
			ha->hw.sp_log_stop = -1;
	}

	ha->qla_watchdog_pause = 0;

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	qla_init_locked(ha);

	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
		QLA_LOCK_NO_SLEEP) != 0)
		return (-1);

	ql_sp_log(ha, 12, 4, ifp->if_drv_flags,
		(ifp->if_drv_flags & IFF_DRV_RUNNING),
		add_multi, (uint32_t)mcnt, 0);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

		if (!add_multi) {
			ret = qla_hw_del_all_mcast(ha);

			if (ret)
				device_printf(ha->pci_dev,
					"%s: qla_hw_del_all_mcast() failed\n",
					__func__);
		}

		if (!ret)
			ret = ql_hw_set_multi(ha, mta, mcnt, 1);

	}

	QLA_UNLOCK(ha, __func__);

	return (ret);
}

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;
	if (ha->offline || ha->qla_initiate_recovery)
		return (ret);

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {

			ret = QLA_LOCK(ha, __func__,
					QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);
			if (ret)
				break;

			ifp->if_flags |= IFF_UP;

			ql_sp_log(ha, 8, 3, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0);

			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				qla_init_locked(ha);
			}

			QLA_UNLOCK(ha, __func__);
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			ql_sp_log(ha, 9, 4, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				ha->max_frame_size, ifp->if_mtu, 0);

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				qla_init_locked(ha);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;

			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

		if (ret)
			break;

		ql_sp_log(ha, 10, 4, ifp->if_drv_flags,
			(ifp->if_drv_flags & IFF_DRV_RUNNING),
			ha->if_flags, ifp->if_flags, 0);

		if (ifp->if_flags & IFF_UP) {

			ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
			qla_init_locked(ha);

			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (qla_set_multi(ha, 1))
			ret = EINVAL;
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (qla_set_multi(ha, 0))
			ret = EINVAL;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ql_sp_log(ha, 11, 4, ifp->if_drv_flags,
				(ifp->if_drv_flags & IFF_DRV_RUNNING),
				mask, ifp->if_capenable, 0);

			qla_init_locked(ha);

			QLA_UNLOCK(ha, __func__);

		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}

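/*
 * Name:	qla_send
 * Function:	Maps an mbuf chain for DMA and hands it to the hardware on
 *		transmit ring txr_idx. On EFBIG the chain is defragmented
 *		once and the load retried; on failure the mbuf is freed
 *		(except for ENOMEM, where the caller may retry).
 */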
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
	uint32_t iscsi_pdu)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;

	if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) ||
		(QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))) {
		QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "
			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));

		device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
			"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
			ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head);

		if (m_head)
			m_freem(m_head);
		*m_headp = NULL;
		QL_INITIATE_RECOVERY(ha);
		return (ret);
	}

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		if (iscsi_pdu)
			ha->tx_ring[txr_idx].iscsi_pkt_count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		bus_dmamap_unload(ha->tx_tag, map);
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
	snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
		"qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

	mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

	fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
				   M_NOWAIT, &fp->tx_mtx);
	if (fp->tx_br == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
			" fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
		return (-ENOMEM);
	}
	return 0;
}

static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
	struct mbuf *mp;
	struct ifnet *ifp = ha->ifp;

	if (mtx_initialized(&fp->tx_mtx)) {

		if (fp->tx_br != NULL) {

			mtx_lock(&fp->tx_mtx);

			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
				m_freem(mp);
			}

			mtx_unlock(&fp->tx_mtx);

			buf_ring_free(fp->tx_br, M_DEVBUF);
			fp->tx_br = NULL;
		}
		mtx_destroy(&fp->tx_mtx);
	}
	return;
}

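/*
 * Name:	qla_fp_taskqueue
 * Function:	Per-ring fastpath task. Processes receive completions via
 *		ql_rcv_isr(), reaps completed transmits and drains the
 *		buf_ring through qla_send(). Re-enqueues itself while work
 *		remains; otherwise re-enables the ring's interrupt.
 */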
static void
qla_fp_taskqueue(void *context, int pending)
{
	qla_tx_fp_t *fp;
	qla_host_t *ha;
	struct ifnet *ifp;
	struct mbuf  *mp = NULL;
	int ret = 0;
	uint32_t txr_idx;
	uint32_t iscsi_pdu = 0;
	uint32_t rx_pkts_left = -1;

	fp = context;

	if (fp == NULL)
		return;

	ha = (qla_host_t *)fp->ha;

	ifp = ha->ifp;

	txr_idx = fp->txr_idx;

	mtx_lock(&fp->tx_mtx);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) {
		mtx_unlock(&fp->tx_mtx);
		goto qla_fp_taskqueue_exit;
	}

	while (rx_pkts_left && !ha->stop_rcv &&
		(ifp->if_drv_flags & IFF_DRV_RUNNING) && ha->hw.link_up) {
		rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64);

#ifdef QL_ENABLE_ISCSI_TLV
		ql_hw_tx_done_locked(ha, fp->txr_idx);
		ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1)));
#else
		ql_hw_tx_done_locked(ha, fp->txr_idx);
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

		mp = drbr_peek(ifp, fp->tx_br);

		while (mp != NULL) {

			if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) {
#ifdef QL_ENABLE_ISCSI_TLV
				if (ql_iscsi_pdu(ha, mp) == 0) {
					txr_idx = txr_idx +
						(ha->hw.num_tx_rings >> 1);
					iscsi_pdu = 1;
				} else {
					iscsi_pdu = 0;
					txr_idx = fp->txr_idx;
				}
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
			}

			ret = qla_send(ha, &mp, txr_idx, iscsi_pdu);

			if (ret) {
				if (mp != NULL)
					drbr_putback(ifp, fp->tx_br, mp);
				else {
					drbr_advance(ifp, fp->tx_br);
				}

				mtx_unlock(&fp->tx_mtx);

				goto qla_fp_taskqueue_exit0;
			} else {
				drbr_advance(ifp, fp->tx_br);
			}

			/* Send a copy of the frame to the BPF listener */
			ETHER_BPF_MTAP(ifp, mp);

			if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
				(!ha->hw.link_up))
				break;

			mp = drbr_peek(ifp, fp->tx_br);
		}
	}
	mtx_unlock(&fp->tx_mtx);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto qla_fp_taskqueue_exit;

qla_fp_taskqueue_exit0:

	if (rx_pkts_left || ((mp != NULL) && ret)) {
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
	} else {
		if (!ha->stop_rcv) {
			QL_ENABLE_INTERRUPTS(ha, fp->txr_idx);
		}
	}

qla_fp_taskqueue_exit:

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
	return;
}

static int
qla_create_fp_taskqueues(qla_host_t *ha)
{
	int	i;
	char	tq_name[32];

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		qla_tx_fp_t *fp = &ha->tx_fp[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
					taskqueue_thread_enqueue,
					&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
			fp->fp_taskqueue));
	}

	return (0);
}

static void
qla_destroy_fp_taskqueues(qla_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		qla_tx_fp_t *fp = &ha->tx_fp[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain_all(fp->fp_taskqueue);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}
	return;
}

static void
qla_drain_fp_taskqueues(qla_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp = &ha->tx_fp[i];

		if (fp->fp_taskqueue != NULL) {
			taskqueue_drain_all(fp->fp_taskqueue);
		}
	}
	return;
}

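/*
 * Name:	qla_transmit
 * Function:	if_transmit entry point. Picks a fastpath from the RSS
 *		flowid, enqueues the mbuf on that ring's buf_ring and kicks
 *		the corresponding taskqueue.
 */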
static int
qla_transmit(struct ifnet *ifp, struct mbuf  *mp)
{
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;
	qla_tx_fp_t *fp;
	int rss_id = 0;
	int ret = 0;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

#if __FreeBSD_version >= 1100000
	if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE)
#else
	if (mp->m_flags & M_FLOWID)
#endif
		rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) %
					ha->hw.num_sds_rings;
	fp = &ha->tx_fp[rss_id];

	if (fp->tx_br == NULL) {
		ret = EINVAL;
		goto qla_transmit_exit;
	}

	if (mp != NULL) {
		ret = drbr_enqueue(ifp, fp->tx_br, mp);
	}

	if (fp->fp_taskqueue != NULL)
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);

	ret = 0;

qla_transmit_exit:

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
	return ret;
}

static void
qla_qflush(struct ifnet *ifp)
{
	int			i;
	qla_tx_fp_t		*fp;
	struct mbuf		*mp;
	qla_host_t		*ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		fp = &ha->tx_fp[i];

		if (fp == NULL)
			continue;

		if (fp->tx_br) {
			mtx_lock(&fp->tx_mtx);

			while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
				m_freem(mp);
			}
			mtx_unlock(&fp->tx_mtx);
		}
	}
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return;
}

static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;
	int i = 0;

	ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0);

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ha->qla_watchdog_pause = 1;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		qla_tx_fp_t *fp;

		fp = &ha->tx_fp[i];

		if (fp == NULL)
			continue;

		if (fp->tx_br != NULL) {
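			/*
			 * Lock/unlock with nothing in between: this simply
			 * waits for any qla_fp_taskqueue() pass that still
			 * holds tx_mtx to finish before tearing down.
			 */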
			mtx_lock(&fp->tx_mtx);
			mtx_unlock(&fp->tx_mtx);
		}
	}

	while (!ha->qla_watchdog_paused)
		qla_mdelay(__func__, 1);

	ha->qla_interface_up = 0;

	qla_drain_fp_taskqueues(ha);

	ql_del_hw_if(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	int ret = 0;
	uint32_t i, j;
	qla_tx_buf_t *txb;

	if (bus_dma_tag_create(NULL,    /* parent */
		1, 0,    /* alignment, bounds */
		BUS_SPACE_MAXADDR,       /* lowaddr */
		BUS_SPACE_MAXADDR,       /* highaddr */
		NULL, NULL,      /* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,     /* maxsize */
		QLA_MAX_SEGMENTS,        /* nsegments */
		PAGE_SIZE,        /* maxsegsize */
		BUS_DMA_ALLOCNOW,        /* flags */
		NULL,    /* lockfunc */
		NULL,    /* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++) {

			txb = &ha->tx_ring[j].tx_buf[i];

			if ((ret = bus_dmamap_create(ha->tx_tag,
					BUS_DMA_NOWAIT, &txb->map))) {

				ha->err_tx_dmamap_create++;
				device_printf(ha->pci_dev,
					"%s: bus_dmamap_create failed[%d]\n",
					__func__, ret);

				qla_free_xmt_bufs(ha);

				return (ret);
			}
		}
	}

	return 0;
}

/*
 * Release the mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {
		bus_dmamap_sync(ha->tx_tag, txb->map,
			BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;

		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	if (txb->map) {
		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);
		txb->map = NULL;
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->hw.num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		bzero((void *)ha->tx_ring[i].tx_buf,
			(sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));
	}
	return;
}

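/*
 * Name:	qla_alloc_rcv_std
 * Function:	Creates a DMA map for every receive descriptor and pre-loads
 *		each descriptor with an mbuf cluster via ql_get_mbuf().
 */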
static int
qla_alloc_rcv_std(qla_host_t *ha)
{
	int		i, j, k, r, ret = 0;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rx_ring;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rx_ring->rx_buf[i];

			ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT,
					&rxb->map);

			if (ret) {
				device_printf(ha->pci_dev,
					"%s: dmamap[%d, %d] failed\n",
					__func__, r, i);

				for (k = 0; k < r; k++) {
					for (j = 0; j < NUM_RX_DESCRIPTORS;
						j++) {
						rxb = &ha->rx_ring[k].rx_buf[j];
						bus_dmamap_destroy(ha->rx_tag,
							rxb->map);
					}
				}

				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ha->rx_tag,
						rx_ring->rx_buf[j].map);
				}
				goto qla_alloc_rcv_std_err;
			}
		}
	}

	qla_init_hw_rcv_descriptors(ha);

	for (r = 0; r < ha->hw.num_rds_rings; r++) {

		rx_ring = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &rx_ring->rx_buf[i];
			rxb->handle = i;
			if (!(ret = ql_get_mbuf(ha, rxb, NULL))) {
				/*
				 * set the physical address in the
				 * corresponding descriptor entry in the
				 * receive ring/queue for the hba
				 */
				qla_set_hw_rcv_desc(ha, r, i, rxb->handle,
					rxb->paddr,
					(rxb->m_head)->m_pkthdr.len);
			} else {
				device_printf(ha->pci_dev,
					"%s: ql_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				goto qla_alloc_rcv_std_err;
			}
		}
	}
	return 0;

qla_alloc_rcv_std_err:
	return (-1);
}

static void
qla_free_rcv_std(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;

	for (r = 0; r < ha->hw.num_rds_rings; r++) {
		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
			rxb = &ha->rx_ring[r].rx_buf[i];
			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
				rxb->m_head = NULL;
			}
		}
	}
	return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int		i, ret = 0;

	if (bus_dma_tag_create(NULL,    /* parent */
			1, 0,    /* alignment, bounds */
			BUS_SPACE_MAXADDR,       /* lowaddr */
			BUS_SPACE_MAXADDR,       /* highaddr */
			NULL, NULL,      /* filter, filterarg */
			MJUM9BYTES,     /* maxsize */
			1,        /* nsegments */
			MJUM9BYTES,        /* maxsegsize */
			BUS_DMA_ALLOCNOW,        /* flags */
			NULL,    /* lockfunc */
			NULL,    /* lockfuncarg */
			&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	ret = qla_alloc_rcv_std(ha);

	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int		i;

	qla_free_rcv_std(ha);

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS));

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
	}

	return;
}

int
ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	struct ifnet		*ifp;
	int			ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs, mbuf_size;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	if (ha->hw.enable_9kb)
		mbuf_size = MJUM9BYTES;
	else
		mbuf_size = MCLBYTES;

	if (mp == NULL) {

		if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE))
			return (-1);

		if (ha->hw.enable_9kb)
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size);
		else
			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			ha->err_m_getcl++;
			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
			goto exit_ql_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
	} else {
		mp->m_len = mp->m_pkthdr.len = mbuf_size;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

2021	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
2022	if (offset) {
2023		offset = 8 - offset;
2024		m_adj(mp, offset);
2025	}
2026
2027	/*
2028	 * Using memory from the mbuf cluster pool, invoke the bus_dma
2029	 * machinery to arrange the memory mapping.
2030	 */
2031	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
2032			mp, segs, &nsegs, BUS_DMA_NOWAIT);
2033	rxb->paddr = segs[0].ds_addr;
2034
2035	if (ret || !rxb->paddr || (nsegs != 1)) {
2036		m_free(mp);
2037		rxb->m_head = NULL;
2038		device_printf(ha->pci_dev,
2039			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
2040			__func__, ret, (long long unsigned int)rxb->paddr,
2041			nsegs);
2042                ret = -1;
2043		goto exit_ql_get_mbuf;
2044	}
2045	rxb->m_head = mp;
2046	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);
2047
2048exit_ql_get_mbuf:
2049	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
2050	return (ret);
2051}
2052
2053
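/*
 * qla_get_peer
 *	Scan the sibling devices on our PCI bus for another function in the
 *	same slot with the same device id (i.e., the other port of the
 *	adapter) and cache it in ha->peer_dev for the error recovery
 *	handshake.
 */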
static void
qla_get_peer(qla_host_t *ha)
{
	device_t *peers;
	int count, i, slot;
	int my_slot = pci_get_slot(ha->pci_dev);

	if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count))
		return;

	for (i = 0; i < count; i++) {
		slot = pci_get_slot(peers[i]);

		if ((slot >= 0) && (slot == my_slot) &&
			(pci_get_device(peers[i]) ==
				pci_get_device(ha->pci_dev))) {
			if (ha->pci_dev != peers[i])
				ha->peer_dev = peers[i];
		}
	}

	/* device_get_children() allocates the list; release it */
	free(peers, M_TEMP);
}

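/*
 * qla_send_msg_to_peer
 *	Post a message to the peer function by writing directly into the
 *	peer softc's msg_from_peer field; the peer polls that field during
 *	error recovery.
 */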
static void
qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer)
{
	qla_host_t *ha_peer;

	if (ha->peer_dev) {
		if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) {
			ha_peer->msg_from_peer = msg_to_peer;
		}
	}
}

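/*
 * qla_set_error_recovery
 *	Request error recovery: mark the interface as no longer running and
 *	set qla_initiate_recovery so that recovery can proceed. When the
 *	system is cold-booting or recovery is disabled, the port is simply
 *	marked offline instead.
 */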
void
qla_set_error_recovery(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	if (!cold && ha->enable_error_recovery) {
		if (ifp)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ha->qla_initiate_recovery = 1;
	} else
		ha->offline = 1;
	return;
}

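/*
 * qla_error_recovery
 *	Taskqueue handler that drives the firmware reset handshake between
 *	the two PCI functions of the adapter. The even-numbered function
 *	takes the lead: it asks its peer to quiesce, reinitializes the
 *	hardware and then ACKs the peer, after which both ports rebuild
 *	their transmit/receive buffers and restart the interface.
 */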
static void
qla_error_recovery(void *context, int pending)
{
	qla_host_t *ha = context;
	uint32_t msecs_100 = 400;	/* up to 400 x 100ms waiting for the peer */
	struct ifnet *ifp = ha->ifp;
	int i = 0;

	device_printf(ha->pci_dev, "%s: enter\n", __func__);
	ha->hw.imd_compl = 1;

	taskqueue_drain_all(ha->stats_tq);
	taskqueue_drain_all(ha->async_event_tq);

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n",
		__func__, qla_get_usec_timestamp());

	if (ha->qla_interface_up) {

		qla_mdelay(__func__, 300);

		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			qla_tx_fp_t *fp;

			fp = &ha->tx_fp[i];

			if (fp == NULL)
				continue;

			if (fp->tx_br != NULL) {
				/* wait for any in-progress transmit to drain */
				mtx_lock(&fp->tx_mtx);
				mtx_unlock(&fp->tx_mtx);
			}
		}
	}

	qla_drain_fp_taskqueues(ha);

	if ((ha->pci_func & 0x1) == 0) {
		/*
		 * Even numbered pci function: send QL_PEER_MSG_RESET and
		 * wait for the peer's ACK, reinitialize the hardware, then
		 * ACK the peer.
		 */
		if (!ha->msg_from_peer) {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);

			while ((ha->msg_from_peer != QL_PEER_MSG_ACK) &&
				msecs_100) {
				qla_mdelay(__func__, 100);
				msecs_100--;
			}
		}

		ha->msg_from_peer = 0;

		if (ha->enable_minidump)
			ql_minidump(ha);

		if (ha->enable_driverstate_dump)
			ql_capture_drvr_state(ha);

		if (ql_init_hw(ha)) {
			device_printf(ha->pci_dev,
				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
				__func__, qla_get_usec_timestamp());
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		if (ha->qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}

		if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
			qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);

	} else {
		/*
		 * Odd numbered pci function: ACK a pending reset request
		 * from the peer (or request a reset ourselves), wait for
		 * the peer's ACK, then reinitialize the hardware.
		 */
		if (ha->msg_from_peer == QL_PEER_MSG_RESET) {

			ha->msg_from_peer = 0;

			if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY))
				qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK);
		} else {
			qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET);
		}

		while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100) {
			qla_mdelay(__func__, 100);
			msecs_100--;
		}
		ha->msg_from_peer = 0;

		if (ha->enable_driverstate_dump)
			ql_capture_drvr_state(ha);

		/* msecs_100 reaches zero only if the wait above timed out */
		if (msecs_100 == 0) {
			device_printf(ha->pci_dev,
				"%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n",
				__func__, qla_get_usec_timestamp());
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		if (ql_init_hw(ha)) {
			device_printf(ha->pci_dev,
				"%s: ts_usecs = %ld exit: ql_init_hw failed\n",
				__func__, qla_get_usec_timestamp());
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		if (ha->qla_interface_up) {
			qla_free_xmt_bufs(ha);
			qla_free_rcv_bufs(ha);
		}
	}

	qla_mdelay(__func__, ha->ms_delay_after_init);

	/* clear all hardware state flags in one shot */
	*((uint32_t *)&ha->hw.flags) = 0;
	ha->qla_initiate_recovery = 0;

	if (ha->qla_interface_up) {

		if (qla_alloc_xmt_bufs(ha) != 0) {
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		qla_confirm_9kb_enable(ha);

		if (qla_alloc_rcv_bufs(ha) != 0) {
			ha->offline = 1;
			goto qla_error_recovery_exit;
		}

		ha->stop_rcv = 0;

		if (ql_init_hw_if(ha) == 0) {
			ifp = ha->ifp;
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
			ha->qla_watchdog_pause = 0;
			ql_update_link_state(ha);
		} else {
			ha->offline = 1;

			if (ha->hw.sp_log_stop_events &
				Q8_SP_LOG_STOP_IF_START_FAILURE)
				ha->hw.sp_log_stop = -1;
		}
	} else {
		ha->qla_watchdog_pause = 0;
	}

qla_error_recovery_exit:

	if (ha->offline) {
		device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n",
			__func__, qla_get_usec_timestamp());
		if (ha->hw.sp_log_stop_events &
			Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE)
			ha->hw.sp_log_stop = -1;
	}

	QLA_UNLOCK(ha, __func__);

	if (!ha->offline)
		callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
			qla_watchdog, ha);

	device_printf(ha->pci_dev,
		"%s: ts_usecs = %ld exit\n",
		__func__, qla_get_usec_timestamp());
	return;
}

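/*
 * qla_async_event
 *	Taskqueue handler that processes a pending firmware async event
 *	under the driver lock.
 */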
static void
qla_async_event(void *context, int pending)
{
	qla_host_t *ha = context;

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	if (ha->async_event) {
		ha->async_event = 0;
		qla_hw_async_event(ha);
	}

	QLA_UNLOCK(ha, __func__);

	return;
}

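/*
 * qla_stats
 *	Taskqueue handler that retrieves the periodic hardware statistics.
 */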
static void
qla_stats(void *context, int pending)
{
	qla_host_t *ha;

	ha = context;

	ql_get_stats(ha);

	return;
}