/*
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxge/qls_os.c 331722 2018-03-29 02:50:57Z eadler $");


#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_DEVICE_QLOGIC_8000
#define PCI_DEVICE_QLOGIC_8000	0x8000
#endif

#define PCI_QLOGIC_DEV8000 \
	((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qls_alloc_parent_dma_tag(qla_host_t *ha);
static void qls_free_parent_dma_tag(qla_host_t *ha);

static void qls_flush_xmt_bufs(qla_host_t *ha);

static int qls_alloc_rcv_bufs(qla_host_t *ha);
static void qls_free_rcv_bufs(qla_host_t *ha);

static void qls_init_ifnet(device_t dev, qla_host_t *ha);
static void qls_release(qla_host_t *ha);
static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qls_stop(qla_host_t *ha);
static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
static void qls_tx_done(void *context, int pending);

static int qls_config_lro(qla_host_t *ha);
static void qls_free_lro(qla_host_t *ha);

static void qls_error_recovery(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qls_pci_probe (device_t);
static int qls_pci_attach (device_t);
static int qls_pci_detach (device_t);

static void qls_start(struct ifnet *ifp);
static void qls_init(void *arg);
static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qls_media_change(struct ifnet *ifp);
static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qls_pci_probe),
	DEVMETHOD(device_attach, qls_pci_attach),
	DEVMETHOD(device_detach, qls_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla8000_devclass;

DRIVER_MODULE(qla8000, pci, qla_pci_driver, qla8000_devclass, 0, 0);

MODULE_DEPEND(qla8000, pci, 1, 1, 1);
MODULE_DEPEND(qla8000, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");

static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qls_pci_probe
 * Function:	Validate the PCI device to be a QLA80XX device
 */
static int
qls_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_DEV8000:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

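/*
 * Name:	qls_sysctl_get_drvr_stats
 * Function:	Sysctl handler; writing 1 dumps the driver maintained
 *		Tx/Rx ring indices and error counters to the console.
 */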
static int
qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->num_tx_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_tso_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_tso_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_vlan_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_vlan_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_free= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_free);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_next= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_next);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_done= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_done);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
				__func__, i,
                                *(ha->tx_ring[i].txr_cons_vaddr));
		}

                for (i = 0; i < ha->num_rx_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].rx_int= %p\n",
				__func__, i,
                                (void *)ha->rx_ring[i].rx_int);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].rss_int= %p\n",
				__func__, i,
                                (void *)ha->rx_ring[i].rss_int);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_next= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_next);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_free= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_free);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_in= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_in);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_next= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_next);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_free= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_free);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_in= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_in);
		}

		device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
				__func__, ha->err_m_getcl);
		device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
				__func__, ha->err_m_getjcl);
		device_printf(ha->pci_dev,
				"%s: err_tx_dmamap_create = 0x%08x\n",
				__func__, ha->err_tx_dmamap_create);
		device_printf(ha->pci_dev,
				"%s: err_tx_dmamap_load = 0x%08x\n",
				__func__, ha->err_tx_dmamap_load);
		device_printf(ha->pci_dev,
				"%s: err_tx_defrag = 0x%08x\n",
				__func__, ha->err_tx_defrag);
        }
        return (err);
}

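/*
 * Name:	qls_add_sysctls
 * Function:	Registers the driver's sysctl nodes (version, debug level
 *		and drvr_stats) with the device's sysctl tree.
 */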
static void
qls_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	qls_dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &qls_dbg_level, qls_dbg_level, "Debug Level");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        return;
}

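/*
 * Name:	qls_watchdog
 * Function:	Watchdog callout; kicks off the error recovery task when
 *		recovery has been requested, restarts transmits when packets
 *		are pending, and re-arms itself.
 */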
static void
qls_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	struct ifnet *ifp;

	ifp = ha->ifp;

        if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {

		if (ha->qla_initiate_recovery) {

			ha->qla_watchdog_paused = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			taskqueue_enqueue(ha->err_tq, &ha->err_task);

		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {

			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}

		ha->qla_watchdog_paused = 0;
	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

	return;
}

/*
 * Name:	qls_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qls_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
                device_printf(dev, "device is not QLE8000\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(1);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

	ha->reg_rid1 = PCIR_BAR(3);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

        if (ha->pci_reg1 == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qls_add_sysctls(ha);
	qls_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qls_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qls_pci_attach_err;
	}

	ha->msix_count = qls_get_msix_count(ha);

	device_printf(dev, "\n%s: ha %p pci_func 0x%x  msix_count 0x%x"
		" pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qls_pci_attach_err;
	}

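        /*
         * Allocate one MSI-X vector for each Rx completion queue ring and
         * hook qls_isr to it.
         */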
        for (i = 0; i < ha->num_rx_rings; i++) {
                ha->irq_vec[i].cq_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 1 + i;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                &ha->irq_vec[i].irq_rid,
                                (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qls_pci_attach_err;
                }

		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
			&ha->irq_vec[i], &ha->irq_vec[i].handle)) {
				device_printf(dev,
					"could not setup interrupt\n");
			goto qls_pci_attach_err;
		}
        }

	qls_rd_nic_params(ha);

	/* allocate parent dma tag */
	if (qls_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
			__func__);
		goto qls_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qls_alloc_dma(ha)) {
		device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
		goto qls_pci_attach_err;
	}

	/* create the o.s ethernet interface */
	qls_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, 1);
	ha->flags.qla_callout_init = 1;

        /* create ioctl device interface */
        if (qls_make_cdev(ha)) {
                device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
                goto qls_pci_attach_err;
        }

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

        TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
        ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->err_tq);
        taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
        return (0);

qls_pci_attach_err:

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qls_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qls_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);
	qls_stop(ha);
	QLA_UNLOCK(ha, __func__);

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * Name:	qls_release
 * Function:	Releases the resources allocated for the device
 */
static void
qls_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	qls_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qls_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qls_free_dma(ha);
	qls_free_parent_dma_tag(ha);

        for (i = 0; i < ha->num_rx_rings; i++) {

                if (ha->irq_vec[i].handle) {
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                                        ha->irq_vec[i].handle);
                }

                if (ha->irq_vec[i].irq) {
                        (void)bus_release_resource(dev, SYS_RES_IRQ,
                                ha->irq_vec[i].irq_rid,
                                ha->irq_vec[i].irq);
                }
        }

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

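/*
 * Name:	qls_dmamap_callback
 * Function:	Callback for bus_dmamap_load(); returns the physical address
 *		of the first (and only) segment via the caller's bus_addr_t
 *		argument, or 0 on error.
 */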
static void
qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

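/*
 * Name:	qls_alloc_dmabuf
 * Function:	Allocates a DMA tag, DMA-able memory and a loaded map for a
 *		single physically contiguous buffer described by dma_buf.
 */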
int
qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,/* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qls_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto qls_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qls_alloc_dmabuf_exit:
        QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return ret;
}

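/*
 * Name:	qls_free_dmabuf
 * Function:	Unloads, frees and destroys the DMA resources allocated by
 *		qls_alloc_dmabuf().
 */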
void
qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

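/*
 * Name:	qls_alloc_parent_dma_tag
 * Function:	Creates the parent busdma tag from which all other DMA tags
 *		used by the driver are derived.
 */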
static int
qls_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

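/*
 * Name:	qls_free_parent_dma_tag
 * Function:	Destroys the parent busdma tag, if it was created.
 */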
static void
qls_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}

/*
 * Name: qls_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qls_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = qls_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qls_ioctl;
	ifp->if_start = qls_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qls_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qls_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	if (ha->max_frame_size <= MCLBYTES) {
		ha->msize = MCLBYTES;
	} else if (ha->max_frame_size <= MJUMPAGESIZE) {
		ha->msize = MJUMPAGESIZE;
	} else
		ha->msize = MJUM9BYTES;

	ether_ifattach(ifp, qls_get_mac_addr(ha));

	ifp->if_capabilities = IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_TSO4;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
	ifp->if_capabilities |= IFCAP_LINKSTATE;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}

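/*
 * Name:	qls_init_locked
 * Function:	Stops the interface, replenishes the transmit/receive
 *		buffers and LRO state, and brings the hardware interface
 *		back up. Called with the QLA lock held.
 */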
static void
qls_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qls_stop(ha);

	qls_flush_xmt_bufs(ha);

	if (qls_alloc_rcv_bufs(ha) != 0)
		return;

	if (qls_config_lro(ha))
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_IP;
	ifp->if_hwassist |= CSUM_TCP;
	ifp->if_hwassist |= CSUM_UDP;
	ifp->if_hwassist |= CSUM_TSO;

	if (qls_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}

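/*
 * Name:	qls_init
 * Function:	if_init entry point; grabs the QLA lock and calls
 *		qls_init_locked().
 */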
static void
qls_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	(void)QLA_LOCK(ha, __func__, 0);
	qls_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

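/*
 * Name:	qls_set_multi
 * Function:	Collects up to Q8_MAX_NUM_MULTICAST_ADDRS link-level
 *		multicast addresses from the interface and programs them
 *		into the hardware.
 */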
static void
qls_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, 1) == 0) {
		qls_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return;
}

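/*
 * Name:	qls_ioctl
 * Function:	if_ioctl entry point; handles interface address, MTU,
 *		flags, multicast, media and capability requests.
 */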
static int
qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qls_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = qls_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = qls_set_allmulti(ha);
				}
			} else {
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				qls_init_locked(ha);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qls_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qls_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

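/*
 * Name:	qls_media_change
 * Function:	if_media change entry point; only Ethernet media are
 *		accepted.
 */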
static int
qls_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

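/*
 * Name:	qls_media_status
 * Function:	if_media status entry point; refreshes and reports the
 *		current link state.
 */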
static void
qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qls_update_link_state(ha);
	if (ha->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->link_up ? "link_up" : "link_down")));

	return;
}

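/*
 * Name:	qls_start
 * Function:	if_start entry point; reaps completed transmits, then
 *		dequeues frames from the send queue and hands them to
 *		qls_send() while the link is up.
 */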
static void
qls_start(struct ifnet *ifp)
{
	int		i, ret = 0;
	struct mbuf	*m_head;
	qla_host_t	*ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
		IFF_DRV_RUNNING) {

		for (i = 0; i < ha->num_tx_rings; i++) {
			ret |= qls_hw_tx_done(ha, i);
		}

		if (ret == 0)
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->link_up) {
		qls_update_link_state(ha);
		if (!ha->link_up) {
			QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
			QLA_TX_UNLOCK(ha);
			return;
		}
	}

	while (ifp->if_snd.ifq_head != NULL) {

		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qls_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}

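/*
 * Name:	qls_send
 * Function:	DMA-maps an mbuf chain (defragmenting once on EFBIG) and
 *		queues it to the hardware transmit ring selected by the
 *		mbuf's flowid.
 */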
static int
qls_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = 0;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
		txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);

	tx_idx = ha->tx_ring[txr_idx].txr_next;

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {

		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

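/*
 * Name:	qls_stop
 * Function:	Marks the interface down, waits for the watchdog to pause,
 *		tears down the hardware interface and releases the LRO and
 *		transmit/receive buffers.
 */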
static void
qls_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qls_mdelay(__func__, 1);

	qls_del_hw_if(ha);

	qls_free_lro(ha);

	qls_flush_xmt_bufs(ha);
	qls_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
/*
 * Release the mbuf after it has been sent on the wire
 */
static void
qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

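/*
 * Name:	qls_flush_xmt_bufs
 * Function:	Releases every transmit buffer on every transmit ring.
 */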
static void
qls_flush_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	return;
}


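/*
 * Name:	qls_alloc_rcv_mbufs
 * Function:	Creates a busdma map and allocates an mbuf for every receive
 *		descriptor of ring 'r', writing each buffer's physical
 *		address into the ring's small buffer queue.
 */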
static int
qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
{
	int			i, j, ret = 0;
	qla_rx_buf_t		*rxb;
	qla_rx_ring_t		*rx_ring;
	volatile q81_bq_addr_e_t *sbq_e;


	rx_ring = &ha->rx_ring[r];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		rxb = &rx_ring->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d, %d] failed\n", __func__, r, i);

			for (j = 0; j < i; j++) {
				rxb = &rx_ring->rx_buf[j];
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
			}
			goto qls_alloc_rcv_mbufs_err;
		}
	}

	rx_ring = &ha->rx_ring[r];

	sbq_e = rx_ring->sbq_vaddr;

	rxb = &rx_ring->rx_buf[0];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {

			/*
			 * set the physical address in the
			 * corresponding descriptor entry in the
			 * receive ring/queue for the hba
			 */

			sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
			sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;

		} else {
			device_printf(ha->pci_dev,
				"%s: qls_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qls_alloc_rcv_mbufs_err;
		}

		rxb++;
		sbq_e++;
	}
	return 0;

qls_alloc_rcv_mbufs_err:
	return (-1);
}

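/*
 * Name:	qls_free_rcv_bufs
 * Function:	Frees the mbufs and busdma maps of every receive ring and
 *		clears the receive buffer state.
 */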
static void
qls_free_rcv_bufs(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {

		rxr = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rxr->rx_buf[i];

			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
			}
		}
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}
	return;
}

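/*
 * Name:	qls_alloc_rcv_bufs
 * Function:	Allocates receive mbufs for every receive ring; on failure
 *		all receive buffers are released.
 */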
static int
qls_alloc_rcv_bufs(qla_host_t *ha)
{
	int		r, ret = 0;
	qla_rx_ring_t	*rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {
		rxr = &ha->rx_ring[r];
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}

	for (r = 0; r < ha->num_rx_rings; r++) {

		ret = qls_alloc_rcv_mbufs(ha, r);

		if (ret)
			qls_free_rcv_bufs(ha);
	}

	return (ret);
}

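/*
 * Name:	qls_get_mbuf
 * Function:	Obtains (or recycles) a receive mbuf of size ha->msize,
 *		aligns its data to an 8 byte boundary and DMA-maps it,
 *		recording the physical address in rxb->paddr.
 */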
int
qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	struct ifnet   		*ifp;
	int            		ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	if (mp == NULL) {

		mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize);

		if (mp == NULL) {

			if (ha->msize == MCLBYTES)
				ha->err_m_getcl++;
			else
				ha->err_m_getjcl++;

			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getjcl failed\n", __func__);
			goto exit_qls_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = ha->msize;
	} else {
		mp->m_len = mp->m_pkthdr.len = ha->msize;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* align the receive buffers to 8 byte boundary */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_freem(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
                ret = -1;
		goto exit_qls_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qls_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

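/*
 * Name:	qls_tx_done
 * Function:	Transmit taskqueue handler; restarts transmission via
 *		qls_start() while the interface is running.
 */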
static void
qls_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;
	struct ifnet   *ifp;

	ifp = ha->ifp;

	if (!ifp)
		return;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		return;
	}

	qls_start(ha->ifp);
	return;
}

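/*
 * Name:	qls_config_lro
 * Function:	Initializes a TCP LRO control block for every receive ring.
 */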
static int
qls_config_lro(qla_host_t *ha)
{
        int i;
        struct lro_ctrl *lro;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                if (tcp_lro_init(lro)) {
                        device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
                                __func__);
                        return (-1);
                }
                lro->ifp = ha->ifp;
        }
        ha->flags.lro_init = 1;

        QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
        return (0);
}

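/*
 * Name:	qls_free_lro
 * Function:	Frees the TCP LRO control blocks, if initialized.
 */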
static void
qls_free_lro(qla_host_t *ha)
{
        int i;
        struct lro_ctrl *lro;

        if (!ha->flags.lro_init)
                return;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                tcp_lro_free(lro);
        }
        ha->flags.lro_init = 0;
}

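/*
 * Name:	qls_error_recovery
 * Function:	Error recovery taskqueue handler; reinitializes the adapter.
 */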
static void
qls_error_recovery(void *context, int pending)
{
        qla_host_t *ha = context;

	qls_init(ha);

	return;
}