/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_os.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxge/qls_os.c 330897 2018-03-14 03:19:51Z eadler $");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"
#include <sys/smp.h>

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_DEVICE_QLOGIC_8000
#define PCI_DEVICE_QLOGIC_8000	0x8000
#endif

#define PCI_QLOGIC_DEV8000 \
	((PCI_DEVICE_QLOGIC_8000 << 16) | PCI_VENDOR_QLOGIC)

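/*
 * For reference, qls_pci_probe() below matches on the composite value
 * ((device id << 16) | vendor id); for the QLE8000 this works out to
 * ((0x8000 << 16) | 0x1077) == 0x80001077, which is exactly what
 * PCI_QLOGIC_DEV8000 expands to.
 */
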
/*
 * static functions
 */
static int qls_alloc_parent_dma_tag(qla_host_t *ha);
static void qls_free_parent_dma_tag(qla_host_t *ha);

static void qls_flush_xmt_bufs(qla_host_t *ha);

static int qls_alloc_rcv_bufs(qla_host_t *ha);
static void qls_free_rcv_bufs(qla_host_t *ha);

static void qls_init_ifnet(device_t dev, qla_host_t *ha);
static void qls_release(qla_host_t *ha);
static void qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qls_stop(qla_host_t *ha);
static int qls_send(qla_host_t *ha, struct mbuf **m_headp);
static void qls_tx_done(void *context, int pending);

static int qls_config_lro(qla_host_t *ha);
static void qls_free_lro(qla_host_t *ha);

static void qls_error_recovery(void *context, int pending);

/*
 * Hooks to the Operating System
 */
static int qls_pci_probe (device_t);
static int qls_pci_attach (device_t);
static int qls_pci_detach (device_t);

static void qls_start(struct ifnet *ifp);
static void qls_init(void *arg);
static int qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qls_media_change(struct ifnet *ifp);
static void qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qls_pci_probe),
	DEVMETHOD(device_attach, qls_pci_attach),
	DEVMETHOD(device_detach, qls_pci_detach),
	{ 0, 0 }
};

static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla8000_devclass;

DRIVER_MODULE(qla8000, pci, qla_pci_driver, qla8000_devclass, 0, 0);

MODULE_DEPEND(qla8000, pci, 1, 1, 1);
MODULE_DEPEND(qla8000, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8000BUF, "qla8000buf", "Buffers for qla8000 driver");

static char dev_str[64];
static char ver_str[64];

/*
 * Name:	qls_pci_probe
 * Function:	Validate the PCI device to be a QLA80XX device
 */
static int
qls_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_DEV8000:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 8000 PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
                device_set_desc(dev, dev_str);
                break;
        default:
                return (ENXIO);
        }

        if (bootverbose)
                printf("%s: %s\n", __func__, dev_str);

        return (BUS_PROBE_DEFAULT);
}

static int
qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
{
        int err = 0, ret;
        qla_host_t *ha;
        uint32_t i;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

        if (ret == 1) {

                ha = (qla_host_t *)arg1;

                for (i = 0; i < ha->num_tx_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_tso_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_tso_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].tx_vlan_frames= %p\n",
				__func__, i,
                                (void *)ha->tx_ring[i].tx_vlan_frames);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_free= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_free);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_next= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_next);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_done= 0x%08x\n",
				__func__, i,
                                ha->tx_ring[i].txr_done);

                        device_printf(ha->pci_dev,
                                "%s: tx_ring[%d].txr_cons_idx= 0x%08x\n",
				__func__, i,
                                *(ha->tx_ring[i].txr_cons_vaddr));
		}

                for (i = 0; i < ha->num_rx_rings; i++) {

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].rx_int= %p\n",
				__func__, i,
                                (void *)ha->rx_ring[i].rx_int);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].rss_int= %p\n",
				__func__, i,
                                (void *)ha->rx_ring[i].rss_int);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_next= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_next);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_free= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_free);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].lbq_in= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].lbq_in);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_next= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_next);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_free= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_free);

                        device_printf(ha->pci_dev,
                                "%s: rx_ring[%d].sbq_in= 0x%08x\n",
				__func__, i,
                                ha->rx_ring[i].sbq_in);
		}

		device_printf(ha->pci_dev, "%s: err_m_getcl = 0x%08x\n",
				__func__, ha->err_m_getcl);
		device_printf(ha->pci_dev, "%s: err_m_getjcl = 0x%08x\n",
				__func__, ha->err_m_getjcl);
		device_printf(ha->pci_dev,
				"%s: err_tx_dmamap_create = 0x%08x\n",
				__func__, ha->err_tx_dmamap_create);
		device_printf(ha->pci_dev,
				"%s: err_tx_dmamap_load = 0x%08x\n",
				__func__, ha->err_tx_dmamap_load);
		device_printf(ha->pci_dev,
				"%s: err_tx_defrag = 0x%08x\n",
				__func__, ha->err_tx_defrag);
        }
        return (err);
}
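
/*
 * Usage sketch for the handler above (assuming the adapter attached as
 * unit 0; the oid lives under the per-device tree created in
 * qls_add_sysctls() below):
 *
 *   # sysctl dev.ql.0.drvr_stats=1
 *
 * Writing 1 dumps the per-ring transmit/receive indices and the driver
 * error counters to the console/message buffer via device_printf(9).
 */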

static void
qls_add_sysctls(qla_host_t *ha)
{
        device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	qls_dbg_level = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "debug", CTLFLAG_RW,
                &qls_dbg_level, qls_dbg_level, "Debug Level");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qls_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");

        return;
}

static void
qls_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	struct ifnet *ifp;

	ifp = ha->ifp;

	if (ha->flags.qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->flags.qla_watchdog_pause) {

		if (ha->qla_initiate_recovery) {

			ha->qla_watchdog_paused = 1;
			ha->qla_initiate_recovery = 0;
			ha->err_inject = 0;
			taskqueue_enqueue(ha->err_tq, &ha->err_task);

		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {

			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}

		ha->qla_watchdog_paused = 0;
	} else {
		ha->qla_watchdog_paused = 1;
	}

	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

	return;
}
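
/*
 * Note on the flag handshake above: qls_release() sets
 * ha->flags.qla_watchdog_exit and then waits for the callout to acknowledge
 * by setting ha->qla_watchdog_exited; qls_stop() does the same dance with
 * qla_watchdog_pause/qla_watchdog_paused. Each side writes only its own
 * flag, which appears to be why the exchange gets by without a lock.
 */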

/*
 * Name:	qls_pci_attach
 * Function:	attaches the device to the operating system
 */
static int
qls_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

        memset(ha, 0, sizeof (qla_host_t));

        if (pci_get_device(dev) != PCI_DEVICE_QLOGIC_8000) {
                device_printf(dev, "device is not QLE8000\n");
                return (ENXIO);
	}

        ha->pci_func = pci_get_function(dev);

        ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(1);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

        if (ha->pci_reg == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

	ha->reg_rid1 = PCIR_BAR(3);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

        if (ha->pci_reg1 == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto qls_pci_attach_err;
        }

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);

	qls_add_sysctls(ha);
	qls_hw_add_sysctls(ha);

	ha->flags.lock_init = 1;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qls_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qls_pci_attach_err;
	}

	ha->msix_count = qls_get_msix_count(ha);

	device_printf(dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x"
		" pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qls_pci_attach_err;
	}

        for (i = 0; i < ha->num_rx_rings; i++) {
                ha->irq_vec[i].cq_idx = i;
                ha->irq_vec[i].ha = ha;
                ha->irq_vec[i].irq_rid = 1 + i;

                ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                &ha->irq_vec[i].irq_rid,
                                (RF_ACTIVE | RF_SHAREABLE));

                if (ha->irq_vec[i].irq == NULL) {
                        device_printf(dev, "could not allocate interrupt\n");
                        goto qls_pci_attach_err;
                }

		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE), NULL, qls_isr,
			&ha->irq_vec[i], &ha->irq_vec[i].handle)) {
				device_printf(dev,
					"could not setup interrupt\n");
			goto qls_pci_attach_err;
		}
        }

	qls_rd_nic_params(ha);

	/* allocate parent dma tag */
	if (qls_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qls_alloc_parent_dma_tag failed\n",
			__func__);
		goto qls_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qls_alloc_dma(ha)) {
		device_printf(dev, "%s: qls_alloc_dma failed\n", __func__);
		goto qls_pci_attach_err;
	}

	/* create the OS ethernet interface */
	qls_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	TASK_INIT(&ha->tx_task, 0, qls_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	callout_init(&ha->tx_callout, 1);
	ha->flags.qla_callout_init = 1;

        /* create ioctl device interface */
        if (qls_make_cdev(ha)) {
                device_printf(dev, "%s: qls_make_cdev failed\n", __func__);
                goto qls_pci_attach_err;
        }

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qls_watchdog, ha);

        TASK_INIT(&ha->err_task, 0, qls_error_recovery, ha);
        ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT,
                        taskqueue_thread_enqueue, &ha->err_tq);
        taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
                device_get_nameunit(ha->pci_dev));

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
        return (0);

qls_pci_attach_err:

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
        return (ENXIO);
}

/*
 * Name:	qls_pci_detach
 * Function:	Unhooks the device from the operating system
 */
static int
qls_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

        if ((ha = device_get_softc(dev)) == NULL) {
                device_printf(dev, "cannot get softc\n");
                return (ENOMEM);
        }

	ifp = ha->ifp;

	(void)QLA_LOCK(ha, __func__, 0);
	qls_stop(ha);
	QLA_UNLOCK(ha, __func__);

	qls_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

        return (0);
}

/*
 * Name:	qls_release
 * Function:	Releases the resources allocated for the device
 */
static void
qls_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	qls_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->flags.qla_watchdog_exit = 1;

		while (ha->qla_watchdog_exited == 0)
			qls_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qls_free_dma(ha);
	qls_free_parent_dma_tag(ha);

        for (i = 0; i < ha->num_rx_rings; i++) {

                if (ha->irq_vec[i].handle) {
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                                        ha->irq_vec[i].handle);
                }

                if (ha->irq_vec[i].irq) {
                        (void)bus_release_resource(dev, SYS_RES_IRQ,
                                ha->irq_vec[i].irq_rid,
                                ha->irq_vec[i].irq);
                }
        }

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->hw_lock);
	}

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

        if (ha->pci_reg1)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);
}

/*
 * DMA Related Functions
 */

static void
qls_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}
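
/*
 * A note on the bus_dma(9) contract assumed above: every load in this file
 * passes BUS_DMA_NOWAIT, so the callback runs synchronously from inside
 * bus_dmamap_load() rather than being deferred, and writing the physical
 * address back through "arg" (a stack variable in the caller) is safe.
 * The tags are created with nsegments == 1, so segs[0] covers the whole
 * buffer.
 */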

int
qls_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        int             ret = 0;
        device_t        dev;
        bus_addr_t      b_addr;

        dev = ha->pci_dev;

        QL_DPRINT2((dev, "%s: enter\n", __func__));

        ret = bus_dma_tag_create(
                        ha->parent_tag,         /* parent */
                        dma_buf->alignment,
                        ((bus_size_t)(1ULL << 32)),/* boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        dma_buf->size,          /* maxsize */
                        1,                      /* nsegments */
                        dma_buf->size,          /* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &dma_buf->dma_tag);

        if (ret) {
                device_printf(dev, "%s: could not create dma tag\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }
        ret = bus_dmamem_alloc(dma_buf->dma_tag,
                        (void **)&dma_buf->dma_b,
                        (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
                        &dma_buf->dma_map);
        if (ret) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
                goto qls_alloc_dmabuf_exit;
        }

        ret = bus_dmamap_load(dma_buf->dma_tag,
                        dma_buf->dma_map,
                        dma_buf->dma_b,
                        dma_buf->size,
                        qls_dmamap_callback,
                        &b_addr, BUS_DMA_NOWAIT);

        if (ret || !b_addr) {
                bus_dma_tag_destroy(dma_buf->dma_tag);
                bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
                        dma_buf->dma_map);
                ret = -1;
                goto qls_alloc_dmabuf_exit;
        }

        dma_buf->dma_addr = b_addr;

qls_alloc_dmabuf_exit:
        QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
                __func__, ret, (void *)dma_buf->dma_tag,
                (void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

        return (ret);
}

void
qls_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
        bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
        bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
        bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qls_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
        ret = bus_dma_tag_create(
                        bus_get_dma_tag(dev),   /* parent */
                        1, ((bus_size_t)(1ULL << 32)),/* alignment, boundary */
                        BUS_SPACE_MAXADDR,      /* lowaddr */
                        BUS_SPACE_MAXADDR,      /* highaddr */
                        NULL, NULL,             /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
                        0,                      /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
                        0,                      /* flags */
                        NULL, NULL,             /* lockfunc, lockarg */
                        &ha->parent_tag);

        if (ret) {
                device_printf(dev, "%s: could not create parent dma tag\n",
                        __func__);
		return (-1);
        }

        ha->flags.parent_tag = 1;

	return (0);
}

static void
qls_free_parent_dma_tag(qla_host_t *ha)
{
        if (ha->flags.parent_tag) {
                bus_dma_tag_destroy(ha->parent_tag);
                ha->flags.parent_tag = 0;
        }
}
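
/*
 * Every data-path tag in the driver (the per-buffer tag created in
 * qls_alloc_dmabuf() above, plus the ha->rx_tag/ha->tx_tag mbuf tags, which
 * are only used, never created, in this file) hangs off ha->parent_tag, so
 * the 4GB boundary and BUS_SPACE_MAXADDR limits set here are inherited by
 * every mapping the driver makes.
 */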

/*
 * Name: qls_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the OS
 */

static void
qls_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = qls_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qls_ioctl;
	ifp->if_start = qls_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qls_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qls_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	if (ha->max_frame_size <= MCLBYTES) {
		ha->msize = MCLBYTES;
	} else if (ha->max_frame_size <= MJUMPAGESIZE) {
		ha->msize = MJUMPAGESIZE;
	} else
		ha->msize = MJUM9BYTES;

	ether_ifattach(ifp, qls_get_mac_addr(ha));

	ifp->if_capabilities = IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_TSO4;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
	ifp->if_capabilities |= IFCAP_LINKSTATE;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qls_media_change, qls_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qls_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}
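
/*
 * A worked example of the receive-cluster sizing in qls_init_ifnet(),
 * assuming the usual FreeBSD cluster sizes (MCLBYTES = 2048,
 * MJUMPAGESIZE = one page, MJUM9BYTES = 9216): at the default MTU of 1500,
 * max_frame_size = 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518,
 * so msize ends up as MCLBYTES; at a jumbo MTU of 9000 it is 9018, which
 * only fits in MJUM9BYTES clusters.
 */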

static void
qls_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qls_stop(ha);

	qls_flush_xmt_bufs(ha);

	if (qls_alloc_rcv_bufs(ha) != 0)
		return;

	if (qls_config_lro(ha))
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_IP;
	ifp->if_hwassist |= CSUM_TCP;
	ifp->if_hwassist |= CSUM_UDP;
	ifp->if_hwassist |= CSUM_TSO;

	if (qls_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}

static void
qls_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	(void)QLA_LOCK(ha, __func__, 0);
	qls_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

static void
qls_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, 1) == 0) {
		qls_hw_set_multi(ha, mta, mcnt, add_multi);
		QLA_UNLOCK(ha, __func__);
	}

	return;
}

static int
qls_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				(void)QLA_LOCK(ha, __func__, 0);
				qls_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			(void) QLA_LOCK(ha, __func__, 0);

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		(void)QLA_LOCK(ha, __func__, 0);

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = qls_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = qls_set_allmulti(ha);
				}
			} else {
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				qls_init_locked(ha);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qls_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qls_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qls_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qls_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qls_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qls_update_link_state(ha);
	if (ha->link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qls_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->link_up ? "link_up" : "link_down")));

	return;
}

static void
qls_start(struct ifnet *ifp)
{
	int		i, ret = 0;
	struct mbuf	*m_head;
	qla_host_t	*ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
		IFF_DRV_RUNNING) {

		for (i = 0; i < ha->num_tx_rings; i++) {
			ret |= qls_hw_tx_done(ha, i);
		}

		if (ret == 0)
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->link_up) {
		qls_update_link_state(ha);
		if (!ha->link_up) {
			QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
			QLA_TX_UNLOCK(ha);
			return;
		}
	}

	while (ifp->if_snd.ifq_head != NULL) {

		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qls_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}
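
/*
 * Design note, as inferred from the code above: qls_start() takes tx_lock
 * with mtx_trylock() rather than mtx_lock() because it is entered both from
 * the stack's if_start path and from the qls_tx_done() taskqueue. If another
 * context already holds the lock, the frames simply remain on if_snd and are
 * drained by the next invocation.
 */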

static int
qls_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;
	uint32_t		txr_idx = 0;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
		txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);

	tx_idx = ha->tx_ring[txr_idx].txr_next;

	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qls_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

        if (!(ret = qls_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) {

		ha->tx_ring[txr_idx].count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}
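
/*
 * The EFBIG handling above is the common FreeBSD transmit pattern: when the
 * mbuf chain carries more than QLA_MAX_SEGMENTS fragments, m_defrag(9)
 * copies it into the smallest possible chain and the DMA load is retried
 * once. Of the failure paths, only ENOMEM leaves the mbuf with the caller
 * (so qls_start() can re-queue it and retry later); every other error
 * consumes the chain.
 */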

static void
qls_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t	dev;

	dev = ha->pci_dev;

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	ha->flags.qla_watchdog_pause = 1;

	while (!ha->qla_watchdog_paused)
		qls_mdelay(__func__, 1);

	qls_del_hw_if(ha);

	qls_free_lro(ha);

	qls_flush_xmt_bufs(ha);
	qls_free_rcv_bufs(ha);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
/*
 * Release an mbuf after it has been sent on the wire
 */
static void
qls_flush_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {

		bus_dmamap_unload(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

static void
qls_flush_xmt_bufs(qla_host_t *ha)
{
	int		i, j;

	for (j = 0; j < ha->num_tx_rings; j++) {
		for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
			qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
	}

	return;
}

static int
qls_alloc_rcv_mbufs(qla_host_t *ha, int r)
{
	int			i, j, ret = 0;
	qla_rx_buf_t		*rxb;
	qla_rx_ring_t		*rx_ring;
	volatile q81_bq_addr_e_t *sbq_e;

	rx_ring = &ha->rx_ring[r];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		rxb = &rx_ring->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d, %d] failed\n", __func__, r, i);

			for (j = 0; j < i; j++) {
				rxb = &rx_ring->rx_buf[j];
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
			}
			goto qls_alloc_rcv_mbufs_err;
		}
	}

	rx_ring = &ha->rx_ring[r];

	sbq_e = rx_ring->sbq_vaddr;

	rxb = &rx_ring->rx_buf[0];

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		if (!(ret = qls_get_mbuf(ha, rxb, NULL))) {

			/*
			 * set the physical address in the
			 * corresponding descriptor entry in the
			 * receive ring/queue for the hba
			 */
			sbq_e->addr_lo = rxb->paddr & 0xFFFFFFFF;
			sbq_e->addr_hi = (rxb->paddr >> 32) & 0xFFFFFFFF;

		} else {
			device_printf(ha->pci_dev,
				"%s: qls_get_mbuf [%d, %d] failed\n",
					__func__, r, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qls_alloc_rcv_mbufs_err;
		}

		rxb++;
		sbq_e++;
	}
	return (0);

qls_alloc_rcv_mbufs_err:
	return (-1);
}

static void
qls_free_rcv_bufs(qla_host_t *ha)
{
	int		i, r;
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {

		rxr = &ha->rx_ring[r];

		for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

			rxb = &rxr->rx_buf[i];

			if (rxb->m_head != NULL) {
				bus_dmamap_unload(ha->rx_tag, rxb->map);
				bus_dmamap_destroy(ha->rx_tag, rxb->map);
				m_freem(rxb->m_head);
			}
		}
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}
	return;
}

static int
qls_alloc_rcv_bufs(qla_host_t *ha)
{
	int		r, ret = 0;
	qla_rx_ring_t	*rxr;

	for (r = 0; r < ha->num_rx_rings; r++) {
		rxr = &ha->rx_ring[r];
		bzero(rxr->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	}

	for (r = 0; r < ha->num_rx_rings; r++) {

		ret = qls_alloc_rcv_mbufs(ha, r);

		if (ret) {
			/* undo the rings populated so far and bail out */
			qls_free_rcv_bufs(ha);
			break;
		}
	}

	return (ret);
}

int
qls_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp)
{
	register struct mbuf *mp = nmp;
	struct ifnet   		*ifp;
	int            		ret = 0;
	uint32_t		offset;
	bus_dma_segment_t	segs[1];
	int			nsegs;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	if (mp == NULL) {

		mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ha->msize);

		if (mp == NULL) {

			if (ha->msize == MCLBYTES)
				ha->err_m_getcl++;
			else
				ha->err_m_getjcl++;

			ret = ENOBUFS;
			device_printf(ha->pci_dev,
					"%s: m_getjcl failed\n", __func__);
			goto exit_qls_get_mbuf;
		}
		mp->m_len = mp->m_pkthdr.len = ha->msize;
	} else {
		mp->m_len = mp->m_pkthdr.len = ha->msize;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* align the receive buffers to 8 byte boundary */
	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map,
			mp, segs, &nsegs, BUS_DMA_NOWAIT);
	rxb->paddr = segs[0].ds_addr;

	if (ret || !rxb->paddr || (nsegs != 1)) {
		m_freem(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
			__func__, ret, (long long unsigned int)rxb->paddr,
			nsegs);
                ret = -1;
		goto exit_qls_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qls_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}
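
/*
 * Example of the 8-byte alignment fix-up in qls_get_mbuf(): if mp->m_data
 * ends in 0x3, then offset = 8 - 3 = 5 and m_adj(mp, 5) advances the data
 * pointer (and trims the length) by five bytes, leaving the buffer start on
 * an 8-byte boundary as the comment above the code requires.
 */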

static void
qls_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;
	struct ifnet   *ifp;

	ifp = ha->ifp;

	if (!ifp)
		return;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		return;
	}

	qls_start(ha->ifp);
	return;
}

static int
qls_config_lro(qla_host_t *ha)
{
        int i;
        struct lro_ctrl *lro;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                if (tcp_lro_init(lro)) {
                        device_printf(ha->pci_dev, "%s: tcp_lro_init failed\n",
                                __func__);
                        return (-1);
                }
                lro->ifp = ha->ifp;
        }
        ha->flags.lro_init = 1;

        QL_DPRINT2((ha->pci_dev, "%s: LRO initialized\n", __func__));
        return (0);
}
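
/*
 * With lro_init set, the receive path is expected to feed reassembled frames
 * through tcp_lro_rx()/tcp_lro_flush() on the per-ring lro_ctrl initialized
 * above; that consumer lives in the companion receive/interrupt code
 * (presumably qls_isr.c), not in this file.
 */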

static void
qls_free_lro(qla_host_t *ha)
{
        int i;
        struct lro_ctrl *lro;

        if (!ha->flags.lro_init)
                return;

        for (i = 0; i < ha->num_rx_rings; i++) {
                lro = &ha->rx_ring[i].lro;
                tcp_lro_free(lro);
        }
        ha->flags.lro_init = 0;
}

static void
qls_error_recovery(void *context, int pending)
{
        qla_host_t *ha = context;

	qls_init(ha);

	return;
}