1/*
2 * Copyright (c) 2013-2014 Qlogic Corporation
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
29 * File: ql_hw.c
30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31 * Content: Contains Hardware dependant functions
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD$");
36
37#include "ql_os.h"
38#include "ql_hw.h"
39#include "ql_def.h"
40#include "ql_inline.h"
41#include "ql_ver.h"
42#include "ql_glbl.h"
43#include "ql_dbg.h"
44
45/*
46 * Static Functions
47 */
48
49static void qla_del_rcv_cntxt(qla_host_t *ha);
50static int qla_init_rcv_cntxt(qla_host_t *ha);
51static void qla_del_xmt_cntxt(qla_host_t *ha);
52static int qla_init_xmt_cntxt(qla_host_t *ha);
53static void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx);
54static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
55	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
56static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs,
57	uint32_t create);
58static int qla_get_nic_partition(qla_host_t *ha);
59static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
60static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
61	int tenable);
62static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
63static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);
64
65static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
66		uint8_t *hdr);
67static int qla_hw_add_all_mcast(qla_host_t *ha);
68static int qla_hw_del_all_mcast(qla_host_t *ha);
69static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx);
70
71static int qla_minidump_init(qla_host_t *ha);
72static void qla_minidump_free(qla_host_t *ha);
73
74
75static int
76qla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
77{
78        int err = 0, ret;
79        qla_host_t *ha;
80	uint32_t i;
81
82        err = sysctl_handle_int(oidp, &ret, 0, req);
83
84        if (err || !req->newptr)
85                return (err);
86
87        if (ret == 1) {
88
89                ha = (qla_host_t *)arg1;
90
91		for (i = 0; i < ha->hw.num_sds_rings; i++)
92			device_printf(ha->pci_dev,
93				"%s: sds_ring[%d] = %p\n", __func__,i,
94				(void *)ha->hw.sds[i].intr_count);
95
96		for (i = 0; i < ha->hw.num_tx_rings; i++)
97			device_printf(ha->pci_dev,
98				"%s: tx[%d] = %p\n", __func__,i,
99				(void *)ha->tx_ring[i].count);
100
101		for (i = 0; i < ha->hw.num_rds_rings; i++)
102			device_printf(ha->pci_dev,
103				"%s: rds_ring[%d] = %p\n", __func__,i,
104				(void *)ha->hw.rds[i].count);
105
106		device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__,
107			(void *)ha->lro_pkt_count);
108
109		device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__,
110			(void *)ha->lro_bytes);
111	}
112	return (err);
113}
114
115#ifdef QL_DBG
116
117static void
118qla_stop_pegs(qla_host_t *ha)
119{
120        uint32_t val = 1;
121
122        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
123        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
124        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
125        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
126        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
127        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
128}
129
130static int
131qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
132{
133	int err, ret = 0;
134	qla_host_t *ha;
135
136	err = sysctl_handle_int(oidp, &ret, 0, req);
137
138
139	if (err || !req->newptr)
140		return (err);
141
142	if (ret == 1) {
143		ha = (qla_host_t *)arg1;
144		(void)QLA_LOCK(ha, __func__, 0);
145		qla_stop_pegs(ha);
146		QLA_UNLOCK(ha, __func__);
147	}
148
149	return err;
150}
151#endif /* #ifdef QL_DBG */
152
153/*
154 * Name: ql_hw_add_sysctls
155 * Function: Add P3Plus specific sysctls
156 */
157void
158ql_hw_add_sysctls(qla_host_t *ha)
159{
160        device_t	dev;
161
162        dev = ha->pci_dev;
163
164	ha->hw.num_sds_rings = MAX_SDS_RINGS;
165	ha->hw.num_rds_rings = MAX_RDS_RINGS;
166	ha->hw.num_tx_rings = NUM_TX_RINGS;
167
168	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
169		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
170		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
171		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");
172
173        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
174                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
175                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
176		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");
177
178        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
179                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
180                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
181		ha->hw.num_tx_rings, "Number of Transmit Rings");
182
183        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
184                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
185                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
186		ha->txr_idx, "Tx Ring Used");
187
188	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
189		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
190		OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW,
191		(void *)ha, 0,
192		qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics");
193
194        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
195                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
196                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
197		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");
198
199	ha->hw.sds_cidx_thres = 32;
200        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
201                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
202                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
203		ha->hw.sds_cidx_thres,
204		"Number of SDS entries to process before updating"
205		" SDS Ring Consumer Index");
206
207	ha->hw.rds_pidx_thres = 32;
208        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
209                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
210                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
211		ha->hw.rds_pidx_thres,
212		"Number of Rcv Rings Entries to post before updating"
213		" RDS Ring Producer Index");
214
215	ha->hw.min_lro_pkt_size = 512;
216	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
217		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
218		OID_AUTO, "min_lro_pkt_size", CTLFLAG_RD, &ha->hw.min_lro_pkt_size,
219		ha->hw.min_lro_pkt_size, "minimum packet size to trigger lro");
220
221	ha->hw.mdump_active = 0;
222        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
223                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
224                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
225		ha->hw.mdump_active,
226		"Minidump Utility is Active \n"
227		"\t 0 = Minidump Utility is not active\n"
228		"\t 1 = Minidump Utility is retrieved on this port\n"
229		"\t 2 = Minidump Utility is retrieved on the other port\n");
230
231	ha->hw.mdump_start = 0;
232        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
233                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
234                OID_AUTO, "minidump_start", CTLFLAG_RW,
235		&ha->hw.mdump_start, ha->hw.mdump_start,
236		"Minidump Utility can start minidump process");
237#ifdef QL_DBG
238
239        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
240                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
241                OID_AUTO, "err_inject",
242                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
243                "Error to be injected\n"
244                "\t\t\t 0: No Errors\n"
245                "\t\t\t 1: rcv: rxb struct invalid\n"
246                "\t\t\t 2: rcv: mp == NULL\n"
247                "\t\t\t 3: lro: rxb struct invalid\n"
248                "\t\t\t 4: lro: mp == NULL\n"
249                "\t\t\t 5: rcv: num handles invalid\n"
250                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
251                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
252                "\t\t\t 8: mbx: mailbox command failure\n"
253                "\t\t\t 9: heartbeat failure\n"
254                "\t\t\t A: temperature failure\n" );
255
256	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
257                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
258                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
259                (void *)ha, 0,
260                qla_sysctl_stop_pegs, "I", "Peg Stop");
261
262#endif /* #ifdef QL_DBG */
263
264}
265
266void
267ql_hw_link_status(qla_host_t *ha)
268{
269	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);
270
271	if (ha->hw.link_up) {
272		device_printf(ha->pci_dev, "link Up\n");
273	} else {
274		device_printf(ha->pci_dev, "link Down\n");
275	}
276
277	if (ha->hw.flags.fduplex) {
278		device_printf(ha->pci_dev, "Full Duplex\n");
279	} else {
280		device_printf(ha->pci_dev, "Half Duplex\n");
281	}
282
283	if (ha->hw.flags.autoneg) {
284		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
285	} else {
286		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
287	}
288
289	switch (ha->hw.link_speed) {
290	case 0x710:
291		device_printf(ha->pci_dev, "link speed\t\t 10Gps\n");
292		break;
293
294	case 0x3E8:
295		device_printf(ha->pci_dev, "link speed\t\t 1Gps\n");
296		break;
297
298	case 0x64:
299		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
300		break;
301
302	default:
303		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
304		break;
305	}
306
307	switch (ha->hw.module_type) {
308
309	case 0x01:
310		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
311		break;
312
313	case 0x02:
314		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
315		break;
316
317	case 0x03:
318		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
319		break;
320
321	case 0x04:
322		device_printf(ha->pci_dev,
323			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
324			ha->hw.cable_length);
325		break;
326
327	case 0x05:
328		device_printf(ha->pci_dev, "Module Type 10GE Active"
329			" Limiting Copper(Compliant)[%d m]\n",
330			ha->hw.cable_length);
331		break;
332
333	case 0x06:
334		device_printf(ha->pci_dev,
335			"Module Type 10GE Passive Copper"
336			" (Legacy, Best Effort)[%d m]\n",
337			ha->hw.cable_length);
338		break;
339
340	case 0x07:
341		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
342		break;
343
344	case 0x08:
345		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
346		break;
347
348	case 0x09:
349		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
350		break;
351
352	case 0x0A:
353		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
354		break;
355
356	case 0x0B:
357		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
358			"(Legacy, Best Effort)\n");
359		break;
360
361	default:
362		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
363			ha->hw.module_type);
364		break;
365	}
366
367	if (ha->hw.link_faults == 1)
368		device_printf(ha->pci_dev, "SFP Power Fault\n");
369}
370
371/*
372 * Name: ql_free_dma
373 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
374 */
375void
376ql_free_dma(qla_host_t *ha)
377{
378	uint32_t i;
379
380        if (ha->hw.dma_buf.flags.sds_ring) {
381		for (i = 0; i < ha->hw.num_sds_rings; i++) {
382			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
383		}
384        	ha->hw.dma_buf.flags.sds_ring = 0;
385	}
386
387        if (ha->hw.dma_buf.flags.rds_ring) {
388		for (i = 0; i < ha->hw.num_rds_rings; i++) {
389			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
390		}
391        	ha->hw.dma_buf.flags.rds_ring = 0;
392	}
393
394        if (ha->hw.dma_buf.flags.tx_ring) {
395		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
396        	ha->hw.dma_buf.flags.tx_ring = 0;
397	}
398	qla_minidump_free(ha);
399}
400
401/*
402 * Name: ql_alloc_dma
403 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
404 */
405int
406ql_alloc_dma(qla_host_t *ha)
407{
408        device_t                dev;
409	uint32_t		i, j, size, tx_ring_size;
410	qla_hw_t		*hw;
411	qla_hw_tx_cntxt_t	*tx_cntxt;
412	uint8_t			*vaddr;
413	bus_addr_t		paddr;
414
415        dev = ha->pci_dev;
416
417        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
418
419	hw = &ha->hw;
420	/*
421	 * Allocate Transmit Ring
422	 */
423	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
424	size = (tx_ring_size * ha->hw.num_tx_rings);
425
426	hw->dma_buf.tx_ring.alignment = 8;
427	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
428
429        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
430                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
431                goto ql_alloc_dma_exit;
432        }
433
434	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
435	paddr = hw->dma_buf.tx_ring.dma_addr;
436
437	for (i = 0; i < ha->hw.num_tx_rings; i++) {
438		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
439
440		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
441		tx_cntxt->tx_ring_paddr = paddr;
442
443		vaddr += tx_ring_size;
444		paddr += tx_ring_size;
445	}
446
447	for (i = 0; i < ha->hw.num_tx_rings; i++) {
448		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];
449
450		tx_cntxt->tx_cons = (uint32_t *)vaddr;
451		tx_cntxt->tx_cons_paddr = paddr;
452
453		vaddr += sizeof (uint32_t);
454		paddr += sizeof (uint32_t);
455	}
456
457        ha->hw.dma_buf.flags.tx_ring = 1;
458
459	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
460		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
461		hw->dma_buf.tx_ring.dma_b));
462	/*
463	 * Allocate Receive Descriptor Rings
464	 */
465
466	for (i = 0; i < hw->num_rds_rings; i++) {
467
468		hw->dma_buf.rds_ring[i].alignment = 8;
469		hw->dma_buf.rds_ring[i].size =
470			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
471
472		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
473			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
474				__func__, i);
475
476			for (j = 0; j < i; j++)
477				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);
478
479			goto ql_alloc_dma_exit;
480		}
481		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
482			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
483			hw->dma_buf.rds_ring[i].dma_b));
484	}
485
486	hw->dma_buf.flags.rds_ring = 1;
487
488	/*
489	 * Allocate Status Descriptor Rings
490	 */
491
492	for (i = 0; i < hw->num_sds_rings; i++) {
493		hw->dma_buf.sds_ring[i].alignment = 8;
494		hw->dma_buf.sds_ring[i].size =
495			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;
496
497		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
498			device_printf(dev, "%s: sds ring alloc failed\n",
499				__func__);
500
501			for (j = 0; j < i; j++)
502				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);
503
504			goto ql_alloc_dma_exit;
505		}
506		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
507			__func__, i,
508			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
509			hw->dma_buf.sds_ring[i].dma_b));
510	}
511	for (i = 0; i < hw->num_sds_rings; i++) {
512		hw->sds[i].sds_ring_base =
513			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
514	}
515
516	hw->dma_buf.flags.sds_ring = 1;
517
518	return 0;
519
520ql_alloc_dma_exit:
521	ql_free_dma(ha);
522	return -1;
523}
524
525#define Q8_MBX_MSEC_DELAY	5000
526
527static int
528qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
529	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
530{
531	uint32_t i;
532	uint32_t data;
533	int ret = 0;
534
535	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
536		ret = -3;
537		ha->qla_initiate_recovery = 1;
538		goto exit_qla_mbx_cmd;
539	}
540
541	if (no_pause)
542		i = 1000;
543	else
544		i = Q8_MBX_MSEC_DELAY;
545
546	while (i) {
547		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
548		if (data == 0)
549			break;
550		if (no_pause) {
551			DELAY(1000);
552		} else {
553			qla_mdelay(__func__, 1);
554		}
555		i--;
556	}
557
558	if (i == 0) {
559		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
560			__func__, data);
561		ret = -1;
562		ha->qla_initiate_recovery = 1;
563		goto exit_qla_mbx_cmd;
564	}
565
566	for (i = 0; i < n_hmbox; i++) {
567		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
568		h_mbox++;
569	}
570
571	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);
572
573
574	i = Q8_MBX_MSEC_DELAY;
575	while (i) {
576		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
577
578		if ((data & 0x3) == 1) {
579			data = READ_REG32(ha, Q8_FW_MBOX0);
580			if ((data & 0xF000) != 0x8000)
581				break;
582		}
583		if (no_pause) {
584			DELAY(1000);
585		} else {
586			qla_mdelay(__func__, 1);
587		}
588		i--;
589	}
590	if (i == 0) {
591		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
592			__func__, data);
593		ret = -2;
594		ha->qla_initiate_recovery = 1;
595		goto exit_qla_mbx_cmd;
596	}
597
598	for (i = 0; i < n_fwmbox; i++) {
599		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
600	}
601
602	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
603	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
604
605exit_qla_mbx_cmd:
606	return (ret);
607}
608
609static int
610qla_get_nic_partition(qla_host_t *ha)
611{
612	uint32_t *mbox, err;
613	device_t dev = ha->pci_dev;
614
615	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));
616
617	mbox = ha->hw.mbox;
618
619	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);
620
621	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
622		device_printf(dev, "%s: failed0\n", __func__);
623		return (-1);
624	}
625	err = mbox[0] >> 25;
626
627	if ((err != 1) && (err != 0)) {
628		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
629		return (-1);
630	}
631	return 0;
632}
633
634static int
635qla_config_intr_cntxt(qla_host_t *ha, uint32_t num_intrs, uint32_t create)
636{
637	uint32_t i, err;
638	device_t dev = ha->pci_dev;
639	q80_config_intr_t *c_intr;
640	q80_config_intr_rsp_t *c_intr_rsp;
641
642	c_intr = (q80_config_intr_t *)ha->hw.mbox;
643	bzero(c_intr, (sizeof (q80_config_intr_t)));
644
645	c_intr->opcode = Q8_MBX_CONFIG_INTR;
646
647	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
648	c_intr->count_version |= Q8_MBX_CMD_VERSION;
649
650	c_intr->nentries = num_intrs;
651
652	for (i = 0; i < num_intrs; i++) {
653		if (create) {
654			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
655			c_intr->intr[i].msix_index = i + 1;
656		} else {
657			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
658			c_intr->intr[i].msix_index = ha->hw.intr_id[i];
659		}
660
661		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
662	}
663
664	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
665		(sizeof (q80_config_intr_t) >> 2),
666		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
667		device_printf(dev, "%s: failed0\n", __func__);
668		return (-1);
669	}
670
671	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;
672
673	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);
674
675	if (err) {
676		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
677			c_intr_rsp->nentries);
678
679		for (i = 0; i < c_intr_rsp->nentries; i++) {
680			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
681				__func__, i,
682				c_intr_rsp->intr[i].status,
683				c_intr_rsp->intr[i].intr_id,
684				c_intr_rsp->intr[i].intr_src);
685		}
686
687		return (-1);
688	}
689
690	for (i = 0; ((i < num_intrs) && create); i++) {
691		if (!c_intr_rsp->intr[i].status) {
692			ha->hw.intr_id[i] = c_intr_rsp->intr[i].intr_id;
693			ha->hw.intr_src[i] = c_intr_rsp->intr[i].intr_src;
694		}
695	}
696
697	return (0);
698}
699
700/*
701 * Name: qla_config_rss
702 * Function: Configure RSS for the context/interface.
703 */
704static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
705			0x8030f20c77cb2da3ULL,
706			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
707			0x255b0ec26d5a56daULL };
708
709static int
710qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
711{
712	q80_config_rss_t	*c_rss;
713	q80_config_rss_rsp_t	*c_rss_rsp;
714	uint32_t		err, i;
715	device_t		dev = ha->pci_dev;
716
717	c_rss = (q80_config_rss_t *)ha->hw.mbox;
718	bzero(c_rss, (sizeof (q80_config_rss_t)));
719
720	c_rss->opcode = Q8_MBX_CONFIG_RSS;
721
722	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
723	c_rss->count_version |= Q8_MBX_CMD_VERSION;
724
725	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
726				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
727
728	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
729	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;
730
731	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
732
733	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
734	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;
735
736	c_rss->cntxt_id = cntxt_id;
737
738	for (i = 0; i < 5; i++) {
739		c_rss->rss_key[i] = rss_key[i];
740	}
741
742	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
743		(sizeof (q80_config_rss_t) >> 2),
744		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
745		device_printf(dev, "%s: failed0\n", __func__);
746		return (-1);
747	}
748	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;
749
750	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);
751
752	if (err) {
753		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
754		return (-1);
755	}
756	return 0;
757}
758
759static uint8_t rss_ind_default_table[Q8_RSS_IND_TBL_SIZE];
760
761static int
762qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
763        uint16_t cntxt_id, uint8_t *ind_table)
764{
765        q80_config_rss_ind_table_t      *c_rss_ind;
766        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
767        uint32_t                        err;
768        device_t                        dev = ha->pci_dev;
769
770	if ((count > Q8_RSS_IND_TBL_SIZE) ||
771		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
772		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
773			start_idx, count);
774		return (-1);
775	}
776
777        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
778        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
779
780        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
781        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
782        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;
783
784	c_rss_ind->start_idx = start_idx;
785	c_rss_ind->end_idx = start_idx + count - 1;
786	c_rss_ind->cntxt_id = cntxt_id;
787	bcopy(ind_table, c_rss_ind->ind_table, count);
788
789	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
790		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
791		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
792		device_printf(dev, "%s: failed0\n", __func__);
793		return (-1);
794	}
795
796	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
797	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);
798
799	if (err) {
800		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
801		return (-1);
802	}
803	return 0;
804}
805
806/*
807 * Name: qla_config_intr_coalesce
808 * Function: Configure Interrupt Coalescing.
809 */
810static int
811qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable)
812{
813	q80_config_intr_coalesc_t	*intrc;
814	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
815	uint32_t			err, i;
816	device_t			dev = ha->pci_dev;
817
818	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
819	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));
820
821	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
822	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
823	intrc->count_version |= Q8_MBX_CMD_VERSION;
824
825	intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
826	intrc->cntxt_id = cntxt_id;
827
828	intrc->max_pkts = 256;
829	intrc->max_mswait = 3;
830
831	if (tenable) {
832		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
833		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;
834
835		for (i = 0; i < ha->hw.num_sds_rings; i++) {
836			intrc->sds_ring_mask |= (1 << i);
837		}
838		intrc->ms_timeout = 1000;
839	}
840
841	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
842		(sizeof (q80_config_intr_coalesc_t) >> 2),
843		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
844		device_printf(dev, "%s: failed0\n", __func__);
845		return (-1);
846	}
847	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;
848
849	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);
850
851	if (err) {
852		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
853		return (-1);
854	}
855
856	return 0;
857}
858
859
860/*
861 * Name: qla_config_mac_addr
862 * Function: binds a MAC address to the context/interface.
863 *	Can be unicast, multicast or broadcast.
864 */
865static int
866qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
867{
868	q80_config_mac_addr_t		*cmac;
869	q80_config_mac_addr_rsp_t	*cmac_rsp;
870	uint32_t			err;
871	device_t			dev = ha->pci_dev;
872
873	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
874	bzero(cmac, (sizeof (q80_config_mac_addr_t)));
875
876	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
877	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
878	cmac->count_version |= Q8_MBX_CMD_VERSION;
879
880	if (add_mac)
881		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
882	else
883		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;
884
885	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;
886
887	cmac->nmac_entries = 1;
888	cmac->cntxt_id = ha->hw.rcv_cntxt_id;
889	bcopy(mac_addr, cmac->mac_addr[0].addr, 6);
890
891	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
892		(sizeof (q80_config_mac_addr_t) >> 2),
893		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
894		device_printf(dev, "%s: %s failed0\n", __func__,
895			(add_mac ? "Add" : "Del"));
896		return (-1);
897	}
898	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;
899
900	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);
901
902	if (err) {
903		device_printf(dev, "%s: %s "
904			"%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n",
905			__func__, (add_mac ? "Add" : "Del"),
906			mac_addr[0], mac_addr[1], mac_addr[2],
907			mac_addr[3], mac_addr[4], mac_addr[5], err);
908		return (-1);
909	}
910
911	return 0;
912}
913
914
915/*
916 * Name: qla_set_mac_rcv_mode
917 * Function: Enable/Disable AllMulticast and Promiscous Modes.
918 */
919static int
920qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
921{
922	q80_config_mac_rcv_mode_t	*rcv_mode;
923	uint32_t			err;
924	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
925	device_t			dev = ha->pci_dev;
926
927	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
928	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));
929
930	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
931	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
932	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;
933
934	rcv_mode->mode = mode;
935
936	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;
937
938	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
939		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
940		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
941		device_printf(dev, "%s: failed0\n", __func__);
942		return (-1);
943	}
944	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;
945
946	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);
947
948	if (err) {
949		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
950		return (-1);
951	}
952
953	return 0;
954}
955
956int
957ql_set_promisc(qla_host_t *ha)
958{
959	int ret;
960
961	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
962	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
963	return (ret);
964}
965
966int
967ql_set_allmulti(qla_host_t *ha)
968{
969	int ret;
970
971	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
972	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
973	return (ret);
974}
975
976
977/*
978 * Name: ql_set_max_mtu
979 * Function:
980 *	Sets the maximum transfer unit size for the specified rcv context.
981 */
982int
983ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
984{
985	device_t		dev;
986	q80_set_max_mtu_t	*max_mtu;
987	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
988	uint32_t		err;
989
990	dev = ha->pci_dev;
991
992	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
993	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));
994
995	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
996	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
997	max_mtu->count_version |= Q8_MBX_CMD_VERSION;
998
999	max_mtu->cntxt_id = cntxt_id;
1000	max_mtu->mtu = mtu;
1001
1002        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
1003		(sizeof (q80_set_max_mtu_t) >> 2),
1004                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
1005                device_printf(dev, "%s: failed\n", __func__);
1006                return -1;
1007        }
1008
1009	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;
1010
1011        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);
1012
1013        if (err) {
1014                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1015        }
1016
1017	return 0;
1018}
1019
1020static int
1021qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
1022{
1023	device_t		dev;
1024	q80_link_event_t	*lnk;
1025	q80_link_event_rsp_t	*lnk_rsp;
1026	uint32_t		err;
1027
1028	dev = ha->pci_dev;
1029
1030	lnk = (q80_link_event_t *)ha->hw.mbox;
1031	bzero(lnk, (sizeof (q80_link_event_t)));
1032
1033	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
1034	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
1035	lnk->count_version |= Q8_MBX_CMD_VERSION;
1036
1037	lnk->cntxt_id = cntxt_id;
1038	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;
1039
1040        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
1041                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
1042                device_printf(dev, "%s: failed\n", __func__);
1043                return -1;
1044        }
1045
1046	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;
1047
1048        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);
1049
1050        if (err) {
1051                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1052        }
1053
1054	return 0;
1055}
1056
1057static int
1058qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
1059{
1060	device_t		dev;
1061	q80_config_fw_lro_t	*fw_lro;
1062	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
1063	uint32_t		err;
1064
1065	dev = ha->pci_dev;
1066
1067	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
1068	bzero(fw_lro, sizeof(q80_config_fw_lro_t));
1069
1070	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
1071	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
1072	fw_lro->count_version |= Q8_MBX_CMD_VERSION;
1073
1074	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
1075
1076	fw_lro->cntxt_id = cntxt_id;
1077
1078	if (ha->hw.min_lro_pkt_size) {
1079		fw_lro->flags |= Q8_MBX_FW_LRO_LOW_THRESHOLD;
1080		fw_lro->low_threshold = ha->hw.min_lro_pkt_size;
1081	}
1082
1083	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
1084		(sizeof (q80_config_fw_lro_t) >> 2),
1085		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
1086		device_printf(dev, "%s: failed\n", __func__);
1087		return -1;
1088	}
1089
1090	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;
1091
1092	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);
1093
1094	if (err) {
1095		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
1096	}
1097
1098	return 0;
1099}
1100
1101static void
1102qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat)
1103{
1104	device_t dev = ha->pci_dev;
1105
1106	device_printf(dev, "%s: total_bytes\t\t%" PRIu64 "\n", __func__,
1107		xstat->total_bytes);
1108	device_printf(dev, "%s: total_pkts\t\t%" PRIu64 "\n", __func__,
1109		xstat->total_pkts);
1110	device_printf(dev, "%s: errors\t\t%" PRIu64 "\n", __func__,
1111		xstat->errors);
1112	device_printf(dev, "%s: pkts_dropped\t%" PRIu64 "\n", __func__,
1113		xstat->pkts_dropped);
1114	device_printf(dev, "%s: switch_pkts\t\t%" PRIu64 "\n", __func__,
1115		xstat->switch_pkts);
1116	device_printf(dev, "%s: num_buffers\t\t%" PRIu64 "\n", __func__,
1117		xstat->num_buffers);
1118}
1119
1120static void
1121qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
1122{
1123	device_t dev = ha->pci_dev;
1124
1125	device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
1126		rstat->total_bytes);
1127	device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
1128		rstat->total_pkts);
1129	device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
1130		rstat->lro_pkt_count);
1131	device_printf(dev, "%s: sw_pkt_count\t\t%" PRIu64 "\n", __func__,
1132		rstat->sw_pkt_count);
1133	device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
1134		rstat->ip_chksum_err);
1135	device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
1136		rstat->pkts_wo_acntxts);
1137	device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
1138		__func__, rstat->pkts_dropped_no_sds_card);
1139	device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
1140		__func__, rstat->pkts_dropped_no_sds_host);
1141	device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
1142		rstat->oversized_pkts);
1143	device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
1144		__func__, rstat->pkts_dropped_no_rds);
1145	device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
1146		__func__, rstat->unxpctd_mcast_pkts);
1147	device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
1148		rstat->re1_fbq_error);
1149	device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
1150		rstat->invalid_mac_addr);
1151	device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
1152		rstat->rds_prime_trys);
1153	device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
1154		rstat->rds_prime_success);
1155	device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
1156		rstat->lro_flows_added);
1157	device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
1158		rstat->lro_flows_deleted);
1159	device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
1160		rstat->lro_flows_active);
1161	device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
1162		__func__, rstat->pkts_droped_unknown);
1163}
1164
1165static void
1166qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
1167{
1168	device_t dev = ha->pci_dev;
1169
1170	device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
1171		mstat->xmt_frames);
1172	device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
1173		mstat->xmt_bytes);
1174	device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1175		mstat->xmt_mcast_pkts);
1176	device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1177		mstat->xmt_bcast_pkts);
1178	device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
1179		mstat->xmt_pause_frames);
1180	device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1181		mstat->xmt_cntrl_pkts);
1182	device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1183		__func__, mstat->xmt_pkt_lt_64bytes);
1184	device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1185		__func__, mstat->xmt_pkt_lt_127bytes);
1186	device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1187		__func__, mstat->xmt_pkt_lt_255bytes);
1188	device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1189		__func__, mstat->xmt_pkt_lt_511bytes);
1190	device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t%" PRIu64 "\n",
1191		__func__, mstat->xmt_pkt_lt_1023bytes);
1192	device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t%" PRIu64 "\n",
1193		__func__, mstat->xmt_pkt_lt_1518bytes);
1194	device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t%" PRIu64 "\n",
1195		__func__, mstat->xmt_pkt_gt_1518bytes);
1196
1197	device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__,
1198		mstat->rcv_frames);
1199	device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__,
1200		mstat->rcv_bytes);
1201	device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__,
1202		mstat->rcv_mcast_pkts);
1203	device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__,
1204		mstat->rcv_bcast_pkts);
1205	device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__,
1206		mstat->rcv_pause_frames);
1207	device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
1208		mstat->rcv_cntrl_pkts);
1209	device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n",
1210		__func__, mstat->rcv_pkt_lt_64bytes);
1211	device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n",
1212		__func__, mstat->rcv_pkt_lt_127bytes);
1213	device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n",
1214		__func__, mstat->rcv_pkt_lt_255bytes);
1215	device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n",
1216		__func__, mstat->rcv_pkt_lt_511bytes);
1217	device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t%" PRIu64 "\n",
1218		__func__, mstat->rcv_pkt_lt_1023bytes);
1219	device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t%" PRIu64 "\n",
1220		__func__, mstat->rcv_pkt_lt_1518bytes);
1221	device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t%" PRIu64 "\n",
1222		__func__, mstat->rcv_pkt_gt_1518bytes);
1223
1224	device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__,
1225		mstat->rcv_len_error);
1226	device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__,
1227		mstat->rcv_len_small);
1228	device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__,
1229		mstat->rcv_len_large);
1230	device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__,
1231		mstat->rcv_jabber);
1232	device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__,
1233		mstat->rcv_dropped);
1234	device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__,
1235		mstat->fcs_error);
1236	device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__,
1237		mstat->align_error);
1238}
1239
1240
1241static int
1242qla_get_hw_stats(qla_host_t *ha, uint32_t cmd)
1243{
1244	device_t		dev;
1245	q80_get_stats_t		*stat;
1246	q80_get_stats_rsp_t	*stat_rsp;
1247	uint32_t		err;
1248
1249	dev = ha->pci_dev;
1250
1251	stat = (q80_get_stats_t *)ha->hw.mbox;
1252	bzero(stat, (sizeof (q80_get_stats_t)));
1253
1254	stat->opcode = Q8_MBX_GET_STATS;
1255	stat->count_version = 2;
1256	stat->count_version |= Q8_MBX_CMD_VERSION;
1257
1258	stat->cmd = cmd;
1259
1260        if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
1261                ha->hw.mbox, (sizeof (q80_get_stats_rsp_t) >> 2), 0)) {
1262                device_printf(dev, "%s: failed\n", __func__);
1263                return -1;
1264        }
1265
1266	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1267
1268        err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);
1269
1270        if (err) {
1271                return -1;
1272        }
1273
1274	return 0;
1275}
1276
1277void
1278ql_get_stats(qla_host_t *ha)
1279{
1280	q80_get_stats_rsp_t	*stat_rsp;
1281	q80_mac_stats_t		*mstat;
1282	q80_xmt_stats_t		*xstat;
1283	q80_rcv_stats_t		*rstat;
1284	uint32_t		cmd;
1285
1286	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
1287	/*
1288	 * Get MAC Statistics
1289	 */
1290	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
1291
1292	cmd |= ((ha->pci_func & 0x1) << 16);
1293
1294	if (qla_get_hw_stats(ha, cmd) == 0) {
1295		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
1296		qla_mac_stats(ha, mstat);
1297	} else {
1298                device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
1299			__func__, ha->hw.mbox[0]);
1300	}
1301	/*
1302	 * Get RCV Statistics
1303	 */
1304	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
1305	cmd |= (ha->hw.rcv_cntxt_id << 16);
1306
1307	if (qla_get_hw_stats(ha, cmd) == 0) {
1308		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
1309		qla_rcv_stats(ha, rstat);
1310	} else {
1311                device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
1312			__func__, ha->hw.mbox[0]);
1313	}
1314	/*
1315	 * Get XMT Statistics
1316	 */
1317	cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
1318	cmd |= (ha->hw.tx_cntxt[ha->txr_idx].tx_cntxt_id << 16);
1319
1320
1321	if (qla_get_hw_stats(ha, cmd) == 0) {
1322		xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
1323		qla_xmt_stats(ha, xstat);
1324	} else {
1325                device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
1326			__func__, ha->hw.mbox[0]);
1327	}
1328}
1329
1330/*
1331 * Name: qla_tx_tso
1332 * Function: Checks if the packet to be transmitted is a candidate for
1333 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
1334 *	Ring Structure are plugged in.
1335 */
1336static int
1337qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
1338{
1339	struct ether_vlan_header *eh;
1340	struct ip *ip = NULL;
1341	struct ip6_hdr *ip6 = NULL;
1342	struct tcphdr *th = NULL;
1343	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
1344	uint16_t etype, opcode, offload = 1;
1345	device_t dev;
1346
1347	dev = ha->pci_dev;
1348
1349
1350	eh = mtod(mp, struct ether_vlan_header *);
1351
1352	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1353		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1354		etype = ntohs(eh->evl_proto);
1355	} else {
1356		ehdrlen = ETHER_HDR_LEN;
1357		etype = ntohs(eh->evl_encap_proto);
1358	}
1359
1360	hdrlen = 0;
1361
1362	switch (etype) {
1363		case ETHERTYPE_IP:
1364
1365			tcp_opt_off = ehdrlen + sizeof(struct ip) +
1366					sizeof(struct tcphdr);
1367
1368			if (mp->m_len < tcp_opt_off) {
1369				m_copydata(mp, 0, tcp_opt_off, hdr);
1370				ip = (struct ip *)(hdr + ehdrlen);
1371			} else {
1372				ip = (struct ip *)(mp->m_data + ehdrlen);
1373			}
1374
1375			ip_hlen = ip->ip_hl << 2;
1376			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;
1377
1378
1379			if ((ip->ip_p != IPPROTO_TCP) ||
1380				(ip_hlen != sizeof (struct ip))){
1381				/* IP Options are not supported */
1382
1383				offload = 0;
1384			} else
1385				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
1386
1387		break;
1388
1389		case ETHERTYPE_IPV6:
1390
1391			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
1392					sizeof (struct tcphdr);
1393
1394			if (mp->m_len < tcp_opt_off) {
1395				m_copydata(mp, 0, tcp_opt_off, hdr);
1396				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
1397			} else {
1398				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1399			}
1400
1401			ip_hlen = sizeof(struct ip6_hdr);
1402			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;
1403
1404			if (ip6->ip6_nxt != IPPROTO_TCP) {
1405				//device_printf(dev, "%s: ipv6\n", __func__);
1406				offload = 0;
1407			} else
1408				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
1409		break;
1410
1411		default:
1412			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
1413			offload = 0;
1414		break;
1415	}
1416
1417	if (!offload)
1418		return (-1);
1419
1420	tcp_hlen = th->th_off << 2;
1421	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
1422
1423        if (mp->m_len < hdrlen) {
1424                if (mp->m_len < tcp_opt_off) {
1425                        if (tcp_hlen > sizeof(struct tcphdr)) {
1426                                m_copydata(mp, tcp_opt_off,
1427                                        (tcp_hlen - sizeof(struct tcphdr)),
1428                                        &hdr[tcp_opt_off]);
1429                        }
1430                } else {
1431                        m_copydata(mp, 0, hdrlen, hdr);
1432                }
1433        }
1434
1435	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
1436
1437	tx_cmd->flags_opcode = opcode ;
1438	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
1439	tx_cmd->total_hdr_len = hdrlen;
1440
1441	/* Check for Multicast least significant bit of MSB == 1 */
1442	if (eh->evl_dhost[0] & 0x01) {
1443		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
1444	}
1445
1446	if (mp->m_len < hdrlen) {
1447		printf("%d\n", hdrlen);
1448		return (1);
1449	}
1450
1451	return (0);
1452}
1453
1454/*
1455 * Name: qla_tx_chksum
1456 * Function: Checks if the packet to be transmitted is a candidate for
1457 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
1458 *	Ring Structure are plugged in.
1459 */
1460static int
1461qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
1462	uint32_t *tcp_hdr_off)
1463{
1464	struct ether_vlan_header *eh;
1465	struct ip *ip;
1466	struct ip6_hdr *ip6;
1467	uint32_t ehdrlen, ip_hlen;
1468	uint16_t etype, opcode, offload = 1;
1469	device_t dev;
1470	uint8_t buf[sizeof(struct ip6_hdr)];
1471
1472	dev = ha->pci_dev;
1473
1474	*op_code = 0;
1475
1476	if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0)
1477		return (-1);
1478
1479	eh = mtod(mp, struct ether_vlan_header *);
1480
1481	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1482		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1483		etype = ntohs(eh->evl_proto);
1484	} else {
1485		ehdrlen = ETHER_HDR_LEN;
1486		etype = ntohs(eh->evl_encap_proto);
1487	}
1488
1489
1490	switch (etype) {
1491		case ETHERTYPE_IP:
1492			ip = (struct ip *)(mp->m_data + ehdrlen);
1493
1494			ip_hlen = sizeof (struct ip);
1495
1496			if (mp->m_len < (ehdrlen + ip_hlen)) {
1497				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
1498				ip = (struct ip *)buf;
1499			}
1500
1501			if (ip->ip_p == IPPROTO_TCP)
1502				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
1503			else if (ip->ip_p == IPPROTO_UDP)
1504				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
1505			else {
1506				//device_printf(dev, "%s: ipv4\n", __func__);
1507				offload = 0;
1508			}
1509		break;
1510
1511		case ETHERTYPE_IPV6:
1512			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1513
1514			ip_hlen = sizeof(struct ip6_hdr);
1515
1516			if (mp->m_len < (ehdrlen + ip_hlen)) {
1517				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
1518					buf);
1519				ip6 = (struct ip6_hdr *)buf;
1520			}
1521
1522			if (ip6->ip6_nxt == IPPROTO_TCP)
1523				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
1524			else if (ip6->ip6_nxt == IPPROTO_UDP)
1525				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
1526			else {
1527				//device_printf(dev, "%s: ipv6\n", __func__);
1528				offload = 0;
1529			}
1530		break;
1531
1532		default:
1533			offload = 0;
1534		break;
1535	}
1536	if (!offload)
1537		return (-1);
1538
1539	*op_code = opcode;
1540	*tcp_hdr_off = (ip_hlen + ehdrlen);
1541
1542	return (0);
1543}
1544
1545#define QLA_TX_MIN_FREE 2
1546/*
1547 * Name: ql_hw_send
1548 * Function: Transmits a packet. It first checks if the packet is a
1549 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
1550 *	offload. If either of these creteria are not met, it is transmitted
1551 *	as a regular ethernet frame.
1552 */
1553int
1554ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
1555	uint32_t tx_idx,  struct mbuf *mp, uint32_t txr_idx)
1556{
1557	struct ether_vlan_header *eh;
1558	qla_hw_t *hw = &ha->hw;
1559	q80_tx_cmd_t *tx_cmd, tso_cmd;
1560	bus_dma_segment_t *c_seg;
1561	uint32_t num_tx_cmds, hdr_len = 0;
1562	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
1563	device_t dev;
1564	int i, ret;
1565	uint8_t *src = NULL, *dst = NULL;
1566	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
1567	uint32_t op_code = 0;
1568	uint32_t tcp_hdr_off = 0;
1569
1570	dev = ha->pci_dev;
1571
1572	/*
1573	 * Always make sure there is atleast one empty slot in the tx_ring
1574	 * tx_ring is considered full when there only one entry available
1575	 */
1576        num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;
1577
1578	total_length = mp->m_pkthdr.len;
1579	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
1580		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
1581			__func__, total_length);
1582		return (-1);
1583	}
1584	eh = mtod(mp, struct ether_vlan_header *);
1585
1586	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1587
1588		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));
1589
1590		src = frame_hdr;
1591		ret = qla_tx_tso(ha, mp, &tso_cmd, src);
1592
1593		if (!(ret & ~1)) {
1594			/* find the additional tx_cmd descriptors required */
1595
1596			if (mp->m_flags & M_VLANTAG)
1597				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;
1598
1599			hdr_len = tso_cmd.total_hdr_len;
1600
1601			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1602			bytes = QL_MIN(bytes, hdr_len);
1603
1604			num_tx_cmds++;
1605			hdr_len -= bytes;
1606
1607			while (hdr_len) {
1608				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1609				hdr_len -= bytes;
1610				num_tx_cmds++;
1611			}
1612			hdr_len = tso_cmd.total_hdr_len;
1613
1614			if (ret == 0)
1615				src = (uint8_t *)eh;
1616		} else
1617			return (EINVAL);
1618	} else {
1619		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
1620	}
1621
1622	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
1623		qla_hw_tx_done_locked(ha, txr_idx);
1624		if (hw->tx_cntxt[txr_idx].txr_free <=
1625				(num_tx_cmds + QLA_TX_MIN_FREE)) {
1626        		QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
1627				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
1628				__func__));
1629			return (-1);
1630		}
1631	}
1632
1633	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];
1634
1635        if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {
1636
1637                if (nsegs > ha->hw.max_tx_segs)
1638                        ha->hw.max_tx_segs = nsegs;
1639
1640                bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1641
1642                if (op_code) {
1643                        tx_cmd->flags_opcode = op_code;
1644                        tx_cmd->tcp_hdr_off = tcp_hdr_off;
1645
1646                } else {
1647                        tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
1648                }
1649	} else {
1650		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
1651		ha->tx_tso_frames++;
1652	}
1653
1654	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1655        	tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;
1656	} else if (mp->m_flags & M_VLANTAG) {
1657
1658		if (hdr_len) { /* TSO */
1659			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
1660						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
1661			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
1662		} else
1663			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;
1664
1665		ha->hw_vlan_tx_frames++;
1666		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;
1667	}
1668
1669
1670        tx_cmd->n_bufs = (uint8_t)nsegs;
1671        tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
1672        tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
1673	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);
1674
1675	c_seg = segs;
1676
1677	while (1) {
1678		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {
1679
1680			switch (i) {
1681			case 0:
1682				tx_cmd->buf1_addr = c_seg->ds_addr;
1683				tx_cmd->buf1_len = c_seg->ds_len;
1684				break;
1685
1686			case 1:
1687				tx_cmd->buf2_addr = c_seg->ds_addr;
1688				tx_cmd->buf2_len = c_seg->ds_len;
1689				break;
1690
1691			case 2:
1692				tx_cmd->buf3_addr = c_seg->ds_addr;
1693				tx_cmd->buf3_len = c_seg->ds_len;
1694				break;
1695
1696			case 3:
1697				tx_cmd->buf4_addr = c_seg->ds_addr;
1698				tx_cmd->buf4_len = c_seg->ds_len;
1699				break;
1700			}
1701
1702			c_seg++;
1703			nsegs--;
1704		}
1705
1706		txr_next = hw->tx_cntxt[txr_idx].txr_next =
1707			(hw->tx_cntxt[txr_idx].txr_next + 1) &
1708				(NUM_TX_DESCRIPTORS - 1);
1709		tx_cmd_count++;
1710
1711		if (!nsegs)
1712			break;
1713
1714		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1715		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1716	}
1717
1718	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
1719
1720		/* TSO : Copy the header in the following tx cmd descriptors */
1721
1722		txr_next = hw->tx_cntxt[txr_idx].txr_next;
1723
1724		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1725		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1726
1727		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
1728		bytes = QL_MIN(bytes, hdr_len);
1729
1730		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;
1731
1732		if (mp->m_flags & M_VLANTAG) {
1733			/* first copy the src/dst MAC addresses */
1734			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
1735			dst += (ETHER_ADDR_LEN * 2);
1736			src += (ETHER_ADDR_LEN * 2);
1737
1738			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
1739			dst += 2;
1740			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
1741			dst += 2;
1742
1743			/* bytes left in src header */
1744			hdr_len -= ((ETHER_ADDR_LEN * 2) +
1745					ETHER_VLAN_ENCAP_LEN);
1746
1747			/* bytes left in TxCmd Entry */
1748			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);
1749
1750
1751			bcopy(src, dst, bytes);
1752			src += bytes;
1753			hdr_len -= bytes;
1754		} else {
1755			bcopy(src, dst, bytes);
1756			src += bytes;
1757			hdr_len -= bytes;
1758		}
1759
1760		txr_next = hw->tx_cntxt[txr_idx].txr_next =
1761				(hw->tx_cntxt[txr_idx].txr_next + 1) &
1762					(NUM_TX_DESCRIPTORS - 1);
1763		tx_cmd_count++;
1764
1765		while (hdr_len) {
1766			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
1767			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
1768
1769			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
1770
1771			bcopy(src, tx_cmd, bytes);
1772			src += bytes;
1773			hdr_len -= bytes;
1774
1775			txr_next = hw->tx_cntxt[txr_idx].txr_next =
1776				(hw->tx_cntxt[txr_idx].txr_next + 1) &
1777					(NUM_TX_DESCRIPTORS - 1);
1778			tx_cmd_count++;
1779		}
1780	}
1781
1782	hw->tx_cntxt[txr_idx].txr_free =
1783		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;
1784
1785	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
1786		txr_idx);
1787       	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));
1788
1789	return (0);
1790}
1791
1792
1793static int
1794qla_config_rss_ind_table(qla_host_t *ha)
1795{
1796	uint32_t i, count;
1797	uint8_t rss_ind_tbl[16];
1798
1799	bzero(rss_ind_default_table, sizeof(rss_ind_default_table));
1800
1801
1802	for (i = 0; i < 16; i++) {
1803		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
1804	}
1805
1806	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; i = i + 16) {
1807
1808		if ((i + 16) > Q8_RSS_IND_TBL_MAX_IDX) {
1809			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
1810		} else {
1811			count = 16;
1812		}
1813
1814		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
1815			rss_ind_tbl))
1816			return (-1);
1817	}
1818
1819	return (0);
1820}
1821
1822/*
1823 * Name: ql_del_hw_if
1824 * Function: Destroys the hardware specific entities corresponding to an
1825 *	Ethernet Interface
1826 */
1827void
1828ql_del_hw_if(qla_host_t *ha)
1829{
1830
1831	qla_del_rcv_cntxt(ha);
1832	qla_del_xmt_cntxt(ha);
1833
1834	if (ha->hw.flags.init_intr_cnxt) {
1835		qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 0);
1836		ha->hw.flags.init_intr_cnxt = 0;
1837	}
1838}
1839
1840/*
1841 * Name: ql_init_hw_if
1842 * Function: Creates the hardware specific entities corresponding to an
1843 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
1844 *	corresponding to the interface. Enables LRO if allowed.
1845 */
1846int
1847ql_init_hw_if(qla_host_t *ha)
1848{
1849	device_t	dev;
1850	uint32_t	i;
1851	uint8_t		bcast_mac[6];
1852	qla_rdesc_t	*rdesc;
1853
1854	dev = ha->pci_dev;
1855
1856	for (i = 0; i < ha->hw.num_sds_rings; i++) {
1857		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
1858			ha->hw.dma_buf.sds_ring[i].size);
1859	}
1860	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);
1861
1862	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
1863	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
1864	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
1865
1866	qla_get_nic_partition(ha);
1867
1868	if (qla_config_intr_cntxt(ha, ha->hw.num_sds_rings, 1) == 0) {
1869		ha->hw.flags.init_intr_cnxt = 1;
1870	} else
1871		return (-1);
1872
1873	if (ha->hw.mdump_init == 0) {
1874		qla_minidump_init(ha);
1875	}
1876
1877	/*
1878	 * Create Receive Context
1879	 */
1880	if (qla_init_rcv_cntxt(ha)) {
1881		return (-1);
1882	}
1883
1884	for (i = 0; i < ha->hw.num_rds_rings; i++) {
1885		rdesc = &ha->hw.rds[i];
1886		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
1887		rdesc->rx_in = 0;
1888		/* Update the RDS Producer Indices */
1889		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
1890			rdesc->rx_next);
1891	}
1892
1893
1894	/*
1895	 * Create Transmit Context
1896	 */
1897	if (qla_init_xmt_cntxt(ha)) {
1898		qla_del_rcv_cntxt(ha);
1899		return (-1);
1900	}
1901	ha->hw.max_tx_segs = 0;
1902
1903	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
1904		return(-1);
1905
1906	ha->hw.flags.unicast_mac = 1;
1907
1908	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
1909	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
1910
1911	if (qla_config_mac_addr(ha, bcast_mac, 1))
1912		return (-1);
1913
1914	ha->hw.flags.bcast_mac = 1;
1915
1916	/*
1917	 * program any cached multicast addresses
1918	 */
1919	if (qla_hw_add_all_mcast(ha))
1920		return (-1);
1921
1922	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
1923		return (-1);
1924
1925	if (qla_config_rss_ind_table(ha))
1926		return (-1);
1927
1928	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0))
1929		return (-1);
1930
1931	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
1932		return (-1);
1933
1934	if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
1935		return (-1);
1936
1937	for (i = 0; i < ha->hw.num_sds_rings; i++)
1938		QL_ENABLE_INTERRUPTS(ha, i);
1939
1940	return (0);
1941}
1942
1943static int
1944qla_map_sds_to_rds(qla_host_t *ha)
1945{
1946        device_t                dev = ha->pci_dev;
1947        q80_rq_map_sds_to_rds_t *map_rings;
1948        q80_rsp_add_rcv_rings_t *map_rings_rsp;
1949        uint32_t                i, err;
1950        qla_hw_t                *hw = &ha->hw;
1951
1952        map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
1953        bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));
1954
1955        map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
1956        map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
1957        map_rings->count_version |= Q8_MBX_CMD_VERSION;
1958
1959        map_rings->cntxt_id = hw->rcv_cntxt_id;
1960        map_rings->num_rings = hw->num_sds_rings;
1961
1962        for (i = 0; i < hw->num_sds_rings; i++) {
1963                map_rings->sds_rds[i].sds_ring = i;
1964                map_rings->sds_rds[i].rds_ring = i;
1965        }
1966
1967        if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
1968                (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
1969                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
1970                device_printf(dev, "%s: failed0\n", __func__);
1971                return (-1);
1972        }
1973
1974        map_rings_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
1975
1976        err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);
1977
1978        if (err) {
1979                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
1980                return (-1);
1981        }
1982
1983        return (0);
1984}
1985
1986/*
1987 * Name: qla_init_rcv_cntxt
1988 * Function: Creates the Receive Context.
1989 */
1990static int
1991qla_init_rcv_cntxt(qla_host_t *ha)
1992{
1993	q80_rq_rcv_cntxt_t	*rcntxt;
1994	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
1995	q80_stat_desc_t		*sdesc;
1996	int			i, j;
1997        qla_hw_t		*hw = &ha->hw;
1998	device_t		dev;
1999	uint32_t		err;
2000	uint32_t		rcntxt_sds_rings;
2001	uint32_t		rcntxt_rds_rings;
2002
2003	dev = ha->pci_dev;
2004
2005	/*
2006	 * Create Receive Context
2007	 */
2008
2009	for (i = 0; i < hw->num_sds_rings; i++) {
2010		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];
2011
2012		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
2013			sdesc->data[0] = 1ULL;
2014			sdesc->data[1] = 1ULL;
2015		}
2016	}
2017
2018	rcntxt_sds_rings = hw->num_sds_rings;
2019	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
2020		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;
2021
2022	rcntxt_rds_rings = hw->num_rds_rings;
2023
2024	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
2025		rcntxt_rds_rings = MAX_RDS_RING_SETS;
2026
2027	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
2028	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));
2029
2030	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
2031	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
2032	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2033
2034	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
2035			Q8_RCV_CNTXT_CAP0_LRO |
2036			Q8_RCV_CNTXT_CAP0_HW_LRO |
2037			Q8_RCV_CNTXT_CAP0_RSS |
2038			Q8_RCV_CNTXT_CAP0_SGL_JUMBO |
2039			Q8_RCV_CNTXT_CAP0_SGL_LRO;
2040
2041	if (ha->hw.num_rds_rings > 1) {
2042		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
2043		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
2044	} else
2045		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);
2046
2047	rcntxt->nsds_rings = rcntxt_sds_rings;
2048
2049	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;
2050
2051	rcntxt->rcv_vpid = 0;
2052
2053	for (i = 0; i <  rcntxt_sds_rings; i++) {
2054		rcntxt->sds[i].paddr =
2055			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
2056		rcntxt->sds[i].size =
2057			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2058		if (ha->msix_count == 2) {
2059			rcntxt->sds[i].intr_id =
2060				qla_host_to_le16(hw->intr_id[0]);
2061			rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i));
2062		} else {
2063			rcntxt->sds[i].intr_id =
2064				qla_host_to_le16(hw->intr_id[i]);
2065			rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
2066		}
2067	}
2068
2069	for (i = 0; i <  rcntxt_rds_rings; i++) {
2070		rcntxt->rds[i].paddr_std =
2071			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);
2072		rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2073		rcntxt->rds[i].std_nentries =
2074			qla_host_to_le32(NUM_RX_DESCRIPTORS);
2075	}
2076
2077        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2078		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
2079                ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
2080                device_printf(dev, "%s: failed0\n", __func__);
2081                return (-1);
2082        }
2083
2084        rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;
2085
2086        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2087
2088        if (err) {
2089                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2090                return (-1);
2091        }
2092
2093	for (i = 0; i <  rcntxt_sds_rings; i++) {
2094		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
2095	}
2096
2097	for (i = 0; i <  rcntxt_rds_rings; i++) {
2098		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
2099	}
2100
2101	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;
2102
2103	ha->hw.flags.init_rx_cnxt = 1;
2104
2105	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {
2106		err = qla_add_rcv_rings(ha, MAX_RCNTXT_SDS_RINGS);
2107		if (err)
2108			return -1;
2109	}
2110
2111        if (hw->num_rds_rings > 1) {
2112		err = qla_map_sds_to_rds(ha);
2113		if (err)
2114			return -1;
2115	}
2116
2117	return (0);
2118}
2119
2120static int
2121qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx)
2122{
2123	device_t		dev = ha->pci_dev;
2124	q80_rq_add_rcv_rings_t	*add_rcv;
2125	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
2126	uint32_t		i,j, err;
2127	uint8_t			nsds;
2128        qla_hw_t		*hw = &ha->hw;
2129
2130	nsds = hw->num_sds_rings - MAX_RCNTXT_SDS_RINGS;
2131
2132	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
2133	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));
2134
2135	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
2136	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
2137	add_rcv->count_version |= Q8_MBX_CMD_VERSION;
2138
2139        if (hw->num_rds_rings > 1)
2140                add_rcv->nrds_sets_rings = nsds | (1 << 5);
2141        else
2142                add_rcv->nrds_sets_rings = 0;
2143
2144	add_rcv->nsds_rings = nsds;
2145	add_rcv->cntxt_id = hw->rcv_cntxt_id;
2146
2147        for (i = 0; i <  nsds; i++) {
2148
2149		j = i + sds_idx;
2150
2151                add_rcv->sds[i].paddr =
2152                        qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);
2153
2154                add_rcv->sds[i].size =
2155                        qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
2156
2157                if (ha->msix_count == 2) {
2158                        add_rcv->sds[i].intr_id =
2159                                qla_host_to_le16(hw->intr_id[0]);
2160                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j);
2161                } else {
2162                        add_rcv->sds[i].intr_id =
2163                                qla_host_to_le16(hw->intr_id[j]);
2164                        add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);
2165                }
2166
2167        }
2168        for (i = 0; ((i <  nsds) && (hw->num_rds_rings > 1)); i++) {
2169                j = i + sds_idx;
2170                add_rcv->rds[i].paddr_std =
2171                        qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);
2172                add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);
2173                add_rcv->rds[i].std_nentries =
2174                        qla_host_to_le32(NUM_RX_DESCRIPTORS);
2175        }
2176
2177
2178        if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
2179		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
2180                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
2181                device_printf(dev, "%s: failed0\n", __func__);
2182                return (-1);
2183        }
2184
2185        add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;
2186
2187        err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);
2188
2189        if (err) {
2190                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2191                return (-1);
2192        }
2193
2194	for (i = sds_idx; i < hw->num_sds_rings; i++) {
2195		hw->sds[i].sds_consumer = add_rcv_rsp->sds_cons[(i - sds_idx)];
2196	}
2197	for (i = sds_idx; i < hw->num_rds_rings; i++) {
2198		hw->rds[i].prod_std = add_rcv_rsp->rds[(i - sds_idx)].prod_std;
2199	}
2200	return (0);
2201}
2202
2203/*
2204 * Name: qla_del_rcv_cntxt
2205 * Function: Destroys the Receive Context.
2206 */
2207static void
2208qla_del_rcv_cntxt(qla_host_t *ha)
2209{
2210	device_t			dev = ha->pci_dev;
2211	q80_rcv_cntxt_destroy_t		*rcntxt;
2212	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
2213	uint32_t			err;
2214	uint8_t				bcast_mac[6];
2215
2216	if (!ha->hw.flags.init_rx_cnxt)
2217		return;
2218
2219	if (qla_hw_del_all_mcast(ha))
2220		return;
2221
2222	if (ha->hw.flags.bcast_mac) {
2223
2224		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
2225		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;
2226
2227		if (qla_config_mac_addr(ha, bcast_mac, 0))
2228			return;
2229		ha->hw.flags.bcast_mac = 0;
2230
2231	}
2232
2233	if (ha->hw.flags.unicast_mac) {
2234		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0))
2235			return;
2236		ha->hw.flags.unicast_mac = 0;
2237	}
2238
2239	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
2240	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));
2241
2242	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
2243	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
2244	rcntxt->count_version |= Q8_MBX_CMD_VERSION;
2245
2246	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;
2247
2248        if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
2249		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
2250                ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
2251                device_printf(dev, "%s: failed0\n", __func__);
2252                return;
2253        }
2254        rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;
2255
2256        err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);
2257
2258        if (err) {
2259                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2260        }
2261
2262	ha->hw.flags.init_rx_cnxt = 0;
2263	return;
2264}
2265
2266/*
2267 * Name: qla_init_xmt_cntxt
2268 * Function: Creates the Transmit Context.
2269 */
2270static int
2271qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2272{
2273	device_t		dev;
2274        qla_hw_t		*hw = &ha->hw;
2275	q80_rq_tx_cntxt_t	*tcntxt;
2276	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
2277	uint32_t		err;
2278	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
2279
2280	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2281
2282	dev = ha->pci_dev;
2283
2284	/*
2285	 * Create Transmit Context
2286	 */
2287	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
2288	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));
2289
2290	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
2291	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
2292	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2293
2294	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;
2295
2296	tcntxt->ntx_rings = 1;
2297
2298	tcntxt->tx_ring[0].paddr =
2299		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
2300	tcntxt->tx_ring[0].tx_consumer =
2301		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
2302	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
2303
2304	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]);
2305	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
2306
2307
2308	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
2309	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
2310
2311        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2312		(sizeof (q80_rq_tx_cntxt_t) >> 2),
2313                ha->hw.mbox,
2314		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
2315                device_printf(dev, "%s: failed0\n", __func__);
2316                return (-1);
2317        }
2318        tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;
2319
2320        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2321
2322        if (err) {
2323                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2324		return -1;
2325        }
2326
2327	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
2328	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
2329
2330	return (0);
2331}
2332
2333
2334/*
2335 * Name: qla_del_xmt_cntxt
2336 * Function: Destroys the Transmit Context.
2337 */
2338static int
2339qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
2340{
2341	device_t			dev = ha->pci_dev;
2342	q80_tx_cntxt_destroy_t		*tcntxt;
2343	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
2344	uint32_t			err;
2345
2346	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
2347	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));
2348
2349	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
2350	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
2351	tcntxt->count_version |= Q8_MBX_CMD_VERSION;
2352
2353	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;
2354
2355        if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
2356		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
2357                ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
2358                device_printf(dev, "%s: failed0\n", __func__);
2359                return (-1);
2360        }
2361        tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;
2362
2363        err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);
2364
2365        if (err) {
2366                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
2367		return (-1);
2368        }
2369
2370	return (0);
2371}
2372static void
2373qla_del_xmt_cntxt(qla_host_t *ha)
2374{
2375	uint32_t i;
2376
2377	if (!ha->hw.flags.init_tx_cnxt)
2378		return;
2379
2380	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2381		if (qla_del_xmt_cntxt_i(ha, i))
2382			break;
2383	}
2384	ha->hw.flags.init_tx_cnxt = 0;
2385}
2386
2387static int
2388qla_init_xmt_cntxt(qla_host_t *ha)
2389{
2390	uint32_t i, j;
2391
2392	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2393		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
2394			for (j = 0; j < i; j++)
2395				qla_del_xmt_cntxt_i(ha, j);
2396			return (-1);
2397		}
2398	}
2399	ha->hw.flags.init_tx_cnxt = 1;
2400	return (0);
2401}
2402
2403static int
2404qla_hw_add_all_mcast(qla_host_t *ha)
2405{
2406	int i, nmcast;
2407
2408	nmcast = ha->hw.nmcast;
2409
2410	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2411		if ((ha->hw.mcast[i].addr[0] != 0) ||
2412			(ha->hw.mcast[i].addr[1] != 0) ||
2413			(ha->hw.mcast[i].addr[2] != 0) ||
2414			(ha->hw.mcast[i].addr[3] != 0) ||
2415			(ha->hw.mcast[i].addr[4] != 0) ||
2416			(ha->hw.mcast[i].addr[5] != 0)) {
2417
2418			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) {
2419                		device_printf(ha->pci_dev, "%s: failed\n",
2420					__func__);
2421				return (-1);
2422			}
2423
2424			nmcast--;
2425		}
2426	}
2427	return 0;
2428}
2429
2430static int
2431qla_hw_del_all_mcast(qla_host_t *ha)
2432{
2433	int i, nmcast;
2434
2435	nmcast = ha->hw.nmcast;
2436
2437	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
2438		if ((ha->hw.mcast[i].addr[0] != 0) ||
2439			(ha->hw.mcast[i].addr[1] != 0) ||
2440			(ha->hw.mcast[i].addr[2] != 0) ||
2441			(ha->hw.mcast[i].addr[3] != 0) ||
2442			(ha->hw.mcast[i].addr[4] != 0) ||
2443			(ha->hw.mcast[i].addr[5] != 0)) {
2444
2445			if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0))
2446				return (-1);
2447
2448			nmcast--;
2449		}
2450	}
2451	return 0;
2452}
2453
2454static int
2455qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta)
2456{
2457	int i;
2458
2459	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2460
2461		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
2462			return 0; /* its been already added */
2463	}
2464
2465	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2466
2467		if ((ha->hw.mcast[i].addr[0] == 0) &&
2468			(ha->hw.mcast[i].addr[1] == 0) &&
2469			(ha->hw.mcast[i].addr[2] == 0) &&
2470			(ha->hw.mcast[i].addr[3] == 0) &&
2471			(ha->hw.mcast[i].addr[4] == 0) &&
2472			(ha->hw.mcast[i].addr[5] == 0)) {
2473
2474			if (qla_config_mac_addr(ha, mta, 1))
2475				return (-1);
2476
2477			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
2478			ha->hw.nmcast++;
2479
2480			return 0;
2481		}
2482	}
2483	return 0;
2484}
2485
2486static int
2487qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta)
2488{
2489	int i;
2490
2491	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
2492		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {
2493
2494			if (qla_config_mac_addr(ha, mta, 0))
2495				return (-1);
2496
2497			ha->hw.mcast[i].addr[0] = 0;
2498			ha->hw.mcast[i].addr[1] = 0;
2499			ha->hw.mcast[i].addr[2] = 0;
2500			ha->hw.mcast[i].addr[3] = 0;
2501			ha->hw.mcast[i].addr[4] = 0;
2502			ha->hw.mcast[i].addr[5] = 0;
2503
2504			ha->hw.nmcast--;
2505
2506			return 0;
2507		}
2508	}
2509	return 0;
2510}
2511
2512/*
2513 * Name: ql_hw_set_multi
2514 * Function: Sets the Multicast Addresses provided the host O.S into the
2515 *	hardware (for the given interface)
2516 */
2517int
2518ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt,
2519	uint32_t add_mac)
2520{
2521	int i;
2522	uint8_t *mta = mcast;
2523	int ret = 0;
2524
2525	for (i = 0; i < mcnt; i++) {
2526		if (add_mac) {
2527			ret = qla_hw_add_mcast(ha, mta);
2528			if (ret)
2529				break;
2530		} else {
2531			ret = qla_hw_del_mcast(ha, mta);
2532			if (ret)
2533				break;
2534		}
2535
2536		mta += Q8_MAC_ADDR_LEN;
2537	}
2538	return (ret);
2539}
2540
2541/*
2542 * Name: qla_hw_tx_done_locked
2543 * Function: Handle Transmit Completions
2544 */
2545static void
2546qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
2547{
2548	qla_tx_buf_t *txb;
2549        qla_hw_t *hw = &ha->hw;
2550	uint32_t comp_idx, comp_count = 0;
2551	qla_hw_tx_cntxt_t *hw_tx_cntxt;
2552
2553	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];
2554
2555	/* retrieve index of last entry in tx ring completed */
2556	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));
2557
2558	while (comp_idx != hw_tx_cntxt->txr_comp) {
2559
2560		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
2561
2562		hw_tx_cntxt->txr_comp++;
2563		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
2564			hw_tx_cntxt->txr_comp = 0;
2565
2566		comp_count++;
2567
2568		if (txb->m_head) {
2569			ha->ifp->if_opackets++;
2570
2571			bus_dmamap_sync(ha->tx_tag, txb->map,
2572				BUS_DMASYNC_POSTWRITE);
2573			bus_dmamap_unload(ha->tx_tag, txb->map);
2574			m_freem(txb->m_head);
2575
2576			txb->m_head = NULL;
2577		}
2578	}
2579
2580	hw_tx_cntxt->txr_free += comp_count;
2581	return;
2582}
2583
2584/*
2585 * Name: ql_hw_tx_done
2586 * Function: Handle Transmit Completions
2587 */
2588void
2589ql_hw_tx_done(qla_host_t *ha)
2590{
2591	int i;
2592	uint32_t flag = 0;
2593
2594	if (!mtx_trylock(&ha->tx_lock)) {
2595       		QL_DPRINT8(ha, (ha->pci_dev,
2596			"%s: !mtx_trylock(&ha->tx_lock)\n", __func__));
2597		return;
2598	}
2599	for (i = 0; i < ha->hw.num_tx_rings; i++) {
2600		qla_hw_tx_done_locked(ha, i);
2601		if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1))
2602			flag = 1;
2603	}
2604
2605	if (!flag)
2606		ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2607
2608	QLA_TX_UNLOCK(ha);
2609	return;
2610}
2611
2612void
2613ql_update_link_state(qla_host_t *ha)
2614{
2615	uint32_t link_state;
2616	uint32_t prev_link_state;
2617
2618	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2619		ha->hw.link_up = 0;
2620		return;
2621	}
2622	link_state = READ_REG32(ha, Q8_LINK_STATE);
2623
2624	prev_link_state =  ha->hw.link_up;
2625
2626	if (ha->pci_func == 0)
2627		ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
2628	else
2629		ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 1 : 0);
2630
2631	if (prev_link_state !=  ha->hw.link_up) {
2632		if (ha->hw.link_up) {
2633			if_link_state_change(ha->ifp, LINK_STATE_UP);
2634		} else {
2635			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
2636		}
2637	}
2638	return;
2639}
2640
2641void
2642ql_hw_stop_rcv(qla_host_t *ha)
2643{
2644	int i, done, count = 100;
2645
2646	while (count--) {
2647		done = 1;
2648		for (i = 0; i < ha->hw.num_sds_rings; i++) {
2649			if (ha->hw.sds[i].rcv_active)
2650				done = 0;
2651		}
2652		if (done)
2653			break;
2654		else
2655			qla_mdelay(__func__, 10);
2656	}
2657	if (!count)
2658		device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__);
2659
2660	return;
2661}
2662
2663int
2664ql_hw_check_health(qla_host_t *ha)
2665{
2666	uint32_t val;
2667
2668	ha->hw.health_count++;
2669
2670	if (ha->hw.health_count < 1000)
2671		return 0;
2672
2673	ha->hw.health_count = 0;
2674
2675	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
2676
2677	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
2678		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
2679		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
2680			__func__, val);
2681		return -1;
2682	}
2683
2684	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
2685
2686	if ((val != ha->hw.hbeat_value) &&
2687		(!(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE)))) {
2688		ha->hw.hbeat_value = val;
2689		return 0;
2690	}
2691	device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n",
2692		__func__, val);
2693
2694	return -1;
2695}
2696
2697static int
2698qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
2699{
2700	uint32_t			err;
2701	device_t			dev = ha->pci_dev;
2702	q80_config_md_templ_size_t	*md_size;
2703	q80_config_md_templ_size_rsp_t	*md_size_rsp;
2704
2705	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
2706	bzero(md_size, sizeof(q80_config_md_templ_size_t));
2707
2708	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
2709	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
2710	md_size->count_version |= Q8_MBX_CMD_VERSION;
2711
2712	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
2713		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
2714		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
2715
2716		device_printf(dev, "%s: failed\n", __func__);
2717
2718		return (-1);
2719	}
2720
2721	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
2722
2723	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
2724
2725        if (err) {
2726		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2727		return(-1);
2728        }
2729
2730	*size = md_size_rsp->templ_size;
2731
2732	return (0);
2733}
2734
2735static int
2736qla_get_minidump_template(qla_host_t *ha)
2737{
2738	uint32_t			err;
2739	device_t			dev = ha->pci_dev;
2740	q80_config_md_templ_cmd_t	*md_templ;
2741	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
2742
2743	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
2744	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
2745
2746	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
2747	md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2);
2748	md_templ->count_version |= Q8_MBX_CMD_VERSION;
2749
2750	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
2751	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
2752
2753	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
2754		(sizeof(q80_config_md_templ_cmd_t) >> 2),
2755		 ha->hw.mbox,
2756		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
2757
2758		device_printf(dev, "%s: failed\n", __func__);
2759
2760		return (-1);
2761	}
2762
2763	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
2764
2765	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
2766
2767	if (err) {
2768		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
2769		return (-1);
2770	}
2771
2772	return (0);
2773
2774}
2775
2776static int
2777qla_minidump_init(qla_host_t *ha)
2778{
2779	int		ret;
2780	uint32_t	template_size = 0;
2781	device_t	dev = ha->pci_dev;
2782
2783	/*
2784	 * Get Minidump Template Size
2785 	 */
2786	ret = qla_get_minidump_tmplt_size(ha, &template_size);
2787
2788	if (ret || (template_size == 0)) {
2789		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
2790			template_size);
2791		return (-1);
2792	}
2793
2794	/*
2795	 * Allocate Memory for Minidump Template
2796	 */
2797
2798	ha->hw.dma_buf.minidump.alignment = 8;
2799	ha->hw.dma_buf.minidump.size = template_size;
2800
2801	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
2802
2803		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
2804
2805		return (-1);
2806	}
2807	ha->hw.dma_buf.flags.minidump = 1;
2808
2809	/*
2810	 * Retrieve Minidump Template
2811	 */
2812	ret = qla_get_minidump_template(ha);
2813
2814	if (ret) {
2815		qla_minidump_free(ha);
2816	} else {
2817		ha->hw.mdump_init = 1;
2818	}
2819
2820	return (ret);
2821}
2822
2823
2824static void
2825qla_minidump_free(qla_host_t *ha)
2826{
2827	ha->hw.mdump_init = 0;
2828	if (ha->hw.dma_buf.flags.minidump) {
2829		ha->hw.dma_buf.flags.minidump = 0;
2830		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
2831	}
2832	return;
2833}
2834
2835void
2836ql_minidump(qla_host_t *ha)
2837{
2838	uint32_t delay = 6000;
2839
2840	if (!ha->hw.mdump_init)
2841		return;
2842
2843	if (!ha->hw.mdump_active)
2844		return;
2845
2846	if (ha->hw.mdump_active == 1) {
2847		ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
2848		ha->hw.mdump_start = 1;
2849	}
2850
2851	while (delay-- && ha->hw.mdump_active) {
2852		qla_mdelay(__func__, 100);
2853	}
2854	ha->hw.mdump_start = 0;
2855	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
2856
2857	return;
2858}
2859