/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: ql_hw.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 * Content: Contains Hardware dependent functions
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_hw.c 324325 2017-10-05 18:38:42Z davidcs $");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_ver.h"
#include "ql_glbl.h"
#include "ql_dbg.h"
#include "ql_minidump.h"

/*
 * Static Functions
 */

static void qla_del_rcv_cntxt(qla_host_t *ha);
static int qla_init_rcv_cntxt(qla_host_t *ha);
static void qla_del_xmt_cntxt(qla_host_t *ha);
static int qla_init_xmt_cntxt(qla_host_t *ha);
static int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause);
static int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx,
	uint32_t num_intrs, uint32_t create);
static int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id);
static int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id,
	int tenable, int rcv);
static int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode);
static int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id);

static int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd,
		uint8_t *hdr);
static int qla_hw_add_all_mcast(qla_host_t *ha);
static int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds);

static int qla_init_nic_func(qla_host_t *ha);
static int qla_stop_nic_func(qla_host_t *ha);
static int qla_query_fw_dcbx_caps(qla_host_t *ha);
static int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits);
static int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits);
static int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode);
static int qla_get_cam_search_mode(qla_host_t *ha);

static void ql_minidump_free(qla_host_t *ha);

#ifdef QL_DBG

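/*
 * Name: qla_stop_pegs
 * Function: writes 1 into the Q8_CRB_PEG_* control registers to halt the
 *	firmware PEG processors. Debug builds only; used for error injection.
 */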
static void
qla_stop_pegs(qla_host_t *ha)
{
        uint32_t val = 1;

        ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0);
        ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0);
        device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__);
}

static int
qla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			qla_stop_pegs(ha);
			QLA_UNLOCK(ha, __func__);
		}
	}

	return (err);
}
#endif /* #ifdef QL_DBG */

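/*
 * Name: qla_validate_set_port_cfg_bit
 * Function: validates the port_cfg sysctl value: bits 0-3 (DCBX enable)
 *	must be 0 or 1; bits 4-7 (pause type) and bits 8-11 (standard
 *	pause direction) must each be 0, 1 or 2.
 */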
static int
qla_validate_set_port_cfg_bit(uint32_t bits)
{
        if ((bits & 0xF) > 1)
                return (-1);

        if (((bits >> 4) & 0xF) > 2)
                return (-1);

        if (((bits >> 8) & 0xF) > 2)
                return (-1);

        return (0);
}

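/*
 * Name: qla_sysctl_port_cfg
 * Function: sysctl handler; sets the port configuration (DCBX and pause
 *	frame settings) when handed a valid value, otherwise reads the
 *	current configuration back from the firmware.
 */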
static int
qla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
        qla_host_t *ha;
        uint32_t cfg_bits;

        err = sysctl_handle_int(oidp, &ret, 0, req);

        if (err || !req->newptr)
                return (err);

	ha = (qla_host_t *)arg1;

        if (qla_validate_set_port_cfg_bit((uint32_t)ret) == 0) {

                err = qla_get_port_config(ha, &cfg_bits);

                if (err)
                        goto qla_sysctl_set_port_cfg_exit;

                if (ret & 0x1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE;
                } else {
                        cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK;

                if ((ret & 0xF) == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED;
                } else if ((ret & 0xF) == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM;
                }

                ret = ret >> 4;
                cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK;

                if (ret == 0) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV;
                } else if (ret == 1) {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT;
                } else {
                        cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV;
                }

		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                	err = qla_set_port_config(ha, cfg_bits);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}
        } else {
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                	err = qla_get_port_config(ha, &cfg_bits);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}
        }

qla_sysctl_set_port_cfg_exit:
        return (err);
}

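/*
 * Name: qla_sysctl_set_cam_search_mode
 * Function: sysctl handler; sets the CAM search mode to either internal
 *	or auto.
 */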
static int
qla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qla_host_t *)arg1;

	if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) ||
		(ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) {

		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			err = qla_set_cam_search_mode(ha, (uint32_t)ret);
			QLA_UNLOCK(ha, __func__);
		} else {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
		}

	} else {
		device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret);
	}

	return (err);
}

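/*
 * Name: qla_sysctl_get_cam_search_mode
 * Function: sysctl handler; queries the current CAM search mode from
 *	the firmware.
 */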
static int
qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
		return (err);

	ha = (qla_host_t *)arg1;
	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
		err = qla_get_cam_search_mode(ha);
		QLA_UNLOCK(ha, __func__);
	} else {
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
	}

	return (err);
}

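/*
 * Name: qlnx_add_hw_mac_stats_sysctls
 * Function: adds read-only sysctls (under stats_hw_mac) for the
 *	hardware maintained MAC statistics.
 */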
static void
qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac",
                        CTLFLAG_RD, NULL, "stats_hw_mac");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_frames",
                CTLFLAG_RD, &ha->hw.mac.xmt_frames,
                "xmt_frames");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "xmt_bytes",
                CTLFLAG_RD, &ha->hw.mac.xmt_bytes,
275                "xmt_frames");
276
277        SYSCTL_ADD_QUAD(ctx, children,
278                OID_AUTO, "xmt_mcast_pkts",
279                CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts,
280                "xmt_mcast_pkts");
281
282        SYSCTL_ADD_QUAD(ctx, children,
283                OID_AUTO, "xmt_bcast_pkts",
284                CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts,
285                "xmt_bcast_pkts");
286
287        SYSCTL_ADD_QUAD(ctx, children,
288                OID_AUTO, "xmt_pause_frames",
289                CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames,
290                "xmt_pause_frames");
291
292        SYSCTL_ADD_QUAD(ctx, children,
293                OID_AUTO, "xmt_cntrl_pkts",
294                CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts,
295                "xmt_cntrl_pkts");
296
297        SYSCTL_ADD_QUAD(ctx, children,
298                OID_AUTO, "xmt_pkt_lt_64bytes",
299                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes,
300                "xmt_pkt_lt_64bytes");
301
302        SYSCTL_ADD_QUAD(ctx, children,
303                OID_AUTO, "xmt_pkt_lt_127bytes",
304                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes,
305                "xmt_pkt_lt_127bytes");
306
307        SYSCTL_ADD_QUAD(ctx, children,
308                OID_AUTO, "xmt_pkt_lt_255bytes",
309                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes,
310                "xmt_pkt_lt_255bytes");
311
312        SYSCTL_ADD_QUAD(ctx, children,
313                OID_AUTO, "xmt_pkt_lt_511bytes",
314                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes,
315                "xmt_pkt_lt_511bytes");
316
317        SYSCTL_ADD_QUAD(ctx, children,
318                OID_AUTO, "xmt_pkt_lt_1023bytes",
319                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes,
320                "xmt_pkt_lt_1023bytes");
321
322        SYSCTL_ADD_QUAD(ctx, children,
323                OID_AUTO, "xmt_pkt_lt_1518bytes",
324                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes,
325                "xmt_pkt_lt_1518bytes");
326
327        SYSCTL_ADD_QUAD(ctx, children,
328                OID_AUTO, "xmt_pkt_gt_1518bytes",
329                CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes,
330                "xmt_pkt_gt_1518bytes");
331
332        SYSCTL_ADD_QUAD(ctx, children,
333                OID_AUTO, "rcv_frames",
334                CTLFLAG_RD, &ha->hw.mac.rcv_frames,
335                "rcv_frames");
336
337        SYSCTL_ADD_QUAD(ctx, children,
338                OID_AUTO, "rcv_bytes",
339                CTLFLAG_RD, &ha->hw.mac.rcv_bytes,
340                "rcv_bytes");
341
342        SYSCTL_ADD_QUAD(ctx, children,
343                OID_AUTO, "rcv_mcast_pkts",
344                CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts,
345                "rcv_mcast_pkts");
346
347        SYSCTL_ADD_QUAD(ctx, children,
348                OID_AUTO, "rcv_bcast_pkts",
349                CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts,
350                "rcv_bcast_pkts");
351
352        SYSCTL_ADD_QUAD(ctx, children,
353                OID_AUTO, "rcv_pause_frames",
354                CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames,
355                "rcv_pause_frames");
356
357        SYSCTL_ADD_QUAD(ctx, children,
358                OID_AUTO, "rcv_cntrl_pkts",
359                CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts,
360                "rcv_cntrl_pkts");
361
362        SYSCTL_ADD_QUAD(ctx, children,
363                OID_AUTO, "rcv_pkt_lt_64bytes",
364                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes,
365                "rcv_pkt_lt_64bytes");
366
367        SYSCTL_ADD_QUAD(ctx, children,
368                OID_AUTO, "rcv_pkt_lt_127bytes",
369                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes,
370                "rcv_pkt_lt_127bytes");
371
372        SYSCTL_ADD_QUAD(ctx, children,
373                OID_AUTO, "rcv_pkt_lt_255bytes",
374                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes,
375                "rcv_pkt_lt_255bytes");
376
377        SYSCTL_ADD_QUAD(ctx, children,
378                OID_AUTO, "rcv_pkt_lt_511bytes",
379                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes,
380                "rcv_pkt_lt_511bytes");
381
382        SYSCTL_ADD_QUAD(ctx, children,
383                OID_AUTO, "rcv_pkt_lt_1023bytes",
384                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes,
385                "rcv_pkt_lt_1023bytes");
386
387        SYSCTL_ADD_QUAD(ctx, children,
388                OID_AUTO, "rcv_pkt_lt_1518bytes",
389                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes,
390                "rcv_pkt_lt_1518bytes");
391
392        SYSCTL_ADD_QUAD(ctx, children,
393                OID_AUTO, "rcv_pkt_gt_1518bytes",
394                CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes,
395                "rcv_pkt_gt_1518bytes");
396
397        SYSCTL_ADD_QUAD(ctx, children,
398                OID_AUTO, "rcv_len_error",
399                CTLFLAG_RD, &ha->hw.mac.rcv_len_error,
400                "rcv_len_error");
401
402        SYSCTL_ADD_QUAD(ctx, children,
403                OID_AUTO, "rcv_len_small",
404                CTLFLAG_RD, &ha->hw.mac.rcv_len_small,
405                "rcv_len_small");
406
407        SYSCTL_ADD_QUAD(ctx, children,
408                OID_AUTO, "rcv_len_large",
409                CTLFLAG_RD, &ha->hw.mac.rcv_len_large,
410                "rcv_len_large");
411
412        SYSCTL_ADD_QUAD(ctx, children,
413                OID_AUTO, "rcv_jabber",
414                CTLFLAG_RD, &ha->hw.mac.rcv_jabber,
415                "rcv_jabber");
416
417        SYSCTL_ADD_QUAD(ctx, children,
418                OID_AUTO, "rcv_dropped",
419                CTLFLAG_RD, &ha->hw.mac.rcv_dropped,
420                "rcv_dropped");
421
422        SYSCTL_ADD_QUAD(ctx, children,
423                OID_AUTO, "fcs_error",
424                CTLFLAG_RD, &ha->hw.mac.fcs_error,
425                "fcs_error");
426
427        SYSCTL_ADD_QUAD(ctx, children,
428                OID_AUTO, "align_error",
429                CTLFLAG_RD, &ha->hw.mac.align_error,
430                "align_error");
431
432        SYSCTL_ADD_QUAD(ctx, children,
433                OID_AUTO, "eswitched_frames",
434                CTLFLAG_RD, &ha->hw.mac.eswitched_frames,
435                "eswitched_frames");
436
437        SYSCTL_ADD_QUAD(ctx, children,
438                OID_AUTO, "eswitched_bytes",
439                CTLFLAG_RD, &ha->hw.mac.eswitched_bytes,
440                "eswitched_bytes");
441
442        SYSCTL_ADD_QUAD(ctx, children,
443                OID_AUTO, "eswitched_mcast_frames",
444                CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames,
445                "eswitched_mcast_frames");
446
447        SYSCTL_ADD_QUAD(ctx, children,
448                OID_AUTO, "eswitched_bcast_frames",
449                CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames,
450                "eswitched_bcast_frames");
451
452        SYSCTL_ADD_QUAD(ctx, children,
453                OID_AUTO, "eswitched_ucast_frames",
454                CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames,
455                "eswitched_ucast_frames");
456
457        SYSCTL_ADD_QUAD(ctx, children,
458                OID_AUTO, "eswitched_err_free_frames",
459                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames,
460                "eswitched_err_free_frames");
461
462        SYSCTL_ADD_QUAD(ctx, children,
463                OID_AUTO, "eswitched_err_free_bytes",
464                CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes,
465                "eswitched_err_free_bytes");
466
467	return;
468}
469
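/*
 * Name: qlnx_add_hw_rcv_stats_sysctls
 * Function: adds read-only sysctls (under stats_hw_rcv) for the
 *	hardware maintained receive statistics.
 */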
static void
qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid       *ctx_oid;

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv",
                        CTLFLAG_RD, NULL, "stats_hw_rcv");
        children = SYSCTL_CHILDREN(ctx_oid);

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_bytes",
                CTLFLAG_RD, &ha->hw.rcv.total_bytes,
                "total_bytes");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "total_pkts",
                CTLFLAG_RD, &ha->hw.rcv.total_pkts,
                "total_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count,
                "lro_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "sw_pkt_count",
                CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count,
                "sw_pkt_count");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "ip_chksum_err",
                CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err,
                "ip_chksum_err");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_wo_acntxts",
                CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts,
                "pkts_wo_acntxts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_card",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card,
                "pkts_dropped_no_sds_card");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_sds_host",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host,
                "pkts_dropped_no_sds_host");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "oversized_pkts",
                CTLFLAG_RD, &ha->hw.rcv.oversized_pkts,
                "oversized_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_dropped_no_rds",
                CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds,
                "pkts_dropped_no_rds");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "unxpctd_mcast_pkts",
                CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts,
                "unxpctd_mcast_pkts");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "re1_fbq_error",
                CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error,
                "re1_fbq_error");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "invalid_mac_addr",
                CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr,
                "invalid_mac_addr");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_trys",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys,
                "rds_prime_trys");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "rds_prime_success",
                CTLFLAG_RD, &ha->hw.rcv.rds_prime_success,
                "rds_prime_success");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_added",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_added,
                "lro_flows_added");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_deleted",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted,
                "lro_flows_deleted");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "lro_flows_active",
                CTLFLAG_RD, &ha->hw.rcv.lro_flows_active,
                "lro_flows_active");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_droped_unknown",
                CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown,
                "pkts_droped_unknown");

        SYSCTL_ADD_QUAD(ctx, children,
                OID_AUTO, "pkts_cnt_oversized",
                CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized,
                "pkts_cnt_oversized");

	return;
}

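/*
 * Name: qlnx_add_hw_xmt_stats_sysctls
 * Function: adds read-only sysctls (under stats_hw_xmt), one node per
 *	Tx ring, for the hardware maintained transmit statistics.
 */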
static void
qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt",
                        CTLFLAG_RD, NULL, "stats_hw_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                /* Tx Related */

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "total_bytes",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_bytes,
                        "total_bytes");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "total_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].total_pkts,
                        "total_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "errors",
                        CTLFLAG_RD, &ha->hw.xmt[i].errors,
                        "errors");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "pkts_dropped",
                        CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped,
                        "pkts_dropped");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "switch_pkts",
                        CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts,
                        "switch_pkts");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "num_buffers",
                        CTLFLAG_RD, &ha->hw.xmt[i].num_buffers,
                        "num_buffers");
	}

	return;
}

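/*
 * Name: qlnx_add_hw_stats_sysctls
 * Function: adds the sysctls for all hardware maintained statistics.
 */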
static void
qlnx_add_hw_stats_sysctls(qla_host_t *ha)
{
	qlnx_add_hw_mac_stats_sysctls(ha);
	qlnx_add_hw_rcv_stats_sysctls(ha);
	qlnx_add_hw_xmt_stats_sysctls(ha);

	return;
}

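/*
 * Name: qlnx_add_drvr_sds_stats
 * Function: adds sysctls (under stats_drvr_sds), one node per Status
 *	Descriptor Ring, for the driver maintained statistics.
 */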
static void
qlnx_add_drvr_sds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds",
                        CTLFLAG_RD, NULL, "stats_drvr_sds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_sds_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "intr_count",
                        CTLFLAG_RD, &ha->hw.sds[i].intr_count,
                        "intr_count");

                SYSCTL_ADD_UINT(ctx, node_children,
			OID_AUTO, "rx_free",
                        CTLFLAG_RD, &ha->hw.sds[i].rx_free,
			ha->hw.sds[i].rx_free, "rx_free");
	}

	return;
}
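
/*
 * Name: qlnx_add_drvr_rds_stats
 * Function: adds sysctls (under stats_drvr_rds), one node per Receive
 *	Descriptor Ring, for the driver maintained statistics.
 */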
static void
qlnx_add_drvr_rds_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds",
                        CTLFLAG_RD, NULL, "stats_drvr_rds");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_rds_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "count",
                        CTLFLAG_RD, &ha->hw.rds[i].count,
                        "count");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_pkt_count",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count,
                        "lro_pkt_count");

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "lro_bytes",
                        CTLFLAG_RD, &ha->hw.rds[i].lro_bytes,
                        "lro_bytes");
	}

	return;
}

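/*
 * Name: qlnx_add_drvr_tx_stats
 * Function: adds sysctls (under stats_drvr_xmt), one node per Transmit
 *	Ring, for the driver maintained statistics.
 */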
static void
qlnx_add_drvr_tx_stats(qla_host_t *ha)
{
        struct sysctl_ctx_list  *ctx;
        struct sysctl_oid_list  *children;
        struct sysctl_oid_list  *node_children;
        struct sysctl_oid       *ctx_oid;
        int                     i;
        uint8_t                 name_str[16];

        ctx = device_get_sysctl_ctx(ha->pci_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt",
                        CTLFLAG_RD, NULL, "stats_drvr_xmt");
        children = SYSCTL_CHILDREN(ctx_oid);

        for (i = 0; i < ha->hw.num_tx_rings; i++) {

                bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
                snprintf(name_str, sizeof(name_str), "%d", i);

                ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
                        CTLFLAG_RD, NULL, name_str);
                node_children = SYSCTL_CHILDREN(ctx_oid);

                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "count",
                        CTLFLAG_RD, &ha->tx_ring[i].count,
                        "count");

#ifdef QL_ENABLE_ISCSI_TLV
                SYSCTL_ADD_QUAD(ctx, node_children,
			OID_AUTO, "iscsi_pkt_count",
                        CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
                        "iscsi_pkt_count");
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */
	}

	return;
}

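/*
 * Name: qlnx_add_drvr_stats_sysctls
 * Function: adds the sysctls for all driver maintained statistics.
 */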
static void
qlnx_add_drvr_stats_sysctls(qla_host_t *ha)
{
	qlnx_add_drvr_sds_stats(ha);
	qlnx_add_drvr_rds_stats(ha);
	qlnx_add_drvr_tx_stats(ha);
	return;
}

/*
 * Name: ql_hw_add_sysctls
 * Function: Add P3Plus specific sysctls
 */
void
ql_hw_add_sysctls(qla_host_t *ha)
{
        device_t	dev;

        dev = ha->pci_dev;

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings,
		ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings,
		ha->hw.num_sds_rings, "Number of Status Descriptor Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings,
		ha->hw.num_tx_rings, "Number of Transmit Rings");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx,
		ha->txr_idx, "Tx Ring Used");

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs,
		ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt");

	ha->hw.sds_cidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres,
		ha->hw.sds_cidx_thres,
		"Number of SDS entries to process before updating"
		" SDS Ring Consumer Index");

	ha->hw.rds_pidx_thres = 32;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres,
		ha->hw.rds_pidx_thres,
		"Number of Rcv Rings Entries to post before updating"
		" RDS Ring Producer Index");

        ha->hw.rcv_intr_coalesce = (3 << 16) | 256;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW,
                &ha->hw.rcv_intr_coalesce,
                ha->hw.rcv_intr_coalesce,
                "Rcv Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        ha->hw.xmt_intr_coalesce = (64 << 16) | 64;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW,
                &ha->hw.xmt_intr_coalesce,
                ha->hw.xmt_intr_coalesce,
                "Xmt Intr Coalescing Parameters\n"
                "\tbits 15:0 max packets\n"
                "\tbits 31:16 max micro-seconds to wait\n"
                "\tplease run\n"
                "\tifconfig <if> down && ifconfig <if> up\n"
                "\tto take effect\n");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_port_cfg, "I",
                        "Set Port Configuration if values below "
                        "otherwise Get Port Configuration\n"
                        "\tBits 0-3 : 1 = DCBX Enable; 0 = DCBX Disable\n"
                        "\tBits 4-7 : 0 = no pause; 1 = std; 2 = ppm\n"
                        "\tBits 8-11: std pause cfg; 0 = xmt and rcv;"
                        " 1 = xmt only; 2 = rcv only;\n"
                );

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_set_cam_search_mode, "I",
			"Set CAM Search Mode\n"
			"\t 1 = search mode internal\n"
			"\t 2 = search mode auto\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_cam_search_mode, "I",
			"Get CAM Search Mode\n"
			"\t 1 = search mode internal\n"
			"\t 2 = search mode auto\n");

        ha->hw.enable_9kb = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb,
                ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000");

        ha->hw.enable_hw_lro = 1;

        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro,
                ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true\n"
		"\t 1 : Hardware LRO if LRO is enabled\n"
		"\t 0 : Software LRO if LRO is enabled\n"
		"\t Any change requires ifconfig down/up to take effect\n"
		"\t Note that LRO may be turned off/on via ifconfig\n");

	ha->hw.mdump_active = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active,
		ha->hw.mdump_active,
		"Minidump retrieval is Active");

	ha->hw.mdump_done = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "mdump_done", CTLFLAG_RW,
		&ha->hw.mdump_done, ha->hw.mdump_done,
		"Minidump has been done and available for retrieval");

	ha->hw.mdump_capture_mask = 0xF;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "minidump_capture_mask", CTLFLAG_RW,
		&ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask,
		"Minidump capture mask");

#ifdef QL_DBG

	ha->err_inject = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "err_inject",
                CTLFLAG_RW, &ha->err_inject, ha->err_inject,
                "Error to be injected\n"
                "\t\t\t 0: No Errors\n"
                "\t\t\t 1: rcv: rxb struct invalid\n"
                "\t\t\t 2: rcv: mp == NULL\n"
                "\t\t\t 3: lro: rxb struct invalid\n"
                "\t\t\t 4: lro: mp == NULL\n"
                "\t\t\t 5: rcv: num handles invalid\n"
                "\t\t\t 6: reg: indirect reg rd_wr failure\n"
                "\t\t\t 7: ocm: offchip memory rd_wr failure\n"
                "\t\t\t 8: mbx: mailbox command failure\n"
                "\t\t\t 9: heartbeat failure\n"
                "\t\t\t A: temperature failure\n"
		"\t\t\t 11: m_getcl or m_getjcl failure\n");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW,
                (void *)ha, 0,
                qla_sysctl_stop_pegs, "I", "Peg Stop");

#endif /* #ifdef QL_DBG */

        ha->hw.user_pri_nic = 0;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic,
                ha->hw.user_pri_nic,
                "VLAN Tag User Priority for Normal Ethernet Packets");

        ha->hw.user_pri_iscsi = 4;
        SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
                SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi,
                ha->hw.user_pri_iscsi,
                "VLAN Tag User Priority for iSCSI Packets");

	qlnx_add_hw_stats_sysctls(ha);
	qlnx_add_drvr_stats_sysctls(ha);

	return;
}

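/*
 * Name: ql_hw_link_status
 * Function: reports the current link state, duplex, autonegotiation,
 *	link speed and SFP module type on the console.
 */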
void
ql_hw_link_status(qla_host_t *ha)
{
	device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui);

	if (ha->hw.link_up) {
		device_printf(ha->pci_dev, "link Up\n");
	} else {
		device_printf(ha->pci_dev, "link Down\n");
	}

	if (ha->hw.flags.fduplex) {
		device_printf(ha->pci_dev, "Full Duplex\n");
	} else {
		device_printf(ha->pci_dev, "Half Duplex\n");
	}

	if (ha->hw.flags.autoneg) {
		device_printf(ha->pci_dev, "Auto Negotiation Enabled\n");
	} else {
		device_printf(ha->pci_dev, "Auto Negotiation Disabled\n");
	}

	switch (ha->hw.link_speed) {
	case 0x710:
		device_printf(ha->pci_dev, "link speed\t\t 10Gbps\n");
		break;

	case 0x3E8:
		device_printf(ha->pci_dev, "link speed\t\t 1Gbps\n");
		break;

	case 0x64:
		device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n");
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	switch (ha->hw.module_type) {

	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			" (Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

        if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
        	ha->hw.dma_buf.flags.sds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
        	ha->hw.dma_buf.flags.rds_ring = 0;
	}

        if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
        	ha->hw.dma_buf.flags.tx_ring = 0;
	}
	ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
ql_alloc_dma(qla_host_t *ha)
{
        device_t                dev;
	uint32_t		i, j, size, tx_ring_size;
	qla_hw_t		*hw;
	qla_hw_tx_cntxt_t	*tx_cntxt;
	uint8_t			*vaddr;
	bus_addr_t		paddr;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

        if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
                device_printf(dev, "%s: tx ring alloc failed\n", __func__);
                goto ql_alloc_dma_exit;
        }

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

        ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < hw->num_rds_rings; i++) {

		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return 0;

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return -1;
}

#define Q8_MBX_MSEC_DELAY	5000

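/*
 * Name: qla_mbx_cmd
 * Function: issues a mailbox command to the firmware; writes n_hmbox
 *	32bit words from h_mbox into the host mailbox registers and reads
 *	n_fwmbox words of response back into fw_mbox. Polls in ~1ms steps,
 *	busy-waiting (DELAY) when no_pause is set and sleeping otherwise;
 *	on timeout it sets qla_initiate_recovery and returns non-zero.
 */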
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
	uint32_t i;
	uint32_t data;
	int ret = 0;

	if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
		ret = -3;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	if (no_pause)
		i = 1000;
	else
		i = Q8_MBX_MSEC_DELAY;

	while (i) {
		data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
		if (data == 0)
			break;
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}

	if (i == 0) {
		device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -1;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_hmbox; i++) {
		WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
		h_mbox++;
	}

	WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);

	i = Q8_MBX_MSEC_DELAY;
	while (i) {
		data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

		if ((data & 0x3) == 1) {
			data = READ_REG32(ha, Q8_FW_MBOX0);
			if ((data & 0xF000) != 0x8000)
				break;
		}
		if (no_pause) {
			DELAY(1000);
		} else {
			qla_mdelay(__func__, 1);
		}
		i--;
	}
	if (i == 0) {
		device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
			__func__, data);
		ret = -2;
		ha->qla_initiate_recovery = 1;
		goto exit_qla_mbx_cmd;
	}

	for (i = 0; i < n_fwmbox; i++) {
		*fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
	}

	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
	return (ret);
}

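/*
 * Name: qla_get_nic_partition
 * Function: retrieves the NIC partition configuration via the
 *	Q8_MBX_GET_NIC_PARTITION mailbox command; optionally reports
 *	whether 9KByte receive buffers are supported and the number of
 *	receive queues available.
 */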
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
	uint32_t *num_rcvq)
{
	uint32_t *mbox, err;
	device_t dev = ha->pci_dev;

	bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

	mbox = ha->hw.mbox;

	mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

	if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	err = mbox[0] >> 25;

	if (supports_9kb != NULL) {
		if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
			*supports_9kb = 1;
		else
			*supports_9kb = 0;
	}

	if (num_rcvq != NULL)
		*num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

	if ((err != 1) && (err != 0)) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

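/*
 * Name: qla_config_intr_cntxt
 * Function: creates or deletes MSI-X interrupt contexts for num_intrs
 *	interrupts starting at start_idx; on create, records the firmware
 *	assigned interrupt ids and sources.
 */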
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
	uint32_t create)
{
	uint32_t i, err;
	device_t dev = ha->pci_dev;
	q80_config_intr_t *c_intr;
	q80_config_intr_rsp_t *c_intr_rsp;

	c_intr = (q80_config_intr_t *)ha->hw.mbox;
	bzero(c_intr, (sizeof (q80_config_intr_t)));

	c_intr->opcode = Q8_MBX_CONFIG_INTR;

	c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
	c_intr->count_version |= Q8_MBX_CMD_VERSION;

	c_intr->nentries = num_intrs;

	for (i = 0; i < num_intrs; i++) {
		if (create) {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
			c_intr->intr[i].msix_index = start_idx + 1 + i;
		} else {
			c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
			c_intr->intr[i].msix_index =
				ha->hw.intr_id[(start_idx + i)];
		}

		c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
		(sizeof (q80_config_intr_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
			c_intr_rsp->nentries);

		for (i = 0; i < c_intr_rsp->nentries; i++) {
			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
				__func__, i,
				c_intr_rsp->intr[i].status,
				c_intr_rsp->intr[i].intr_id,
				c_intr_rsp->intr[i].intr_src);
		}

		return (-1);
	}

	for (i = 0; ((i < num_intrs) && create); i++) {
		if (!c_intr_rsp->intr[i].status) {
			ha->hw.intr_id[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_id;
			ha->hw.intr_src[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_src;
		}
	}

	return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
			0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	q80_config_rss_t	*c_rss;
	q80_config_rss_rsp_t	*c_rss_rsp;
	uint32_t		err, i;
	device_t		dev = ha->pci_dev;

	c_rss = (q80_config_rss_t *)ha->hw.mbox;
	bzero(c_rss, (sizeof (q80_config_rss_t)));

	c_rss->opcode = Q8_MBX_CONFIG_RSS;

	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
	c_rss->count_version |= Q8_MBX_CMD_VERSION;

	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

	c_rss->cntxt_id = cntxt_id;

	for (i = 0; i < 5; i++) {
		c_rss->rss_key[i] = rss_key[i];
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
		(sizeof (q80_config_rss_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

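/*
 * Name: qla_set_rss_ind_table
 * Function: programs "count" entries of the RSS indirection table,
 *	beginning at start_idx, for the given receive context.
 */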
static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t      *c_rss_ind;
        q80_config_rss_ind_table_rsp_t  *c_rss_ind_rsp;
        uint32_t                        err;
        device_t                        dev = ha->pci_dev;

	if ((count > Q8_RSS_IND_TBL_SIZE) ||
		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
			start_idx, count);
		return (-1);
	}

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

	c_rss_ind->start_idx = start_idx;
	c_rss_ind->end_idx = start_idx + count - 1;
	c_rss_ind->cntxt_id = cntxt_id;
	bcopy(ind_table, c_rss_ind->ind_table, count);

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
	int rcv)
{
	q80_config_intr_coalesc_t	*intrc;
	q80_config_intr_coalesc_rsp_t	*intrc_rsp;
	uint32_t			err, i;
	device_t			dev = ha->pci_dev;

	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
	intrc->count_version |= Q8_MBX_CMD_VERSION;

	if (rcv) {
		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
	} else {
		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
	}

	intrc->cntxt_id = cntxt_id;

	if (tenable) {
		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			intrc->sds_ring_mask |= (1 << i);
		}
		intrc->ms_timeout = 1000;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
		(sizeof (q80_config_intr_coalesc_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return 0;
}

/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 */
1611static int
1612qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
1613	uint32_t num_mac)
1614{
1615	q80_config_mac_addr_t		*cmac;
1616	q80_config_mac_addr_rsp_t	*cmac_rsp;
1617	uint32_t			err;
1618	device_t			dev = ha->pci_dev;
1619	int				i;
1620	uint8_t				*mac_cpy = mac_addr;
1621
1622	if (num_mac > Q8_MAX_MAC_ADDRS) {
1623		device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
1624			__func__, (add_mac ? "Add" : "Del"), num_mac);
1625		return (-1);
1626	}
1627
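	/*
	 * Up to Q8_MAX_MAC_ADDRS addresses can be added or deleted with a
	 * single mailbox command; callers batch their address lists
	 * accordingly.
	 */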
	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
	bzero(cmac, (sizeof (q80_config_mac_addr_t)));

	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
	cmac->count_version |= Q8_MBX_CMD_VERSION;

	if (add_mac)
		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
	else
		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

	cmac->nmac_entries = num_mac;
	cmac->cntxt_id = ha->hw.rcv_cntxt_id;

	for (i = 0; i < num_mac; i++) {
		bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
		mac_addr = mac_addr + ETHER_ADDR_LEN;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
		(sizeof (q80_config_mac_addr_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: %s failed0\n", __func__,
			(add_mac ? "Add" : "Del"));
		return (-1);
	}
	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
			(add_mac ? "Add" : "Del"), err);
		for (i = 0; i < num_mac; i++) {
			device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
				__func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
				mac_cpy[3], mac_cpy[4], mac_cpy[5]);
			mac_cpy += ETHER_ADDR_LEN;
		}
		return (-1);
	}

	return (0);
}

/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
	q80_config_mac_rcv_mode_t	*rcv_mode;
	uint32_t			err;
	q80_config_mac_rcv_mode_rsp_t	*rcv_mode_rsp;
	device_t			dev = ha->pci_dev;

	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

	rcv_mode->mode = mode;

	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}

int
ql_set_promisc(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

void
qla_reset_promisc(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

int
ql_set_allmulti(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

void
qla_reset_allmulti(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	device_t		dev;
	q80_set_max_mtu_t	*max_mtu;
	q80_set_max_mtu_rsp_t	*max_mtu_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
	max_mtu->count_version |= Q8_MBX_CMD_VERSION;

	max_mtu->cntxt_id = cntxt_id;
	max_mtu->mtu = mtu;

	if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
		(sizeof (q80_set_max_mtu_t) >> 2),
		ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

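	/* Note: a mailbox error here is only logged; the call still returns success. */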
	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return (0);
}

static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t		dev;
	q80_link_event_t	*lnk;
	q80_link_event_rsp_t	*lnk_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	lnk = (q80_link_event_t *)ha->hw.mbox;
	bzero(lnk, (sizeof (q80_link_event_t)));

	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
	lnk->count_version |= Q8_MBX_CMD_VERSION;

	lnk->cntxt_id = cntxt_id;
	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

	if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
		ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return (0);
}

static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t		dev;
	q80_config_fw_lro_t	*fw_lro;
	q80_config_fw_lro_rsp_t	*fw_lro_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
	bzero(fw_lro, sizeof(q80_config_fw_lro_t));

	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
	fw_lro->count_version |= Q8_MBX_CMD_VERSION;

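	/*
	 * Enable firmware LRO for IPv4 and IPv6, in both cases also without
	 * the destination-IP check (the *_WO_DST_IP_CHK flags).
	 */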
	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

	fw_lro->cntxt_id = cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
		(sizeof (q80_config_fw_lro_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return (0);
}

static int
qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
{
	device_t		dev;
	q80_hw_config_t		*hw_config;
	q80_hw_config_rsp_t	*hw_config_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	hw_config = (q80_hw_config_t *)ha->hw.mbox;
	bzero(hw_config, sizeof (q80_hw_config_t));

	hw_config->opcode = Q8_MBX_HW_CONFIG;
	hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
	hw_config->count_version |= Q8_MBX_CMD_VERSION;

	hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;

	hw_config->u.set_cam_search_mode.mode = search_mode;

	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
		(sizeof (q80_hw_config_t) >> 2),
		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}
	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return (0);
}

static int
qla_get_cam_search_mode(qla_host_t *ha)
{
	device_t		dev;
	q80_hw_config_t		*hw_config;
	q80_hw_config_rsp_t	*hw_config_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	hw_config = (q80_hw_config_t *)ha->hw.mbox;
	bzero(hw_config, sizeof (q80_hw_config_t));

	hw_config->opcode = Q8_MBX_HW_CONFIG;
	hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
	hw_config->count_version |= Q8_MBX_CMD_VERSION;

	hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;

	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
		(sizeof (q80_hw_config_t) >> 2),
		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}
	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	} else {
		device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
			hw_config_rsp->u.get_cam_search_mode.mode);
	}

	return (0);
}

static int
qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
{
	device_t		dev;
	q80_get_stats_t		*stat;
	q80_get_stats_rsp_t	*stat_rsp;
	uint32_t		err;

	dev = ha->pci_dev;

	stat = (q80_get_stats_t *)ha->hw.mbox;
	bzero(stat, (sizeof (q80_get_stats_t)));

	stat->opcode = Q8_MBX_GET_STATS;
	stat->count_version = 2;
	stat->count_version |= Q8_MBX_CMD_VERSION;

	stat->cmd = cmd;

	if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
		ha->hw.mbox, (rsp_size >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return (-1);
	}

	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);

	if (err) {
		return (-1);
	}

	return (0);
}

void
ql_get_stats(qla_host_t *ha)
{
	q80_get_stats_rsp_t	*stat_rsp;
	q80_mac_stats_t		*mstat;
	q80_xmt_stats_t		*xstat;
	q80_rcv_stats_t		*rstat;
	uint32_t		cmd;
	int			i;
	struct ifnet		*ifp = ha->ifp;

	if (ifp == NULL)
		return;

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
		return;
	}

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QLA_UNLOCK(ha, __func__);
		return;
	}

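	/*
	 * Three groups of statistics are collected below: MAC stats (per
	 * PCI function, selected in bits 16-31 of the command), receive
	 * stats for the receive context, and transmit stats for each tx
	 * ring context.
	 */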
	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
	/*
	 * Get MAC Statistics
	 */
	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
//	cmd |= Q8_GET_STATS_CMD_CLEAR;

	cmd |= ((ha->pci_func & 0x1) << 16);

	if (ha->qla_watchdog_pause)
		goto ql_get_stats_exit;

	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
		bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
	} else {
		device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
	/*
	 * Get RCV Statistics
	 */
	cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT;
//	cmd |= Q8_GET_STATS_CMD_CLEAR;
	cmd |= (ha->hw.rcv_cntxt_id << 16);

	if (ha->qla_watchdog_pause)
		goto ql_get_stats_exit;

	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
		rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv;
		bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t));
	} else {
		device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}

	if (ha->qla_watchdog_pause)
		goto ql_get_stats_exit;
	/*
	 * Get XMT Statistics
	 */
	for (i = 0; ((i < ha->hw.num_tx_rings) && (!ha->qla_watchdog_pause));
		i++) {
		cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT;
//		cmd |= Q8_GET_STATS_CMD_CLEAR;
		cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16);

		if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t))
			== 0) {
			xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt;
			bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t));
		} else {
			device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n",
				__func__, ha->hw.mbox[0]);
		}
	}

ql_get_stats_exit:
	QLA_UNLOCK(ha, __func__);

	return;
}

/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	device_t dev;

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

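	/*
	 * If the headers to be parsed do not all sit in the first mbuf,
	 * copy them out into 'hdr' first; the parsing below then works on
	 * the copy.
	 */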
	hdrlen = 0;

	switch (etype) {
		case ETHERTYPE_IP:

			tcp_opt_off = ehdrlen + sizeof(struct ip) +
					sizeof(struct tcphdr);

			if (mp->m_len < tcp_opt_off) {
				m_copydata(mp, 0, tcp_opt_off, hdr);
				ip = (struct ip *)(hdr + ehdrlen);
			} else {
				ip = (struct ip *)(mp->m_data + ehdrlen);
			}

			ip_hlen = ip->ip_hl << 2;
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

			if ((ip->ip_p != IPPROTO_TCP) ||
				(ip_hlen != sizeof (struct ip))) {
				/* IP Options are not supported */

				offload = 0;
			} else
				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);

		break;

		case ETHERTYPE_IPV6:

			tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) +
					sizeof (struct tcphdr);

			if (mp->m_len < tcp_opt_off) {
				m_copydata(mp, 0, tcp_opt_off, hdr);
				ip6 = (struct ip6_hdr *)(hdr + ehdrlen);
			} else {
				ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
			}

			ip_hlen = sizeof(struct ip6_hdr);
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6;

			if (ip6->ip6_nxt != IPPROTO_TCP) {
				//device_printf(dev, "%s: ipv6\n", __func__);
				offload = 0;
			} else
				th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		break;

		default:
			QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__));
			offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	if (mp->m_len < hdrlen) {
		if (mp->m_len < tcp_opt_off) {
			if (tcp_hlen > sizeof(struct tcphdr)) {
				m_copydata(mp, tcp_opt_off,
					(tcp_hlen - sizeof(struct tcphdr)),
					&hdr[tcp_opt_off]);
			}
		} else {
			m_copydata(mp, 0, hdrlen, hdr);
		}
	}

	tx_cmd->mss = mp->m_pkthdr.tso_segsz;

	tx_cmd->flags_opcode = opcode;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->total_hdr_len = hdrlen;

	/* multicast: least significant bit of the first dest-addr byte is set */
	if (eh->evl_dhost[0] & 0x01) {
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
	}

	if (mp->m_len < hdrlen) {
		QL_DPRINT8(ha, (dev, "%s: headers copied; hdrlen = %d\n",
			__func__, hdrlen));
		return (1);
	}

	return (0);
}

/*
 * Name: qla_tx_chksum
 * Function: Checks if the packet to be transmitted is a candidate for
 *	TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
	uint32_t *tcp_hdr_off)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t ehdrlen, ip_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;
	uint8_t buf[sizeof(struct ip6_hdr)];

	dev = ha->pci_dev;

	*op_code = 0;

	if ((mp->m_pkthdr.csum_flags &
		(CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof (struct ip);

			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
				ip = (struct ip *)buf;
			}

			if (ip->ip_p == IPPROTO_TCP)
				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
			else if (ip->ip_p == IPPROTO_UDP)
				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
			else {
				//device_printf(dev, "%s: ipv4\n", __func__);
				offload = 0;
			}
		break;

		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof(struct ip6_hdr);

			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
					buf);
				ip6 = (struct ip6_hdr *)buf;
			}

			if (ip6->ip6_nxt == IPPROTO_TCP)
				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
			else if (ip6->ip6_nxt == IPPROTO_UDP)
				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
			else {
				//device_printf(dev, "%s: ipv6\n", __func__);
				offload = 0;
			}
		break;

		default:
			offload = 0;
		break;
	}
	if (!offload)
		return (-1);

	*op_code = opcode;
	*tcp_hdr_off = (ip_hlen + ehdrlen);

	return (0);
}

#define QLA_TX_MIN_FREE 2
/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If neither criterion is met, it is transmitted as a regular
 *	Ethernet frame.
 */
int
ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
	device_t dev;
	int i, ret;
	uint8_t *src = NULL, *dst = NULL;
	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
	uint32_t op_code = 0;
	uint32_t tcp_hdr_off = 0;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is at least one empty slot in the tx_ring;
	 * the tx_ring is considered full when only one entry is available.
	 */
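	/*
	 * Each tx command descriptor holds up to Q8_TX_CMD_MAX_SEGMENTS (4)
	 * buffer pointers, so the divide-by-4 below yields the number of
	 * descriptors needed for the data segments.
	 */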
	num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (EINVAL);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		src = frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

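		/*
		 * qla_tx_tso() returns 0 when the headers are contiguous in
		 * the mbuf, 1 when they had to be copied into frame_hdr;
		 * (ret & ~1) is non-zero only for real failures.
		 */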
		if (!(ret & ~1)) {
			/* find the additional tx_cmd descriptors required */

			if (mp->m_flags & M_VLANTAG)
				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;

			hdr_len = tso_cmd.total_hdr_len;

			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			if (ret == 0)
				src = (uint8_t *)eh;
		} else
			return (EINVAL);
	} else {
		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
	}

	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		ql_hw_tx_done_locked(ha, txr_idx);
		if (hw->tx_cntxt[txr_idx].txr_free <=
				(num_tx_cmds + QLA_TX_MIN_FREE)) {
			QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];

	if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {

		if (nsegs > ha->hw.max_tx_segs)
			ha->hw.max_tx_segs = nsegs;

		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		if (op_code) {
			tx_cmd->flags_opcode = op_code;
			tx_cmd->tcp_hdr_off = tcp_hdr_off;

		} else {
			tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
		}
	} else {
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
		ha->tx_tso_frames++;
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;

		if (iscsi_pdu)
			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;

	} else if (mp->m_flags & M_VLANTAG) {

		if (hdr_len) { /* TSO */
			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
						Q8_TX_CMD_FLAGS_HW_VLAN_ID);
			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
		} else
			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;

		ha->hw_vlan_tx_frames++;
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;

		if (iscsi_pdu) {
			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
		}
	}

	tx_cmd->n_bufs = (uint8_t)nsegs;
	tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
	tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {

			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		/* TSO : Copy the header in the following tx cmd descriptors */

		txr_next = hw->tx_cntxt[txr_idx].txr_next;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

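		/*
		 * The first descriptor has room for
		 * sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN header bytes;
		 * each subsequent descriptor carries a full descriptor's
		 * worth.  For VLAN-tagged frames the 802.1Q tag is inserted
		 * into the copied header on the fly.
		 */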
		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);

			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
			dst += 2;

			/* bytes left in src header */
			hdr_len -= ((ETHER_ADDR_LEN * 2) +
					ETHER_VLAN_ENCAP_LEN);

			/* bytes left in TxCmd Entry */
			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);

			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		while (hdr_len) {
			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;

			txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->tx_cntxt[txr_idx].txr_free =
		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,
		txr_idx);
	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));

	return (0);
}

#define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
static int
qla_config_rss_ind_table(qla_host_t *ha)
{
	uint32_t i, count;
	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];

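	/*
	 * Fill the indirection table round-robin across the SDS rings, then
	 * program it in chunks of Q8_CONFIG_IND_TBL_SIZE entries.
	 */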
	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
	}

	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX;
		i = i + Q8_CONFIG_IND_TBL_SIZE) {

		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
		} else {
			count = Q8_CONFIG_IND_TBL_SIZE;
		}

		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
			rss_ind_tbl))
			return (-1);
	}

	return (0);
}

static int
qla_config_soft_lro(qla_host_t *ha)
{
	int i;
	qla_hw_t *hw = &ha->hw;
	struct lro_ctrl *lro;

	for (i = 0; i < hw->num_sds_rings; i++) {
		lro = &hw->sds[i].lro;

		bzero(lro, sizeof(struct lro_ctrl));

#if (__FreeBSD_version >= 1100101)
		if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
			device_printf(ha->pci_dev,
				"%s: tcp_lro_init_args [%d] failed\n",
				__func__, i);
			return (-1);
		}
#else
		if (tcp_lro_init(lro)) {
			device_printf(ha->pci_dev,
				"%s: tcp_lro_init [%d] failed\n",
				__func__, i);
			return (-1);
		}
#endif /* #if (__FreeBSD_version >= 1100101) */

		lro->ifp = ha->ifp;
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
	return (0);
}

static void
qla_drain_soft_lro(qla_host_t *ha)
{
	int i;
	qla_hw_t *hw = &ha->hw;
	struct lro_ctrl *lro;

	for (i = 0; i < hw->num_sds_rings; i++) {
		lro = &hw->sds[i].lro;

#if (__FreeBSD_version >= 1100101)
		tcp_lro_flush_all(lro);
#else
		struct lro_entry *queued;

		while ((!SLIST_EMPTY(&lro->lro_active))) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif /* #if (__FreeBSD_version >= 1100101) */
	}

	return;
}

static void
qla_free_soft_lro(qla_host_t *ha)
{
	int i;
	qla_hw_t *hw = &ha->hw;
	struct lro_ctrl *lro;

	for (i = 0; i < hw->num_sds_rings; i++) {
		lro = &hw->sds[i].lro;
		tcp_lro_free(lro);
	}

	return;
}

/*
 * Name: ql_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 *	Ethernet Interface
 */
void
ql_del_hw_if(qla_host_t *ha)
{
	uint32_t i;
	uint32_t num_msix;

	(void)qla_stop_nic_func(ha);

	qla_del_rcv_cntxt(ha);

	qla_del_xmt_cntxt(ha);

	if (ha->hw.flags.init_intr_cnxt) {
		for (i = 0; i < ha->hw.num_sds_rings; ) {

			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
				num_msix = Q8_MAX_INTR_VECTORS;
			else
				num_msix = ha->hw.num_sds_rings - i;
			qla_config_intr_cntxt(ha, i, num_msix, 0);

			i += num_msix;
		}

		ha->hw.flags.init_intr_cnxt = 0;
	}

	if (ha->hw.enable_soft_lro) {
		qla_drain_soft_lro(ha);
		qla_free_soft_lro(ha);
	}

	return;
}

void
qla_confirm_9kb_enable(qla_host_t *ha)
{
	uint32_t supports_9kb = 0;

	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);

	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

	qla_get_nic_partition(ha, &supports_9kb, NULL);

	if (!supports_9kb)
		ha->hw.enable_9kb = 0;

	return;
}

/*
 * Name: ql_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 *	Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 *	corresponding to the interface. Enables LRO if allowed.
 */
int
ql_init_hw_if(qla_host_t *ha)
{
	device_t	dev;
	uint32_t	i;
	uint8_t		bcast_mac[6];
	qla_rdesc_t	*rdesc;
	uint32_t	num_msix;

	dev = ha->pci_dev;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}

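	/*
	 * Interrupt contexts are created in batches of at most
	 * Q8_MAX_INTR_VECTORS; on failure, the batches created so far are
	 * torn down before returning.
	 */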
	for (i = 0; i < ha->hw.num_sds_rings; ) {

		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
			num_msix = Q8_MAX_INTR_VECTORS;
		else
			num_msix = ha->hw.num_sds_rings - i;

		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {

			if (i > 0) {

				num_msix = i;

				for (i = 0; i < num_msix; ) {
					qla_config_intr_cntxt(ha, i,
						Q8_MAX_INTR_VECTORS, 0);
					i += Q8_MAX_INTR_VECTORS;
				}
			}
			return (-1);
		}

		i = i + num_msix;
	}

	ha->hw.flags.init_intr_cnxt = 1;

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rdesc = &ha->hw.rds[i];
		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
		rdesc->rx_in = 0;
		/* Update the RDS Producer Indices */
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}

	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		qla_del_rcv_cntxt(ha);
		return (-1);
	}
	ha->hw.max_tx_segs = 0;

	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
		return (-1);

	ha->hw.flags.unicast_mac = 1;

	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

	if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
		return (-1);

	ha->hw.flags.bcast_mac = 1;

	/*
	 * program any cached multicast addresses
	 */
	if (qla_hw_add_all_mcast(ha))
		return (-1);

	if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss_ind_table(ha))
		return (-1);

	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
		return (-1);

	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (ha->ifp->if_capenable & IFCAP_LRO) {
		if (ha->hw.enable_hw_lro) {
			ha->hw.enable_soft_lro = 0;

			if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
				return (-1);
		} else {
			ha->hw.enable_soft_lro = 1;

			if (qla_config_soft_lro(ha))
				return (-1);
		}
	}

	if (qla_init_nic_func(ha))
		return (-1);

	if (qla_query_fw_dcbx_caps(ha))
		return (-1);

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}

static int
qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
{
	device_t		dev = ha->pci_dev;
	q80_rq_map_sds_to_rds_t	*map_rings;
	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
	uint32_t		i, err;
	qla_hw_t		*hw = &ha->hw;

	map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
	bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));

	map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
	map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
	map_rings->count_version |= Q8_MBX_CMD_VERSION;

	map_rings->cntxt_id = hw->rcv_cntxt_id;
	map_rings->num_rings = num_idx;

	for (i = 0; i < num_idx; i++) {
		map_rings->sds_rds[i].sds_ring = i + start_idx;
		map_rings->sds_rds[i].rds_ring = i + start_idx;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
		(sizeof (q80_rq_map_sds_to_rds_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_map_sds_to_rds_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}

/*
 * Name: qla_init_rcv_cntxt
 * Function: Creates the Receive Context.
 */
static int
qla_init_rcv_cntxt(qla_host_t *ha)
{
	q80_rq_rcv_cntxt_t	*rcntxt;
	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
	q80_stat_desc_t		*sdesc;
	int			i, j;
	qla_hw_t		*hw = &ha->hw;
	device_t		dev;
	uint32_t		err;
	uint32_t		rcntxt_sds_rings;
	uint32_t		rcntxt_rds_rings;
	uint32_t		max_idx;

	dev = ha->pci_dev;

	/*
	 * Create Receive Context
	 */

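	/*
	 * Pre-fill the status descriptors with a known non-zero pattern
	 * (presumably so stale entries are never mistaken for valid
	 * completions).
	 */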
	for (i = 0; i < hw->num_sds_rings; i++) {
		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];

		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
			sdesc->data[0] = 1ULL;
			sdesc->data[1] = 1ULL;
			sdesc++;	/* advance, so every descriptor is seeded */
		}
	}

	rcntxt_sds_rings = hw->num_sds_rings;
	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;

	rcntxt_rds_rings = hw->num_rds_rings;

	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
		rcntxt_rds_rings = MAX_RDS_RING_SETS;

	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));

	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
			Q8_RCV_CNTXT_CAP0_LRO |
			Q8_RCV_CNTXT_CAP0_HW_LRO |
			Q8_RCV_CNTXT_CAP0_RSS |
			Q8_RCV_CNTXT_CAP0_SGL_LRO;

	if (ha->hw.enable_9kb)
		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
	else
		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;

	if (ha->hw.num_rds_rings > 1) {
		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
	} else
		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);

	rcntxt->nsds_rings = rcntxt_sds_rings;

	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;

	rcntxt->rcv_vpid = 0;

	for (i = 0; i < rcntxt_sds_rings; i++) {
		rcntxt->sds[i].paddr =
			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
		rcntxt->sds[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
	}

	for (i = 0; i < rcntxt_rds_rings; i++) {
		rcntxt->rds[i].paddr_std =
			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);

		if (ha->hw.enable_9kb)
			rcntxt->rds[i].std_bsize =
				qla_host_to_le64(MJUM9BYTES);
		else
			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);

		rcntxt->rds[i].std_nentries =
			qla_host_to_le32(NUM_RX_DESCRIPTORS);
	}

	if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	for (i = 0; i < rcntxt_sds_rings; i++) {
		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
	}

	for (i = 0; i < rcntxt_rds_rings; i++) {
		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
	}

	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;

	ha->hw.flags.init_rx_cnxt = 1;

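	/*
	 * A single CREATE_RX_CNTXT request carries at most
	 * MAX_RCNTXT_SDS_RINGS rings; any remainder is added with
	 * ADD_RX_RINGS, and the SDS-to-RDS mappings are then programmed in
	 * chunks of MAX_SDS_TO_RDS_MAP.
	 */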
	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {

		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {

			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
				max_idx = MAX_RCNTXT_SDS_RINGS;
			else
				max_idx = hw->num_sds_rings - i;

			err = qla_add_rcv_rings(ha, i, max_idx);
			if (err)
				return (-1);

			i += max_idx;
		}
	}

	if (hw->num_rds_rings > 1) {

		for (i = 0; i < hw->num_rds_rings; ) {

			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
				max_idx = MAX_SDS_TO_RDS_MAP;
			else
				max_idx = hw->num_rds_rings - i;

			err = qla_map_sds_to_rds(ha, i, max_idx);
			if (err)
				return (-1);

			i += max_idx;
		}
	}

	return (0);
}

static int
qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
{
	device_t		dev = ha->pci_dev;
	q80_rq_add_rcv_rings_t	*add_rcv;
	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
	uint32_t		i, j, err;
	qla_hw_t		*hw = &ha->hw;

	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));

	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
	add_rcv->count_version |= Q8_MBX_CMD_VERSION;

	add_rcv->nrds_sets_rings = nsds | (1 << 5);
	add_rcv->nsds_rings = nsds;
	add_rcv->cntxt_id = hw->rcv_cntxt_id;

	for (i = 0; i < nsds; i++) {

		j = i + sds_idx;

		add_rcv->sds[i].paddr =
			qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);

		add_rcv->sds[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);

		add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
		add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);

	}

	for (i = 0; (i < nsds); i++) {
		j = i + sds_idx;

		add_rcv->rds[i].paddr_std =
			qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);

		if (ha->hw.enable_9kb)
			add_rcv->rds[i].std_bsize =
				qla_host_to_le64(MJUM9BYTES);
		else
			add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);

		add_rcv->rds[i].std_nentries =
			qla_host_to_le32(NUM_RX_DESCRIPTORS);
	}

	if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	for (i = 0; i < nsds; i++) {
		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
	}

	for (i = 0; i < nsds; i++) {
		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
	}

	return (0);
}

/*
 * Name: qla_del_rcv_cntxt
 * Function: Destroys the Receive Context.
 */
static void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	device_t			dev = ha->pci_dev;
	q80_rcv_cntxt_destroy_t		*rcntxt;
	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
	uint32_t			err;
	uint8_t				bcast_mac[6];

	if (!ha->hw.flags.init_rx_cnxt)
		return;

	if (qla_hw_del_all_mcast(ha))
		return;

	if (ha->hw.flags.bcast_mac) {

		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
			return;
		ha->hw.flags.bcast_mac = 0;

	}

	if (ha->hw.flags.unicast_mac) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
			return;
		ha->hw.flags.unicast_mac = 0;
	}

	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));

	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return;
	}
	rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
	}

	ha->hw.flags.init_rx_cnxt = 0;
	return;
}

/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
 */
static int
qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t		dev;
	qla_hw_t		*hw = &ha->hw;
	q80_rq_tx_cntxt_t	*tcntxt;
	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
	uint32_t		err;
	qla_hw_tx_cntxt_t	*hw_tx_cntxt;
	uint32_t		intr_idx;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context
	 */
	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));

	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	intr_idx = txr_idx;

#ifdef QL_ENABLE_ISCSI_TLV

	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
				Q8_TX_CNTXT_CAP0_TC;

	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
		tcntxt->traffic_class = 1;
	}

	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);

#else
	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;

#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	tcntxt->ntx_rings = 1;

	tcntxt->tx_ring[0].paddr =
		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
	tcntxt->tx_ring[0].tx_consumer =
		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);

	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);

	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
	*hw_tx_cntxt->tx_cons = 0;

	if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_rq_tx_cntxt_t) >> 2),
		ha->hw.mbox,
		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;

	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
		return (-1);

	return (0);
}

/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Context.
 */
static int
qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t			dev = ha->pci_dev;
	q80_tx_cntxt_destroy_t		*tcntxt;
	q80_tx_cntxt_destroy_rsp_t	*tcntxt_rsp;
	uint32_t			err;

	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));

	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
		ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}

static void
qla_del_xmt_cntxt(qla_host_t *ha)
{
	uint32_t i;

	if (!ha->hw.flags.init_tx_cnxt)
		return;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		if (qla_del_xmt_cntxt_i(ha, i))
			break;
	}
	ha->hw.flags.init_tx_cnxt = 0;
}

static int
qla_init_xmt_cntxt(qla_host_t *ha)
{
	uint32_t i, j;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
			for (j = 0; j < i; j++)
				qla_del_xmt_cntxt_i(ha, j);
			return (-1);
		}
	}
	ha->hw.flags.init_tx_cnxt = 1;
	return (0);
}

static int
qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
{
	int i, nmcast;
	uint32_t count = 0;
	uint8_t *mcast;

	nmcast = ha->hw.nmcast;

	QL_DPRINT2(ha, (ha->pci_dev,
		"%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));

	mcast = ha->hw.mac_addr_arr;
	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));

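	/*
	 * Walk the cached multicast table and program the non-zero entries,
	 * batching up to Q8_MAX_MAC_ADDRS addresses per mailbox command.
	 */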
	for (i = 0; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		if ((ha->hw.mcast[i].addr[0] != 0) ||
			(ha->hw.mcast[i].addr[1] != 0) ||
			(ha->hw.mcast[i].addr[2] != 0) ||
			(ha->hw.mcast[i].addr[3] != 0) ||
			(ha->hw.mcast[i].addr[4] != 0) ||
			(ha->hw.mcast[i].addr[5] != 0)) {

			bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
			mcast = mcast + ETHER_ADDR_LEN;
			count++;

			if (count == Q8_MAX_MAC_ADDRS) {
				if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
					add_mcast, count)) {
					device_printf(ha->pci_dev,
						"%s: failed\n", __func__);
					return (-1);
				}

				count = 0;
				mcast = ha->hw.mac_addr_arr;
				memset(mcast, 0,
					(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
			}

			nmcast--;
		}
	}

	if (count) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
			count)) {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
			return (-1);
		}
	}
	QL_DPRINT2(ha, (ha->pci_dev,
		"%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));

	return (0);
}

static int
qla_hw_add_all_mcast(qla_host_t *ha)
{
	int ret;

	ret = qla_hw_all_mcast(ha, 1);

	return (ret);
}

int
qla_hw_del_all_mcast(qla_host_t *ha)
{
	int ret;

	ret = qla_hw_all_mcast(ha, 0);

	bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
	ha->hw.nmcast = 0;

	return (ret);
}

static int
qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
			return (0); /* it has already been added */
	}
	return (-1);
}

static int
qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {

		if ((ha->hw.mcast[i].addr[0] == 0) &&
			(ha->hw.mcast[i].addr[1] == 0) &&
			(ha->hw.mcast[i].addr[2] == 0) &&
			(ha->hw.mcast[i].addr[3] == 0) &&
			(ha->hw.mcast[i].addr[4] == 0) &&
			(ha->hw.mcast[i].addr[5] == 0)) {

			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
			ha->hw.nmcast++;

			mta = mta + ETHER_ADDR_LEN;
			nmcast--;

			if (nmcast == 0)
				break;
		}

	}
	return (0);
}

static int
qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {

			ha->hw.mcast[i].addr[0] = 0;
			ha->hw.mcast[i].addr[1] = 0;
			ha->hw.mcast[i].addr[2] = 0;
			ha->hw.mcast[i].addr[3] = 0;
			ha->hw.mcast[i].addr[4] = 0;
			ha->hw.mcast[i].addr[5] = 0;

			ha->hw.nmcast--;

			mta = mta + ETHER_ADDR_LEN;
			nmcast--;

			if (nmcast == 0)
				break;
		}
	}
	return (0);
}

/*
 * Name: ql_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host OS into the
 *	hardware (for the given interface)
 */
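/*
 * Illustrative usage (a sketch only; the actual caller lives in the OS
 * glue code): collect the interface's multicast addresses into a flat
 * array of 6-byte entries and pass them in one call, e.g.
 *
 *	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
 *	uint32_t mcnt = 0;
 *	// ... copy each address to &mta[mcnt * Q8_MAC_ADDR_LEN], mcnt++ ...
 *	(void)ql_hw_set_multi(ha, mta, mcnt, 1);
 */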
int
ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
	uint32_t add_mac)
{
	uint8_t *mta = mcast_addr;
	int i;
	int ret = 0;
	uint32_t count = 0;
	uint8_t *mcast;

	mcast = ha->hw.mac_addr_arr;
	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));

	for (i = 0; i < mcnt; i++) {
		if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
			if (add_mac) {
				if (qla_hw_mac_addr_present(ha, mta) != 0) {
					bcopy(mta, mcast, ETHER_ADDR_LEN);
					mcast = mcast + ETHER_ADDR_LEN;
					count++;
				}
			} else {
				if (qla_hw_mac_addr_present(ha, mta) == 0) {
					bcopy(mta, mcast, ETHER_ADDR_LEN);
					mcast = mcast + ETHER_ADDR_LEN;
					count++;
				}
			}
		}
		if (count == Q8_MAX_MAC_ADDRS) {
			if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
				add_mac, count)) {
				device_printf(ha->pci_dev, "%s: failed\n",
					__func__);
				return (-1);
			}

			if (add_mac) {
				qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
					count);
			} else {
				qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
					count);
			}

			count = 0;
			mcast = ha->hw.mac_addr_arr;
			memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
		}

		mta += Q8_MAC_ADDR_LEN;
	}

	if (count) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
			count)) {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
			return (-1);
		}
		if (add_mac) {
			qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
		} else {
			qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
		}
	}

	return (ret);
}

/*
 * Name: ql_hw_tx_done_locked
 * Function: Handle Transmit Completions
 */
void
ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
{
	qla_tx_buf_t *txb;
	qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));

	while (comp_idx != hw_tx_cntxt->txr_comp) {

		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];

		hw_tx_cntxt->txr_comp++;
		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
			hw_tx_cntxt->txr_comp = 0;

		comp_count++;

		if (txb->m_head) {
			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);

			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->m_head = NULL;
		}
	}

	hw_tx_cntxt->txr_free += comp_count;
	return;
}

void
ql_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ha->hw.link_up = 0;
		return;
	}
	link_state = READ_REG32(ha, Q8_LINK_STATE);

	prev_link_state = ha->hw.link_up;

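	/*
	 * Each PCI function's link state occupies a 4-bit field in the
	 * Q8_LINK_STATE register; a value of 1 means link up.
	 */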
	if (ha->pci_func == 0)
		ha->hw.link_up = (((link_state & 0xF) == 1) ? 1 : 0);
	else
		ha->hw.link_up = ((((link_state >> 4) & 0xF) == 1) ? 1 : 0);

	if (prev_link_state != ha->hw.link_up) {
		if (ha->hw.link_up) {
			if_link_state_change(ha->ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ha->ifp, LINK_STATE_DOWN);
		}
	}
	return;
}

int
ql_hw_check_health(qla_host_t *ha)
{
	uint32_t val;

	ha->hw.health_count++;

	if (ha->hw.health_count < 500)
		return (0);

	ha->hw.health_count = 0;

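	/*
	 * The low 16 bits of the temperature register encode the thermal
	 * state; values 2 and 3 are treated as alert conditions here.
	 */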
3654	val = READ_REG32(ha, Q8_ASIC_TEMPERATURE);
3655
3656	if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) ||
3657		(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) {
3658		device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n",
3659			__func__, val);
3660		return -1;
3661	}
3662
3663	val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT);
3664
3665	if ((val != ha->hw.hbeat_value) &&
3666		(!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) {
3667		ha->hw.hbeat_value = val;
3668		ha->hw.hbeat_failure = 0;
3669		return 0;
3670	}
3671
	ha->hw.hbeat_failure++;

	if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1))
		device_printf(ha->pci_dev, "%s: Heartbeat Failure 1 [0x%08x]\n",
			__func__, val);
3678	if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */
3679		return 0;
3680	else
		device_printf(ha->pci_dev, "%s: Heartbeat Failure [0x%08x]\n",
3682			__func__, val);
3683
3684	return -1;
3685}
3686
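/*
 * Name: qla_init_nic_func
 * Function: Issues the INIT NIC FUNCTION mailbox command, registering for
 *           DCBX, SFP and IDC async event notifications.
 */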
3687static int
3688qla_init_nic_func(qla_host_t *ha)
3689{
3690        device_t                dev;
3691        q80_init_nic_func_t     *init_nic;
3692        q80_init_nic_func_rsp_t *init_nic_rsp;
3693        uint32_t                err;
3694
3695        dev = ha->pci_dev;
3696
3697        init_nic = (q80_init_nic_func_t *)ha->hw.mbox;
3698        bzero(init_nic, sizeof(q80_init_nic_func_t));
3699
3700        init_nic->opcode = Q8_MBX_INIT_NIC_FUNC;
3701        init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2);
3702        init_nic->count_version |= Q8_MBX_CMD_VERSION;
3703
3704        init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN;
3705        init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN;
3706        init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN;
3707
3708//qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t));
3709        if (qla_mbx_cmd(ha, (uint32_t *)init_nic,
3710                (sizeof (q80_init_nic_func_t) >> 2),
3711                ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) {
3712                device_printf(dev, "%s: failed\n", __func__);
3713                return -1;
3714        }
3715
3716        init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox;
3717// qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t));
3718
3719        err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status);
3720
3721        if (err) {
3722                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3723        }
3724
3725        return 0;
3726}
3727
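/*
 * Name: qla_stop_nic_func
 * Function: Issues the STOP NIC FUNCTION mailbox command, deregistering
 *           the DCBX and SFP async event notifications.
 */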
3728static int
3729qla_stop_nic_func(qla_host_t *ha)
3730{
3731        device_t                dev;
3732        q80_stop_nic_func_t     *stop_nic;
3733        q80_stop_nic_func_rsp_t *stop_nic_rsp;
3734        uint32_t                err;
3735
3736        dev = ha->pci_dev;
3737
3738        stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox;
3739        bzero(stop_nic, sizeof(q80_stop_nic_func_t));
3740
3741        stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC;
3742        stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2);
3743        stop_nic->count_version |= Q8_MBX_CMD_VERSION;
3744
3745        stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN;
3746        stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN;
3747
3748//qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t));
3749        if (qla_mbx_cmd(ha, (uint32_t *)stop_nic,
3750                (sizeof (q80_stop_nic_func_t) >> 2),
3751                ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) {
3752                device_printf(dev, "%s: failed\n", __func__);
3753                return -1;
3754        }
3755
3756        stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox;
//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_t));
3758
3759        err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status);
3760
3761        if (err) {
3762                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3763        }
3764
3765        return 0;
3766}
3767
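/*
 * Name: qla_query_fw_dcbx_caps
 * Function: Queries the firmware for its DCBX capabilities via the
 *           GET FW DCBX CAPS mailbox command.
 */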
3768static int
3769qla_query_fw_dcbx_caps(qla_host_t *ha)
3770{
3771        device_t                        dev;
3772        q80_query_fw_dcbx_caps_t        *fw_dcbx;
3773        q80_query_fw_dcbx_caps_rsp_t    *fw_dcbx_rsp;
3774        uint32_t                        err;
3775
3776        dev = ha->pci_dev;
3777
3778        fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox;
3779        bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t));
3780
3781        fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS;
3782        fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2);
3783        fw_dcbx->count_version |= Q8_MBX_CMD_VERSION;
3784
3785        ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t));
3786        if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx,
3787                (sizeof (q80_query_fw_dcbx_caps_t) >> 2),
3788                ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) {
3789                device_printf(dev, "%s: failed\n", __func__);
3790                return -1;
3791        }
3792
3793        fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox;
3794        ql_dump_buf8(ha, __func__, fw_dcbx_rsp,
3795                sizeof (q80_query_fw_dcbx_caps_rsp_t));
3796
3797        err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status);
3798
3799        if (err) {
3800                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3801        }
3802
3803        return 0;
3804}
3805
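/*
 * Name: qla_idc_ack
 * Function: Acknowledges an Inter-Driver Communication (IDC) AEN by
 *           echoing mailboxes 1-4 back to the firmware, then waits for
 *           the IDC completion event.
 */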
3806static int
3807qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2,
3808        uint32_t aen_mb3, uint32_t aen_mb4)
3809{
3810        device_t                dev;
3811        q80_idc_ack_t           *idc_ack;
3812        q80_idc_ack_rsp_t       *idc_ack_rsp;
3813        uint32_t                err;
3814        int                     count = 300;
3815
3816        dev = ha->pci_dev;
3817
3818        idc_ack = (q80_idc_ack_t *)ha->hw.mbox;
3819        bzero(idc_ack, sizeof(q80_idc_ack_t));
3820
3821        idc_ack->opcode = Q8_MBX_IDC_ACK;
3822        idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2);
3823        idc_ack->count_version |= Q8_MBX_CMD_VERSION;
3824
3825        idc_ack->aen_mb1 = aen_mb1;
3826        idc_ack->aen_mb2 = aen_mb2;
3827        idc_ack->aen_mb3 = aen_mb3;
3828        idc_ack->aen_mb4 = aen_mb4;
3829
        ha->hw.imd_compl = 0;
3831
3832        if (qla_mbx_cmd(ha, (uint32_t *)idc_ack,
3833                (sizeof (q80_idc_ack_t) >> 2),
3834                ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) {
3835                device_printf(dev, "%s: failed\n", __func__);
3836                return -1;
3837        }
3838
3839        idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox;
3840
3841        err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status);
3842
3843        if (err) {
3844                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3845                return(-1);
3846        }
3847
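        /* wait up to 30 seconds (300 polls of 100 ms) for the IDC completion */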
3848        while (count && !ha->hw.imd_compl) {
3849                qla_mdelay(__func__, 100);
3850                count--;
3851        }
3852
3853        if (!count)
3854                return -1;
3855        else
3856                device_printf(dev, "%s: count %d\n", __func__, count);
3857
3858        return (0);
3859}
3860
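/*
 * Name: qla_set_port_config
 * Function: Programs the port configuration (pause and DCBX) bits; an
 *           intermediate IDC response from the firmware causes us to wait
 *           for the completion event.
 */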
3861static int
3862qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits)
3863{
3864        device_t                dev;
3865        q80_set_port_cfg_t      *pcfg;
3866        q80_set_port_cfg_rsp_t  *pfg_rsp;
3867        uint32_t                err;
3868        int                     count = 300;
3869
3870        dev = ha->pci_dev;
3871
3872        pcfg = (q80_set_port_cfg_t *)ha->hw.mbox;
3873        bzero(pcfg, sizeof(q80_set_port_cfg_t));
3874
3875        pcfg->opcode = Q8_MBX_SET_PORT_CONFIG;
3876        pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2);
3877        pcfg->count_version |= Q8_MBX_CMD_VERSION;
3878
3879        pcfg->cfg_bits = cfg_bits;
3880
3881        device_printf(dev, "%s: cfg_bits"
3882                " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
3883                " [0x%x, 0x%x, 0x%x]\n", __func__,
3884                ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
3885                ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
3886                ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0));
3887
        ha->hw.imd_compl = 0;
3889
3890        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3891                (sizeof (q80_set_port_cfg_t) >> 2),
3892                ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) {
3893                device_printf(dev, "%s: failed\n", __func__);
3894                return -1;
3895        }
3896
3897        pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox;
3898
3899        err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status);
3900
3901        if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) {
3902                while (count && !ha->hw.imd_compl) {
3903                        qla_mdelay(__func__, 100);
3904                        count--;
3905                }
3906                if (count) {
3907                        device_printf(dev, "%s: count %d\n", __func__, count);
3908
3909                        err = 0;
3910                }
3911        }
3912
3913        if (err) {
3914                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3915                return(-1);
3916        }
3917
3918        return (0);
3919}
3920
3921
3922static int
3923qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size)
3924{
3925	uint32_t			err;
3926	device_t			dev = ha->pci_dev;
3927	q80_config_md_templ_size_t	*md_size;
3928	q80_config_md_templ_size_rsp_t	*md_size_rsp;
3929
3930#ifndef QL_LDFLASH_FW
3931
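	/*
	 * The firmware image (and its minidump template) is built into the
	 * driver, so read the size from the embedded template header rather
	 * than querying the firmware.
	 */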
3932	ql_minidump_template_hdr_t *hdr;
3933
3934	hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump;
3935	*size = hdr->size_of_template;
3936	return (0);
3937
#endif /* #ifndef QL_LDFLASH_FW */
3939
3940	md_size = (q80_config_md_templ_size_t *) ha->hw.mbox;
3941	bzero(md_size, sizeof(q80_config_md_templ_size_t));
3942
3943	md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE;
3944	md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2);
3945	md_size->count_version |= Q8_MBX_CMD_VERSION;
3946
3947	if (qla_mbx_cmd(ha, (uint32_t *) md_size,
3948		(sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox,
3949		(sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) {
3950
3951		device_printf(dev, "%s: failed\n", __func__);
3952
3953		return (-1);
3954	}
3955
3956	md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox;
3957
3958	err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status);
3959
3960        if (err) {
3961		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
3962		return(-1);
3963        }
3964
3965	*size = md_size_rsp->templ_size;
3966
3967	return (0);
3968}
3969
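/*
 * Name: qla_get_port_config
 * Function: Retrieves the current port configuration bits from the
 *           firmware and returns them in *cfg_bits.
 */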
3970static int
3971qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits)
3972{
3973        device_t                dev;
3974        q80_get_port_cfg_t      *pcfg;
3975        q80_get_port_cfg_rsp_t  *pcfg_rsp;
3976        uint32_t                err;
3977
3978        dev = ha->pci_dev;
3979
3980        pcfg = (q80_get_port_cfg_t *)ha->hw.mbox;
3981        bzero(pcfg, sizeof(q80_get_port_cfg_t));
3982
3983        pcfg->opcode = Q8_MBX_GET_PORT_CONFIG;
3984        pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2);
3985        pcfg->count_version |= Q8_MBX_CMD_VERSION;
3986
3987        if (qla_mbx_cmd(ha, (uint32_t *)pcfg,
3988                (sizeof (q80_get_port_cfg_t) >> 2),
3989                ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) {
3990                device_printf(dev, "%s: failed\n", __func__);
3991                return -1;
3992        }
3993
3994        pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox;
3995
3996        err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status);
3997
3998        if (err) {
3999                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4000                return(-1);
4001        }
4002
4003        device_printf(dev, "%s: [cfg_bits, port type]"
4004                " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]"
4005                " [0x%x, 0x%x, 0x%x]\n", __func__,
4006                pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type,
4007                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20),
4008                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5),
4009                ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)
4010                );
4011
4012        *cfg_bits = pcfg_rsp->cfg_bits;
4013
4014        return (0);
4015}
4016
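/*
 * Name: ql_iscsi_pdu
 * Function: Returns 0 if the mbuf is a TCP segment to or from the well
 *           known iSCSI port (3260); -1 otherwise.
 */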
4017int
4018ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp)
4019{
4020        struct ether_vlan_header        *eh;
4021        uint16_t                        etype;
4022        struct ip                       *ip = NULL;
4023        struct ip6_hdr                  *ip6 = NULL;
4024        struct tcphdr                   *th = NULL;
4025        uint32_t                        hdrlen;
4026        uint32_t                        offset;
4027        uint8_t                         buf[sizeof(struct ip6_hdr)];
4028
4029        eh = mtod(mp, struct ether_vlan_header *);
4030
4031        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4032                hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4033                etype = ntohs(eh->evl_proto);
4034        } else {
4035                hdrlen = ETHER_HDR_LEN;
4036                etype = ntohs(eh->evl_encap_proto);
4037        }
4038
4039	if (etype == ETHERTYPE_IP) {
4040
4041		offset = (hdrlen + sizeof (struct ip));
4042
4043		if (mp->m_len >= offset) {
4044                        ip = (struct ip *)(mp->m_data + hdrlen);
4045		} else {
4046			m_copydata(mp, hdrlen, sizeof (struct ip), buf);
4047                        ip = (struct ip *)buf;
4048		}
4049
4050                if (ip->ip_p == IPPROTO_TCP) {
4051
4052			hdrlen += ip->ip_hl << 2;
4053			offset = hdrlen + 4;
4054
4055			if (mp->m_len >= offset) {
				th = (struct tcphdr *)(mp->m_data + hdrlen);
4057			} else {
4058                                m_copydata(mp, hdrlen, 4, buf);
4059				th = (struct tcphdr *)buf;
4060			}
4061                }
4062
4063	} else if (etype == ETHERTYPE_IPV6) {
4064
4065		offset = (hdrlen + sizeof (struct ip6_hdr));
4066
4067		if (mp->m_len >= offset) {
4068                        ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen);
4069		} else {
4070                        m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf);
4071                        ip6 = (struct ip6_hdr *)buf;
4072		}
4073
4074                if (ip6->ip6_nxt == IPPROTO_TCP) {
4075
4076			hdrlen += sizeof(struct ip6_hdr);
4077			offset = hdrlen + 4;
4078
4079			if (mp->m_len >= offset) {
				th = (struct tcphdr *)(mp->m_data + hdrlen);
4081			} else {
4082				m_copydata(mp, hdrlen, 4, buf);
4083				th = (struct tcphdr *)buf;
4084			}
4085                }
4086	}
4087
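        /* iSCSI uses the IANA assigned TCP port 3260 */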
4088        if (th != NULL) {
4089                if ((th->th_sport == htons(3260)) ||
4090                        (th->th_dport == htons(3260)))
4091                        return 0;
4092        }
4093        return (-1);
4094}
4095
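/*
 * Name: qla_hw_async_event
 * Function: Handles deferred async events; AEN 0x8101 (IDC request) is
 *           acknowledged via qla_idc_ack().
 */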
4096void
4097qla_hw_async_event(qla_host_t *ha)
4098{
4099        switch (ha->hw.aen_mb0) {
4100        case 0x8101:
4101                (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2,
4102                        ha->hw.aen_mb3, ha->hw.aen_mb4);
4103
4104                break;
4105
4106        default:
4107                break;
4108        }
4109
4110        return;
4111}
4112
4113#ifdef QL_LDFLASH_FW
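/*
 * Name: ql_get_minidump_template
 * Function: Reads the minidump template from the firmware into the
 *           pre-allocated minidump DMA buffer.
 */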
4114static int
4115ql_get_minidump_template(qla_host_t *ha)
4116{
4117	uint32_t			err;
4118	device_t			dev = ha->pci_dev;
4119	q80_config_md_templ_cmd_t	*md_templ;
4120	q80_config_md_templ_cmd_rsp_t	*md_templ_rsp;
4121
4122	md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox;
4123	bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t)));
4124
4125	md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT;
	md_templ->count_version = (sizeof(q80_config_md_templ_cmd_t) >> 2);
4127	md_templ->count_version |= Q8_MBX_CMD_VERSION;
4128
4129	md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr;
4130	md_templ->buff_size = ha->hw.dma_buf.minidump.size;
4131
4132	if (qla_mbx_cmd(ha, (uint32_t *) md_templ,
4133		(sizeof(q80_config_md_templ_cmd_t) >> 2),
4134		 ha->hw.mbox,
4135		(sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) {
4136
4137		device_printf(dev, "%s: failed\n", __func__);
4138
4139		return (-1);
4140	}
4141
4142	md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox;
4143
4144	err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status);
4145
4146	if (err) {
4147		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
4148		return (-1);
4149	}
4150
4151	return (0);
4152
4153}
4154#endif /* #ifdef QL_LDFLASH_FW */
4155
4156/*
4157 * Minidump related functionality
4158 */
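
/*
 * Capture flow: ql_minidump_init() obtains and validates the template,
 * ql_minidump() snapshots it and invokes ql_parse_template(), which walks
 * the template entries and dispatches each entry type to one of the
 * handlers declared below.
 */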
4159
4160static int ql_parse_template(qla_host_t *ha);
4161
4162static uint32_t ql_rdcrb(qla_host_t *ha,
4163			ql_minidump_entry_rdcrb_t *crb_entry,
4164			uint32_t * data_buff);
4165
4166static uint32_t ql_pollrd(qla_host_t *ha,
4167			ql_minidump_entry_pollrd_t *entry,
4168			uint32_t * data_buff);
4169
4170static uint32_t ql_pollrd_modify_write(qla_host_t *ha,
4171			ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
4172			uint32_t *data_buff);
4173
4174static uint32_t ql_L2Cache(qla_host_t *ha,
4175			ql_minidump_entry_cache_t *cacheEntry,
4176			uint32_t * data_buff);
4177
4178static uint32_t ql_L1Cache(qla_host_t *ha,
4179			ql_minidump_entry_cache_t *cacheEntry,
4180			uint32_t *data_buff);
4181
4182static uint32_t ql_rdocm(qla_host_t *ha,
4183			ql_minidump_entry_rdocm_t *ocmEntry,
4184			uint32_t *data_buff);
4185
4186static uint32_t ql_rdmem(qla_host_t *ha,
4187			ql_minidump_entry_rdmem_t *mem_entry,
4188			uint32_t *data_buff);
4189
4190static uint32_t ql_rdrom(qla_host_t *ha,
4191			ql_minidump_entry_rdrom_t *romEntry,
4192			uint32_t *data_buff);
4193
4194static uint32_t ql_rdmux(qla_host_t *ha,
4195			ql_minidump_entry_mux_t *muxEntry,
4196			uint32_t *data_buff);
4197
4198static uint32_t ql_rdmux2(qla_host_t *ha,
4199			ql_minidump_entry_mux2_t *muxEntry,
4200			uint32_t *data_buff);
4201
4202static uint32_t ql_rdqueue(qla_host_t *ha,
4203			ql_minidump_entry_queue_t *queueEntry,
4204			uint32_t *data_buff);
4205
4206static uint32_t ql_cntrl(qla_host_t *ha,
4207			ql_minidump_template_hdr_t *template_hdr,
4208			ql_minidump_entry_cntrl_t *crbEntry);
4209
4210
4211static uint32_t
4212ql_minidump_size(qla_host_t *ha)
4213{
4214	uint32_t i, k;
4215	uint32_t size = 0;
4216	ql_minidump_template_hdr_t *hdr;
4217
4218	hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b;
4219
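	/*
	 * Each bit k (k >= 1) set in the capture mask adds
	 * capture_size_array[k] from the template header to the total.
	 */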
4220	i = 0x2;
4221
4222	for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) {
4223		if (i & ha->hw.mdump_capture_mask)
4224			size += hdr->capture_size_array[k];
4225		i = i << 1;
4226	}
4227	return (size);
4228}
4229
4230static void
4231ql_free_minidump_buffer(qla_host_t *ha)
4232{
4233	if (ha->hw.mdump_buffer != NULL) {
4234		free(ha->hw.mdump_buffer, M_QLA83XXBUF);
4235		ha->hw.mdump_buffer = NULL;
4236		ha->hw.mdump_buffer_size = 0;
4237	}
4238	return;
4239}
4240
4241static int
4242ql_alloc_minidump_buffer(qla_host_t *ha)
4243{
4244	ha->hw.mdump_buffer_size = ql_minidump_size(ha);
4245
4246	if (!ha->hw.mdump_buffer_size)
4247		return (-1);
4248
4249	ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF,
4250					M_NOWAIT);
4251
4252	if (ha->hw.mdump_buffer == NULL)
4253		return (-1);
4254
4255	return (0);
4256}
4257
4258static void
4259ql_free_minidump_template_buffer(qla_host_t *ha)
4260{
4261	if (ha->hw.mdump_template != NULL) {
4262		free(ha->hw.mdump_template, M_QLA83XXBUF);
4263		ha->hw.mdump_template = NULL;
4264		ha->hw.mdump_template_size = 0;
4265	}
4266	return;
4267}
4268
4269static int
4270ql_alloc_minidump_template_buffer(qla_host_t *ha)
4271{
4272	ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size;
4273
4274	ha->hw.mdump_template = malloc(ha->hw.mdump_template_size,
4275					M_QLA83XXBUF, M_NOWAIT);
4276
4277	if (ha->hw.mdump_template == NULL)
4278		return (-1);
4279
4280	return (0);
4281}
4282
4283static int
4284ql_alloc_minidump_buffers(qla_host_t *ha)
4285{
4286	int ret;
4287
4288	ret = ql_alloc_minidump_template_buffer(ha);
4289
4290	if (ret)
4291		return (ret);
4292
4293	ret = ql_alloc_minidump_buffer(ha);
4294
4295	if (ret)
4296		ql_free_minidump_template_buffer(ha);
4297
4298	return (ret);
4299}
4300
4301
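/*
 * Name: ql_validate_minidump_checksum
 * Function: Sums the template as 32-bit words with end-around carry and
 *           returns the one's complement of the folded sum; a return of
 *           0 indicates a valid template.
 */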
4302static uint32_t
4303ql_validate_minidump_checksum(qla_host_t *ha)
4304{
4305        uint64_t sum = 0;
4306	int count;
4307	uint32_t *template_buff;
4308
4309	count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t);
4310	template_buff = ha->hw.dma_buf.minidump.dma_b;
4311
4312	while (count-- > 0) {
4313		sum += *template_buff++;
4314	}
4315
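	/* fold carries back into the low 32 bits (end-around carry) */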
4316	while (sum >> 32) {
4317		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
4318	}
4319
4320	return (~sum);
4321}
4322
4323int
4324ql_minidump_init(qla_host_t *ha)
4325{
4326	int		ret = 0;
4327	uint32_t	template_size = 0;
4328	device_t	dev = ha->pci_dev;
4329
	/*
	 * Get Minidump Template Size
	 */
4333	ret = qla_get_minidump_tmplt_size(ha, &template_size);
4334
4335	if (ret || (template_size == 0)) {
4336		device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret,
4337			template_size);
4338		return (-1);
4339	}
4340
4341	/*
4342	 * Allocate Memory for Minidump Template
4343	 */
4344
4345	ha->hw.dma_buf.minidump.alignment = 8;
4346	ha->hw.dma_buf.minidump.size = template_size;
4347
4348#ifdef QL_LDFLASH_FW
4349	if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) {
4350
4351		device_printf(dev, "%s: minidump dma alloc failed\n", __func__);
4352
4353		return (-1);
4354	}
4355	ha->hw.dma_buf.flags.minidump = 1;
4356
4357	/*
4358	 * Retrieve Minidump Template
4359	 */
4360	ret = ql_get_minidump_template(ha);
4361#else
4362	ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump;
4363
4364#endif /* #ifdef QL_LDFLASH_FW */
4365
4366	if (ret == 0) {
4367
4368		ret = ql_validate_minidump_checksum(ha);
4369
4370		if (ret == 0) {
4371
4372			ret = ql_alloc_minidump_buffers(ha);
4373
			if (ret == 0)
				ha->hw.mdump_init = 1;
4376			else
4377				device_printf(dev,
4378					"%s: ql_alloc_minidump_buffers"
4379					" failed\n", __func__);
4380		} else {
4381			device_printf(dev, "%s: ql_validate_minidump_checksum"
4382				" failed\n", __func__);
4383		}
4384	} else {
4385		device_printf(dev, "%s: ql_get_minidump_template failed\n",
4386			 __func__);
4387	}
4388
4389	if (ret)
4390		ql_minidump_free(ha);
4391
4392	return (ret);
4393}
4394
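/*
 * Name: ql_minidump_free
 * Function: Releases the template DMA buffer and the driver's minidump
 *           template/capture buffers.
 */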
4395static void
4396ql_minidump_free(qla_host_t *ha)
4397{
4398	ha->hw.mdump_init = 0;
4399	if (ha->hw.dma_buf.flags.minidump) {
4400		ha->hw.dma_buf.flags.minidump = 0;
4401		ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump);
4402	}
4403
4404	ql_free_minidump_template_buffer(ha);
4405	ql_free_minidump_buffer(ha);
4406
4407	return;
4408}
4409
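/*
 * Name: ql_minidump
 * Function: One-shot firmware core capture; stops the firmware sequence,
 *           snapshots the template, parses it into the capture buffer,
 *           then restarts the sequence.
 */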
4410void
4411ql_minidump(qla_host_t *ha)
4412{
4413	if (!ha->hw.mdump_init)
4414		return;
4415
4416	if (ha->hw.mdump_done)
4417		return;
4418
	ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);
4420
4421	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
4422	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);
4423
4424	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
4425		ha->hw.mdump_template_size);
4426
4427	ql_parse_template(ha);
4428
4429	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);
4430
4431	ha->hw.mdump_done = 1;
4432
4433	return;
4434}
4435
4436
4437/*
4438 * helper routines
4439 */
4440static void
4441ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
4442{
4443	if (esize != entry->hdr.entry_capture_size) {
4444		entry->hdr.entry_capture_size = esize;
4445		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
4446	}
4447	return;
4448}
4449
4450
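/*
 * Name: ql_parse_template
 * Function: Walks the minidump template entries, dispatching each entry
 *           type to its handler and accumulating output in the capture
 *           buffer.
 */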
4451static int
4452ql_parse_template(qla_host_t *ha)
4453{
4454	uint32_t num_of_entries, buff_level, e_cnt, esize;
4455	uint32_t end_cnt, rv = 0;
4456	char *dump_buff, *dbuff;
4457	int sane_start = 0, sane_end = 0;
4458	ql_minidump_template_hdr_t *template_hdr;
4459	ql_minidump_entry_t *entry;
4460	uint32_t capture_mask;
4461	uint32_t dump_size;
4462
4463	/* Setup parameters */
4464	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;
4465
4466	if (template_hdr->entry_type == TLHDR)
4467		sane_start = 1;
4468
4469	dump_buff = (char *) ha->hw.mdump_buffer;
4470
4471	num_of_entries = template_hdr->num_of_entries;
4472
4473	entry = (ql_minidump_entry_t *) ((char *)template_hdr
			+ template_hdr->first_entry_offset);
4475
4476	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
4477		template_hdr->ocm_window_array[ha->pci_func];
4478	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;
4479
4480	capture_mask = ha->hw.mdump_capture_mask;
4481	dump_size = ha->hw.mdump_buffer_size;
4482
4483	template_hdr->driver_capture_mask = capture_mask;
4484
4485	QL_DPRINT80(ha, (ha->pci_dev,
4486		"%s: sane_start = %d num_of_entries = %d "
4487		"capture_mask = 0x%x dump_size = %d \n",
4488		__func__, sane_start, num_of_entries, capture_mask, dump_size));
4489
4490	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
4491
4492		/*
		 * If the capture_mask of the entry does not match the capture
		 * mask, skip the entry after marking the driver_flags
		 * indicator.
4495		 */
4496
4497		if (!(entry->hdr.entry_capture_mask & capture_mask)) {
4498
4499			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4500			entry = (ql_minidump_entry_t *) ((char *) entry
4501					+ entry->hdr.entry_size);
4502			continue;
4503		}
4504
4505		/*
4506		 * This is ONLY needed in implementations where
4507		 * the capture buffer allocated is too small to capture
4508		 * all of the required entries for a given capture mask.
4509		 * We need to empty the buffer contents to a file
		 * if possible, before processing the next entry.
4511		 * If the buff_full_flag is set, no further capture will happen
4512		 * and all remaining non-control entries will be skipped.
4513		 */
4514		if (entry->hdr.entry_capture_size != 0) {
4515			if ((buff_level + entry->hdr.entry_capture_size) >
4516				dump_size) {
4517				/*  Try to recover by emptying buffer to file */
4518				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4519				entry = (ql_minidump_entry_t *) ((char *) entry
4520						+ entry->hdr.entry_size);
4521				continue;
4522			}
4523		}
4524
4525		/*
4526		 * Decode the entry type and process it accordingly
4527		 */
4528
4529		switch (entry->hdr.entry_type) {
4530		case RDNOP:
4531			break;
4532
4533		case RDEND:
4534			if (sane_end == 0) {
4535				end_cnt = e_cnt;
4536			}
4537			sane_end++;
4538			break;
4539
4540		case RDCRB:
4541			dbuff = dump_buff + buff_level;
4542			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
4543			ql_entry_err_chk(entry, esize);
4544			buff_level += esize;
4545			break;
4546
4547                case POLLRD:
4548                        dbuff = dump_buff + buff_level;
4549                        esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
4550                        ql_entry_err_chk(entry, esize);
4551                        buff_level += esize;
4552                        break;
4553
4554                case POLLRDMWR:
4555                        dbuff = dump_buff + buff_level;
4556                        esize = ql_pollrd_modify_write(ha, (void *)entry,
4557					(void *)dbuff);
4558                        ql_entry_err_chk(entry, esize);
4559                        buff_level += esize;
4560                        break;
4561
4562		case L2ITG:
4563		case L2DTG:
4564		case L2DAT:
4565		case L2INS:
4566			dbuff = dump_buff + buff_level;
4567			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
4568			if (esize == -1) {
4569				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4570			} else {
4571				ql_entry_err_chk(entry, esize);
4572				buff_level += esize;
4573			}
4574			break;
4575
4576		case L1DAT:
4577		case L1INS:
4578			dbuff = dump_buff + buff_level;
4579			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
4580			ql_entry_err_chk(entry, esize);
4581			buff_level += esize;
4582			break;
4583
4584		case RDOCM:
4585			dbuff = dump_buff + buff_level;
4586			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
4587			ql_entry_err_chk(entry, esize);
4588			buff_level += esize;
4589			break;
4590
4591		case RDMEM:
4592			dbuff = dump_buff + buff_level;
4593			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
4594			ql_entry_err_chk(entry, esize);
4595			buff_level += esize;
4596			break;
4597
4598		case BOARD:
4599		case RDROM:
4600			dbuff = dump_buff + buff_level;
4601			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
4602			ql_entry_err_chk(entry, esize);
4603			buff_level += esize;
4604			break;
4605
4606		case RDMUX:
4607			dbuff = dump_buff + buff_level;
4608			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
4609			ql_entry_err_chk(entry, esize);
4610			buff_level += esize;
4611			break;
4612
4613                case RDMUX2:
4614                        dbuff = dump_buff + buff_level;
4615                        esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
4616                        ql_entry_err_chk(entry, esize);
4617                        buff_level += esize;
4618                        break;
4619
4620		case QUEUE:
4621			dbuff = dump_buff + buff_level;
4622			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
4623			ql_entry_err_chk(entry, esize);
4624			buff_level += esize;
4625			break;
4626
4627		case CNTRL:
4628			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
4629				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4630			}
4631			break;
4632		default:
4633			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
4634			break;
4635		}
4636		/*  next entry in the template */
4637		entry = (ql_minidump_entry_t *) ((char *) entry
4638						+ entry->hdr.entry_size);
4639	}
4640
4641	if (!sane_start || (sane_end > 1)) {
4642		device_printf(ha->pci_dev,
4643			"\n%s: Template configuration error. Check Template\n",
4644			__func__);
4645	}
4646
4647	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
4648		__func__, template_hdr->num_of_entries));
4649
4650	return 0;
4651}
4652
4653/*
4654 * Read CRB operation.
4655 */
4656static uint32_t
4657ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
4658	uint32_t * data_buff)
4659{
4660	int loop_cnt;
4661	int ret;
4662	uint32_t op_count, addr, stride, value = 0;
4663
4664	addr = crb_entry->addr;
4665	op_count = crb_entry->op_count;
4666	stride = crb_entry->addr_stride;
4667
4668	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
4669
4670		ret = ql_rdwr_indreg32(ha, addr, &value, 1);
4671
4672		if (ret)
4673			return (0);
4674
4675		*data_buff++ = addr;
4676		*data_buff++ = value;
4677		addr = addr + stride;
4678	}
4679
	/*
	 * return the amount of data (in bytes) written to the capture buffer
	 */
4683	return (op_count * (2 * sizeof(uint32_t)));
4684}
4685
4686/*
4687 * Handle L2 Cache.
4688 */
4689
4690static uint32_t
4691ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
4692	uint32_t * data_buff)
4693{
4694	int i, k;
4695	int loop_cnt;
4696	int ret;
4697
4698	uint32_t read_value;
4699	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
4700	uint32_t tag_value, read_cnt;
4701	volatile uint8_t cntl_value_r;
4702	long timeout;
4703	uint32_t data;
4704
4705	loop_cnt = cacheEntry->op_count;
4706
4707	read_addr = cacheEntry->read_addr;
4708	cntrl_addr = cacheEntry->control_addr;
4709	cntl_value_w = (uint32_t) cacheEntry->write_value;
4710
4711	tag_reg_addr = cacheEntry->tag_reg_addr;
4712
4713	tag_value = cacheEntry->init_tag_value;
4714	read_cnt = cacheEntry->read_addr_cnt;
4715
4716	for (i = 0; i < loop_cnt; i++) {
4717
4718		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4719		if (ret)
4720			return (0);
4721
4722		if (cacheEntry->write_value != 0) {
4723
4724			ret = ql_rdwr_indreg32(ha, cntrl_addr,
4725					&cntl_value_w, 0);
4726			if (ret)
4727				return (0);
4728		}
4729
4730		if (cacheEntry->poll_mask != 0) {
4731
4732			timeout = cacheEntry->poll_wait;
4733
4734			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
4735			if (ret)
4736				return (0);
4737
4738			cntl_value_r = (uint8_t)data;
4739
4740			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {
4741
4742				if (timeout) {
4743					qla_mdelay(__func__, 1);
4744					timeout--;
4745				} else
4746					break;
4747
4748				ret = ql_rdwr_indreg32(ha, cntrl_addr,
4749						&data, 1);
4750				if (ret)
4751					return (0);
4752
4753				cntl_value_r = (uint8_t)data;
4754			}
4755			if (!timeout) {
				/*
				 * Report a timeout error: core dump capture
				 * failed. Skip the remaining entries, write
				 * the buffer out to a file, and use the
				 * driver specific fields in the template
				 * header to report this error.
				 */
4763				return (-1);
4764			}
4765		}
4766
4767		addr = read_addr;
4768		for (k = 0; k < read_cnt; k++) {
4769
4770			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4771			if (ret)
4772				return (0);
4773
4774			*data_buff++ = read_value;
4775			addr += cacheEntry->read_addr_stride;
4776		}
4777
4778		tag_value += cacheEntry->tag_value_stride;
4779	}
4780
4781	return (read_cnt * loop_cnt * sizeof(uint32_t));
4782}
4783
4784/*
4785 * Handle L1 Cache.
4786 */
4787
4788static uint32_t
4789ql_L1Cache(qla_host_t *ha,
4790	ql_minidump_entry_cache_t *cacheEntry,
4791	uint32_t *data_buff)
4792{
4793	int ret;
4794	int i, k;
4795	int loop_cnt;
4796
4797	uint32_t read_value;
4798	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
4799	uint32_t tag_value, read_cnt;
4800	uint32_t cntl_value_w;
4801
4802	loop_cnt = cacheEntry->op_count;
4803
4804	read_addr = cacheEntry->read_addr;
4805	cntrl_addr = cacheEntry->control_addr;
4806	cntl_value_w = (uint32_t) cacheEntry->write_value;
4807
4808	tag_reg_addr = cacheEntry->tag_reg_addr;
4809
4810	tag_value = cacheEntry->init_tag_value;
4811	read_cnt = cacheEntry->read_addr_cnt;
4812
4813	for (i = 0; i < loop_cnt; i++) {
4814
4815		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
4816		if (ret)
4817			return (0);
4818
4819		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
4820		if (ret)
4821			return (0);
4822
4823		addr = read_addr;
4824		for (k = 0; k < read_cnt; k++) {
4825
4826			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
4827			if (ret)
4828				return (0);
4829
4830			*data_buff++ = read_value;
4831			addr += cacheEntry->read_addr_stride;
4832		}
4833
4834		tag_value += cacheEntry->tag_value_stride;
4835	}
4836
4837	return (read_cnt * loop_cnt * sizeof(uint32_t));
4838}
4839
4840/*
4841 * Reading OCM memory
4842 */
4843
4844static uint32_t
4845ql_rdocm(qla_host_t *ha,
4846	ql_minidump_entry_rdocm_t *ocmEntry,
4847	uint32_t *data_buff)
4848{
4849	int i, loop_cnt;
4850	volatile uint32_t addr;
4851	volatile uint32_t value;
4852
4853	addr = ocmEntry->read_addr;
4854	loop_cnt = ocmEntry->op_count;
4855
4856	for (i = 0; i < loop_cnt; i++) {
4857		value = READ_REG32(ha, addr);
4858		*data_buff++ = value;
4859		addr += ocmEntry->read_addr_stride;
4860	}
4861	return (loop_cnt * sizeof(value));
4862}
4863
4864/*
4865 * Read memory
4866 */
4867
4868static uint32_t
4869ql_rdmem(qla_host_t *ha,
4870	ql_minidump_entry_rdmem_t *mem_entry,
4871	uint32_t *data_buff)
4872{
4873	int ret;
4874        int i, loop_cnt;
4875        volatile uint32_t addr;
4876	q80_offchip_mem_val_t val;
4877
4878        addr = mem_entry->read_addr;
4879
4880	/* size in bytes / 16 */
4881        loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);
4882
4883        for (i = 0; i < loop_cnt; i++) {
4884
4885		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
4886		if (ret)
4887			return (0);
4888
4889                *data_buff++ = val.data_lo;
4890                *data_buff++ = val.data_hi;
4891                *data_buff++ = val.data_ulo;
4892                *data_buff++ = val.data_uhi;
4893
4894                addr += (sizeof(uint32_t) * 4);
4895        }
4896
4897        return (loop_cnt * (sizeof(uint32_t) * 4));
4898}
4899
4900/*
4901 * Read Rom
4902 */
4903
4904static uint32_t
4905ql_rdrom(qla_host_t *ha,
4906	ql_minidump_entry_rdrom_t *romEntry,
4907	uint32_t *data_buff)
4908{
4909	int ret;
4910	int i, loop_cnt;
4911	uint32_t addr;
4912	uint32_t value;
4913
4914	addr = romEntry->read_addr;
4915	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
4916	loop_cnt /= sizeof(value);
4917
4918	for (i = 0; i < loop_cnt; i++) {
4919
4920		ret = ql_rd_flash32(ha, addr, &value);
4921		if (ret)
4922			return (0);
4923
4924		*data_buff++ = value;
4925		addr += sizeof(value);
4926	}
4927
4928	return (loop_cnt * sizeof(value));
4929}
4930
4931/*
4932 * Read MUX data
4933 */
4934
4935static uint32_t
4936ql_rdmux(qla_host_t *ha,
4937	ql_minidump_entry_mux_t *muxEntry,
4938	uint32_t *data_buff)
4939{
4940	int ret;
4941	int loop_cnt;
4942	uint32_t read_value, sel_value;
4943	uint32_t read_addr, select_addr;
4944
4945	select_addr = muxEntry->select_addr;
4946	sel_value = muxEntry->select_value;
4947	read_addr = muxEntry->read_addr;
4948
4949	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
4950
4951		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
4952		if (ret)
4953			return (0);
4954
4955		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
4956		if (ret)
4957			return (0);
4958
4959		*data_buff++ = sel_value;
4960		*data_buff++ = read_value;
4961
4962		sel_value += muxEntry->select_value_stride;
4963	}
4964
4965	return (loop_cnt * (2 * sizeof(uint32_t)));
4966}
4967
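/*
 * Read MUX data, two-stage select variant: each iteration programs
 * select_addr_1, writes the masked select value through select_addr_2,
 * and captures the data register; this is done for both select_value_1
 * and select_value_2.
 */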
4968static uint32_t
4969ql_rdmux2(qla_host_t *ha,
4970	ql_minidump_entry_mux2_t *muxEntry,
4971	uint32_t *data_buff)
4972{
4973	int ret;
4974        int loop_cnt;
4975
4976        uint32_t select_addr_1, select_addr_2;
4977        uint32_t select_value_1, select_value_2;
4978        uint32_t select_value_count, select_value_mask;
4979        uint32_t read_addr, read_value;
4980
4981        select_addr_1 = muxEntry->select_addr_1;
4982        select_addr_2 = muxEntry->select_addr_2;
4983        select_value_1 = muxEntry->select_value_1;
4984        select_value_2 = muxEntry->select_value_2;
4985        select_value_count = muxEntry->select_value_count;
4986        select_value_mask  = muxEntry->select_value_mask;
4987
4988        read_addr = muxEntry->read_addr;
4989
4990        for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
4991		loop_cnt++) {
4992
4993                uint32_t temp_sel_val;
4994
4995		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
4996		if (ret)
4997			return (0);
4998
4999                temp_sel_val = select_value_1 & select_value_mask;
5000
5001		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5002		if (ret)
5003			return (0);
5004
5005		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5006		if (ret)
5007			return (0);
5008
5009                *data_buff++ = temp_sel_val;
5010                *data_buff++ = read_value;
5011
5012		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
5013		if (ret)
5014			return (0);
5015
5016                temp_sel_val = select_value_2 & select_value_mask;
5017
5018		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
5019		if (ret)
5020			return (0);
5021
5022		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5023		if (ret)
5024			return (0);
5025
5026                *data_buff++ = temp_sel_val;
5027                *data_buff++ = read_value;
5028
5029                select_value_1 += muxEntry->select_value_stride;
5030                select_value_2 += muxEntry->select_value_stride;
5031        }
5032
5033        return (loop_cnt * (4 * sizeof(uint32_t)));
5034}
5035
5036/*
5037 * Handling Queue State Reads.
5038 */
5039
5040static uint32_t
5041ql_rdqueue(qla_host_t *ha,
5042	ql_minidump_entry_queue_t *queueEntry,
5043	uint32_t *data_buff)
5044{
5045	int ret;
5046	int loop_cnt, k;
5047	uint32_t read_value;
5048	uint32_t read_addr, read_stride, select_addr;
5049	uint32_t queue_id, read_cnt;
5050
5051	read_cnt = queueEntry->read_addr_cnt;
5052	read_stride = queueEntry->read_addr_stride;
5053	select_addr = queueEntry->select_addr;
5054
5055	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
5056		loop_cnt++) {
5057
5058		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
5059		if (ret)
5060			return (0);
5061
5062		read_addr = queueEntry->read_addr;
5063
5064		for (k = 0; k < read_cnt; k++) {
5065
5066			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
5067			if (ret)
5068				return (0);
5069
5070			*data_buff++ = read_value;
5071			read_addr += read_stride;
5072		}
5073
5074		queue_id += queueEntry->queue_id_stride;
5075	}
5076
5077	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
5078}
5079
5080/*
5081 * Handling control entries.
5082 */
5083
5084static uint32_t
5085ql_cntrl(qla_host_t *ha,
5086	ql_minidump_template_hdr_t *template_hdr,
5087	ql_minidump_entry_cntrl_t *crbEntry)
5088{
5089	int ret;
5090	int count;
5091	uint32_t opcode, read_value, addr, entry_addr;
5092	long timeout;
5093
5094	entry_addr = crbEntry->addr;
5095
5096	for (count = 0; count < crbEntry->op_count; count++) {
5097		opcode = crbEntry->opcode;
5098
5099		if (opcode & QL_DBG_OPCODE_WR) {
5100
5101                	ret = ql_rdwr_indreg32(ha, entry_addr,
5102					&crbEntry->value_1, 0);
5103			if (ret)
5104				return (0);
5105
5106			opcode &= ~QL_DBG_OPCODE_WR;
5107		}
5108
5109		if (opcode & QL_DBG_OPCODE_RW) {
5110
5111                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5112			if (ret)
5113				return (0);
5114
5115                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5116			if (ret)
5117				return (0);
5118
5119			opcode &= ~QL_DBG_OPCODE_RW;
5120		}
5121
5122		if (opcode & QL_DBG_OPCODE_AND) {
5123
5124                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5125			if (ret)
5126				return (0);
5127
5128			read_value &= crbEntry->value_2;
5129			opcode &= ~QL_DBG_OPCODE_AND;
5130
5131			if (opcode & QL_DBG_OPCODE_OR) {
5132				read_value |= crbEntry->value_3;
5133				opcode &= ~QL_DBG_OPCODE_OR;
5134			}
5135
5136                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5137			if (ret)
5138				return (0);
5139		}
5140
5141		if (opcode & QL_DBG_OPCODE_OR) {
5142
5143                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
5144			if (ret)
5145				return (0);
5146
5147			read_value |= crbEntry->value_3;
5148
5149                	ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
5150			if (ret)
5151				return (0);
5152
5153			opcode &= ~QL_DBG_OPCODE_OR;
5154		}
5155
5156		if (opcode & QL_DBG_OPCODE_POLL) {
5157
5158			opcode &= ~QL_DBG_OPCODE_POLL;
5159			timeout = crbEntry->poll_timeout;
5160			addr = entry_addr;
5161
5162                	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5163			if (ret)
5164				return (0);
5165
5166			while ((read_value & crbEntry->value_2)
5167				!= crbEntry->value_1) {
5168
5169				if (timeout) {
5170					qla_mdelay(__func__, 1);
5171					timeout--;
5172				} else
5173					break;
5174
5175                		ret = ql_rdwr_indreg32(ha, addr,
5176						&read_value, 1);
5177				if (ret)
5178					return (0);
5179			}
5180
5181			if (!timeout) {
5182				/*
				 * Report a timeout error: core dump capture
				 * failed. Skip the remaining entries, write
				 * the buffer out to a file, and use the
				 * driver specific fields in the template
				 * header to report this error.
5189				 */
5190				return (-1);
5191			}
5192		}
5193
5194		if (opcode & QL_DBG_OPCODE_RDSTATE) {
5195			/*
5196			 * decide which address to use.
5197			 */
5198			if (crbEntry->state_index_a) {
5199				addr = template_hdr->saved_state_array[
						crbEntry->state_index_a];
5201			} else {
5202				addr = entry_addr;
5203			}
5204
5205                	ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
5206			if (ret)
5207				return (0);
5208
5209			template_hdr->saved_state_array[crbEntry->state_index_v]
5210					= read_value;
5211			opcode &= ~QL_DBG_OPCODE_RDSTATE;
5212		}
5213
5214		if (opcode & QL_DBG_OPCODE_WRSTATE) {
5215			/*
5216			 * decide which value to use.
5217			 */
5218			if (crbEntry->state_index_v) {
5219				read_value = template_hdr->saved_state_array[
5220						crbEntry->state_index_v];
5221			} else {
5222				read_value = crbEntry->value_1;
5223			}
5224			/*
5225			 * decide which address to use.
5226			 */
5227			if (crbEntry->state_index_a) {
5228				addr = template_hdr->saved_state_array[
						crbEntry->state_index_a];
5230			} else {
5231				addr = entry_addr;
5232			}
5233
5234                	ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
5235			if (ret)
5236				return (0);
5237
5238			opcode &= ~QL_DBG_OPCODE_WRSTATE;
5239		}
5240
5241		if (opcode & QL_DBG_OPCODE_MDSTATE) {
5242			/*  Read value from saved state using index */
5243			read_value = template_hdr->saved_state_array[
5244						crbEntry->state_index_v];
5245
5246			read_value <<= crbEntry->shl; /*Shift left operation */
5247			read_value >>= crbEntry->shr; /*Shift right operation */
5248
5249			if (crbEntry->value_2) {
5250				/* check if AND mask is provided */
5251				read_value &= crbEntry->value_2;
5252			}
5253
5254			read_value |= crbEntry->value_3; /* OR operation */
5255			read_value += crbEntry->value_1; /* increment op */
5256
5257			/* Write value back to state area. */
5258
5259			template_hdr->saved_state_array[crbEntry->state_index_v]
5260					= read_value;
5261			opcode &= ~QL_DBG_OPCODE_MDSTATE;
5262		}
5263
5264		entry_addr += crbEntry->addr_stride;
5265	}
5266
5267	return (0);
5268}
5269
5270/*
5271 * Handling rd poll entry.
5272 */
5273
5274static uint32_t
5275ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry,
5276	uint32_t *data_buff)
5277{
5278        int ret;
5279        int loop_cnt;
5280        uint32_t op_count, select_addr, select_value_stride, select_value;
5281        uint32_t read_addr, poll, mask, data_size, data;
5282        uint32_t wait_count = 0;
5283
5284        select_addr            = entry->select_addr;
5285        read_addr              = entry->read_addr;
5286        select_value           = entry->select_value;
5287        select_value_stride    = entry->select_value_stride;
5288        op_count               = entry->op_count;
5289        poll                   = entry->poll;
5290        mask                   = entry->mask;
5291        data_size              = entry->data_size;
5292
5293        for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
5294
5295                ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0);
5296		if (ret)
5297			return (0);
5298
5299                wait_count = 0;
5300
5301                while (wait_count < poll) {
5302
5303                        uint32_t temp;
5304
5305			ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1);
5306			if (ret)
5307				return (0);
5308
5309                        if ( (temp & mask) != 0 ) {
5310                                break;
5311                        }
5312                        wait_count++;
5313                }
5314
5315                if (wait_count == poll) {
5316                        device_printf(ha->pci_dev,
5317				"%s: Error in processing entry\n", __func__);
5318                        device_printf(ha->pci_dev,
5319				"%s: wait_count <0x%x> poll <0x%x>\n",
5320				__func__, wait_count, poll);
5321                        return 0;
5322                }
5323
5324		ret = ql_rdwr_indreg32(ha, read_addr, &data, 1);
5325		if (ret)
5326			return (0);
5327
5328                *data_buff++ = select_value;
5329                *data_buff++ = data;
5330                select_value = select_value + select_value_stride;
5331        }
5332
        /*
         * return the amount of data (in bytes) written to the capture buffer
         */
5336        return (loop_cnt * (2 * sizeof(uint32_t)));
5337}
5338
5339
5340/*
5341 * Handling rd modify write poll entry.
5342 */
5343
5344static uint32_t
5345ql_pollrd_modify_write(qla_host_t *ha,
5346	ql_minidump_entry_rd_modify_wr_with_poll_t *entry,
5347	uint32_t *data_buff)
5348{
5349	int ret;
5350        uint32_t addr_1, addr_2, value_1, value_2, data;
5351        uint32_t poll, mask, data_size, modify_mask;
5352        uint32_t wait_count = 0;
5353
5354        addr_1		= entry->addr_1;
5355        addr_2		= entry->addr_2;
5356        value_1		= entry->value_1;
5357        value_2		= entry->value_2;
5358
5359        poll		= entry->poll;
5360        mask		= entry->mask;
5361        modify_mask	= entry->modify_mask;
5362        data_size	= entry->data_size;
5363
5364
5365	ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0);
5366	if (ret)
5367		return (0);
5368
5369        wait_count = 0;
5370        while (wait_count < poll) {
5371
5372		uint32_t temp;
5373
5374		ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5375		if (ret)
5376			return (0);
5377
5378                if ( (temp & mask) != 0 ) {
5379                        break;
5380                }
5381                wait_count++;
5382        }
5383
5384        if (wait_count == poll) {
                device_printf(ha->pci_dev, "%s: Error in processing entry\n",
5386			__func__);
5387        } else {
5388
5389		ret = ql_rdwr_indreg32(ha, addr_2, &data, 1);
5390		if (ret)
5391			return (0);
5392
5393                data = (data & modify_mask);
5394
5395		ret = ql_rdwr_indreg32(ha, addr_2, &data, 0);
5396		if (ret)
5397			return (0);
5398
5399		ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0);
5400		if (ret)
5401			return (0);
5402
5403                /* Poll again */
5404                wait_count = 0;
5405                while (wait_count < poll) {
5406
5407                        uint32_t temp;
5408
5409			ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1);
5410			if (ret)
5411				return (0);
5412
5413                        if ( (temp & mask) != 0 ) {
5414                                break;
5415                        }
5416                        wait_count++;
5417                }
5418                *data_buff++ = addr_2;
5419                *data_buff++ = data;
5420        }
5421
        /*
         * return the amount of data (in bytes) written to the capture buffer
         */
5425        return (2 * sizeof(uint32_t));
5426}
5427
5428
5429