qlnx_ioctl.c revision 337519
/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */


/*
 * File: qlnx_ioctl.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/qlnx/qlnxe/qlnx_ioctl.c 337519 2018-08-09 01:39:47Z davidcs $");

#include "qlnx_os.h"
#include "bcm_osal.h"

#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_dcbx_api.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#include <sys/smp.h>


static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
		struct thread *td);

static struct cdevsw qlnx_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = qlnx_eioctl,
	.d_name = "qlnxioctl",
};

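/*
 * qlnx_make_cdev
 *	Create the per-interface control device (/dev/<ifname>) through which
 *	the diagnostic/management ioctls below are issued; si_drv1 points
 *	back to the softc.
 */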
int
qlnx_make_cdev(qlnx_host_t *ha)
{
	ha->ioctl_dev = make_dev(&qlnx_cdevsw,
				ha->ifp->if_dunit,
				UID_ROOT,
				GID_WHEEL,
				0600,
				"%s",
				if_name(ha->ifp));

	if (ha->ioctl_dev == NULL)
		return (-1);

	ha->ioctl_dev->si_drv1 = ha;

	return (0);
}

void
qlnx_del_cdev(qlnx_host_t *ha)
{
	if (ha->ioctl_dev != NULL)
		destroy_dev(ha->ioctl_dev);
	return;
}

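/*
 * qlnx_grc_dump
 *	Capture the GRC register dump for one hw-function into the
 *	preallocated ha->grcdump[] buffer and return the dump size in dwords;
 *	if a dump is already pending for this hw-function, reuse it.
 */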
int
qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->grcdump_dwords[hwfn_index]) {
		/* the grcdump is already available */
		*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (rval);
	}

	if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
			ha->grcdump[hwfn_index],
			(ha->grcdump_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->grcdump_taken = 1;
	} else
		QL_DPRINT1(ha, "ecore_dbg_grc_dump failed [%d, 0x%x]\n",
			   hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}

static void
qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
	int i;

	grcdump->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++)
		grcdump->grcdump_size[i] = ha->grcdump_size[i];

	return;
}

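/*
 * qlnx_get_grc_dump
 *	QLNX_GRC_DUMP ioctl handler: take (or reuse) the GRC dump for every
 *	hw-function and copy it out to the user supplied buffers.
 */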
static int
qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;

	grcdump->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
			(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
			return (EINVAL);

		rval = qlnx_grc_dump(ha, &dwords, i);

		if (rval)
			break;

		grcdump->grcdump_dwords[i] = dwords;

		QL_DPRINT1(ha, "grcdump_dwords[%d] = 0x%x\n", i, dwords);

		rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
				ha->grcdump_size[i]);

		if (rval)
			break;

		ha->grcdump_dwords[i] = 0;
	}

	ha->grcdump_taken = 0;

	return (rval);
}

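/*
 * qlnx_idle_chk
 *	Run the firmware idle-check dump for one hw-function into the
 *	preallocated ha->idle_chk[] buffer and return the dump size in
 *	dwords; a previously taken dump is reused if still pending.
 */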
int
qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
	int rval = EINVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	if (ha->idle_chk_dwords[hwfn_index]) {
		/* the idle check is already available */
		*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
		return (0);
	}

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
		return (rval);
	}

	if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
			ha->idle_chk[hwfn_index],
			(ha->idle_chk_size[hwfn_index] >> 2),
			num_dumped_dwords)) == DBG_STATUS_OK) {
		rval = 0;
		ha->idle_chk_taken = 1;
	} else
		QL_DPRINT1(ha, "ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
			   hwfn_index, rval);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}

static void
qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
	int i;

	idle_chk->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++)
		idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];

	return;
}

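/*
 * qlnx_get_idle_chk
 *	QLNX_IDLE_CHK ioctl handler: run the idle-check for every hw-function
 *	and copy the results out to user space.
 */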
static int
qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;

	idle_chk->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		if ((ha->idle_chk[i] == NULL) ||
				(idle_chk->idle_chk[i] == NULL) ||
				(idle_chk->idle_chk_size[i] <
					ha->idle_chk_size[i]))
			return (EINVAL);

		rval = qlnx_idle_chk(ha, &dwords, i);

		if (rval)
			break;

		idle_chk->idle_chk_dwords[i] = dwords;

		QL_DPRINT1(ha, "idle_chk_dwords[%d] = 0x%x\n", i, dwords);

		rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
				ha->idle_chk_size[i]);

		if (rval)
			break;

		ha->idle_chk_dwords[i] = 0;
	}
	ha->idle_chk_taken = 0;

	return (rval);
}

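/*
 * qlnx_get_trace_cmd_size
 *	Return the buffer size (in bytes) required for the requested debug
 *	dump (MCP trace, reg/IGU fifo, protection override or fw asserts)
 *	on the given hw-function; returns 0 on failure.
 */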
static uint32_t
qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x] failed\n",
			   hwfn_index, cmd);
		return (0);
	}

	switch (cmd) {

	case QLNX_MCP_TRACE:
		rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_REG_FIFO:
		rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_IGU_FIFO:
		rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
		rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;

	case QLNX_FW_ASSERTS:
		rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
				p_ptt, &num_dwords);
		break;
	}

	if (rval != DBG_STATUS_OK) {
		QL_DPRINT1(ha, "cmd = 0x%x failed [0x%x]\n", cmd, rval);
		num_dwords = 0;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return ((num_dwords * sizeof (uint32_t)));
}

static void
qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
{
	int i;

	trace->pci_func = ha->pci_func;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
	}

	return;
}

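/*
 * qlnx_get_trace
 *	Collect the requested debug dump for one hw-function into a temporary
 *	kernel buffer and copy the dumped dwords out to the caller.
 */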
static int
qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
{
	int rval = -1;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	uint32_t num_dwords = 0;
	void *buffer;

	buffer = qlnx_zalloc(trace->size[hwfn_index]);
	if (buffer == NULL) {
		QL_DPRINT1(ha, "qlnx_zalloc [%d, 0x%x] failed\n",
			   hwfn_index, trace->cmd);
		return (ENXIO);
	}
	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x] failed\n",
			   hwfn_index, trace->cmd);
		/* release the temporary dump buffer before bailing out */
		free(buffer, M_QLNXBUF);
		return (ENXIO);
	}

	switch (trace->cmd) {

	case QLNX_MCP_TRACE:
		rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_REG_FIFO:
		rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_IGU_FIFO:
		rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_PROTECTION_OVERRIDE:
		rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;

	case QLNX_FW_ASSERTS:
		rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
				buffer, (trace->size[hwfn_index] >> 2),
				&num_dwords);
		break;
	}

	if (rval != DBG_STATUS_OK) {
		QL_DPRINT1(ha, "cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
		num_dwords = 0;
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	trace->dwords[hwfn_index] = num_dwords;

	if (num_dwords) {
		rval = copyout(buffer, trace->buffer[hwfn_index],
				(num_dwords << 2));
	}

	/* the dump has been copied out (or failed); free the local buffer */
	free(buffer, M_QLNXBUF);

	return (rval);
}

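/*
 * qlnx_reg_rd_wr
 *	QLNX_RD_WR_REG ioctl handler: direct 32-bit register read/write on
 *	the selected hw-function.
 */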
static int
qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
{
	int			rval = 0;
	struct ecore_hwfn	*p_hwfn;

	if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
		return (EINVAL);
	}

	p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];

	switch (reg_rd_wr->cmd) {

		case QLNX_REG_READ_CMD:
			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
				reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
							reg_rd_wr->addr);
			}
			break;

		case QLNX_REG_WRITE_CMD:
			if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
				qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
					reg_rd_wr->val);
			}
			break;

		default:
			rval = EINVAL;
			break;
	}

	return (rval);
}

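/*
 * qlnx_rd_wr_pci_config
 *	QLNX_RD_WR_PCICFG ioctl handler: read or write the PCI config space
 *	register at the given offset and access width.
 */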
static int
qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
{
	int rval = 0;

	switch (pci_cfg_rd_wr->cmd) {

		case QLNX_PCICFG_READ:
			pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
						pci_cfg_rd_wr->reg,
						pci_cfg_rd_wr->width);
			break;

		case QLNX_PCICFG_WRITE:
			pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
				pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
			break;

		default:
			rval = EINVAL;
			break;
	}

	return (rval);
}

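/*
 * qlnx_mac_addr
 *	Return the permanent (primary) MAC address as a printable
 *	"xx:xx:xx:xx:xx:xx" string for the QLNX_MAC_ADDR ioctl.
 */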
static void
qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
{
	bzero(mac_addr->addr, sizeof(mac_addr->addr));
	snprintf(mac_addr->addr, sizeof(mac_addr->addr),
		"%02x:%02x:%02x:%02x:%02x:%02x",
		ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
		ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);

	return;
}

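/*
 * qlnx_get_regs
 *	QLNX_GET_REGS ioctl handler: concatenate the GRC dumps of all
 *	hw-functions into the caller's register buffer.
 */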
static int
qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
{
	int		i;
	int		rval = 0;
	uint32_t	dwords = 0;
	uint8_t		*outb;

	regs->reg_buf_len = 0;
	outb = regs->reg_buf;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		rval = qlnx_grc_dump(ha, &dwords, i);

		if (rval)
			break;

		regs->reg_buf_len += (dwords << 2);

		rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);

		if (rval)
			break;

		ha->grcdump_dwords[i] = 0;

		/* advance by this hwfn's dump size, not the running total */
		outb += ha->grcdump_size[i];
	}

	ha->grcdump_taken = 0;

	return (rval);
}

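/*
 * qlnx_drv_info
 *	Fill in driver name/version, MFW and storm firmware versions, flash
 *	size, register dump length and PCI bus location for the
 *	QLNX_DRV_INFO ioctl.
 */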
static int
qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
{
	int i;
	extern char qlnx_name_str[];
	extern char qlnx_ver_str[];

	bzero(drv_info, sizeof(qlnx_drvinfo_t));

	snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
		qlnx_name_str);
	snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
		qlnx_ver_str);
	snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
		ha->mfw_ver);
	snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
		"%s", ha->stormfw_ver);

	drv_info->eeprom_dump_len = ha->flash_size;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		drv_info->reg_dump_len += ha->grcdump_size[i];
	}

	snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
		"%d:%d:%d", pci_get_bus(ha->pci_dev),
		pci_get_slot(ha->pci_dev), ha->pci_func);

	return (0);
}

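/*
 * qlnx_dev_settings
 *	Report the current link capabilities, speed, duplex and autoneg
 *	state (taken from hw-function 0) for the QLNX_DEV_SETTING ioctl.
 */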
static int
qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
{
	struct ecore_hwfn *p_hwfn;
	struct qlnx_link_output if_link;

	p_hwfn = &ha->cdev.hwfns[0];

	qlnx_fill_link(ha, p_hwfn, &if_link);

	dev_info->supported = if_link.supported_caps;
	dev_info->advertising = if_link.advertised_caps;
	dev_info->speed = if_link.speed;
	dev_info->duplex = if_link.duplex;
	dev_info->port = ha->pci_func & 0x1;
	dev_info->autoneg = if_link.autoneg;

	return (0);
}

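/*
 * qlnx_write_nvram
 *	Copy the user supplied image into a kernel buffer and issue the
 *	given NVRAM/flash write command through the management firmware.
 */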
static int
qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = copyin(nvram->data, buf, nvram->data_len);

	QL_DPRINT9(ha, "issue cmd = 0x%x data = %p"
		" data_len = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ret);

	if (ret == 0) {
		ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
			nvram->offset, buf, nvram->data_len);
	}

	QL_DPRINT9(ha, "cmd = 0x%x data = %p"
		" data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
		cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	free(buf, M_QLNXBUF);

	return (ret);
}

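/*
 * qlnx_read_nvram
 *	Read nvram->data_len bytes from flash at nvram->offset and copy
 *	them out to the caller.
 */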
static int
qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
		nvram->data_len);

	QL_DPRINT9(ha, "data = %p data_len = 0x%x"
		" resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}

static int
qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	uint8_t *buf;
	int ret = 0;

	if ((nvram->data == NULL) || (nvram->data_len == 0))
		return (EINVAL);

	buf = qlnx_zalloc(nvram->data_len);

	ret = ecore_mcp_nvm_resp(&ha->cdev, buf);

	QL_DPRINT9(ha, "data = %p data_len = 0x%x"
		" resp = 0x%x ret = 0x%x exit\n",
		nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);

	if (ret == 0) {
		ret = copyout(buf, nvram->data, nvram->data_len);
	}

	free(buf, M_QLNXBUF);

	return (ret);
}

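/*
 * qlnx_nvram
 *	QLNX_NVRAM ioctl dispatcher for the individual flash read/write,
 *	file, secure-mode and response sub-commands.
 */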
static int
qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
	int ret = 0;

	switch (nvram->cmd) {

	case QLNX_NVRAM_CMD_WRITE_NVRAM:
		ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_DATA:
		ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
		break;

	case QLNX_NVRAM_CMD_READ_NVRAM:
		ret = qlnx_read_nvram(ha, nvram);
		break;

	case QLNX_NVRAM_CMD_SET_SECURE_MODE:
		ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE"
			" resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_DEL_FILE:
		ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE"
			" resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
		ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);

		QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN"
			" resp = 0x%x ret = 0x%x exit\n",
			ha->cdev.mcp_nvm_resp, ret);
		break;

	case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
		ret = qlnx_get_nvram_resp(ha, nvram);
		break;

	default:
		ret = EINVAL;
		break;
	}

	return (ret);
}

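/*
 * qlnx_storm_stats
 *	QLNX_STORM_STATS ioctl handler: copy the accumulated per hw-function
 *	storm statistics samples out to user space and reset the sample
 *	index once a full set has been consumed.
 */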
static void
qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
{
	int i;
	int index;
	int ret;
	int stats_copied = 0;

	s_stats->num_hwfns = ha->cdev.num_hwfns;

//	if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
//		return;

	s_stats->num_samples = ha->storm_stats_index;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);

		if (s_stats->buffer[i]) {

			ret = copyout(&ha->storm_stats[index],
					s_stats->buffer[i],
					QLNX_STORM_STATS_BYTES_PER_HWFN);
			if (ret) {
				printf("%s [%d]: failed\n", __func__, i);
			}

			if (s_stats->num_samples ==
				QLNX_STORM_STATS_SAMPLES_PER_HWFN) {

				/*
				 * Clear the samples just copied out for this
				 * hwfn; its region starts at 'index'.
				 */
				bzero((void *)&ha->storm_stats[index],
					QLNX_STORM_STATS_BYTES_PER_HWFN);

				stats_copied = 1;
			}
		}
	}

	if (stats_copied)
		ha->storm_stats_index = 0;

	return;
}

#ifdef QLNX_USER_LLDP

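/*
 * qlnx_lldp_configure
 *	Enable or disable LLDP transmit/receive on the nearest-bridge agent,
 *	building the chassis-id and port-id TLVs from the LLDP MAC address
 *	reported by the management firmware.
 */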
static int
qlnx_lldp_configure(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt, uint32_t enable)
{
	int ret = 0;
	uint8_t lldp_mac[6] = {0};
	struct ecore_lldp_config_params lldp_params;
	struct ecore_lldp_sys_tlvs tlv_params;

	ret = ecore_mcp_get_lldp_mac(p_hwfn, p_ptt, lldp_mac);

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_mcp_get_lldp_mac failed\n", __func__);
		return (-1);
	}

	bzero(&lldp_params, sizeof(struct ecore_lldp_config_params));
	bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

	lldp_params.agent = ECORE_LLDP_NEAREST_BRIDGE;
	lldp_params.tx_interval = 30; //Default value used as suggested by MFW
	lldp_params.tx_hold = 4; //Default value used as suggested by MFW
	lldp_params.tx_credit = 5; //Default value used as suggested by MFW
	lldp_params.rx_enable = enable ? 1 : 0;
	lldp_params.tx_enable = enable ? 1 : 0;

	lldp_params.chassis_id_tlv[0] = 0;
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_TYPE_CHASSIS_ID << 1);
	lldp_params.chassis_id_tlv[0] |=
		((QLNX_LLDP_CHASSIS_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_CHASSIS_ID_MAC_ADDR_LEN) << 8);
	lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_CHASSIS_ID_SUBTYPE_MAC << 16);
	lldp_params.chassis_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.chassis_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		 (lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.chassis_id_tlv[2] = lldp_mac[5];

	lldp_params.port_id_tlv[0] = 0;
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_TYPE_PORT_ID << 1);
	lldp_params.port_id_tlv[0] |=
		((QLNX_LLDP_PORT_ID_SUBTYPE_OCTETS +
			QLNX_LLDP_PORT_ID_MAC_ADDR_LEN) << 8);
	lldp_params.port_id_tlv[0] |= (QLNX_LLDP_PORT_ID_SUBTYPE_MAC << 16);
	lldp_params.port_id_tlv[0] |= lldp_mac[0] << 24;
	lldp_params.port_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
		 (lldp_mac[3] << 16) | (lldp_mac[4] << 24);
	lldp_params.port_id_tlv[2] = lldp_mac[5];

	ret = ecore_lldp_set_params(p_hwfn, p_ptt, &lldp_params);

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_params failed\n", __func__);
		return (-1);
	}

	//If LLDP is disabled, also clear the discard_mandatory_tlv flag
	if (!enable) {
		tlv_params.discard_mandatory_tlv = false;
		tlv_params.buf_size = 0;
		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
	}

	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: ecore_lldp_set_system_tlvs failed\n", __func__);
	}

	return (ret);
}

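/*
 * qlnx_register_default_lldp_tlvs
 *	Register the standard set of LLDP TLVs (chassis id, port id, TTL,
 *	port/system descriptions, capabilities, management address and
 *	organizationally specific TLVs) with the management firmware.
 */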
static int
qlnx_register_default_lldp_tlvs(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
	struct ecore_ptt *p_ptt)
{
	int ret = 0;

	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_CHASSIS_ID);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_CHASSIS_ID failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register Port ID TLV
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_ID);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_PORT_ID failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register TTL TLV
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_TTL);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_TTL failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register Port Description TLV
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_DESC);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_PORT_DESC failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register System Name TLV
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_NAME);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_SYS_NAME failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register System Description TLV
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_DESC);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_SYS_DESC failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register System Capabilities TLV
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_CAPS);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_SYS_CAPS failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register Management Address TLV
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_MGMT_ADDR);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_MGMT_ADDR failed\n", __func__);
		goto qlnx_register_default_lldp_tlvs_exit;
	}

	//register Organizationally Specific TLVs
	ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
			ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_ORG_SPECIFIC);
	if (ret != ECORE_SUCCESS) {
		device_printf(ha->pci_dev,
			"%s: QLNX_LLDP_TYPE_ORG_SPECIFIC failed\n", __func__);
	}

qlnx_register_default_lldp_tlvs_exit:
	return (ret);
}

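/*
 * qlnx_set_lldp_tlvx
 *	QLNX_SET_LLDP_TLVS ioctl handler: restart LLDP with the default TLV
 *	set and then apply the caller supplied system TLVs, if any.
 */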
int
qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs)
{
	int ret = 0;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	struct ecore_lldp_sys_tlvs tlv_params;

	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		device_printf(ha->pci_dev,
			"%s: ecore_ptt_acquire failed\n", __func__);
		return (ENXIO);
	}

	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 0);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure disable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	ret = qlnx_register_default_lldp_tlvs(ha, p_hwfn, p_ptt);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_register_default_lldp_tlvs failed\n",
			__func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 1);

	if (ret) {
		device_printf(ha->pci_dev,
			"%s: qlnx_lldp_configure enable failed\n", __func__);
		goto qlnx_set_lldp_tlvx_exit;
	}

	if (lldp_tlvs != NULL) {
		bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));

		tlv_params.discard_mandatory_tlv =
			(lldp_tlvs->discard_mandatory_tlv ? true : false);
		tlv_params.buf_size = lldp_tlvs->buf_size;
		memcpy(tlv_params.buf, lldp_tlvs->buf, lldp_tlvs->buf_size);

		ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: ecore_lldp_set_system_tlvs failed\n",
				__func__);
		}
	}

qlnx_set_lldp_tlvx_exit:

	ecore_ptt_release(p_hwfn, p_ptt);
	return (ret);
}

#endif /* #ifdef QLNX_USER_LLDP */

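/*
 * qlnx_eioctl
 *	ioctl entry point for the control device created in qlnx_make_cdev();
 *	dispatches the QLNX_* diagnostic and management commands.
 */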
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qlnx_host_t	*ha;
	int		rval = 0;
	struct ifnet	*ifp;
	qlnx_trace_t	*trace;
	int		i;

	if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
		return (ENXIO);

	ifp = ha->ifp;

	switch (cmd) {

	case QLNX_GRC_DUMP_SIZE:
		qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_GRC_DUMP:
		rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
		break;

	case QLNX_IDLE_CHK_SIZE:
		qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_IDLE_CHK:
		rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
		break;

	case QLNX_DRV_INFO:
		rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
		break;

	case QLNX_DEV_SETTING:
		rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
		break;

	case QLNX_GET_REGS:
		rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
		break;

	case QLNX_NVRAM:
		rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
		break;

	case QLNX_RD_WR_REG:
		rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
		break;

	case QLNX_RD_WR_PCICFG:
		rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
		break;

	case QLNX_MAC_ADDR:
		qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
		break;

	case QLNX_STORM_STATS:
		qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
		break;

	case QLNX_TRACE_SIZE:
		qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
		break;

	case QLNX_TRACE:
		trace = (qlnx_trace_t *)data;

		for (i = 0; i < ha->cdev.num_hwfns; i++) {

			if (trace->size[i] && trace->cmd && trace->buffer[i])
				rval = qlnx_get_trace(ha, i, trace);

			if (rval)
				break;
		}
		break;

#ifdef QLNX_USER_LLDP
	case QLNX_SET_LLDP_TLVS:
		rval = qlnx_set_lldp_tlvx(ha, (qlnx_lldp_sys_tlvs_t *)data);
		break;
#endif /* #ifdef QLNX_USER_LLDP */

	default:
		rval = EINVAL;
		break;
	}

	return (rval);
}