/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File: ql_ioctl.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_ioctl.c 331722 2018-03-29 02:50:57Z eadler $");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_glbl.h"
#include "ql_ioctl.h"
#include "ql_ver.h"
#include "ql_dbg.h"

static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log);
static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
static uint32_t ql_drvr_state_size(qla_host_t *ha);
static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
		struct thread *td);

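/*
 * Character device switch for the per-interface control node: only an
 * ioctl entry point is provided; every control-plane operation below is
 * multiplexed through it.
 */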
static struct cdevsw qla_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = ql_eioctl,
	.d_name = "qlcnic",
};

int
ql_make_cdev(qla_host_t *ha)
{
	ha->ioctl_dev = make_dev(&qla_cdevsw,
				ha->ifp->if_dunit,
				UID_ROOT,
				GID_WHEEL,
				0600,
				"%s",
				if_name(ha->ifp));

	if (ha->ioctl_dev == NULL)
		return (-1);

	ha->ioctl_dev->si_drv1 = ha;

	return (0);
}

void
ql_del_cdev(qla_host_t *ha)
{
	if (ha->ioctl_dev != NULL)
		destroy_dev(ha->ioctl_dev);
	return;
}
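
/*
 * Userland usage sketch (illustrative only, not part of the driver).
 * The node created by ql_make_cdev() is named after the interface, so a
 * management tool would do something like the following, assuming the
 * request codes and structures from ql_ioctl.h and an interface "ql0":
 *
 *	int fd = open("/dev/ql0", O_RDWR);
 *	qla_rd_pci_ids_t ids;
 *
 *	if (fd >= 0 && ioctl(fd, QLA_RD_PCI_IDS, &ids) == 0)
 *		printf("vendor 0x%x device 0x%x\n", ids.ven_id, ids.dev_id);
 */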

static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return (ENXIO);

	pci_dev = ha->pci_dev;

	switch (cmd) {

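	/*
	 * QLA_RDWR_REG: register access; "direct" selects a raw BAR
	 * read/write, otherwise the indirect register window is used.
	 */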
	case QLA_RDWR_REG:

		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
				u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

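	/*
	 * Flash write/erase are refused while the interface is up
	 * (IFF_DRV_RUNNING) and require a valid flash descriptor table.
	 */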
	case QLA_WR_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
			u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off,
			u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

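	/*
	 * QLA_RDWR_MS_MEM: off-chip memory access through
	 * ql_rdwr_offchip_mem(); the datum travels as four 32-bit words.
	 */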
	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
			u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}

		break;

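	/*
	 * Minidump retrieval is a two-step protocol: QLA_RD_FW_DUMP_SIZE
	 * reports the required buffer size (template plus dump data), then
	 * QLA_RD_FW_DUMP triggers/collects the dump and copies it out.
	 */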
	case QLA_RD_FW_DUMP_SIZE:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
						ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;

		break;

	case QLA_RD_FW_DUMP:

		if (ha->hw.mdump_init == 0) {
			device_printf(pci_dev, "%s: minidump not initialized\n",
				__func__);
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		if ((fw_dump->minidump == NULL) ||
			(fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
				ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
				"%s: minidump buffer [%p] size = [%d, %d] invalid\n",
				__func__, fw_dump->minidump,
				fw_dump->minidump_size,
				(ha->hw.mdump_buffer_size +
					ha->hw.mdump_template_size));
			rval = EINVAL;
			break;
		}

		if ((ha->pci_func & 0x1)) {
			device_printf(pci_dev,
				"%s: minidump allowed only on Port0\n",
				__func__);
			rval = ENXIO;
			break;
		}

		fw_dump->saved = 1;

		if (ha->offline) {

			if (ha->enable_minidump)
				ql_minidump(ha);

			fw_dump->saved = 0;
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
					"%s: port offline: minidump failed\n",
					__func__);
				rval = ENXIO;
				break;
			}
		} else {

			if (QLA_LOCK(ha, __func__,
				QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
				if (!ha->hw.mdump_done) {
					fw_dump->saved = 0;
					QL_INITIATE_RECOVERY(ha);
					device_printf(pci_dev,
						"%s: recovery initiated to trigger minidump\n",
						__func__);
				}
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
					"%s: QLA_LOCK() failed (0)\n", __func__);
				rval = ENXIO;
				break;
			}

#define QLNX_DUMP_WAIT_SECS	30

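			/*
			 * The dump is produced asynchronously by the recovery
			 * path; poll mdump_done in 100ms steps for up to 30s.
			 */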
			count = QLNX_DUMP_WAIT_SECS * 1000;

			while (count) {
				if (ha->hw.mdump_done)
					break;
				qla_mdelay(__func__, 100);
				count -= 100;
			}

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
					"%s: port not offline: minidump failed\n",
					__func__);
				rval = ENXIO;
				break;
			}
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (QLA_LOCK(ha, __func__,
				QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
				ha->hw.mdump_done = 0;
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
					"%s: QLA_LOCK() failed (1)\n", __func__);
				rval = ENXIO;
				break;
			}
		}

		if ((rval = copyout(ha->hw.mdump_template,
			fw_dump->minidump, ha->hw.mdump_template_size))) {
			device_printf(pci_dev, "%s: template copyout failed\n",
				__func__);
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
				((uint8_t *)fw_dump->minidump +
					ha->hw.mdump_template_size),
				ha->hw.mdump_buffer_size))) {
			device_printf(pci_dev, "%s: minidump copyout failed\n",
				__func__);
			rval = ENXIO;
		}
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_SLOWPATH_LOG:
		rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return (rval);
}

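/*
 * QLA_RD_DRVR_STATE backend: a NULL buffer is a size query; otherwise
 * the current state is captured and copied out, and the kernel copy is
 * cleared so the next call takes a fresh snapshot.
 */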
static int
ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
{
	int rval = 0;
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	if (state->buffer == NULL) {
		state->size = drvr_state_size;
		return (0);
	}

	if (state->size < drvr_state_size)
		return (ENXIO);

	if (ha->hw.drvr_state == NULL)
		return (ENOMEM);

	ql_capture_drvr_state(ha);

	rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);

	bzero(ha->hw.drvr_state, drvr_state_size);

	return (rval);
}

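/*
 * The driver-state buffer is a sequence of 64-byte aligned regions:
 * header, per-ring tx/rx/sds software state, then raw copies of the tx,
 * rx and status descriptor rings. This must match the offsets recorded
 * by ql_capture_drvr_state() below.
 */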
static uint32_t
ql_drvr_state_size(qla_host_t *ha)
{
	uint32_t drvr_state_size;
	uint32_t size;

	size = sizeof (qla_drvr_state_hdr_t);
	drvr_state_size = QL_ALIGN(size, 64);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS *
			ha->hw.num_rds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
			ha->hw.num_sds_rings;
	drvr_state_size += QL_ALIGN(size, 64);

	return (drvr_state_size);
}

static void
ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
		tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
		tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
		tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
		tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
		tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
		tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
		tx_state++;
	}
	return;
}

static void
ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
{
	int i;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rx_state->prod_std = ha->hw.rds[i].prod_std;
		rx_state->rx_next = ha->hw.rds[i].rx_next;
		rx_state++;
	}
	return;
}

static void
ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
{
	int i;

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
		sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
		sds_state++;
	}
	return;
}

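/*
 * Capture at most one snapshot: a nonzero drvr_version_major in the
 * header marks an earlier capture that has not yet been read and
 * cleared, in which case only the "saved" flag is set.
 */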
void
ql_capture_drvr_state(qla_host_t *ha)
{
	uint8_t *state_buffer;
	uint8_t *ptr;
	qla_drvr_state_hdr_t *hdr;
	uint32_t size;
	int i;

	state_buffer = ha->hw.drvr_state;

	if (state_buffer == NULL)
		return;

	hdr = (qla_drvr_state_hdr_t *)state_buffer;

	hdr->saved = 0;

	if (hdr->drvr_version_major) {
		hdr->saved = 1;
		return;
	}

	hdr->usec_ts = qla_get_usec_timestamp();

	hdr->drvr_version_major = QLA_VERSION_MAJOR;
	hdr->drvr_version_minor = QLA_VERSION_MINOR;
	hdr->drvr_version_build = QLA_VERSION_BUILD;

	bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

	hdr->link_speed = ha->hw.link_speed;
	hdr->cable_length = ha->hw.cable_length;
	hdr->cable_oui = ha->hw.cable_oui;
	hdr->link_up = ha->hw.link_up;
	hdr->module_type = ha->hw.module_type;
	hdr->link_faults = ha->hw.link_faults;
	hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
	hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

	size = sizeof (qla_drvr_state_hdr_t);
	hdr->tx_state_offset = QL_ALIGN(size, 64);

	ptr = state_buffer + hdr->tx_state_offset;

	ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

	size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rx_state_offset;

	ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

	size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->sds_state_offset;

	ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

	size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->txr_offset;

	hdr->num_tx_rings = ha->hw.num_tx_rings;
	hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
	hdr->txr_entries = NUM_TX_DESCRIPTORS;

	size = hdr->num_tx_rings * hdr->txr_size;
	bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

	hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rxr_offset;

	hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
	hdr->rxr_entries = NUM_RX_DESCRIPTORS;
	hdr->num_rx_rings = ha->hw.num_rds_rings;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
		ptr += hdr->rxr_size;
	}

	size = hdr->rxr_size * hdr->num_rx_rings;
	hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
	hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
	hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
	hdr->num_sds_rings = ha->hw.num_sds_rings;

	ptr = state_buffer + hdr->sds_offset;
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr,
			hdr->sds_ring_size);
		ptr += hdr->sds_ring_size;
	}
	return;
}

void
ql_alloc_drvr_state_buffer(qla_host_t *ha)
{
	uint32_t drvr_state_size;

	drvr_state_size = ql_drvr_state_size(ha);

	ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.drvr_state != NULL)
		bzero(ha->hw.drvr_state, drvr_state_size);

	return;
}

void
ql_free_drvr_state_buffer(qla_host_t *ha)
{
	if (ha->hw.drvr_state != NULL)
		free(ha->hw.drvr_state, M_QLA83XXBUF);
	return;
}

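/*
 * Append one entry to the slowpath log ring. The index wraps via a
 * power-of-two mask, so NUM_LOG_ENTRIES must be a power of two.
 */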
void
ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
	uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3,
	uint32_t param4)
{
	qla_sp_log_entry_t *sp_e, *sp_log;

	if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop)
		return;

	mtx_lock(&ha->sp_log_lock);

	sp_e = &sp_log[ha->hw.sp_log_index];

	bzero(sp_e, sizeof (qla_sp_log_entry_t));

	sp_e->fmtstr_idx = fmtstr_idx;
	sp_e->num_params = num_params;

	sp_e->usec_ts = qla_get_usec_timestamp();

	sp_e->params[0] = param0;
	sp_e->params[1] = param1;
	sp_e->params[2] = param2;
	sp_e->params[3] = param3;
	sp_e->params[4] = param4;

	ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1);

	if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES)
		ha->hw.sp_log_num_entries++;

	mtx_unlock(&ha->sp_log_lock);

	return;
}

void
ql_alloc_sp_log_buffer(qla_host_t *ha)
{
	uint32_t size;

	size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES;

	ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT);

	if (ha->hw.sp_log != NULL)
		bzero(ha->hw.sp_log, size);

	ha->hw.sp_log_index = 0;
	ha->hw.sp_log_num_entries = 0;

	return;
}

void
ql_free_sp_log_buffer(qla_host_t *ha)
{
	if (ha->hw.sp_log != NULL)
		free(ha->hw.sp_log, M_QLA83XXBUF);
	return;
}

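/*
 * QLA_RD_SLOWPATH_LOG backend: copies the whole ring to userland along
 * with the next write index and entry count, so the consumer can put
 * the entries back in chronological order.
 */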
static int
ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
{
	int rval = 0;
	uint32_t size;

	if ((ha->hw.sp_log == NULL) || (log->buffer == NULL))
		return (EINVAL);

	size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES);

	mtx_lock(&ha->sp_log_lock);

	rval = copyout(ha->hw.sp_log, log->buffer, size);

	if (!rval) {
		log->next_idx = ha->hw.sp_log_index;
		log->num_entries = ha->hw.sp_log_num_entries;
	}
	device_printf(ha->pci_dev,
		"%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n",
		__func__, rval, log->buffer, log->next_idx, log->num_entries,
		size);
	mtx_unlock(&ha->sp_log_lock);

	return (rval);
}