/* ql_ioctl.c revision 324762 */
1/*
2 * Copyright (c) 2013-2016 Qlogic Corporation
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27/*
28 * File: ql_ioctl.c
29 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_ioctl.c 324762 2017-10-19 17:30:20Z davidcs $");
34
35
36#include "ql_os.h"
37#include "ql_hw.h"
38#include "ql_def.h"
39#include "ql_inline.h"
40#include "ql_glbl.h"
41#include "ql_ioctl.h"
42#include "ql_ver.h"
43#include "ql_dbg.h"
44
45static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
46static uint32_t ql_drvr_state_size(qla_host_t *ha);
47static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
48		struct thread *td);
49
/*
 * Character-device switch for the per-interface control node created by
 * ql_make_cdev(); only the ioctl entry point is implemented.
 */
static struct cdevsw qla_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = ql_eioctl,
	.d_name = "qlcnic",
};
55
56int
57ql_make_cdev(qla_host_t *ha)
58{
59        ha->ioctl_dev = make_dev(&qla_cdevsw,
60				ha->ifp->if_dunit,
61                                UID_ROOT,
62                                GID_WHEEL,
63                                0600,
64                                "%s",
65                                if_name(ha->ifp));
66
67	if (ha->ioctl_dev == NULL)
68		return (-1);
69
70        ha->ioctl_dev->si_drv1 = ha;
71
72	return (0);
73}
74
75void
76ql_del_cdev(qla_host_t *ha)
77{
78	if (ha->ioctl_dev != NULL)
79		destroy_dev(ha->ioctl_dev);
80	return;
81}
82
/*
 * ql_eioctl
 *	ioctl handler for the control cdev. Dispatches the QLA_* management
 *	commands: register access, flash read/write/erase, off-chip memory
 *	access, firmware minidump retrieval, driver-state snapshot, and PCI
 *	ID reporting.
 *
 *	Returns 0 on success or an errno; most lower-level failures are
 *	collapsed to ENXIO regardless of the underlying cause.
 */
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
        qla_host_t *ha;
        int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;
	/* One overlay per command; 'data' is cast to the matching view. */
        union {
		qla_reg_val_t *rv;
	        qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;


	/* Softc was stashed in si_drv1 by ql_make_cdev(). */
        if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
                return ENXIO;

	pci_dev= ha->pci_dev;

        switch(cmd) {

        case QLA_RDWR_REG:

                u.rv = (qla_reg_val_t *)data;

		/* 'direct' selects BAR-mapped access vs. indirect window. */
                if (u.rv->direct) {
                        if (u.rv->rd) {
                                u.rv->val = READ_REG32(ha, u.rv->reg);
                        } else {
                                WRITE_REG32(ha, u.rv->reg, u.rv->val);
                        }
                } else {
                        if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
                                u.rv->rd)))
                                rval = ENXIO;
                }
                break;

        case QLA_RD_FLASH:

		/* Flash access requires a valid flash descriptor table. */
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

                u.rdf = (qla_rd_flash_t *)data;
                if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
                        rval = ENXIO;
                break;

	case QLA_WR_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		/* Refuse to write flash while the interface is running. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
			u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:

		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		/* Same guard as QLA_WR_FLASH: interface must be down. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off,
			u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		/* 128-bit access staged through a local value on success. */
		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
			u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}

		break;

	case QLA_RD_FW_DUMP_SIZE:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		/* Dump image = template followed by the capture buffer. */
		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
						ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;

		break;

	case QLA_RD_FW_DUMP:

		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		/* Caller must supply a buffer of exactly the advertised size. */
		if ((fw_dump->minidump == NULL) ||
			(fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
				ha->hw.mdump_template_size))) {
			rval = EINVAL;
			break;
		}

		/*
		 * If no dump has been captured yet, request recovery —
		 * presumably the recovery path produces the minidump
		 * (TODO confirm against the recovery task).
		 */
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			if (!ha->hw.mdump_done)
				ha->qla_initiate_recovery = 1;
			QLA_UNLOCK(ha, __func__);
		} else {
			rval = ENXIO;
			break;
		}

#define QLNX_DUMP_WAIT_SECS	30

		/* Poll for dump completion in 100ms steps, up to 30s. */
		count = QLNX_DUMP_WAIT_SECS * 1000;

		while (count) {
			if (ha->hw.mdump_done)
				break;
			qla_mdelay(__func__, 100);
			count -= 100;
		}

		if (!ha->hw.mdump_done) {
			rval = ENXIO;
			break;
		}

		/* Consume the dump: clear the done flag under the lock. */
		if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
			ha->hw.mdump_done = 0;
			QLA_UNLOCK(ha, __func__);
		} else {
			rval = ENXIO;
			break;
		}

		/* Copy out template first, then the capture buffer after it. */
		if ((rval = copyout(ha->hw.mdump_template,
			fw_dump->minidump, ha->hw.mdump_template_size))) {
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
				((uint8_t *)fw_dump->minidump +
					ha->hw.mdump_template_size),
				ha->hw.mdump_buffer_size)))
			rval = ENXIO;
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		/* Report PCI identity of the underlying device. */
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

        default:
		/* Unknown commands succeed silently (rval stays 0). */
                break;
        }

        return rval;
}
305
306
307static int
308ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
309{
310	int rval = 0;
311	uint32_t drvr_state_size;
312	qla_drvr_state_hdr_t *hdr;
313
314	drvr_state_size = ql_drvr_state_size(ha);
315
316	if (state->buffer == NULL) {
317		state->size = drvr_state_size;
318		return (0);
319	}
320
321	if (state->size < drvr_state_size)
322		return (ENXIO);
323
324	if (ha->hw.drvr_state == NULL)
325		return (ENOMEM);
326
327	hdr = ha->hw.drvr_state;
328
329	if (!hdr->drvr_version_major)
330		ql_capture_drvr_state(ha);
331
332	rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);
333
334	bzero(ha->hw.drvr_state, drvr_state_size);
335
336	return (rval);
337}
338
339static uint32_t
340ql_drvr_state_size(qla_host_t *ha)
341{
342	uint32_t drvr_state_size;
343	uint32_t size;
344
345	size = sizeof (qla_drvr_state_hdr_t);
346	drvr_state_size = QL_ALIGN(size, 64);
347
348	size =  ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
349	drvr_state_size += QL_ALIGN(size, 64);
350
351	size =  ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
352	drvr_state_size += QL_ALIGN(size, 64);
353
354	size =  ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
355	drvr_state_size += QL_ALIGN(size, 64);
356
357	size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
358	drvr_state_size += QL_ALIGN(size, 64);
359
360	size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS * ha->hw.num_rds_rings;
361	drvr_state_size += QL_ALIGN(size, 64);
362
363	size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
364			ha->hw.num_sds_rings;
365	drvr_state_size += QL_ALIGN(size, 64);
366
367	return (drvr_state_size);
368}
369
370static void
371ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
372{
373	int i;
374
375	for (i = 0; i < ha->hw.num_tx_rings; i++) {
376		tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
377		tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
378		tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
379		tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
380		tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
381		tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
382		tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
383		tx_state++;
384	}
385	return;
386}
387
388static void
389ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
390{
391	int i;
392
393	for (i = 0; i < ha->hw.num_rds_rings; i++) {
394		rx_state->prod_std = ha->hw.rds[i].prod_std;
395		rx_state->rx_next = ha->hw.rds[i].rx_next;
396		rx_state++;
397	}
398	return;
399}
400
401static void
402ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
403{
404	int i;
405
406	for (i = 0; i < ha->hw.num_sds_rings; i++) {
407		sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
408		sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
409		sds_state++;
410	}
411	return;
412}
413
/*
 * ql_capture_drvr_state
 *	Fill ha->hw.drvr_state with a snapshot of driver and hardware state:
 *	a header (versions, MAC, link info), per-ring bookkeeping records,
 *	and raw copies of every tx/rx/status descriptor ring. Section offsets
 *	are 64-byte aligned and recorded in the header; the layout must match
 *	the sizing in ql_drvr_state_size(). No-op if the buffer was never
 *	allocated.
 */
void
ql_capture_drvr_state(qla_host_t *ha)
{
	uint8_t *state_buffer;
	uint8_t *ptr;
	uint32_t drvr_state_size;
	qla_drvr_state_hdr_t *hdr;
	uint32_t size;
	int i;

	drvr_state_size = ql_drvr_state_size(ha);

	state_buffer =  ha->hw.drvr_state;

	if (state_buffer == NULL)
		return;

	bzero(state_buffer, drvr_state_size);

	hdr = (qla_drvr_state_hdr_t *)state_buffer;

	/*
	 * A non-zero major version marks the buffer as populated;
	 * ql_drvr_state() keys off this field to decide whether to
	 * trigger a fresh capture.
	 */
	hdr->drvr_version_major = QLA_VERSION_MAJOR;
	hdr->drvr_version_minor = QLA_VERSION_MINOR;
	hdr->drvr_version_build = QLA_VERSION_BUILD;

	bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

	hdr->link_speed = ha->hw.link_speed;
	hdr->cable_length = ha->hw.cable_length;
	hdr->cable_oui = ha->hw.cable_oui;
	hdr->link_up = ha->hw.link_up;
	hdr->module_type = ha->hw.module_type;
	hdr->link_faults = ha->hw.link_faults;
	hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
	hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

	/* Per-ring tx bookkeeping follows the (aligned) header. */
	size = sizeof (qla_drvr_state_hdr_t);
	hdr->tx_state_offset = QL_ALIGN(size, 64);

	ptr = state_buffer + hdr->tx_state_offset;

	ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

	/* Rx bookkeeping follows the tx records. */
	size =  ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
	hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rx_state_offset;

	ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

	/* Status-ring bookkeeping follows the rx records. */
	size =  ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
	hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->sds_state_offset;

	ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

	/* Raw tx descriptor rings (all rings contiguous in one DMA buffer). */
	size =  ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
	hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->txr_offset;

	hdr->num_tx_rings = ha->hw.num_tx_rings;
	hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
	hdr->txr_entries = NUM_TX_DESCRIPTORS;

	size = hdr->num_tx_rings * hdr->txr_size;
	bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

	/* Raw rx descriptor rings, one DMA buffer per ring. */
	hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
	ptr = state_buffer + hdr->rxr_offset;

	hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
	hdr->rxr_entries = NUM_RX_DESCRIPTORS;
	hdr->num_rx_rings = ha->hw.num_rds_rings;

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
		ptr += hdr->rxr_size;
	}

	/* Raw status rings, one DMA buffer per ring. */
	size = hdr->rxr_size * hdr->num_rx_rings;
	hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
	hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
	hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
	hdr->num_sds_rings = ha->hw.num_sds_rings;

	ptr = state_buffer + hdr->sds_offset;
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr, hdr->sds_ring_size);
		ptr += hdr->sds_ring_size;
	}
	return;
}
505
506void
507ql_alloc_drvr_state_buffer(qla_host_t *ha)
508{
509	uint32_t drvr_state_size;
510
511	drvr_state_size = ql_drvr_state_size(ha);
512
513	ha->hw.drvr_state =  malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);
514
515	return;
516}
517
518void
519ql_free_drvr_state_buffer(qla_host_t *ha)
520{
521	if (ha->hw.drvr_state != NULL)
522		free(ha->hw.drvr_state, M_QLA83XXBUF);
523	return;
524}
525
526