1/*
2 *   BSD LICENSE
3 *
4 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
5 *   All rights reserved.
6 *
7 *   Redistribution and use in source and binary forms, with or without
8 *   modification, are permitted provided that the following conditions
9 *   are met:
10 *
11 *     * Redistributions of source code must retain the above copyright
12 *       notice, this list of conditions and the following disclaimer.
13 *     * Redistributions in binary form must reproduce the above copyright
14 *       notice, this list of conditions and the following disclaimer in
15 *       the documentation and/or other materials provided with the
16 *       distribution.
17 *     * Neither the name of Cavium, Inc. nor the names of its
18 *       contributors may be used to endorse or promote products derived
19 *       from this software without specific prior written permission.
20 *
21 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33/*$FreeBSD: stable/11/sys/dev/liquidio/lio_main.c 335293 2018-06-17 17:38:24Z dim $*/
34
35#include "lio_bsd.h"
36#include "lio_common.h"
37
38#include "lio_droq.h"
39#include "lio_iq.h"
40#include "lio_response_manager.h"
41#include "lio_device.h"
42#include "lio_ctrl.h"
43#include "lio_main.h"
44#include "lio_network.h"
45#include "cn23xx_pf_device.h"
46#include "lio_image.h"
47#include "lio_ioctl.h"
48#include "lio_rxtx.h"
49#include "lio_rss.h"
50
51/* Number of milliseconds to wait for DDR initialization */
52#define LIO_DDR_TIMEOUT	10000
53#define LIO_MAX_FW_TYPE_LEN	8
54
55static char fw_type[LIO_MAX_FW_TYPE_LEN];
56TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));
57
58/*
59 * Integers that specify number of queues per PF.
60 * Valid range is 0 to 64.
61 * Use 0 to derive from CPU count.
62 */
63static int	num_queues_per_pf0;
64static int	num_queues_per_pf1;
65TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
66TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);
67
68#ifdef RSS
69static int	lio_rss = 1;
70TUNABLE_INT("hw.lio.rss", &lio_rss);
71#endif	/* RSS */
72
73/* Hardware LRO */
74unsigned int	lio_hwlro = 0;
75TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);
76
77/*
78 * Bitmask indicating which consoles have debug
79 * output redirected to syslog.
80 */
81static unsigned long	console_bitmask;
82TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);
83
84/*
85 * \brief determines if a given console has debug enabled.
86 * @param console console to check
87 * @returns  1 = enabled. 0 otherwise
88 */
89int
90lio_console_debug_enabled(uint32_t console)
91{
92
93	return (console_bitmask >> (console)) & 0x1;
94}
95
96static int	lio_detach(device_t dev);
97
98static int	lio_device_init(struct octeon_device *octeon_dev);
99static int	lio_chip_specific_setup(struct octeon_device *oct);
100static void	lio_watchdog(void *param);
101static int	lio_load_firmware(struct octeon_device *oct);
102static int	lio_nic_starter(struct octeon_device *oct);
103static int	lio_init_nic_module(struct octeon_device *oct);
104static int	lio_setup_nic_devices(struct octeon_device *octeon_dev);
105static int	lio_link_info(struct lio_recv_info *recv_info, void *ptr);
106static void	lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
107				    void *buf);
108static int	lio_set_rxcsum_command(struct ifnet *ifp, int command,
109				       uint8_t rx_cmd);
110static int	lio_setup_glists(struct octeon_device *oct, struct lio *lio,
111				 int num_iqs);
112static void	lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
113static inline void	lio_update_link_status(struct ifnet *ifp,
114					       union octeon_link_status *ls);
115static void	lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
116static int	lio_stop_nic_module(struct octeon_device *oct);
117static void	lio_destroy_resources(struct octeon_device *oct);
118static int	lio_setup_rx_oom_poll_fn(struct ifnet *ifp);
119
120static void	lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid);
121static void	lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp,
122				     uint16_t vid);
123static struct octeon_device *
124	lio_get_other_octeon_device(struct octeon_device *oct);
125
126static int	lio_wait_for_oq_pkts(struct octeon_device *oct);
127
128int	lio_send_rss_param(struct lio *lio);
129static int	lio_dbg_console_print(struct octeon_device *oct,
130				      uint32_t console_num, char *prefix,
131				      char *suffix);
132
133/* Polling interval for determining when NIC application is alive */
134#define LIO_STARTER_POLL_INTERVAL_MS	100
135
136/*
137 * vendor_info_array.
138 * This array contains the list of IDs on which the driver should load.
139 */
140struct lio_vendor_info {
141	uint16_t	vendor_id;
142	uint16_t	device_id;
143	uint16_t	subdevice_id;
144	uint8_t		revision_id;
145	uint8_t		index;
146};
147
148static struct lio_vendor_info lio_pci_tbl[] = {
149	/* CN2350 10G */
150	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE,
151		0x02, 0},
152
153	/* CN2350 10G */
154	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1,
155		0x02, 0},
156
157	/* CN2360 10G */
158	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE,
159		0x02, 1},
160
161	/* CN2350 25G */
162	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE,
163		0x02, 2},
164
165	/* CN2360 25G */
166	{PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE,
167		0x02, 3},
168
169	{0, 0, 0, 0, 0}
170};
171
172static char *lio_strings[] = {
173	"LiquidIO 2350 10GbE Server Adapter",
174	"LiquidIO 2360 10GbE Server Adapter",
175	"LiquidIO 2350 25GbE Server Adapter",
176	"LiquidIO 2360 25GbE Server Adapter",
177};
178
179struct lio_if_cfg_resp {
180	uint64_t	rh;
181	struct octeon_if_cfg_info cfg_info;
182	uint64_t	status;
183};
184
185struct lio_if_cfg_context {
186	int		octeon_id;
187	volatile int	cond;
188};
189
190struct lio_rx_ctl_context {
191	int		octeon_id;
192	volatile int	cond;
193};
194
195static int
196lio_probe(device_t dev)
197{
198	struct lio_vendor_info	*tbl;
199
200	uint16_t	vendor_id;
201	uint16_t	device_id;
202	uint16_t	subdevice_id;
203	uint8_t		revision_id;
204	char		device_ver[256];
205
206	vendor_id = pci_get_vendor(dev);
207	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
208		return (ENXIO);
209
210	device_id = pci_get_device(dev);
211	subdevice_id = pci_get_subdevice(dev);
212	revision_id = pci_get_revid(dev);
213
214	tbl = lio_pci_tbl;
215	while (tbl->vendor_id) {
216		if ((vendor_id == tbl->vendor_id) &&
217		    (device_id == tbl->device_id) &&
218		    (subdevice_id == tbl->subdevice_id) &&
219		    (revision_id == tbl->revision_id)) {
220			sprintf(device_ver, "%s, Version - %s",
221				lio_strings[tbl->index], LIO_VERSION);
222			device_set_desc_copy(dev, device_ver);
223			return (BUS_PROBE_DEFAULT);
224		}
225
226		tbl++;
227	}
228
229	return (ENXIO);
230}
231
232static int
233lio_attach(device_t device)
234{
235	struct octeon_device	*oct_dev = NULL;
236	uint64_t	scratch1;
237	uint32_t	error;
238	int		timeout, ret = 1;
239	uint8_t		bus, dev, function;
240
241	oct_dev = lio_allocate_device(device);
242	if (oct_dev == NULL) {
243		device_printf(device, "Error: Unable to allocate device\n");
244		return (-ENOMEM);
245	}
246
247	oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET;
248	oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET;
249	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
250
251	oct_dev->device = device;
252	bus = pci_get_bus(device);
253	dev = pci_get_slot(device);
254	function = pci_get_function(device);
255
256	lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n",
257		     pci_get_vendor(device), pci_get_device(device), bus, dev,
258		     function);
259
260	if (lio_device_init(oct_dev)) {
261		lio_dev_err(oct_dev, "Failed to init device\n");
262		lio_detach(device);
263		return (-ENOMEM);
264	}
265
266	scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
267	if (!(scratch1 & 4ULL)) {
268		/*
269		 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
270		 * the lio watchdog kernel thread is running for this
271		 * NIC.  Each NIC gets one watchdog kernel thread.
272		 */
273		scratch1 |= 4ULL;
274		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
275
276		error = kproc_create(lio_watchdog, oct_dev,
277				     &oct_dev->watchdog_task, 0, 0,
278				     "liowd/%02hhx:%02hhx.%hhx", bus,
279				     dev, function);
280		if (!error) {
281			kproc_resume(oct_dev->watchdog_task);
282		} else {
283			oct_dev->watchdog_task = NULL;
284			lio_dev_err(oct_dev,
285				    "failed to create kernel_thread\n");
286			lio_detach(device);
287			return (-1);
288		}
289	}
290	oct_dev->rx_pause = 1;
291	oct_dev->tx_pause = 1;
292
293	timeout = 0;
294	while (timeout < LIO_NIC_STARTER_TIMEOUT) {
295		lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS);
296		timeout += LIO_STARTER_POLL_INTERVAL_MS;
297
298		/*
299		 * During the boot process interrupts are not available.
300		 * So polling for first control message from FW.
301		 */
302		if (cold)
303			lio_droq_bh(oct_dev->droq[0], 0);
304
305		if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) {
306			ret = lio_nic_starter(oct_dev);
307			break;
308		}
309	}
310
311	if (ret) {
312		lio_dev_err(oct_dev, "Firmware failed to start\n");
313		lio_detach(device);
314		return (-EIO);
315	}
316
317	lio_dev_dbg(oct_dev, "Device is ready\n");
318
319	return (0);
320}
321
322static int
323lio_detach(device_t dev)
324{
325	struct octeon_device	*oct_dev = device_get_softc(dev);
326
327	lio_dev_dbg(oct_dev, "Stopping device\n");
328	if (oct_dev->watchdog_task) {
329		uint64_t	scratch1;
330
331		kproc_suspend(oct_dev->watchdog_task, 0);
332
333		scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1);
334		scratch1 &= ~4ULL;
335		lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1);
336	}
337
338	if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP))
339		lio_stop_nic_module(oct_dev);
340
341	/*
342	 * Reset the octeon device and cleanup all memory allocated for
343	 * the octeon device by  driver.
344	 */
345	lio_destroy_resources(oct_dev);
346
347	lio_dev_info(oct_dev, "Device removed\n");
348
349	/*
350	 * This octeon device has been removed. Update the global
351	 * data structure to reflect this. Free the device structure.
352	 */
353	lio_free_device_mem(oct_dev);
354	return (0);
355}
356
357static int
358lio_shutdown(device_t dev)
359{
360	struct octeon_device	*oct_dev = device_get_softc(dev);
361	struct lio	*lio = if_getsoftc(oct_dev->props.ifp);
362
363	lio_send_rx_ctrl_cmd(lio, 0);
364
365	return (0);
366}
367
368static int
369lio_suspend(device_t dev)
370{
371
372	return (ENXIO);
373}
374
375static int
376lio_resume(device_t dev)
377{
378
379	return (ENXIO);
380}
381
382static int
383lio_event(struct module *mod, int event, void *junk)
384{
385
386	switch (event) {
387	case MOD_LOAD:
388		lio_init_device_list(LIO_CFG_TYPE_DEFAULT);
389		break;
390	default:
391		break;
392	}
393
394	return (0);
395}
396
397/*********************************************************************
398 *  FreeBSD Device Interface Entry Points
399 * *******************************************************************/
400static device_method_t lio_methods[] = {
401	/* Device interface */
402	DEVMETHOD(device_probe, lio_probe),
403	DEVMETHOD(device_attach, lio_attach),
404	DEVMETHOD(device_detach, lio_detach),
405	DEVMETHOD(device_shutdown, lio_shutdown),
406	DEVMETHOD(device_suspend, lio_suspend),
407	DEVMETHOD(device_resume, lio_resume),
408	DEVMETHOD_END
409};
410
411static driver_t lio_driver = {
412	LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device),
413};
414
415devclass_t lio_devclass;
416DRIVER_MODULE(lio, pci, lio_driver, lio_devclass, lio_event, 0);
417
418MODULE_DEPEND(lio, pci, 1, 1, 1);
419MODULE_DEPEND(lio, ether, 1, 1, 1);
420MODULE_DEPEND(lio, firmware, 1, 1, 1);
421
422static bool
423fw_type_is_none(void)
424{
425	return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
426		       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
427}
428
429/*
430 * \brief Device initialization for each Octeon device that is probed
431 * @param octeon_dev  octeon device
432 */
433static int
434lio_device_init(struct octeon_device *octeon_dev)
435{
436	unsigned long	ddr_timeout = LIO_DDR_TIMEOUT;
437	char	*dbg_enb = NULL;
438	int	fw_loaded = 0;
439	int	i, j, ret;
440	uint8_t	bus, dev, function;
441	char	bootcmd[] = "\n";
442
443	bus = pci_get_bus(octeon_dev->device);
444	dev = pci_get_slot(octeon_dev->device);
445	function = pci_get_function(octeon_dev->device);
446
447	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE);
448
449	/* Enable access to the octeon device */
450	if (pci_enable_busmaster(octeon_dev->device)) {
451		lio_dev_err(octeon_dev, "pci_enable_device failed\n");
452		return (1);
453	}
454
455	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE);
456
457	/* Identify the Octeon type and map the BAR address space. */
458	if (lio_chip_specific_setup(octeon_dev)) {
459		lio_dev_err(octeon_dev, "Chip specific setup failed\n");
460		return (1);
461	}
462
463	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE);
464
465	/*
466	 * Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
467	 * since that is what is required for the reference to be removed
468	 * during de-initialization (see 'octeon_destroy_resources').
469	 */
470	lio_register_device(octeon_dev, bus, dev, function, true);
471
472
473	octeon_dev->app_mode = LIO_DRV_INVALID_APP;
474
475	if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) {
476		fw_loaded = 0;
477		/* Do a soft reset of the Octeon device. */
478		if (octeon_dev->fn_list.soft_reset(octeon_dev))
479			return (1);
480
481		/* things might have changed */
482		if (!lio_cn23xx_pf_fw_loaded(octeon_dev))
483			fw_loaded = 0;
484		else
485			fw_loaded = 1;
486	} else {
487		fw_loaded = 1;
488	}
489
490	/*
491	 * Initialize the dispatch mechanism used to push packets arriving on
492	 * Octeon Output queues.
493	 */
494	if (lio_init_dispatch_list(octeon_dev))
495		return (1);
496
497	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
498				 LIO_OPCODE_NIC_CORE_DRV_ACTIVE,
499				 lio_core_drv_init, octeon_dev);
500	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE);
501
502	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
503	if (ret) {
504		lio_dev_err(octeon_dev,
505			    "Failed to configure device registers\n");
506		return (ret);
507	}
508
509	/* Initialize soft command buffer pool */
510	if (lio_setup_sc_buffer_pool(octeon_dev)) {
511		lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n");
512		return (1);
513	}
514
515	atomic_store_rel_int(&octeon_dev->status,
516			     LIO_DEV_SC_BUFF_POOL_INIT_DONE);
517
518	if (lio_allocate_ioq_vector(octeon_dev)) {
519		lio_dev_err(octeon_dev,
520			    "IOQ vector allocation failed\n");
521		return (1);
522	}
523
524	atomic_store_rel_int(&octeon_dev->status,
525			     LIO_DEV_MSIX_ALLOC_VECTOR_DONE);
526
527	for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
528		octeon_dev->instr_queue[i] =
529			malloc(sizeof(struct lio_instr_queue),
530			       M_DEVBUF, M_NOWAIT | M_ZERO);
531		if (octeon_dev->instr_queue[i] == NULL)
532			return (1);
533	}
534
535	/* Setup the data structures that manage this Octeon's Input queues. */
536	if (lio_setup_instr_queue0(octeon_dev)) {
537		lio_dev_err(octeon_dev,
538			    "Instruction queue initialization failed\n");
539		return (1);
540	}
541
542	atomic_store_rel_int(&octeon_dev->status,
543			     LIO_DEV_INSTR_QUEUE_INIT_DONE);
544
545	/*
546	 * Initialize lists to manage the requests of different types that
547	 * arrive from user & kernel applications for this octeon device.
548	 */
549
550	if (lio_setup_response_list(octeon_dev)) {
551		lio_dev_err(octeon_dev, "Response list allocation failed\n");
552		return (1);
553	}
554
555	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE);
556
557	for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
558		octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]),
559					     M_DEVBUF, M_NOWAIT | M_ZERO);
560		if (octeon_dev->droq[i] == NULL)
561			return (1);
562	}
563
564	if (lio_setup_output_queue0(octeon_dev)) {
565		lio_dev_err(octeon_dev, "Output queue initialization failed\n");
566		return (1);
567	}
568
569	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE);
570
571	/*
572	 * Setup the interrupt handler and record the INT SUM register address
573	 */
574	if (lio_setup_interrupt(octeon_dev,
575				octeon_dev->sriov_info.num_pf_rings))
576		return (1);
577
578	/* Enable Octeon device interrupts */
579	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
580
581	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE);
582
583	/*
584	 * Send Credit for Octeon Output queues. Credits are always sent BEFORE
585	 * the output queue is enabled.
586	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
587	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
588	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
589	 * before any credits have been issued, causing the ring to be reset
590	 * (and the f/w appear to never have started).
591	 */
592	for (j = 0; j < octeon_dev->num_oqs; j++)
593		lio_write_csr32(octeon_dev,
594				octeon_dev->droq[j]->pkts_credit_reg,
595				octeon_dev->droq[j]->max_count);
596
597	/* Enable the input and output queues for this Octeon device */
598	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
599	if (ret) {
600		lio_dev_err(octeon_dev, "Failed to enable input/output queues");
601		return (ret);
602	}
603
604	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE);
605
606	if (!fw_loaded) {
607		lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n");
608		if (!ddr_timeout) {
609			lio_dev_info(octeon_dev,
610				     "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
611		}
612
613		lio_sleep_timeout(LIO_RESET_MSECS);
614
615		/*
616		 * Wait for the octeon to initialize DDR after the
617		 * soft-reset.
618		 */
619		while (!ddr_timeout) {
620			if (pause("-", lio_ms_to_ticks(100))) {
621				/* user probably pressed Control-C */
622				return (1);
623			}
624		}
625
626		ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout);
627		if (ret) {
628			lio_dev_err(octeon_dev,
629				    "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
630				    ret);
631			return (1);
632		}
633
634		if (lio_wait_for_bootloader(octeon_dev, 1100)) {
635			lio_dev_err(octeon_dev, "Board not responding\n");
636			return (1);
637		}
638
639		/* Divert uboot to take commands from host instead. */
640		ret = lio_console_send_cmd(octeon_dev, bootcmd, 50);
641
642		lio_dev_dbg(octeon_dev, "Initializing consoles\n");
643		ret = lio_init_consoles(octeon_dev);
644		if (ret) {
645			lio_dev_err(octeon_dev, "Could not access board consoles\n");
646			return (1);
647		}
648
649		/*
650		 * If console debug enabled, specify empty string to
651		 * use default enablement ELSE specify NULL string for
652		 * 'disabled'.
653		 */
654		dbg_enb = lio_console_debug_enabled(0) ? "" : NULL;
655		ret = lio_add_console(octeon_dev, 0, dbg_enb);
656
657		if (ret) {
658			lio_dev_err(octeon_dev, "Could not access board console\n");
659			return (1);
660		} else if (lio_console_debug_enabled(0)) {
661			/*
662			 * If console was added AND we're logging console output
663			 * then set our console print function.
664			 */
665			octeon_dev->console[0].print = lio_dbg_console_print;
666		}
667
668		atomic_store_rel_int(&octeon_dev->status,
669				     LIO_DEV_CONSOLE_INIT_DONE);
670
671		lio_dev_dbg(octeon_dev, "Loading firmware\n");
672
673		ret = lio_load_firmware(octeon_dev);
674		if (ret) {
675			lio_dev_err(octeon_dev, "Could not load firmware to board\n");
676			return (1);
677		}
678	}
679
680	atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK);
681
682	return (0);
683}
684
685/*
686 * \brief PCI FLR for each Octeon device.
687 * @param oct octeon device
688 */
689static void
690lio_pci_flr(struct octeon_device *oct)
691{
692	uint32_t	exppos, status;
693
694	pci_find_cap(oct->device, PCIY_EXPRESS, &exppos);
695
696	pci_save_state(oct->device);
697
698	/* Quiesce the device completely */
699	pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2);
700
701	/* Wait for Transaction Pending bit clean */
702	lio_mdelay(100);
703
704	status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
705	if (status & PCIEM_STA_TRANSACTION_PND) {
706		lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
707		lio_mdelay(5);
708
709		status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2);
710		if (status & PCIEM_STA_TRANSACTION_PND)
711			lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n");
712	}
713
714	pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2);
715	lio_mdelay(100);
716
717	pci_restore_state(oct->device);
718}
719
720/*
721 * \brief Debug console print function
722 * @param octeon_dev  octeon device
723 * @param console_num console number
724 * @param prefix      first portion of line to display
725 * @param suffix      second portion of line to display
726 *
727 * The OCTEON debug console outputs entire lines (excluding '\n').
728 * Normally, the line will be passed in the 'prefix' parameter.
729 * However, due to buffering, it is possible for a line to be split into two
730 * parts, in which case they will be passed as the 'prefix' parameter and
731 * 'suffix' parameter.
732 */
733static int
734lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
735		      char *prefix, char *suffix)
736{
737
738	if (prefix != NULL && suffix != NULL)
739		lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
740	else if (prefix != NULL)
741		lio_dev_info(oct, "%u: %s\n", console_num, prefix);
742	else if (suffix != NULL)
743		lio_dev_info(oct, "%u: %s\n", console_num, suffix);
744
745	return (0);
746}
747
748static void
749lio_watchdog(void *param)
750{
751	int		core_num;
752	uint16_t	mask_of_crashed_or_stuck_cores = 0;
753	struct octeon_device	*oct = param;
754	bool		err_msg_was_printed[12];
755
756	bzero(err_msg_was_printed, sizeof(err_msg_was_printed));
757
758	while (1) {
759		kproc_suspend_check(oct->watchdog_task);
760		mask_of_crashed_or_stuck_cores =
761			(uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);
762
763		if (mask_of_crashed_or_stuck_cores) {
764			struct octeon_device *other_oct;
765
766			oct->cores_crashed = true;
767			other_oct = lio_get_other_octeon_device(oct);
768			if (other_oct != NULL)
769				other_oct->cores_crashed = true;
770
771			for (core_num = 0; core_num < LIO_MAX_CORES;
772			     core_num++) {
773				bool core_crashed_or_got_stuck;
774
775				core_crashed_or_got_stuck =
776				    (mask_of_crashed_or_stuck_cores >>
777				     core_num) & 1;
778				if (core_crashed_or_got_stuck &&
779				    !err_msg_was_printed[core_num]) {
780					lio_dev_err(oct,
781						    "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
782						    core_num);
783					err_msg_was_printed[core_num] = true;
784				}
785			}
786
787		}
788
789		/* sleep for two seconds */
790		pause("-", lio_ms_to_ticks(2000));
791	}
792}
793
794static int
795lio_chip_specific_setup(struct octeon_device *oct)
796{
797	char		*s;
798	uint32_t	dev_id, rev_id;
799	int		ret = 1;
800
801	dev_id = lio_read_pci_cfg(oct, 0);
802	rev_id = pci_get_revid(oct->device);
803	oct->subdevice_id = pci_get_subdevice(oct->device);
804
805	switch (dev_id) {
806	case LIO_CN23XX_PF_PCIID:
807		oct->chip_id = LIO_CN23XX_PF_VID;
808		if (pci_get_function(oct->device) == 0) {
809			if (num_queues_per_pf0 < 0) {
810				lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
811					     num_queues_per_pf0);
812				num_queues_per_pf0 = 0;
813			}
814
815			oct->sriov_info.num_pf_rings = num_queues_per_pf0;
816		} else {
817			if (num_queues_per_pf1 < 0) {
818				lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
819					     num_queues_per_pf1);
820				num_queues_per_pf1 = 0;
821			}
822
823			oct->sriov_info.num_pf_rings = num_queues_per_pf1;
824		}
825
826		ret = lio_cn23xx_pf_setup_device(oct);
827		s = "CN23XX";
828		break;
829
830	default:
831		s = "?";
832		lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
833	}
834
835	if (!ret)
836		lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
837			     OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
838			     lio_get_conf(oct)->card_name, LIO_VERSION);
839
840	return (ret);
841}
842
843static struct octeon_device *
844lio_get_other_octeon_device(struct octeon_device *oct)
845{
846	struct octeon_device	*other_oct;
847
848	other_oct = lio_get_device(oct->octeon_id + 1);
849
850	if ((other_oct != NULL) && other_oct->device) {
851		int	oct_busnum, other_oct_busnum;
852
853		oct_busnum = pci_get_bus(oct->device);
854		other_oct_busnum = pci_get_bus(other_oct->device);
855
856		if (oct_busnum == other_oct_busnum) {
857			int	oct_slot, other_oct_slot;
858
859			oct_slot = pci_get_slot(oct->device);
860			other_oct_slot = pci_get_slot(other_oct->device);
861
862			if (oct_slot == other_oct_slot)
863				return (other_oct);
864		}
865	}
866	return (NULL);
867}
868
869/*
870 * \brief Load firmware to device
871 * @param oct octeon device
872 *
873 * Maps device to firmware filename, requests firmware, and downloads it
874 */
875static int
876lio_load_firmware(struct octeon_device *oct)
877{
878	const struct firmware	*fw;
879	char	*tmp_fw_type = NULL;
880	int	ret = 0;
881	char	fw_name[LIO_MAX_FW_FILENAME_LEN];
882
883	if (fw_type[0] == '\0')
884		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
885	else
886		tmp_fw_type = fw_type;
887
888	sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
889		lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);
890
891	fw = firmware_get(fw_name);
892	if (fw == NULL) {
893		lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
894			    fw_name);
895		return (EINVAL);
896	}
897
898	ret = lio_download_firmware(oct, fw->data, fw->datasize);
899
900	firmware_put(fw, FIRMWARE_UNLOAD);
901
902	return (ret);
903}
904
905static int
906lio_nic_starter(struct octeon_device *oct)
907{
908	int	ret = 0;
909
910	atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);
911
912	if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
913		if (lio_init_nic_module(oct)) {
914			lio_dev_err(oct, "NIC initialization failed\n");
915			ret = -1;
916#ifdef CAVIUM_ONiLY_23XX_VF
917		} else {
918			if (octeon_enable_sriov(oct) < 0)
919				ret = -1;
920#endif
921		}
922	} else {
923		lio_dev_err(oct,
924			    "Unexpected application running on NIC (%d). Check firmware.\n",
925			    oct->app_mode);
926		ret = -1;
927	}
928
929	return (ret);
930}
931
932static int
933lio_init_nic_module(struct octeon_device *oct)
934{
935	int	num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
936	int	retval = 0;
937
938	lio_dev_dbg(oct, "Initializing network interfaces\n");
939
940	/*
941	 * only default iq and oq were initialized
942	 * initialize the rest as well
943	 */
944
945	/* run port_config command for each port */
946	oct->ifcount = num_nic_ports;
947
948	bzero(&oct->props, sizeof(struct lio_if_props));
949
950	oct->props.gmxport = -1;
951
952	retval = lio_setup_nic_devices(oct);
953	if (retval) {
954		lio_dev_err(oct, "Setup NIC devices failed\n");
955		goto lio_init_failure;
956	}
957
958	lio_dev_dbg(oct, "Network interfaces ready\n");
959
960	return (retval);
961
962lio_init_failure:
963
964	oct->ifcount = 0;
965
966	return (retval);
967}
968
969static int
970lio_ifmedia_update(struct ifnet *ifp)
971{
972	struct lio	*lio = if_getsoftc(ifp);
973	struct ifmedia	*ifm;
974
975	ifm = &lio->ifmedia;
976
977	/* We only support Ethernet media type. */
978	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
979		return (EINVAL);
980
981	switch (IFM_SUBTYPE(ifm->ifm_media)) {
982	case IFM_AUTO:
983		break;
984	case IFM_10G_CX4:
985	case IFM_10G_SR:
986	case IFM_10G_T:
987	case IFM_10G_TWINAX:
988	default:
989		/* We don't support changing the media type. */
990		lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
991			    IFM_SUBTYPE(ifm->ifm_media));
992		return (EINVAL);
993	}
994
995	return (0);
996}
997
998static int
999lio_get_media_subtype(struct octeon_device *oct)
1000{
1001
1002	switch(oct->subdevice_id) {
1003	case LIO_CN2350_10G_SUBDEVICE:
1004	case LIO_CN2350_10G_SUBDEVICE1:
1005	case LIO_CN2360_10G_SUBDEVICE:
1006		return (IFM_10G_SR);
1007
1008	case LIO_CN2350_25G_SUBDEVICE:
1009	case LIO_CN2360_25G_SUBDEVICE:
1010		return (IFM_25G_SR);
1011	}
1012
1013	return (IFM_10G_SR);
1014}
1015
1016static uint64_t
1017lio_get_baudrate(struct octeon_device *oct)
1018{
1019
1020	switch(oct->subdevice_id) {
1021	case LIO_CN2350_10G_SUBDEVICE:
1022	case LIO_CN2350_10G_SUBDEVICE1:
1023	case LIO_CN2360_10G_SUBDEVICE:
1024		return (IF_Gbps(10));
1025
1026	case LIO_CN2350_25G_SUBDEVICE:
1027	case LIO_CN2360_25G_SUBDEVICE:
1028		return (IF_Gbps(25));
1029	}
1030
1031	return (IF_Gbps(10));
1032}
1033
1034static void
1035lio_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1036{
1037	struct lio	*lio = if_getsoftc(ifp);
1038
1039	/* Report link down if the driver isn't running. */
1040	if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1041		ifmr->ifm_active |= IFM_NONE;
1042		return;
1043	}
1044
1045	/* Setup the default interface info. */
1046	ifmr->ifm_status = IFM_AVALID;
1047	ifmr->ifm_active = IFM_ETHER;
1048
1049	if (lio->linfo.link.s.link_up) {
1050		ifmr->ifm_status |= IFM_ACTIVE;
1051	} else {
1052		ifmr->ifm_active |= IFM_NONE;
1053		return;
1054	}
1055
1056	ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);
1057
1058	if (lio->linfo.link.s.duplex)
1059		ifmr->ifm_active |= IFM_FDX;
1060	else
1061		ifmr->ifm_active |= IFM_HDX;
1062}
1063
1064static uint64_t
1065lio_get_counter(if_t ifp, ift_counter cnt)
1066{
1067	struct lio	*lio = if_getsoftc(ifp);
1068	struct octeon_device	*oct = lio->oct_dev;
1069	uint64_t	counter = 0;
1070	int		i, q_no;
1071
1072	switch (cnt) {
1073	case IFCOUNTER_IPACKETS:
1074		for (i = 0; i < oct->num_oqs; i++) {
1075			q_no = lio->linfo.rxpciq[i].s.q_no;
1076			counter += oct->droq[q_no]->stats.rx_pkts_received;
1077		}
1078		break;
1079	case IFCOUNTER_OPACKETS:
1080		for (i = 0; i < oct->num_iqs; i++) {
1081			q_no = lio->linfo.txpciq[i].s.q_no;
1082			counter += oct->instr_queue[q_no]->stats.tx_done;
1083		}
1084		break;
1085	case IFCOUNTER_IBYTES:
1086		for (i = 0; i < oct->num_oqs; i++) {
1087			q_no = lio->linfo.rxpciq[i].s.q_no;
1088			counter += oct->droq[q_no]->stats.rx_bytes_received;
1089		}
1090		break;
1091	case IFCOUNTER_OBYTES:
1092		for (i = 0; i < oct->num_iqs; i++) {
1093			q_no = lio->linfo.txpciq[i].s.q_no;
1094			counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
1095		}
1096		break;
1097	case IFCOUNTER_IQDROPS:
1098		for (i = 0; i < oct->num_oqs; i++) {
1099			q_no = lio->linfo.rxpciq[i].s.q_no;
1100			counter += oct->droq[q_no]->stats.rx_dropped;
1101		}
1102		break;
1103	case IFCOUNTER_OQDROPS:
1104		for (i = 0; i < oct->num_iqs; i++) {
1105			q_no = lio->linfo.txpciq[i].s.q_no;
1106			counter += oct->instr_queue[q_no]->stats.tx_dropped;
1107		}
1108		break;
1109	case IFCOUNTER_IMCASTS:
1110		counter = oct->link_stats.fromwire.total_mcst;
1111		break;
1112	case IFCOUNTER_OMCASTS:
1113		counter = oct->link_stats.fromhost.mcast_pkts_sent;
1114		break;
1115	case IFCOUNTER_COLLISIONS:
1116		counter = oct->link_stats.fromhost.total_collisions;
1117		break;
1118	case IFCOUNTER_IERRORS:
1119		counter = oct->link_stats.fromwire.fcs_err +
1120		    oct->link_stats.fromwire.l2_err +
1121		    oct->link_stats.fromwire.frame_err;
1122		break;
1123	default:
1124		return (if_get_counter_default(ifp, cnt));
1125	}
1126
1127	return (counter);
1128}
1129
1130static int
1131lio_init_ifnet(struct lio *lio)
1132{
1133	struct octeon_device	*oct = lio->oct_dev;
1134	if_t ifp = lio->ifp;
1135
1136	/* ifconfig entrypoint for media type/status reporting */
1137	ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
1138		     lio_ifmedia_status);
1139
1140	/* set the default interface values */
1141	ifmedia_add(&lio->ifmedia,
1142		    (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
1143		    0, NULL);
1144	ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
1145	ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));
1146
1147	lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
1148	lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);
1149
1150	if_initname(ifp, device_get_name(oct->device),
1151		    device_get_unit(oct->device));
1152	if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
1153	if_setioctlfn(ifp, lio_ioctl);
1154	if_setgetcounterfn(ifp, lio_get_counter);
1155	if_settransmitfn(ifp, lio_mq_start);
1156	if_setqflushfn(ifp, lio_qflush);
1157	if_setinitfn(ifp, lio_open);
1158	if_setmtu(ifp, lio->linfo.link.s.mtu);
1159	lio->mtu = lio->linfo.link.s.mtu;
1160	if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
1161			     CSUM_TCP_IPV6 | CSUM_UDP_IPV6));
1162
1163	if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
1164				    IFCAP_TSO | IFCAP_LRO |
1165				    IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
1166				    IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
1167				    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
1168				    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);
1169
1170	if_setcapenable(ifp, if_getcapabilities(ifp));
1171	if_setbaudrate(ifp, lio_get_baudrate(oct));
1172
1173	return (0);
1174}
1175
1176static void
1177lio_tcp_lro_free(struct octeon_device *octeon_dev, struct ifnet *ifp)
1178{
1179	struct lio	*lio = if_getsoftc(ifp);
1180	struct lio_droq	*droq;
1181	int		q_no;
1182	int		i;
1183
1184	for (i = 0; i < octeon_dev->num_oqs; i++) {
1185		q_no = lio->linfo.rxpciq[i].s.q_no;
1186		droq = octeon_dev->droq[q_no];
1187		if (droq->lro.ifp) {
1188			tcp_lro_free(&droq->lro);
1189			droq->lro.ifp = NULL;
1190		}
1191	}
1192}
1193
1194static int
1195lio_tcp_lro_init(struct octeon_device *octeon_dev, struct ifnet *ifp)
1196{
1197	struct lio	*lio = if_getsoftc(ifp);
1198	struct lio_droq	*droq;
1199	struct lro_ctrl	*lro;
1200	int		i, q_no, ret = 0;
1201
1202	for (i = 0; i < octeon_dev->num_oqs; i++) {
1203		q_no = lio->linfo.rxpciq[i].s.q_no;
1204		droq = octeon_dev->droq[q_no];
1205		lro = &droq->lro;
1206		ret = tcp_lro_init(lro);
1207		if (ret) {
1208			lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
1209				    ret);
1210			goto lro_init_failed;
1211		}
1212
1213		lro->ifp = ifp;
1214	}
1215
1216	return (ret);
1217
1218lro_init_failed:
1219	lio_tcp_lro_free(octeon_dev, ifp);
1220
1221	return (ret);
1222}
1223
1224static int
1225lio_setup_nic_devices(struct octeon_device *octeon_dev)
1226{
1227	union		octeon_if_cfg if_cfg;
1228	struct lio	*lio = NULL;
1229	struct ifnet	*ifp = NULL;
1230	struct lio_version		*vdata;
1231	struct lio_soft_command		*sc;
1232	struct lio_if_cfg_context	*ctx;
1233	struct lio_if_cfg_resp		*resp;
1234	struct lio_if_props		*props;
1235	int		num_iqueues, num_oqueues, retval;
1236	unsigned int	base_queue;
1237	unsigned int	gmx_port_id;
1238	uint32_t	ctx_size, data_size;
1239	uint32_t	ifidx_or_pfnum, resp_size;
1240	uint8_t		mac[ETHER_HDR_LEN], i, j;
1241
1242	/* This is to handle link status changes */
1243	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
1244				 LIO_OPCODE_NIC_INFO,
1245				 lio_link_info, octeon_dev);
1246
1247	for (i = 0; i < octeon_dev->ifcount; i++) {
1248		resp_size = sizeof(struct lio_if_cfg_resp);
1249		ctx_size = sizeof(struct lio_if_cfg_context);
1250		data_size = sizeof(struct lio_version);
1251		sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
1252					    ctx_size);
1253		if (sc == NULL)
1254			return (ENOMEM);
1255
1256		resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1257		ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1258		vdata = (struct lio_version *)sc->virtdptr;
1259
1260		*((uint64_t *)vdata) = 0;
1261		vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
1262		vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
1263		vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);
1264
1265		num_iqueues = octeon_dev->sriov_info.num_pf_rings;
1266		num_oqueues = octeon_dev->sriov_info.num_pf_rings;
1267		base_queue = octeon_dev->sriov_info.pf_srn;
1268
1269		gmx_port_id = octeon_dev->pf_num;
1270		ifidx_or_pfnum = octeon_dev->pf_num;
1271
1272		lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
1273			    ifidx_or_pfnum, num_iqueues, num_oqueues);
1274		ctx->cond = 0;
1275		ctx->octeon_id = lio_get_device_id(octeon_dev);
1276
1277		if_cfg.if_cfg64 = 0;
1278		if_cfg.s.num_iqueues = num_iqueues;
1279		if_cfg.s.num_oqueues = num_oqueues;
1280		if_cfg.s.base_queue = base_queue;
1281		if_cfg.s.gmx_port_id = gmx_port_id;
1282
1283		sc->iq_no = 0;
1284
1285		lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
1286					 LIO_OPCODE_NIC_IF_CFG, 0,
1287					 if_cfg.if_cfg64, 0);
1288
1289		sc->callback = lio_if_cfg_callback;
1290		sc->callback_arg = sc;
1291		sc->wait_time = 3000;
1292
1293		retval = lio_send_soft_command(octeon_dev, sc);
1294		if (retval == LIO_IQ_SEND_FAILED) {
1295			lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
1296				    retval);
1297			/* Soft instr is freed by driver in case of failure. */
1298			goto setup_nic_dev_fail;
1299		}
1300
1301		/*
1302		 * Sleep on a wait queue till the cond flag indicates that the
1303		 * response arrived or timed-out.
1304		 */
1305		lio_sleep_cond(octeon_dev, &ctx->cond);
1306
1307		retval = resp->status;
1308		if (retval) {
1309			lio_dev_err(octeon_dev, "iq/oq config failed\n");
1310			goto setup_nic_dev_fail;
1311		}
1312
1313		lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1314				 (sizeof(struct octeon_if_cfg_info)) >> 3);
1315
1316		num_iqueues = bitcount64(resp->cfg_info.iqmask);
1317		num_oqueues = bitcount64(resp->cfg_info.oqmask);
1318
1319		if (!(num_iqueues) || !(num_oqueues)) {
1320			lio_dev_err(octeon_dev,
1321				    "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
1322				    LIO_CAST64(resp->cfg_info.iqmask),
1323				    LIO_CAST64(resp->cfg_info.oqmask));
1324			goto setup_nic_dev_fail;
1325		}
1326
1327		lio_dev_dbg(octeon_dev,
1328			    "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
1329			    i, LIO_CAST64(resp->cfg_info.iqmask),
1330			    LIO_CAST64(resp->cfg_info.oqmask),
1331			    num_iqueues, num_oqueues);
1332
1333		ifp = if_alloc(IFT_ETHER);
1334
1335		if (ifp == NULL) {
1336			lio_dev_err(octeon_dev, "Device allocation failed\n");
1337			goto setup_nic_dev_fail;
1338		}
1339
1340		lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);
1341
1342		if (lio == NULL) {
1343			lio_dev_err(octeon_dev, "Lio allocation failed\n");
1344			goto setup_nic_dev_fail;
1345		}
1346
1347		if_setsoftc(ifp, lio);
1348
1349		ifp->if_hw_tsomax = LIO_MAX_FRAME_SIZE;
1350		ifp->if_hw_tsomaxsegcount = LIO_MAX_SG;
1351		ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1352
1353		lio->ifidx = ifidx_or_pfnum;
1354
1355		props = &octeon_dev->props;
1356		props->gmxport = resp->cfg_info.linfo.gmxport;
1357		props->ifp = ifp;
1358
1359		lio->linfo.num_rxpciq = num_oqueues;
1360		lio->linfo.num_txpciq = num_iqueues;
1361		for (j = 0; j < num_oqueues; j++) {
1362			lio->linfo.rxpciq[j].rxpciq64 =
1363			    resp->cfg_info.linfo.rxpciq[j].rxpciq64;
1364		}
1365
1366		for (j = 0; j < num_iqueues; j++) {
1367			lio->linfo.txpciq[j].txpciq64 =
1368			    resp->cfg_info.linfo.txpciq[j].txpciq64;
1369		}
1370
1371		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1372		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1373		lio->linfo.link.link_status64 =
1374		    resp->cfg_info.linfo.link.link_status64;
1375
1376		/*
1377		 * Point to the properties for octeon device to which this
1378		 * interface belongs.
1379		 */
1380		lio->oct_dev = octeon_dev;
1381		lio->ifp = ifp;
1382
1383		lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
1384			    lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
1385		lio_init_ifnet(lio);
1386		/* 64-bit swap required on LE machines */
1387		lio_swap_8B_data(&lio->linfo.hw_addr, 1);
1388		for (j = 0; j < 6; j++)
1389			mac[j] = *((uint8_t *)(
1390				   ((uint8_t *)&lio->linfo.hw_addr) + 2 + j));
1391
1392		ether_ifattach(ifp, mac);
1393
1394		/*
1395		 * By default all interfaces on a single Octeon uses the same
1396		 * tx and rx queues
1397		 */
1398		lio->txq = lio->linfo.txpciq[0].s.q_no;
1399		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1400		if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
1401					lio->linfo.num_rxpciq)) {
1402			lio_dev_err(octeon_dev, "I/O queues creation failed\n");
1403			goto setup_nic_dev_fail;
1404		}
1405
1406		lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
1407
1408		lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
1409		lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);
1410
1411		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
1412			lio_dev_err(octeon_dev, "Gather list allocation failed\n");
1413			goto setup_nic_dev_fail;
1414		}
1415
1416		if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
1417			goto setup_nic_dev_fail;
1418
1419		if (lio_hwlro &&
1420		    (if_getcapenable(ifp) & IFCAP_LRO) &&
1421		    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
1422		    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
1423			lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
1424					LIO_LROIPV4 | LIO_LROIPV6);
1425
1426		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
1427			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
1428		else
1429			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);
1430
1431		if (lio_setup_rx_oom_poll_fn(ifp))
1432			goto setup_nic_dev_fail;
1433
1434		lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
1435			    i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
1436		lio->link_changes++;
1437
1438		lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);
1439
1440		/*
1441		 * Sending command to firmware to enable Rx checksum offload
1442		 * by default at the time of setup of Liquidio driver for
1443		 * this device
1444		 */
1445		lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
1446				       LIO_CMD_RXCSUM_ENABLE);
1447		lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
1448				LIO_CMD_TXCSUM_ENABLE);
1449
1450#ifdef RSS
1451		if (lio_rss) {
1452			if (lio_send_rss_param(lio))
1453				goto setup_nic_dev_fail;
1454		} else
1455#endif	/* RSS */
1456
1457			lio_set_feature(ifp, LIO_CMD_SET_FNV,
1458					LIO_CMD_FNV_ENABLE);
1459
1460		lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);
1461
1462		lio_free_soft_command(octeon_dev, sc);
1463		lio->vlan_attach =
1464		    EVENTHANDLER_REGISTER(vlan_config,
1465					  lio_vlan_rx_add_vid, lio,
1466					  EVENTHANDLER_PRI_FIRST);
1467		lio->vlan_detach =
1468		    EVENTHANDLER_REGISTER(vlan_unconfig,
1469					  lio_vlan_rx_kill_vid, lio,
1470					  EVENTHANDLER_PRI_FIRST);
1471
1472		/* Update stats periodically */
1473		callout_init(&lio->stats_timer, 0);
1474		lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;
1475
1476		lio_add_hw_stats(lio);
1477	}
1478
1479	return (0);
1480
1481setup_nic_dev_fail:
1482
1483	lio_free_soft_command(octeon_dev, sc);
1484
1485	while (i--) {
1486		lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
1487		lio_destroy_nic_device(octeon_dev, i);
1488	}
1489
1490	return (ENODEV);
1491}
1492
1493static int
1494lio_link_info(struct lio_recv_info *recv_info, void *ptr)
1495{
1496	struct octeon_device	*oct = (struct octeon_device *)ptr;
1497	struct lio_recv_pkt	*recv_pkt = recv_info->recv_pkt;
1498	union octeon_link_status *ls;
1499	int	gmxport = 0, i;
1500
1501	lio_dev_dbg(oct, "%s Called\n", __func__);
1502	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) {
1503		lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1504			    recv_pkt->buffer_size[0],
1505			    recv_pkt->rh.r_nic_info.gmxport);
1506		goto nic_info_err;
1507	}
1508	gmxport = recv_pkt->rh.r_nic_info.gmxport;
1509	ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data +
1510					  LIO_DROQ_INFO_SIZE);
1511	lio_swap_8B_data((uint64_t *)ls,
1512			 (sizeof(union octeon_link_status)) >> 3);
1513
1514	if (oct->props.gmxport == gmxport)
1515		lio_update_link_status(oct->props.ifp, ls);
1516
1517nic_info_err:
1518	for (i = 0; i < recv_pkt->buffer_count; i++)
1519		lio_recv_buffer_free(recv_pkt->buffer_ptr[i]);
1520
1521	lio_free_recv_info(recv_info);
1522	return (0);
1523}
1524
1525void
1526lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1527{
1528
1529	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1530	bus_dmamap_unload(iq->txtag, finfo->map);
1531	m_freem(finfo->mb);
1532}
1533
1534void
1535lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo)
1536{
1537	struct lio_gather	*g;
1538	struct octeon_device	*oct;
1539	struct lio		*lio;
1540	int	iq_no;
1541
1542	g = finfo->g;
1543	iq_no = iq->txpciq.s.q_no;
1544	oct = iq->oct_dev;
1545	lio = if_getsoftc(oct->props.ifp);
1546
1547	mtx_lock(&lio->glist_lock[iq_no]);
1548	STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries);
1549	mtx_unlock(&lio->glist_lock[iq_no]);
1550
1551	bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE);
1552	bus_dmamap_unload(iq->txtag, finfo->map);
1553	m_freem(finfo->mb);
1554}
1555
1556static void
1557lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf)
1558{
1559	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
1560	struct lio_if_cfg_resp	*resp;
1561	struct lio_if_cfg_context *ctx;
1562
1563	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1564	ctx = (struct lio_if_cfg_context *)sc->ctxptr;
1565
1566	oct = lio_get_device(ctx->octeon_id);
1567	if (resp->status)
1568		lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n",
1569			    LIO_CAST64(resp->status), status);
1570	ctx->cond = 1;
1571
1572	snprintf(oct->fw_info.lio_firmware_version, 32, "%s",
1573		 resp->cfg_info.lio_firmware_version);
1574
1575	/*
1576	 * This barrier is required to be sure that the response has been
1577	 * written fully before waking up the handler
1578	 */
1579	wmb();
1580}
1581
1582static int
1583lio_is_mac_changed(uint8_t *new, uint8_t *old)
1584{
1585
1586	return ((new[0] != old[0]) || (new[1] != old[1]) ||
1587		(new[2] != old[2]) || (new[3] != old[3]) ||
1588		(new[4] != old[4]) || (new[5] != old[5]));
1589}
1590
1591void
1592lio_open(void *arg)
1593{
1594	struct lio	*lio = arg;
1595	struct ifnet	*ifp = lio->ifp;
1596	struct octeon_device	*oct = lio->oct_dev;
1597	uint8_t	*mac_new, mac_old[ETHER_HDR_LEN];
1598	int	ret = 0;
1599
1600	lio_ifstate_set(lio, LIO_IFSTATE_RUNNING);
1601
1602	/* Ready for link status updates */
1603	lio->intf_open = 1;
1604
1605	lio_dev_info(oct, "Interface Open, ready for traffic\n");
1606
1607	/* tell Octeon to start forwarding packets to host */
1608	lio_send_rx_ctrl_cmd(lio, 1);
1609
1610	mac_new = IF_LLADDR(ifp);
1611	memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN);
1612
1613	if (lio_is_mac_changed(mac_new, mac_old)) {
1614		ret = lio_set_mac(ifp, mac_new);
1615		if (ret)
1616			lio_dev_err(oct, "MAC change failed, error: %d\n", ret);
1617	}
1618
1619	/* Now inform the stack we're ready */
1620	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1621
1622	lio_dev_info(oct, "Interface is opened\n");
1623}
1624
1625static int
1626lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd)
1627{
1628	struct lio_ctrl_pkt	nctrl;
1629	struct lio		*lio = if_getsoftc(ifp);
1630	struct octeon_device	*oct = lio->oct_dev;
1631	int	ret = 0;
1632
1633	nctrl.ncmd.cmd64 = 0;
1634	nctrl.ncmd.s.cmd = command;
1635	nctrl.ncmd.s.param1 = rx_cmd;
1636	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1637	nctrl.wait_time = 100;
1638	nctrl.lio = lio;
1639	nctrl.cb_fn = lio_ctrl_cmd_completion;
1640
1641	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
1642	if (ret < 0) {
1643		lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
1644			    ret);
1645	}
1646
1647	return (ret);
1648}
1649
1650static int
1651lio_stop_nic_module(struct octeon_device *oct)
1652{
1653	int		i, j;
1654	struct lio	*lio;
1655
1656	lio_dev_dbg(oct, "Stopping network interfaces\n");
1657	if (!oct->ifcount) {
1658		lio_dev_err(oct, "Init for Octeon was not completed\n");
1659		return (1);
1660	}
1661
1662	mtx_lock(&oct->cmd_resp_wqlock);
1663	oct->cmd_resp_state = LIO_DRV_OFFLINE;
1664	mtx_unlock(&oct->cmd_resp_wqlock);
1665
1666	for (i = 0; i < oct->ifcount; i++) {
1667		lio = if_getsoftc(oct->props.ifp);
1668		for (j = 0; j < oct->num_oqs; j++)
1669			lio_unregister_droq_ops(oct,
1670						lio->linfo.rxpciq[j].s.q_no);
1671	}
1672
1673	callout_drain(&lio->stats_timer);
1674
1675	for (i = 0; i < oct->ifcount; i++)
1676		lio_destroy_nic_device(oct, i);
1677
1678	lio_dev_dbg(oct, "Network interface stopped\n");
1679
1680	return (0);
1681}
1682
1683static void
1684lio_delete_glists(struct octeon_device *oct, struct lio *lio)
1685{
1686	struct lio_gather	*g;
1687	int	i;
1688
1689	if (lio->glist_lock != NULL) {
1690		free((void *)lio->glist_lock, M_DEVBUF);
1691		lio->glist_lock = NULL;
1692	}
1693
1694	if (lio->ghead == NULL)
1695		return;
1696
1697	for (i = 0; i < lio->linfo.num_txpciq; i++) {
1698		do {
1699			g = (struct lio_gather *)
1700			    lio_delete_first_node(&lio->ghead[i]);
1701			free(g, M_DEVBUF);
1702		} while (g);
1703
1704		if ((lio->glists_virt_base != NULL) &&
1705		    (lio->glists_virt_base[i] != NULL)) {
1706			lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
1707				     lio->glists_virt_base[i]);
1708		}
1709	}
1710
1711	free(lio->glists_virt_base, M_DEVBUF);
1712	lio->glists_virt_base = NULL;
1713
1714	free(lio->glists_dma_base, M_DEVBUF);
1715	lio->glists_dma_base = NULL;
1716
1717	free(lio->ghead, M_DEVBUF);
1718	lio->ghead = NULL;
1719}
1720
1721static int
1722lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
1723{
1724	struct lio_gather	*g;
1725	int	i, j;
1726
1727	lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
1728				 M_NOWAIT | M_ZERO);
1729	if (lio->glist_lock == NULL)
1730		return (1);
1731
1732	lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
1733			    M_NOWAIT | M_ZERO);
1734	if (lio->ghead == NULL) {
1735		free((void *)lio->glist_lock, M_DEVBUF);
1736		lio->glist_lock = NULL;
1737		return (1);
1738	}
1739
1740	lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
1741					 LIO_SG_ENTRY_SIZE);
1742	/*
1743	 * allocate memory to store virtual and dma base address of
1744	 * per glist consistent memory
1745	 */
1746	lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
1747				       M_NOWAIT | M_ZERO);
1748	lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
1749				      M_NOWAIT | M_ZERO);
1750	if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
1751		lio_delete_glists(oct, lio);
1752		return (1);
1753	}
1754
1755	for (i = 0; i < num_iqs; i++) {
1756		mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);
1757
1758		STAILQ_INIT(&lio->ghead[i]);
1759
1760		lio->glists_virt_base[i] =
1761		    lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
1762				  (vm_paddr_t *)&lio->glists_dma_base[i]);
1763		if (lio->glists_virt_base[i] == NULL) {
1764			lio_delete_glists(oct, lio);
1765			return (1);
1766		}
1767
1768		for (j = 0; j < lio->tx_qsize; j++) {
1769			g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
1770			if (g == NULL)
1771				break;
1772
1773			g->sg = (struct lio_sg_entry *)(uintptr_t)
1774			    ((uint64_t)(uintptr_t)lio->glists_virt_base[i] +
1775			     (j * lio->glist_entry_size));
1776			g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
1777				(j * lio->glist_entry_size);
1778			STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
1779		}
1780
1781		if (j != lio->tx_qsize) {
1782			lio_delete_glists(oct, lio);
1783			return (1);
1784		}
1785	}
1786
1787	return (0);
1788}
1789
1790void
1791lio_stop(struct ifnet *ifp)
1792{
1793	struct lio	*lio = if_getsoftc(ifp);
1794	struct octeon_device	*oct = lio->oct_dev;
1795
1796	lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1797	if_link_state_change(ifp, LINK_STATE_DOWN);
1798
1799	lio->intf_open = 0;
1800	lio->linfo.link.s.link_up = 0;
1801	lio->link_changes++;
1802
1803	lio_send_rx_ctrl_cmd(lio, 0);
1804
1805	/* Tell the stack that the interface is no longer active */
1806	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1807
1808	lio_dev_info(oct, "Interface is stopped\n");
1809}
1810
1811static void
1812lio_check_rx_oom_status(struct lio *lio)
1813{
1814	struct lio_droq	*droq;
1815	struct octeon_device *oct = lio->oct_dev;
1816	int	desc_refilled;
1817	int	q, q_no = 0;
1818
1819	for (q = 0; q < oct->num_oqs; q++) {
1820		q_no = lio->linfo.rxpciq[q].s.q_no;
1821		droq = oct->droq[q_no];
1822		if (droq == NULL)
1823			continue;
1824		if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
1825			mtx_lock(&droq->lock);
1826			desc_refilled = lio_droq_refill(oct, droq);
1827			/*
1828			 * Flush the droq descriptor data to memory to be sure
1829			 * that when we update the credits the data in memory
1830			 * is accurate.
1831			 */
1832			wmb();
1833			lio_write_csr32(oct, droq->pkts_credit_reg,
1834					desc_refilled);
1835			/* make sure mmio write completes */
1836			__compiler_membar();
1837			mtx_unlock(&droq->lock);
1838		}
1839	}
1840}
1841
1842static void
1843lio_poll_check_rx_oom_status(void *arg, int pending __unused)
1844{
1845	struct lio_tq	*rx_status_tq = arg;
1846	struct lio	*lio = rx_status_tq->ctxptr;
1847
1848	if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
1849		lio_check_rx_oom_status(lio);
1850
1851	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1852				  lio_ms_to_ticks(50));
1853}
1854
1855static int
1856lio_setup_rx_oom_poll_fn(struct ifnet *ifp)
1857{
1858	struct lio	*lio = if_getsoftc(ifp);
1859	struct octeon_device	*oct = lio->oct_dev;
1860	struct lio_tq	*rx_status_tq;
1861
1862	rx_status_tq = &lio->rx_status_tq;
1863
1864	rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
1865					    taskqueue_thread_enqueue,
1866					    &rx_status_tq->tq);
1867	if (rx_status_tq->tq == NULL) {
1868		lio_dev_err(oct, "unable to create lio rx oom status tq\n");
1869		return (-1);
1870	}
1871
1872	TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
1873			  lio_poll_check_rx_oom_status, (void *)rx_status_tq);
1874
1875	rx_status_tq->ctxptr = lio;
1876
1877	taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
1878				"lio%d_rx_oom_status",
1879				oct->octeon_id);
1880
1881	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
1882				  lio_ms_to_ticks(50));
1883
1884	return (0);
1885}
1886
1887static void
1888lio_cleanup_rx_oom_poll_fn(struct ifnet *ifp)
1889{
1890	struct lio	*lio = if_getsoftc(ifp);
1891
1892	if (lio->rx_status_tq.tq != NULL) {
1893		while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
1894						&lio->rx_status_tq.work, NULL))
1895			taskqueue_drain_timeout(lio->rx_status_tq.tq,
1896						&lio->rx_status_tq.work);
1897
1898		taskqueue_free(lio->rx_status_tq.tq);
1899
1900		lio->rx_status_tq.tq = NULL;
1901	}
1902}
1903
1904static void
1905lio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1906{
1907	struct ifnet	*ifp = oct->props.ifp;
1908	struct lio	*lio;
1909
1910	if (ifp == NULL) {
1911		lio_dev_err(oct, "%s No ifp ptr for index %d\n",
1912			    __func__, ifidx);
1913		return;
1914	}
1915
1916	lio = if_getsoftc(ifp);
1917
1918	lio_ifstate_set(lio, LIO_IFSTATE_DETACH);
1919
1920	lio_dev_dbg(oct, "NIC device cleanup\n");
1921
1922	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1923		lio_stop(ifp);
1924
1925	if (lio_wait_for_pending_requests(oct))
1926		lio_dev_err(oct, "There were pending requests\n");
1927
1928	if (lio_wait_for_instr_fetch(oct))
1929		lio_dev_err(oct, "IQ had pending instructions\n");
1930
1931	if (lio_wait_for_oq_pkts(oct))
1932		lio_dev_err(oct, "OQ had pending packets\n");
1933
1934	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1935		ether_ifdetach(ifp);
1936
1937	lio_tcp_lro_free(oct, ifp);
1938
1939	lio_cleanup_rx_oom_poll_fn(ifp);
1940
1941	lio_delete_glists(oct, lio);
1942
1943	EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
1944	EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);
1945
1946	free(lio, M_DEVBUF);
1947
1948	if_free(ifp);
1949
1950	oct->props.gmxport = -1;
1951
1952	oct->props.ifp = NULL;
1953}
1954
1955static void
1956print_link_info(struct ifnet *ifp)
1957{
1958	struct lio	*lio = if_getsoftc(ifp);
1959
1960	if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
1961	    lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
1962		struct octeon_link_info *linfo = &lio->linfo;
1963
1964		if (linfo->link.s.link_up) {
1965			lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
1966				     linfo->link.s.speed,
1967				     (linfo->link.s.duplex) ? "Full" : "Half");
1968		} else {
1969			lio_dev_info(lio->oct_dev, "Link Down\n");
1970		}
1971	}
1972}
1973
1974static inline void
1975lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls)
1976{
1977	struct lio	*lio = if_getsoftc(ifp);
1978	int	changed = (lio->linfo.link.link_status64 != ls->link_status64);
1979
1980	lio->linfo.link.link_status64 = ls->link_status64;
1981
1982	if ((lio->intf_open) && (changed)) {
1983		print_link_info(ifp);
1984		lio->link_changes++;
1985		if (lio->linfo.link.s.link_up)
1986			if_link_state_change(ifp, LINK_STATE_UP);
1987		else
1988			if_link_state_change(ifp, LINK_STATE_DOWN);
1989	}
1990}
1991
1992/*
1993 * \brief Callback for rx ctrl
1994 * @param status status of request
1995 * @param buf pointer to resp structure
1996 */
1997static void
1998lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
1999{
2000	struct lio_soft_command	*sc = (struct lio_soft_command *)buf;
2001	struct lio_rx_ctl_context *ctx;
2002
2003	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2004
2005	oct = lio_get_device(ctx->octeon_id);
2006	if (status)
2007		lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
2008			    LIO_CAST64(status));
2009	ctx->cond = 1;
2010
2011	/*
2012	 * This barrier is required to be sure that the response has been
2013	 * written fully before waking up the handler
2014	 */
2015	wmb();
2016}
2017
2018static void
2019lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
2020{
2021	struct lio_soft_command	*sc;
2022	struct lio_rx_ctl_context *ctx;
2023	union octeon_cmd	*ncmd;
2024	struct octeon_device	*oct = (struct octeon_device *)lio->oct_dev;
2025	int	ctx_size = sizeof(struct lio_rx_ctl_context);
2026	int	retval;
2027
2028	if (oct->props.rx_on == start_stop)
2029		return;
2030
2031	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
2032	if (sc == NULL)
2033		return;
2034
2035	ncmd = (union octeon_cmd *)sc->virtdptr;
2036	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;
2037
2038	ctx->cond = 0;
2039	ctx->octeon_id = lio_get_device_id(oct);
2040	ncmd->cmd64 = 0;
2041	ncmd->s.cmd = LIO_CMD_RX_CTL;
2042	ncmd->s.param1 = start_stop;
2043
2044	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));
2045
2046	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2047
2048	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
2049				 0, 0);
2050
2051	sc->callback = lio_rx_ctl_callback;
2052	sc->callback_arg = sc;
2053	sc->wait_time = 5000;
2054
2055	retval = lio_send_soft_command(oct, sc);
2056	if (retval == LIO_IQ_SEND_FAILED) {
2057		lio_dev_err(oct, "Failed to send RX Control message\n");
2058	} else {
2059		/*
2060		 * Sleep on a wait queue till the cond flag indicates that the
2061		 * response arrived or timed-out.
2062		 */
2063		lio_sleep_cond(oct, &ctx->cond);
2064		oct->props.rx_on = start_stop;
2065	}
2066
2067	lio_free_soft_command(oct, sc);
2068}
2069
2070static void
2071lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid)
2072{
2073	struct lio_ctrl_pkt	nctrl;
2074	struct lio		*lio = if_getsoftc(ifp);
2075	struct octeon_device	*oct = lio->oct_dev;
2076	int	ret = 0;
2077
2078	if (if_getsoftc(ifp) != arg)	/* Not our event */
2079		return;
2080
2081	if ((vid == 0) || (vid > 4095))	/* Invalid */
2082		return;
2083
2084	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2085
2086	nctrl.ncmd.cmd64 = 0;
2087	nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
2088	nctrl.ncmd.s.param1 = vid;
2089	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2090	nctrl.wait_time = 100;
2091	nctrl.lio = lio;
2092	nctrl.cb_fn = lio_ctrl_cmd_completion;
2093
2094	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2095	if (ret < 0) {
2096		lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
2097			    ret);
2098	}
2099}
2100
2101static void
2102lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid)
2103{
2104	struct lio_ctrl_pkt	nctrl;
2105	struct lio		*lio = if_getsoftc(ifp);
2106	struct octeon_device	*oct = lio->oct_dev;
2107	int	ret = 0;
2108
2109	if (if_getsoftc(ifp) != arg)	/* Not our event */
2110		return;
2111
2112	if ((vid == 0) || (vid > 4095))	/* Invalid */
2113		return;
2114
2115	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));
2116
2117	nctrl.ncmd.cmd64 = 0;
2118	nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
2119	nctrl.ncmd.s.param1 = vid;
2120	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2121	nctrl.wait_time = 100;
2122	nctrl.lio = lio;
2123	nctrl.cb_fn = lio_ctrl_cmd_completion;
2124
2125	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
2126	if (ret < 0) {
2127		lio_dev_err(oct,
2128			    "Kill VLAN filter failed in core (ret: 0x%x)\n",
2129			    ret);
2130	}
2131}
2132
2133static int
2134lio_wait_for_oq_pkts(struct octeon_device *oct)
2135{
2136	int	i, pending_pkts, pkt_cnt = 0, retry = 100;
2137
2138	do {
2139		pending_pkts = 0;
2140
2141		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2142			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2143				continue;
2144
2145			pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
2146			if (pkt_cnt > 0) {
2147				pending_pkts += pkt_cnt;
2148				taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
2149						  &oct->droq[i]->droq_task);
2150			}
2151		}
2152
2153		pkt_cnt = 0;
2154		lio_sleep_timeout(1);
2155	} while (retry-- && pending_pkts);
2156
2157	return (pkt_cnt);
2158}
2159
2160static void
2161lio_destroy_resources(struct octeon_device *oct)
2162{
2163	int i, refcount;
2164
2165	switch (atomic_load_acq_int(&oct->status)) {
2166	case LIO_DEV_RUNNING:
2167	case LIO_DEV_CORE_OK:
2168		/* No more instructions will be forwarded. */
2169		atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);
2170
2171		oct->app_mode = LIO_DRV_INVALID_APP;
2172		lio_dev_dbg(oct, "Device state is now %s\n",
2173			    lio_get_state_string(&oct->status));
2174
2175		lio_sleep_timeout(100);
2176
2177		/* fallthrough */
2178	case LIO_DEV_HOST_OK:
2179
2180		/* fallthrough */
2181	case LIO_DEV_CONSOLE_INIT_DONE:
2182		/* Remove any consoles */
2183		lio_remove_consoles(oct);
2184
2185		/* fallthrough */
2186	case LIO_DEV_IO_QUEUES_DONE:
2187		if (lio_wait_for_pending_requests(oct))
2188			lio_dev_err(oct, "There were pending requests\n");
2189
2190		if (lio_wait_for_instr_fetch(oct))
2191			lio_dev_err(oct, "IQ had pending instructions\n");
2192
2193		/*
2194		 * Disable the input and output queues now. No more packets will
2195		 * arrive from Octeon, but we should wait for all packet
2196		 * processing to finish.
2197		 */
2198		oct->fn_list.disable_io_queues(oct);
2199
2200		if (lio_wait_for_oq_pkts(oct))
2201			lio_dev_err(oct, "OQ had pending packets\n");
2202
2203		/* fallthrough */
2204	case LIO_DEV_INTR_SET_DONE:
2205		/* Disable interrupts  */
2206		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
2207
2208		if (oct->msix_on) {
2209			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
2210				if (oct->ioq_vector[i].tag != NULL) {
2211					bus_teardown_intr(oct->device,
2212						  oct->ioq_vector[i].msix_res,
2213						      oct->ioq_vector[i].tag);
2214					oct->ioq_vector[i].tag = NULL;
2215				}
2216				if (oct->ioq_vector[i].msix_res != NULL) {
2217					bus_release_resource(oct->device,
2218						SYS_RES_IRQ,
2219						oct->ioq_vector[i].vector,
2220						oct->ioq_vector[i].msix_res);
2221					oct->ioq_vector[i].msix_res = NULL;
2222				}
2223			}
2224			/* non-iov vector's argument is oct struct */
2225			if (oct->tag != NULL) {
2226				bus_teardown_intr(oct->device, oct->msix_res,
2227						  oct->tag);
2228				oct->tag = NULL;
2229			}
2230
2231			if (oct->msix_res != NULL) {
2232				bus_release_resource(oct->device, SYS_RES_IRQ,
2233						     oct->aux_vector,
2234						     oct->msix_res);
2235				oct->msix_res = NULL;
2236			}
2237
2238			pci_release_msi(oct->device);
2239		}
2240		/* fallthrough */
2241	case LIO_DEV_IN_RESET:
2242	case LIO_DEV_DROQ_INIT_DONE:
2243		/* Wait for any pending operations */
2244		lio_mdelay(100);
2245		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
2246			if (!(oct->io_qmask.oq & BIT_ULL(i)))
2247				continue;
2248			lio_delete_droq(oct, i);
2249		}
2250
2251		/* fallthrough */
2252	case LIO_DEV_RESP_LIST_INIT_DONE:
2253		for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
2254			if (oct->droq[i] != NULL) {
2255				free(oct->droq[i], M_DEVBUF);
2256				oct->droq[i] = NULL;
2257			}
2258		}
2259		lio_delete_response_list(oct);
2260
2261		/* fallthrough */
2262	case LIO_DEV_INSTR_QUEUE_INIT_DONE:
2263		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
2264			if (!(oct->io_qmask.iq & BIT_ULL(i)))
2265				continue;
2266
2267			lio_delete_instr_queue(oct, i);
2268		}
2269
2270		/* fallthrough */
2271	case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
2272		for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
2273			if (oct->instr_queue[i] != NULL) {
2274				free(oct->instr_queue[i], M_DEVBUF);
2275				oct->instr_queue[i] = NULL;
2276			}
2277		}
2278		lio_free_ioq_vector(oct);
2279
2280		/* fallthrough */
2281	case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
2282		lio_free_sc_buffer_pool(oct);
2283
2284		/* fallthrough */
2285	case LIO_DEV_DISPATCH_INIT_DONE:
2286		lio_delete_dispatch_list(oct);
2287
2288		/* fallthrough */
2289	case LIO_DEV_PCI_MAP_DONE:
2290		refcount = lio_deregister_device(oct);
2291
2292		if (fw_type_is_none())
2293			lio_pci_flr(oct);
2294
2295		if (!refcount)
2296			oct->fn_list.soft_reset(oct);
2297
2298		lio_unmap_pci_barx(oct, 0);
2299		lio_unmap_pci_barx(oct, 1);
2300
2301		/* fallthrough */
2302	case LIO_DEV_PCI_ENABLE_DONE:
2303		/* Disable the device, releasing the PCI INT */
2304		pci_disable_busmaster(oct->device);
2305
2306		/* fallthrough */
2307	case LIO_DEV_BEGIN_STATE:
2308		break;
2309	}	/* end switch (oct->status) */
2310}
2311