1/*-
2 * Copyright(c) 2002-2011 Exar Corp.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification are permitted provided the following conditions are met:
7 *
8 *    1. Redistributions of source code must retain the above copyright notice,
9 *       this list of conditions and the following disclaimer.
10 *
11 *    2. Redistributions in binary form must reproduce the above copyright
12 *       notice, this list of conditions and the following disclaimer in the
13 *       documentation and/or other materials provided with the distribution.
14 *
15 *    3. Neither the name of the Exar Corporation nor the names of its
16 *       contributors may be used to endorse or promote products derived from
17 *       this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31/*$FreeBSD: head/sys/dev/vxge/vxge.c 221167 2011-04-28 14:33:15Z gnn $*/
32
33#include <dev/vxge/vxge.h>
34
35static int vxge_pci_bd_no = -1;
36static u32 vxge_drv_copyright = 0;
37static u32 vxge_dev_ref_count = 0;
38static u32 vxge_dev_req_reboot = 0;
39
40static int vpath_selector[VXGE_HAL_MAX_VIRTUAL_PATHS] =
41{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
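/*
 * Note: each entry above is of the form (2^k - 1); the table is presumably
 * used as a mask to fold a hash/flow value down to the configured number of
 * vpaths when steering transmits (an assumption -- the selector is consumed
 * elsewhere in the driver).
 */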
42
43/*
44 * vxge_probe
45 * Probes for x3100 devices
46 */
47int
48vxge_probe(device_t ndev)
49{
50	int err = ENXIO;
51
52	u16 pci_bd_no = 0;
53	u16 pci_vendor_id = 0;
54	u16 pci_device_id = 0;
55
56	char adapter_name[64];
57
58	pci_vendor_id = pci_get_vendor(ndev);
59	if (pci_vendor_id != VXGE_PCI_VENDOR_ID)
60		goto _exit0;
61
62	pci_device_id = pci_get_device(ndev);
63
64	if (pci_device_id == VXGE_PCI_DEVICE_ID_TITAN_1) {
65
66		pci_bd_no = (pci_get_bus(ndev) | pci_get_slot(ndev));
67
68		snprintf(adapter_name, sizeof(adapter_name),
69		    VXGE_ADAPTER_NAME, pci_get_revid(ndev));
70		device_set_desc_copy(ndev, adapter_name);
71
72		if (!vxge_drv_copyright) {
73			device_printf(ndev, VXGE_COPYRIGHT);
74			vxge_drv_copyright = 1;
75		}
76
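		/*
		 * vxge_pci_bd_no / vxge_dev_req_reboot bookkeeping: once a
		 * firmware upgrade has flagged a reboot, re-probing the same
		 * bus/slot is declined so the adapter is not attached again
		 * before the reboot (this reading of the intent is an
		 * assumption).
		 */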
77		if (vxge_dev_req_reboot == 0) {
78			vxge_pci_bd_no = pci_bd_no;
79			err = BUS_PROBE_DEFAULT;
80		} else {
81			if (pci_bd_no != vxge_pci_bd_no) {
82				vxge_pci_bd_no = pci_bd_no;
83				err = BUS_PROBE_DEFAULT;
84			}
85		}
86	}
87
88_exit0:
89	return (err);
90}
91
92/*
93 * vxge_attach
94 * Connects the driver to the system if probe was successful @ndev handle
95 */
96int
97vxge_attach(device_t ndev)
98{
99	int err = 0;
100	vxge_dev_t *vdev;
101	vxge_hal_device_t *hldev = NULL;
102	vxge_hal_device_attr_t device_attr;
103	vxge_free_resources_e error_level = VXGE_FREE_NONE;
104
105	vxge_hal_status_e status = VXGE_HAL_OK;
106
107	/* Get per-ndev buffer */
108	vdev = (vxge_dev_t *) device_get_softc(ndev);
109	if (!vdev)
110		goto _exit0;
111
112	bzero(vdev, sizeof(vxge_dev_t));
113
114	vdev->ndev = ndev;
115	strlcpy(vdev->ndev_name, "vxge", sizeof(vdev->ndev_name));
116
117	err = vxge_driver_config(vdev);
118	if (err != 0)
119		goto _exit0;
120
121	/* Initialize HAL driver */
122	status = vxge_driver_init(vdev);
123	if (status != VXGE_HAL_OK) {
124		device_printf(vdev->ndev, "Failed to initialize driver\n");
125		goto _exit0;
126	}
127	/* Enable PCI bus-master */
128	pci_enable_busmaster(ndev);
129
130	/* Allocate resources */
131	err = vxge_alloc_resources(vdev);
132	if (err != 0) {
133		device_printf(vdev->ndev, "resource allocation failed\n");
134		goto _exit0;
135	}
136
137	err = vxge_device_hw_info_get(vdev);
138	if (err != 0) {
139		error_level = VXGE_FREE_BAR2;
140		goto _exit0;
141	}
142
143	/* Get firmware default values for Device Configuration */
144	vxge_hal_device_config_default_get(vdev->device_config);
145
146	/* Customize Device Configuration based on User request */
147	vxge_vpath_config(vdev);
148
149	/* Allocate ISR resources */
150	err = vxge_alloc_isr_resources(vdev);
151	if (err != 0) {
152		error_level = VXGE_FREE_ISR_RESOURCE;
153		device_printf(vdev->ndev, "isr resource allocation failed\n");
154		goto _exit0;
155	}
156
157	/* HAL attributes */
158	device_attr.bar0 = (u8 *) vdev->pdev->bar_info[0];
159	device_attr.bar1 = (u8 *) vdev->pdev->bar_info[1];
160	device_attr.bar2 = (u8 *) vdev->pdev->bar_info[2];
161	device_attr.regh0 = (vxge_bus_res_t *) vdev->pdev->reg_map[0];
162	device_attr.regh1 = (vxge_bus_res_t *) vdev->pdev->reg_map[1];
163	device_attr.regh2 = (vxge_bus_res_t *) vdev->pdev->reg_map[2];
164	device_attr.irqh = (pci_irq_h) vdev->config.isr_info[0].irq_handle;
165	device_attr.cfgh = vdev->pdev;
166	device_attr.pdev = vdev->pdev;
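	/*
	 * These attributes are the HAL's only view of the adapter: the three
	 * memory BARs mapped in vxge_alloc_resources(), the first interrupt
	 * handle, and the PCI info (presumably used by the HAL for
	 * config-space access).
	 */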
167
168	/* Initialize HAL Device */
169	status = vxge_hal_device_initialize((vxge_hal_device_h *) &hldev,
170	    &device_attr, vdev->device_config);
171	if (status != VXGE_HAL_OK) {
172		error_level = VXGE_FREE_ISR_RESOURCE;
173		device_printf(vdev->ndev, "hal device initialization failed\n");
174		goto _exit0;
175	}
176
177	vdev->devh = hldev;
178	vxge_hal_device_private_set(hldev, vdev);
179
180	if (vdev->is_privilaged) {
181		err = vxge_firmware_verify(vdev);
182		if (err != 0) {
183			vxge_dev_req_reboot = 1;
184			error_level = VXGE_FREE_TERMINATE_DEVICE;
185			goto _exit0;
186		}
187	}
188
189	/* Allocate memory for vpath */
190	vdev->vpaths = (vxge_vpath_t *)
191	    vxge_mem_alloc(vdev->no_of_vpath * sizeof(vxge_vpath_t));
192
193	if (vdev->vpaths == NULL) {
194		error_level = VXGE_FREE_TERMINATE_DEVICE;
195		device_printf(vdev->ndev, "vpath memory allocation failed\n");
196		goto _exit0;
197	}
198
199	vdev->no_of_func = 1;
200	if (vdev->is_privilaged) {
201
202		vxge_hal_func_mode_count(vdev->devh,
203		    vdev->config.hw_info.function_mode, &vdev->no_of_func);
204
205		vxge_bw_priority_config(vdev);
206	}
207
208	/* Initialize mutexes */
209	vxge_mutex_init(vdev);
210
211	/* Initialize Media */
212	vxge_media_init(vdev);
213
214	err = vxge_ifp_setup(ndev);
215	if (err != 0) {
216		error_level = VXGE_FREE_MEDIA;
217		device_printf(vdev->ndev, "setting up interface failed\n");
218		goto _exit0;
219	}
220
221	err = vxge_isr_setup(vdev);
222	if (err != 0) {
223		error_level = VXGE_FREE_INTERFACE;
224		device_printf(vdev->ndev,
225		    "failed to associate interrupt handler with device\n");
226		goto _exit0;
227	}
228	vxge_device_hw_info_print(vdev);
229	vdev->is_active = TRUE;
230
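	/*
	 * On failure, error_level records how far initialization got;
	 * vxge_free_resources() falls through from that level and releases
	 * everything acquired up to this point.
	 */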
231_exit0:
232	if (error_level) {
233		vxge_free_resources(ndev, error_level);
234		err = ENXIO;
235	}
236
237	return (err);
238}
239
240/*
241 * vxge_detach
242 * Detaches driver from the Kernel subsystem
243 */
244int
245vxge_detach(device_t ndev)
246{
247	vxge_dev_t *vdev;
248
249	vdev = (vxge_dev_t *) device_get_softc(ndev);
250	if (vdev->is_active) {
251		vdev->is_active = FALSE;
252		vxge_stop(vdev);
253		vxge_free_resources(ndev, VXGE_FREE_ALL);
254	}
255
256	return (0);
257}
258
259/*
260 * vxge_shutdown
261 * To shutdown device before system shutdown
262 */
263int
264vxge_shutdown(device_t ndev)
265{
266	vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev);
267	vxge_stop(vdev);
268	return (0);
269}
270
271/*
272 * vxge_init
273 * Initialize the interface
274 */
275void
276vxge_init(void *vdev_ptr)
277{
278	vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr;
279
280	VXGE_DRV_LOCK(vdev);
281	vxge_init_locked(vdev);
282	VXGE_DRV_UNLOCK(vdev);
283}
284
285/*
286 * vxge_init_locked
287 * Initialize the interface
288 */
289void
290vxge_init_locked(vxge_dev_t *vdev)
291{
292	int i, err = EINVAL;
293	vxge_hal_device_t *hldev = vdev->devh;
294	vxge_hal_status_e status = VXGE_HAL_OK;
295	vxge_hal_vpath_h vpath_handle;
296
297	ifnet_t ifp = vdev->ifp;
298
299	/* If device is in running state, initializing is not required */
300	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
301		goto _exit0;
302
303	VXGE_DRV_LOCK_ASSERT(vdev);
304
305	/* Opening vpaths */
306	err = vxge_vpath_open(vdev);
307	if (err != 0)
308		goto _exit1;
309
310	if (vdev->config.rth_enable) {
311		status = vxge_rth_config(vdev);
312		if (status != VXGE_HAL_OK)
313			goto _exit1;
314	}
315
316	for (i = 0; i < vdev->no_of_vpath; i++) {
317		vpath_handle = vxge_vpath_handle_get(vdev, i);
318		if (!vpath_handle)
319			continue;
320
321		/* check initial mtu before enabling the device */
322		status = vxge_hal_device_mtu_check(vpath_handle, ifp->if_mtu);
323		if (status != VXGE_HAL_OK) {
324			device_printf(vdev->ndev,
325			    "invalid mtu size %ld specified\n", ifp->if_mtu);
326			goto _exit1;
327		}
328
329		status = vxge_hal_vpath_mtu_set(vpath_handle, ifp->if_mtu);
330		if (status != VXGE_HAL_OK) {
331			device_printf(vdev->ndev,
332			    "setting mtu in device failed\n");
333			goto _exit1;
334		}
335	}
336
337	/* Enable HAL device */
338	status = vxge_hal_device_enable(hldev);
339	if (status != VXGE_HAL_OK) {
340		device_printf(vdev->ndev, "failed to enable device\n");
341		goto _exit1;
342	}
343
344	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX)
345		vxge_msix_enable(vdev);
346
347	/* Checksum capability */
348	ifp->if_hwassist = 0;
349	if (ifp->if_capenable & IFCAP_TXCSUM)
350		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
351
352	if (ifp->if_capenable & IFCAP_TSO4)
353		ifp->if_hwassist |= CSUM_TSO;
354
355	for (i = 0; i < vdev->no_of_vpath; i++) {
356		vpath_handle = vxge_vpath_handle_get(vdev, i);
357		if (!vpath_handle)
358			continue;
359
360		/* Enabling bcast for all vpath */
361		status = vxge_hal_vpath_bcast_enable(vpath_handle);
362		if (status != VXGE_HAL_OK)
363			device_printf(vdev->ndev,
364			    "can't enable bcast on vpath (%d)\n", i);
365	}
366
367	/* Enable interrupts */
368	vxge_hal_device_intr_enable(vdev->devh);
369
370	for (i = 0; i < vdev->no_of_vpath; i++) {
371		vpath_handle = vxge_vpath_handle_get(vdev, i);
372		if (!vpath_handle)
373			continue;
374
375		bzero(&(vdev->vpaths[i].driver_stats),
376		    sizeof(vxge_drv_stats_t));
377		status = vxge_hal_vpath_enable(vpath_handle);
378		if (status != VXGE_HAL_OK)
379			goto _exit2;
380	}
381
382	vxge_os_mdelay(1000);
383
384	/* Device is initialized */
385	vdev->is_initialized = TRUE;
386
387	/* Now inform the stack we're ready */
388	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
389	ifp->if_drv_flags |= IFF_DRV_RUNNING;
390
391	goto _exit0;
392
393_exit2:
394	vxge_hal_device_intr_disable(vdev->devh);
395	vxge_hal_device_disable(hldev);
396
397_exit1:
398	vxge_vpath_close(vdev);
399
400_exit0:
401	return;
402}
403
404/*
405 * vxge_driver_init
406 * Initializes HAL driver
407 */
408vxge_hal_status_e
409vxge_driver_init(vxge_dev_t *vdev)
410{
411	vxge_hal_uld_cbs_t uld_callbacks;
412	vxge_hal_driver_config_t driver_config;
413	vxge_hal_status_e status = VXGE_HAL_OK;
414
415	/* Initialize HAL driver */
416	if (!vxge_dev_ref_count) {
417		bzero(&uld_callbacks, sizeof(vxge_hal_uld_cbs_t));
418		bzero(&driver_config, sizeof(vxge_hal_driver_config_t));
419
420		uld_callbacks.link_up = vxge_link_up;
421		uld_callbacks.link_down = vxge_link_down;
422		uld_callbacks.crit_err = vxge_crit_error;
423		uld_callbacks.sched_timer = NULL;
424		uld_callbacks.xpak_alarm_log = NULL;
425
426		status = vxge_hal_driver_initialize(&driver_config,
427		    &uld_callbacks);
428		if (status != VXGE_HAL_OK) {
429			device_printf(vdev->ndev,
430			    "failed to initialize driver\n");
431			goto _exit0;
432		}
433	}
434	vxge_hal_driver_debug_set(VXGE_TRACE);
435	vxge_dev_ref_count++;
436
437_exit0:
438	return (status);
439}
440
441/*
442 * vxge_driver_config
443 */
444int
445vxge_driver_config(vxge_dev_t *vdev)
446{
447	int i, err = 0;
448	char temp_buffer[30];
449
450	vxge_bw_info_t bw_info;
451
452	VXGE_GET_PARAM("hint.vxge.0.no_of_vpath", vdev->config,
453	    no_of_vpath, VXGE_DEFAULT_USER_HARDCODED);
454
455	if (vdev->config.no_of_vpath == VXGE_DEFAULT_USER_HARDCODED)
456		vdev->config.no_of_vpath = mp_ncpus;
457
458	if (vdev->config.no_of_vpath <= 0) {
459		err = EINVAL;
460		device_printf(vdev->ndev,
461		    "Failed to load driver, "
462		    "invalid config : 'no_of_vpath'\n");
463		goto _exit0;
464	}
465
466	VXGE_GET_PARAM("hint.vxge.0.intr_coalesce", vdev->config,
467	    intr_coalesce, VXGE_DEFAULT_CONFIG_DISABLE);
468
469	VXGE_GET_PARAM("hint.vxge.0.rth_enable", vdev->config,
470	    rth_enable, VXGE_DEFAULT_CONFIG_ENABLE);
471
472	VXGE_GET_PARAM("hint.vxge.0.rth_bkt_sz", vdev->config,
473	    rth_bkt_sz, VXGE_DEFAULT_RTH_BUCKET_SIZE);
474
475	VXGE_GET_PARAM("hint.vxge.0.lro_enable", vdev->config,
476	    lro_enable, VXGE_DEFAULT_CONFIG_ENABLE);
477
478	VXGE_GET_PARAM("hint.vxge.0.tso_enable", vdev->config,
479	    tso_enable, VXGE_DEFAULT_CONFIG_ENABLE);
480
481	VXGE_GET_PARAM("hint.vxge.0.tx_steering", vdev->config,
482	    tx_steering, VXGE_DEFAULT_CONFIG_DISABLE);
483
484	VXGE_GET_PARAM("hint.vxge.0.msix_enable", vdev->config,
485	    intr_mode, VXGE_HAL_INTR_MODE_MSIX);
486
487	VXGE_GET_PARAM("hint.vxge.0.ifqmaxlen", vdev->config,
488	    ifq_maxlen, VXGE_DEFAULT_CONFIG_IFQ_MAXLEN);
489
490	VXGE_GET_PARAM("hint.vxge.0.port_mode", vdev->config,
491	    port_mode, VXGE_DEFAULT_CONFIG_VALUE);
492
493	if (vdev->config.port_mode == VXGE_DEFAULT_USER_HARDCODED)
494		vdev->config.port_mode = VXGE_DEFAULT_CONFIG_VALUE;
495
496	VXGE_GET_PARAM("hint.vxge.0.l2_switch", vdev->config,
497	    l2_switch, VXGE_DEFAULT_CONFIG_VALUE);
498
499	if (vdev->config.l2_switch == VXGE_DEFAULT_USER_HARDCODED)
500		vdev->config.l2_switch = VXGE_DEFAULT_CONFIG_VALUE;
501
502	VXGE_GET_PARAM("hint.vxge.0.fw_upgrade", vdev->config,
503	    fw_option, VXGE_FW_UPGRADE_ALL);
504
505	VXGE_GET_PARAM("hint.vxge.0.low_latency", vdev->config,
506	    low_latency, VXGE_DEFAULT_CONFIG_DISABLE);
507
508	VXGE_GET_PARAM("hint.vxge.0.func_mode", vdev->config,
509	    function_mode, VXGE_DEFAULT_CONFIG_VALUE);
510
511	if (vdev->config.function_mode == VXGE_DEFAULT_USER_HARDCODED)
512		vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE;
513
514	if (!(is_multi_func(vdev->config.function_mode) ||
515	    is_single_func(vdev->config.function_mode)))
516		vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE;
517
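	/*
	 * All of the above are read from the kernel environment; hypothetical
	 * /boot/device.hints or loader.conf entries look like:
	 *
	 *   hint.vxge.0.no_of_vpath="4"
	 *   hint.vxge.0.msix_enable="1"
	 *   hint.vxge.0.rth_enable="1"
	 *
	 * Values left at VXGE_DEFAULT_USER_HARDCODED fall back to the
	 * driver-computed defaults above.
	 */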
518	for (i = 0; i < VXGE_HAL_MAX_FUNCTIONS; i++) {
519
520		bw_info.func_id = i;
521
522		sprintf(temp_buffer, "hint.vxge.0.bandwidth_%d", i);
523		VXGE_GET_PARAM(temp_buffer, bw_info,
524		    bandwidth, VXGE_DEFAULT_USER_HARDCODED);
525
526		if (bw_info.bandwidth == VXGE_DEFAULT_USER_HARDCODED)
527			bw_info.bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT;
528
529		sprintf(temp_buffer, "hint.vxge.0.priority_%d", i);
530		VXGE_GET_PARAM(temp_buffer, bw_info,
531		    priority, VXGE_DEFAULT_USER_HARDCODED);
532
533		if (bw_info.priority == VXGE_DEFAULT_USER_HARDCODED)
534			bw_info.priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT;
535
536		vxge_os_memcpy(&vdev->config.bw_info[i], &bw_info,
537		    sizeof(vxge_bw_info_t));
538	}
539
540_exit0:
541	return (err);
542}
543
544/*
545 * vxge_stop
546 */
547void
548vxge_stop(vxge_dev_t *vdev)
549{
550	VXGE_DRV_LOCK(vdev);
551	vxge_stop_locked(vdev);
552	VXGE_DRV_UNLOCK(vdev);
553}
554
555/*
556 * vxge_stop_locked
557 * Common code for both stop and part of reset.
558 * disables device, interrupts and closes vpaths handle
559 */
560void
561vxge_stop_locked(vxge_dev_t *vdev)
562{
563	u64 adapter_status = 0;
564	vxge_hal_status_e status;
565	vxge_hal_device_t *hldev = vdev->devh;
566	ifnet_t ifp = vdev->ifp;
567
568	VXGE_DRV_LOCK_ASSERT(vdev);
569
570	/* If device is not in "Running" state, return */
571	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
572		return;
573
574	/* Set appropriate flags */
575	vdev->is_initialized = FALSE;
576	hldev->link_state = VXGE_HAL_LINK_NONE;
577	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
578	if_link_state_change(ifp, LINK_STATE_DOWN);
579
580	/* Disable interrupts */
581	vxge_hal_device_intr_disable(hldev);
582
583	/* Disable HAL device */
584	status = vxge_hal_device_disable(hldev);
585	if (status != VXGE_HAL_OK) {
586		vxge_hal_device_status(hldev, &adapter_status);
587		device_printf(vdev->ndev,
588		    "adapter status: 0x%llx\n", adapter_status);
589	}
590
591	/* reset vpaths */
592	vxge_vpath_reset(vdev);
593
594	vxge_os_mdelay(1000);
595
596	/* Close Vpaths */
597	vxge_vpath_close(vdev);
598}
599
600void
601vxge_send(ifnet_t ifp)
602{
603	vxge_vpath_t *vpath;
604	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
605
606	vpath = &(vdev->vpaths[0]);
607
608	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
609		if (VXGE_TX_TRYLOCK(vpath)) {
610			vxge_send_locked(ifp, vpath);
611			VXGE_TX_UNLOCK(vpath);
612		}
613	}
614}
615
616static inline void
617vxge_send_locked(ifnet_t ifp, vxge_vpath_t *vpath)
618{
619	mbuf_t m_head = NULL;
620	vxge_dev_t *vdev = vpath->vdev;
621
622	VXGE_TX_LOCK_ASSERT(vpath);
623
624	if ((!vdev->is_initialized) ||
625	    ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
626	    IFF_DRV_RUNNING))
627		return;
628
629	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
630		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
631		if (m_head == NULL)
632			break;
633
634		if (vxge_xmit(ifp, vpath, &m_head)) {
635			if (m_head == NULL)
636				break;
637
638			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
639			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
640			VXGE_DRV_STATS(vpath, tx_again);
641			break;
642		}
643		/* Send a copy of the frame to the BPF listener */
644		ETHER_BPF_MTAP(ifp, m_head);
645	}
646}
647
648#if __FreeBSD_version >= 800000
649
650int
651vxge_mq_send(ifnet_t ifp, mbuf_t m_head)
652{
653	int i = 0, err = 0;
654
655	vxge_vpath_t *vpath;
656	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
657
658	if (vdev->config.tx_steering) {
659		i = vxge_vpath_get(vdev, m_head);
660	} else if ((m_head->m_flags & M_FLOWID) != 0) {
661		i = m_head->m_pkthdr.flowid % vdev->no_of_vpath;
662	}
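	/*
	 * A frame is steered either by the driver's own tx_steering hash or,
	 * when the stack supplied an M_FLOWID hint, by the mbuf's flowid;
	 * vxge_rx_compl() sets that flowid to the receiving vpath index, so a
	 * flow's Tx and Rx tend to stay on the same vpath.
	 */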
663
664	vpath = &(vdev->vpaths[i]);
665	if (VXGE_TX_TRYLOCK(vpath)) {
666		err = vxge_mq_send_locked(ifp, vpath, m_head);
667		VXGE_TX_UNLOCK(vpath);
668	} else
669		err = drbr_enqueue(ifp, vpath->br, m_head);
670
671	return (err);
672}
673
674static inline int
675vxge_mq_send_locked(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t m_head)
676{
677	int err = 0;
678	mbuf_t next = NULL;
679	vxge_dev_t *vdev = vpath->vdev;
680
681	VXGE_TX_LOCK_ASSERT(vpath);
682
683	if ((!vdev->is_initialized) ||
684	    ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
685	    IFF_DRV_RUNNING)) {
686		err = drbr_enqueue(ifp, vpath->br, m_head);
687		goto _exit0;
688	}
689	if (m_head == NULL) {
690		next = drbr_dequeue(ifp, vpath->br);
691	} else if (drbr_needs_enqueue(ifp, vpath->br)) {
692		if ((err = drbr_enqueue(ifp, vpath->br, m_head)) != 0)
693			goto _exit0;
694		next = drbr_dequeue(ifp, vpath->br);
695	} else
696		next = m_head;
697
698	/* Process the queue */
699	while (next != NULL) {
700		if ((err = vxge_xmit(ifp, vpath, &next)) != 0) {
701			if (next == NULL)
702				break;
703
704			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
705			err = drbr_enqueue(ifp, vpath->br, next);
706			VXGE_DRV_STATS(vpath, tx_again);
707			break;
708		}
709		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
710
711		/* Send a copy of the frame to the BPF listener */
712		ETHER_BPF_MTAP(ifp, next);
713		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
714			break;
715
716		next = drbr_dequeue(ifp, vpath->br);
717	}
718
719_exit0:
720	return (err);
721}
722
723void
724vxge_mq_qflush(ifnet_t ifp)
725{
726	int i;
727	mbuf_t m_head;
728	vxge_vpath_t *vpath;
729
730	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
731
732	for (i = 0; i < vdev->no_of_vpath; i++) {
733		vpath = &(vdev->vpaths[i]);
734		if (!vpath->handle)
735			continue;
736
737		VXGE_TX_LOCK(vpath);
738		while ((m_head = buf_ring_dequeue_sc(vpath->br)) != NULL)
739			vxge_free_packet(m_head);
740
741		VXGE_TX_UNLOCK(vpath);
742	}
743	if_qflush(ifp);
744}
745#endif
746
747static inline int
748vxge_xmit(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t *m_headp)
749{
750	int err, num_segs = 0;
751	u32 txdl_avail, dma_index, tagged = 0;
752
753	dma_addr_t dma_addr;
754	bus_size_t dma_sizes;
755
756	void *dtr_priv;
757	vxge_txdl_priv_t *txdl_priv;
758	vxge_hal_txdl_h txdlh;
759	vxge_hal_status_e status;
760	vxge_dev_t *vdev = vpath->vdev;
761
762	VXGE_DRV_STATS(vpath, tx_xmit);
763
764	txdl_avail = vxge_hal_fifo_free_txdl_count_get(vpath->handle);
765	if (txdl_avail < VXGE_TX_LOW_THRESHOLD) {
766
767		VXGE_DRV_STATS(vpath, tx_low_dtr_cnt);
768		err = ENOBUFS;
769		goto _exit0;
770	}
771
772	/* Reserve descriptors */
773	status = vxge_hal_fifo_txdl_reserve(vpath->handle, &txdlh, &dtr_priv);
774	if (status != VXGE_HAL_OK) {
775		VXGE_DRV_STATS(vpath, tx_reserve_failed);
776		err = ENOBUFS;
777		goto _exit0;
778	}
779
780	/* Update Tx private structure for this descriptor */
781	txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
782
783	/*
784	 * Map the packet for DMA.
785	 * Returns number of segments through num_segs.
786	 */
787	err = vxge_dma_mbuf_coalesce(vpath->dma_tag_tx, txdl_priv->dma_map,
788	    m_headp, txdl_priv->dma_buffers, &num_segs);
789
790	if (vpath->driver_stats.tx_max_frags < num_segs)
791		vpath->driver_stats.tx_max_frags = num_segs;
792
793	if (err == ENOMEM) {
794		VXGE_DRV_STATS(vpath, tx_no_dma_setup);
795		vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
796		goto _exit0;
797	} else if (err != 0) {
798		vxge_free_packet(*m_headp);
799		VXGE_DRV_STATS(vpath, tx_no_dma_setup);
800		vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
801		goto _exit0;
802	}
803
804	txdl_priv->mbuf_pkt = *m_headp;
805
806	/* Set VLAN tag in descriptor only if this packet has it */
807	if ((*m_headp)->m_flags & M_VLANTAG)
808		vxge_hal_fifo_txdl_vlan_set(txdlh,
809		    (*m_headp)->m_pkthdr.ether_vtag);
810
811	/* Set descriptor buffer for header and each fragment/segment */
812	for (dma_index = 0; dma_index < num_segs; dma_index++) {
813
814		dma_sizes = txdl_priv->dma_buffers[dma_index].ds_len;
815		dma_addr = htole64(txdl_priv->dma_buffers[dma_index].ds_addr);
816
817		vxge_hal_fifo_txdl_buffer_set(vpath->handle, txdlh, dma_index,
818		    dma_addr, dma_sizes);
819	}
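	/*
	 * Each bus_dma segment produced by the mapping above occupies one
	 * buffer pointer in the TxDL; tx_max_frags above tracks the largest
	 * mapping seen so far.
	 */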
820
821	/* Pre-write Sync of mapping */
822	bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map,
823	    BUS_DMASYNC_PREWRITE);
824
825	if ((*m_headp)->m_pkthdr.csum_flags & CSUM_TSO) {
826		if ((*m_headp)->m_pkthdr.tso_segsz) {
827			VXGE_DRV_STATS(vpath, tx_tso);
828			vxge_hal_fifo_txdl_lso_set(txdlh,
829			    VXGE_HAL_FIFO_LSO_FRM_ENCAP_AUTO,
830			    (*m_headp)->m_pkthdr.tso_segsz);
831		}
832	}
833
834	/* Checksum */
835	if (ifp->if_hwassist > 0) {
836		vxge_hal_fifo_txdl_cksum_set_bits(txdlh,
837		    VXGE_HAL_FIFO_TXD_TX_CKO_IPV4_EN |
838		    VXGE_HAL_FIFO_TXD_TX_CKO_TCP_EN |
839		    VXGE_HAL_FIFO_TXD_TX_CKO_UDP_EN);
840	}
841
842	if ((vxge_hal_device_check_id(vdev->devh) == VXGE_HAL_CARD_TITAN_1A) &&
843	    (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)))
844		tagged = 1;
845
846	vxge_hal_fifo_txdl_post(vpath->handle, txdlh, tagged);
847	VXGE_DRV_STATS(vpath, tx_posted);
848
849_exit0:
850	return (err);
851}
852
853/*
854 * vxge_tx_replenish
855 * Create DMA maps for Tx descriptors for later use
856 */
857/* ARGSUSED */
858vxge_hal_status_e
859vxge_tx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
860    void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen)
861{
862	int err = 0;
863
864	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
865	vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
866
867	err = bus_dmamap_create(vpath->dma_tag_tx, BUS_DMA_NOWAIT,
868	    &txdl_priv->dma_map);
869
870	return ((err == 0) ? VXGE_HAL_OK : VXGE_HAL_FAIL);
871}
872
873/*
874 * vxge_tx_compl
875 * If the interrupt is due to Tx completion, free the sent buffer
876 */
877vxge_hal_status_e
878vxge_tx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
879    void *dtr_priv, vxge_hal_fifo_tcode_e t_code, void *userdata)
880{
881	vxge_hal_status_e status = VXGE_HAL_OK;
882
883	vxge_txdl_priv_t *txdl_priv;
884	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
885	vxge_dev_t *vdev = vpath->vdev;
886
887	ifnet_t ifp = vdev->ifp;
888
889	VXGE_TX_LOCK(vpath);
890
891	/*
892	 * For each completed descriptor
893	 * get the private structure, unmap and free the buffer, and free the descriptor
894	 */
895
896	do {
897		VXGE_DRV_STATS(vpath, tx_compl);
898		if (t_code != VXGE_HAL_FIFO_T_CODE_OK) {
899			device_printf(vdev->ndev, "tx transfer code %d\n",
900			    t_code);
901
902			ifp->if_oerrors++;
903			VXGE_DRV_STATS(vpath, tx_tcode);
904			vxge_hal_fifo_handle_tcode(vpath_handle, txdlh, t_code);
905		}
906		ifp->if_opackets++;
907		txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
908
909		bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map);
910
911		vxge_free_packet(txdl_priv->mbuf_pkt);
912		vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
913
914	} while (vxge_hal_fifo_txdl_next_completed(vpath_handle, &txdlh,
915	    &dtr_priv, &t_code) == VXGE_HAL_OK);
916
917
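	/* Descriptors were reclaimed above, so the send queue may be restarted. */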
918	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
919	VXGE_TX_UNLOCK(vpath);
920
921	return (status);
922}
923
924/* ARGSUSED */
925void
926vxge_tx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
927    void *dtr_priv, vxge_hal_txdl_state_e state,
928    void *userdata, vxge_hal_reopen_e reopen)
929{
930	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
931	vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
932
933	if (state != VXGE_HAL_TXDL_STATE_POSTED)
934		return;
935
936	if (txdl_priv != NULL) {
937		bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map,
938		    BUS_DMASYNC_POSTWRITE);
939
940		bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map);
941		bus_dmamap_destroy(vpath->dma_tag_tx, txdl_priv->dma_map);
942		vxge_free_packet(txdl_priv->mbuf_pkt);
943	}
944
945	/* Free the descriptor */
946	vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
947}
948
949/*
950 * vxge_rx_replenish
951 * Allocate buffers and set them into descriptors for later use
952 */
953/* ARGSUSED */
954vxge_hal_status_e
955vxge_rx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
956    void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen)
957{
958	int err = 0;
959	vxge_hal_status_e status = VXGE_HAL_OK;
960
961	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
962	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
963
964	/* Create DMA map for these descriptors */
965	err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT,
966	    &rxd_priv->dma_map);
967	if (err == 0) {
968		if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) {
969			bus_dmamap_destroy(vpath->dma_tag_rx,
970			    rxd_priv->dma_map);
971			status = VXGE_HAL_FAIL;
972		}
973	}
974
975	return (status);
976}
977
978/*
979 * vxge_rx_compl
980 */
981vxge_hal_status_e
982vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
983    void *dtr_priv, u8 t_code, void *userdata)
984{
985	mbuf_t mbuf_up;
986
987	vxge_rxd_priv_t *rxd_priv;
988	vxge_hal_ring_rxd_info_t ext_info;
989	vxge_hal_status_e status = VXGE_HAL_OK;
990
991	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
992	vxge_dev_t *vdev = vpath->vdev;
993
994	struct lro_entry *queued = NULL;
995	struct lro_ctrl *lro = &vpath->lro;
996
997	/* get the interface pointer */
998	ifnet_t ifp = vdev->ifp;
999
1000	do {
1001		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1002			vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1003			status = VXGE_HAL_FAIL;
1004			break;
1005		}
1006
1007		VXGE_DRV_STATS(vpath, rx_compl);
1008		rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1009
1010		/* Gets details of mbuf i.e., packet length */
1011		vxge_rx_rxd_1b_get(vpath, rxdh, dtr_priv);
1012
1013		/*
1014		 * Prepare one buffer to send up to the stack. Since the upper
1015		 * layer frees that buffer, do not reuse rxd_priv->mbuf_pkt.
1016		 * Instead allocate a new buffer, map it, attach it to the
1017		 * current descriptor and post the descriptor back to the ring.
1018		 */
1019		mbuf_up = rxd_priv->mbuf_pkt;
1020		if (t_code != VXGE_HAL_RING_RXD_T_CODE_OK) {
1021
1022			ifp->if_ierrors++;
1023			VXGE_DRV_STATS(vpath, rx_tcode);
1024			status = vxge_hal_ring_handle_tcode(vpath_handle,
1025			    rxdh, t_code);
1026
1027			/*
1028			 * If the transfer code is not for unknown protocols and
1029			 * vxge_hal_ring_handle_tcode() did not return
1030			 * VXGE_HAL_OK,
1031			 * drop this packet and move on to the next descriptor
1032			 */
1033			if ((status != VXGE_HAL_OK) &&
1034			    (t_code != VXGE_HAL_RING_T_CODE_L3_PKT_ERR)) {
1035
1036				vxge_free_packet(mbuf_up);
1037				vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1038				continue;
1039			}
1040		}
1041
1042		if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) {
1043			/*
1044			 * If unable to allocate buffer, post descriptor back
1045			 * to vpath for future processing of same packet.
1046			 */
1047			vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1048			continue;
1049		}
1050
1051		/* Get the extended information */
1052		vxge_hal_ring_rxd_1b_info_get(vpath_handle, rxdh, &ext_info);
1053
1054		/* post descriptor with newly allocated mbuf back to vpath */
1055		vxge_hal_ring_rxd_post(vpath_handle, rxdh);
1056		vpath->rxd_posted++;
1057
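		/*
		 * Ring the post doorbell only once every
		 * VXGE_RXD_REPLENISH_COUNT descriptors, presumably to limit
		 * the number of PIO writes on the hot path.
		 */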
1058		if (vpath->rxd_posted % VXGE_RXD_REPLENISH_COUNT == 0)
1059			vxge_hal_ring_rxd_post_post_db(vpath_handle);
1060
1061		/*
1062		 * Set successfully computed checksums in the mbuf.
1063		 * Leave the rest to the stack to be reverified.
1064		 */
1065		vxge_rx_checksum(ext_info, mbuf_up);
1066
1067#if __FreeBSD_version >= 800000
1068		mbuf_up->m_flags |= M_FLOWID;
1069		mbuf_up->m_pkthdr.flowid = vpath->vp_index;
1070#endif
1071		/* Post-Read sync for buffers */
1072		bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1073		    BUS_DMASYNC_POSTREAD);
1074
1075		vxge_rx_input(ifp, mbuf_up, vpath);
1076
1077	} while (vxge_hal_ring_rxd_next_completed(vpath_handle, &rxdh,
1078	    &dtr_priv, &t_code) == VXGE_HAL_OK);
1079
1080	/* Flush any outstanding LRO work */
1081	if (vpath->lro_enable && vpath->lro.lro_cnt) {
1082		while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1083			SLIST_REMOVE_HEAD(&lro->lro_active, next);
1084			tcp_lro_flush(lro, queued);
1085		}
1086	}
1087
1088	return (status);
1089}
1090
1091static inline void
1092vxge_rx_input(ifnet_t ifp, mbuf_t mbuf_up, vxge_vpath_t *vpath)
1093{
1094	if (vpath->lro_enable && vpath->lro.lro_cnt) {
1095		if (tcp_lro_rx(&vpath->lro, mbuf_up, 0) == 0)
1096			return;
1097	}
1098	(*ifp->if_input) (ifp, mbuf_up);
1099}
1100
1101static inline void
1102vxge_rx_checksum(vxge_hal_ring_rxd_info_t ext_info, mbuf_t mbuf_up)
1103{
1104
1105	if (!(ext_info.proto & VXGE_HAL_FRAME_PROTO_IP_FRAG) &&
1106	    (ext_info.proto & VXGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
1107	    ext_info.l3_cksum_valid && ext_info.l4_cksum_valid) {
1108
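		/*
		 * csum_data = 0xffff together with CSUM_DATA_VALID |
		 * CSUM_PSEUDO_HDR tells the stack the L4 checksum is already
		 * verified, so it will not be recomputed in software.
		 */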
1109		mbuf_up->m_pkthdr.csum_data = htons(0xffff);
1110
1111		mbuf_up->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1112		mbuf_up->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1113		mbuf_up->m_pkthdr.csum_flags |=
1114		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1115
1116	} else {
1117
1118		if (ext_info.vlan) {
1119			mbuf_up->m_pkthdr.ether_vtag = ext_info.vlan;
1120			mbuf_up->m_flags |= M_VLANTAG;
1121		}
1122	}
1123}
1124
1125/*
1126 * vxge_rx_term - during unload, terminate and free all descriptors
1127 * @vpath_handle Rx vpath handle, @rxdh Rx descriptor handle, @state descriptor
1128 * state, @userdata per-adapter data, @reopen vpath open/reopen option
1129 */
1130/* ARGSUSED */
1131void
1132vxge_rx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
1133    void *dtr_priv, vxge_hal_rxd_state_e state, void *userdata,
1134    vxge_hal_reopen_e reopen)
1135{
1136	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
1137	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1138
1139	if (state != VXGE_HAL_RXD_STATE_POSTED)
1140		return;
1141
1142	if (rxd_priv != NULL) {
1143		bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1144		    BUS_DMASYNC_POSTREAD);
1145		bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);
1146		bus_dmamap_destroy(vpath->dma_tag_rx, rxd_priv->dma_map);
1147
1148		vxge_free_packet(rxd_priv->mbuf_pkt);
1149	}
1150	/* Free the descriptor */
1151	vxge_hal_ring_rxd_free(vpath_handle, rxdh);
1152}
1153
1154/*
1155 * vxge_rx_rxd_1b_get
1156 * Get descriptors of packet to send up
1157 */
1158void
1159vxge_rx_rxd_1b_get(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
1160{
1161	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1162	mbuf_t mbuf_up = rxd_priv->mbuf_pkt;
1163
1164	/* Retrieve data from completed descriptor */
1165	vxge_hal_ring_rxd_1b_get(vpath->handle, rxdh, &rxd_priv->dma_addr[0],
1166	    (u32 *) &rxd_priv->dma_sizes[0]);
1167
1168	/* Update newly created buffer to be sent up with packet length */
1169	mbuf_up->m_len = rxd_priv->dma_sizes[0];
1170	mbuf_up->m_pkthdr.len = rxd_priv->dma_sizes[0];
1171	mbuf_up->m_next = NULL;
1172}
1173
1174/*
1175 * vxge_rx_rxd_1b_set
1176 * Allocates new mbufs to be placed into descriptors
1177 */
1178int
1179vxge_rx_rxd_1b_set(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
1180{
1181	int num_segs, err = 0;
1182
1183	mbuf_t mbuf_pkt;
1184	bus_dmamap_t dma_map;
1185	bus_dma_segment_t dma_buffers[1];
1186	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1187
1188	vxge_dev_t *vdev = vpath->vdev;
1189
1190	mbuf_pkt = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, vdev->rx_mbuf_sz);
1191	if (!mbuf_pkt) {
1192		err = ENOBUFS;
1193		VXGE_DRV_STATS(vpath, rx_no_buf);
1194		device_printf(vdev->ndev, "out of memory to allocate mbuf\n");
1195		goto _exit0;
1196	}
1197
1198	/* Update mbuf's length, packet length and receive interface */
1199	mbuf_pkt->m_len = vdev->rx_mbuf_sz;
1200	mbuf_pkt->m_pkthdr.len = vdev->rx_mbuf_sz;
1201	mbuf_pkt->m_pkthdr.rcvif = vdev->ifp;
1202
1203	/* Load DMA map */
1204	err = vxge_dma_mbuf_coalesce(vpath->dma_tag_rx, vpath->extra_dma_map,
1205	    &mbuf_pkt, dma_buffers, &num_segs);
1206	if (err != 0) {
1207		VXGE_DRV_STATS(vpath, rx_map_fail);
1208		vxge_free_packet(mbuf_pkt);
1209		goto _exit0;
1210	}
1211
1212	/* Unload DMA map of mbuf in current descriptor */
1213	bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1214	    BUS_DMASYNC_POSTREAD);
1215	bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);
1216
1217	/* Update descriptor private data */
1218	dma_map = rxd_priv->dma_map;
1219	rxd_priv->mbuf_pkt = mbuf_pkt;
1220	rxd_priv->dma_addr[0] = htole64(dma_buffers->ds_addr);
1221	rxd_priv->dma_map = vpath->extra_dma_map;
1222	vpath->extra_dma_map = dma_map;
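	/*
	 * The new mbuf was loaded through the vpath's spare map
	 * (extra_dma_map); the map that carried the completed packet now
	 * becomes the spare, so no DMA map is created or destroyed on the
	 * receive fast path.
	 */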
1223
1224	/* Pre-Read/Write sync */
1225	bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1226	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1227
1228	/* Set descriptor buffer */
1229	vxge_hal_ring_rxd_1b_set(rxdh, rxd_priv->dma_addr[0], vdev->rx_mbuf_sz);
1230
1231_exit0:
1232	return (err);
1233}
1234
1235/*
1236 * vxge_link_up
1237 * Callback for Link-up indication from HAL
1238 */
1239/* ARGSUSED */
1240void
1241vxge_link_up(vxge_hal_device_h devh, void *userdata)
1242{
1243	int i;
1244	vxge_vpath_t *vpath;
1245	vxge_hal_device_hw_info_t *hw_info;
1246
1247	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1248	hw_info = &vdev->config.hw_info;
1249
1250	ifnet_t ifp = vdev->ifp;
1251
1252	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1253		for (i = 0; i < vdev->no_of_vpath; i++) {
1254			vpath = &(vdev->vpaths[i]);
1255			vxge_hal_vpath_tti_ci_set(vpath->handle);
1256			vxge_hal_vpath_rti_ci_set(vpath->handle);
1257		}
1258	}
1259
1260	if (vdev->is_privilaged && (hw_info->ports > 1)) {
1261		vxge_active_port_update(vdev);
1262		device_printf(vdev->ndev,
1263		    "Active Port : %lld\n", vdev->active_port);
1264	}
1265
1266	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1267	if_link_state_change(ifp, LINK_STATE_UP);
1268}
1269
1270/*
1271 * vxge_link_down
1272 * Callback for Link-down indication from HAL
1273 */
1274/* ARGSUSED */
1275void
1276vxge_link_down(vxge_hal_device_h devh, void *userdata)
1277{
1278	int i;
1279	vxge_vpath_t *vpath;
1280	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1281
1282	ifnet_t ifp = vdev->ifp;
1283
1284	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1285		for (i = 0; i < vdev->no_of_vpath; i++) {
1286			vpath = &(vdev->vpaths[i]);
1287			vxge_hal_vpath_tti_ci_reset(vpath->handle);
1288			vxge_hal_vpath_rti_ci_reset(vpath->handle);
1289		}
1290	}
1291
1292	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1293	if_link_state_change(ifp, LINK_STATE_DOWN);
1294}
1295
1296/*
1297 * vxge_reset
1298 */
1299void
1300vxge_reset(vxge_dev_t *vdev)
1301{
1302	if (!vdev->is_initialized)
1303		return;
1304
1305	VXGE_DRV_LOCK(vdev);
1306	vxge_stop_locked(vdev);
1307	vxge_init_locked(vdev);
1308	VXGE_DRV_UNLOCK(vdev);
1309}
1310
1311/*
1312 * vxge_crit_error
1313 * Callback for Critical error indication from HAL
1314 */
1315/* ARGSUSED */
1316void
1317vxge_crit_error(vxge_hal_device_h devh, void *userdata,
1318    vxge_hal_event_e type, u64 serr_data)
1319{
1320	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1321	ifnet_t ifp = vdev->ifp;
1322
1323	switch (type) {
1324	case VXGE_HAL_EVENT_SERR:
1325	case VXGE_HAL_EVENT_KDFCCTL:
1326	case VXGE_HAL_EVENT_CRITICAL:
1327		vxge_hal_device_intr_disable(vdev->devh);
1328		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1329		if_link_state_change(ifp, LINK_STATE_DOWN);
1330		break;
1331	default:
1332		break;
1333	}
1334}
1335
1336/*
1337 * vxge_ifp_setup
1338 */
1339int
1340vxge_ifp_setup(device_t ndev)
1341{
1342	ifnet_t ifp;
1343	int i, j, err = 0;
1344
1345	vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev);
1346
1347	for (i = 0, j = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
1348		if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
1349			continue;
1350
1351		if (j >= vdev->no_of_vpath)
1352			break;
1353
1354		vdev->vpaths[j].vp_id = i;
1355		vdev->vpaths[j].vp_index = j;
1356		vdev->vpaths[j].vdev = vdev;
1357		vdev->vpaths[j].is_configured = TRUE;
1358
1359		vxge_os_memcpy((u8 *) vdev->vpaths[j].mac_addr,
1360		    (u8 *) (vdev->config.hw_info.mac_addrs[i]),
1361		    (size_t) ETHER_ADDR_LEN);
1362		j++;
1363	}
1364
1365	/* Get interface ifnet structure for this Ether device */
1366	ifp = if_alloc(IFT_ETHER);
1367	if (ifp == NULL) {
1368		device_printf(vdev->ndev,
1369		    "memory allocation for ifnet failed\n");
1370		err = ENXIO;
1371		goto _exit0;
1372	}
1373	vdev->ifp = ifp;
1374
1375	/* Initialize interface ifnet structure */
1376	if_initname(ifp, device_get_name(ndev), device_get_unit(ndev));
1377
1378	ifp->if_mtu = ETHERMTU;
1379	ifp->if_baudrate = VXGE_BAUDRATE;
1380	ifp->if_init = vxge_init;
1381	ifp->if_softc = vdev;
1382	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1383	ifp->if_ioctl = vxge_ioctl;
1384	ifp->if_start = vxge_send;
1385
1386#if __FreeBSD_version >= 800000
1387	ifp->if_transmit = vxge_mq_send;
1388	ifp->if_qflush = vxge_mq_qflush;
1389#endif
1390	ifp->if_snd.ifq_drv_maxlen = max(vdev->config.ifq_maxlen, ifqmaxlen);
1391	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1392	/* IFQ_SET_READY(&ifp->if_snd); */
1393
1394	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1395
1396	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
1397	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1398	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1399
1400	if (vdev->config.tso_enable)
1401		vxge_tso_config(vdev);
1402
1403	if (vdev->config.lro_enable)
1404		ifp->if_capabilities |= IFCAP_LRO;
1405
1406	ifp->if_capenable = ifp->if_capabilities;
1407
1408	strlcpy(vdev->ndev_name, device_get_nameunit(ndev),
1409	    sizeof(vdev->ndev_name));
1410
1411	/* Attach the interface */
1412	ether_ifattach(ifp, vdev->vpaths[0].mac_addr);
1413
1414_exit0:
1415	return (err);
1416}
1417
1418/*
1419 * vxge_isr_setup
1420 * Register isr functions
1421 */
1422int
1423vxge_isr_setup(vxge_dev_t *vdev)
1424{
1425	int i, irq_rid, err = 0;
1426	vxge_vpath_t *vpath;
1427
1428	void *isr_func_arg;
1429	void (*isr_func_ptr) (void *);
1430
1431	switch (vdev->config.intr_mode) {
1432	case VXGE_HAL_INTR_MODE_IRQLINE:
1433		err = bus_setup_intr(vdev->ndev,
1434		    vdev->config.isr_info[0].irq_res,
1435		    (INTR_TYPE_NET | INTR_MPSAFE),
1436		    vxge_isr_filter, vxge_isr_line, vdev,
1437		    &vdev->config.isr_info[0].irq_handle);
1438		break;
1439
1440	case VXGE_HAL_INTR_MODE_MSIX:
1441		for (i = 0; i < vdev->intr_count; i++) {
1442
1443			irq_rid = vdev->config.isr_info[i].irq_rid;
1444			vpath = &vdev->vpaths[irq_rid / 4];
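			/*
			 * Four MSI-X rids are reserved per vpath (see
			 * vxge_alloc_isr_resources()); rid % 4 == 2 is the
			 * traffic (Tx/Rx) vector and rid % 4 == 3 is the
			 * alarm vector.
			 */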
1445
1446			if ((irq_rid % 4) == 2) {
1447				isr_func_ptr = vxge_isr_msix;
1448				isr_func_arg = (void *) vpath;
1449			} else if ((irq_rid % 4) == 3) {
1450				isr_func_ptr = vxge_isr_msix_alarm;
1451				isr_func_arg = (void *) vpath;
1452			} else
1453				break;
1454
1455			err = bus_setup_intr(vdev->ndev,
1456			    vdev->config.isr_info[i].irq_res,
1457			    (INTR_TYPE_NET | INTR_MPSAFE), NULL,
1458			    (void *) isr_func_ptr, (void *) isr_func_arg,
1459			    &vdev->config.isr_info[i].irq_handle);
1460			if (err != 0)
1461				break;
1462		}
1463
1464		if (err != 0) {
1465			/* Teardown interrupt handler */
1466			while (--i >= 0)
1467				bus_teardown_intr(vdev->ndev,
1468				    vdev->config.isr_info[i].irq_res,
1469				    vdev->config.isr_info[i].irq_handle);
1470		}
1471		break;
1472	}
1473
1474	return (err);
1475}
1476
1477/*
1478 * vxge_isr_filter
1479 * ISR filter function - filter interrupts from other shared devices
1480 */
1481int
1482vxge_isr_filter(void *handle)
1483{
1484	u64 val64 = 0;
1485	vxge_dev_t *vdev = (vxge_dev_t *) handle;
1486	__hal_device_t *hldev = (__hal_device_t *) vdev->devh;
1487
1488	vxge_hal_common_reg_t *common_reg =
1489	    (vxge_hal_common_reg_t *) (hldev->common_reg);
1490
1491	val64 = vxge_os_pio_mem_read64(vdev->pdev, (vdev->devh)->regh0,
1492	    &common_reg->titan_general_int_status);
1493
1494	return ((val64) ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
1495}
1496
1497/*
1498 * vxge_isr_line
1499 * Interrupt service routine for Line interrupts
1500 */
1501void
1502vxge_isr_line(void *vdev_ptr)
1503{
1504	vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr;
1505
1506	vxge_hal_device_handle_irq(vdev->devh, 0);
1507}
1508
1509void
1510vxge_isr_msix(void *vpath_ptr)
1511{
1512	u32 got_rx = 0;
1513	u32 got_tx = 0;
1514
1515	__hal_virtualpath_t *hal_vpath;
1516	vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr;
1517	vxge_dev_t *vdev = vpath->vdev;
1518	hal_vpath = ((__hal_vpath_handle_t *) vpath->handle)->vpath;
1519
1520	VXGE_DRV_STATS(vpath, isr_msix);
1521	VXGE_HAL_DEVICE_STATS_SW_INFO_TRAFFIC_INTR(vdev->devh);
1522
1523	vxge_hal_vpath_mf_msix_mask(vpath->handle, vpath->msix_vec);
1524
1525	/* processing rx */
1526	vxge_hal_vpath_poll_rx(vpath->handle, &got_rx);
1527
1528	/* processing tx */
1529	if (hal_vpath->vp_config->fifo.enable) {
1530		vxge_intr_coalesce_tx(vpath);
1531		vxge_hal_vpath_poll_tx(vpath->handle, &got_tx);
1532	}
1533
1534	vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec);
1535}
1536
1537void
1538vxge_isr_msix_alarm(void *vpath_ptr)
1539{
1540	int i;
1541	vxge_hal_status_e status = VXGE_HAL_OK;
1542
1543	vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr;
1544	vxge_dev_t *vdev = vpath->vdev;
1545
1546	VXGE_HAL_DEVICE_STATS_SW_INFO_NOT_TRAFFIC_INTR(vdev->devh);
1547
1548	/* Process alarms in each vpath */
1549	for (i = 0; i < vdev->no_of_vpath; i++) {
1550
1551		vpath = &(vdev->vpaths[i]);
1552		vxge_hal_vpath_mf_msix_mask(vpath->handle,
1553		    vpath->msix_vec_alarm);
1554		status = vxge_hal_vpath_alarm_process(vpath->handle, 0);
1555		if ((status == VXGE_HAL_ERR_EVENT_SLOT_FREEZE) ||
1556		    (status == VXGE_HAL_ERR_EVENT_SERR)) {
1557			device_printf(vdev->ndev,
1558			    "processing alarms: unrecoverable error %x\n",
1559			    status);
1560
1561			/* Stop the driver */
1562			vdev->is_initialized = FALSE;
1563			break;
1564		}
1565		vxge_hal_vpath_mf_msix_unmask(vpath->handle,
1566		    vpath->msix_vec_alarm);
1567	}
1568}
1569
1570/*
1571 * vxge_msix_enable
1572 */
1573vxge_hal_status_e
1574vxge_msix_enable(vxge_dev_t *vdev)
1575{
1576	int i, first_vp_id, msix_id;
1577
1578	vxge_vpath_t *vpath;
1579	vxge_hal_status_e status = VXGE_HAL_OK;
1580
1581	/*
1582	 * Unmasking and Setting MSIX vectors before enabling interrupts
1583	 * tim[] : 0 - Tx ## 1 - Rx ## 2 - UMQ-DMQ ## 0 - BITMAP
1584	 */
1585	int tim[4] = {0, 1, 0, 0};
1586
1587	for (i = 0; i < vdev->no_of_vpath; i++) {
1588
1589		vpath = vdev->vpaths + i;
1590		first_vp_id = vdev->vpaths[0].vp_id;
1591
1592		msix_id = vpath->vp_id * VXGE_HAL_VPATH_MSIX_ACTIVE;
1593		tim[1] = vpath->msix_vec = msix_id + 1;
1594
1595		vpath->msix_vec_alarm = first_vp_id *
1596		    VXGE_HAL_VPATH_MSIX_ACTIVE + VXGE_HAL_VPATH_MSIX_ALARM_ID;
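		/*
		 * msix_id + 1 becomes this vpath's traffic vector and is
		 * programmed into tim[1] (the Rx slot); all vpaths share the
		 * first vpath's alarm vector.
		 */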
1597
1598		status = vxge_hal_vpath_mf_msix_set(vpath->handle,
1599		    tim, VXGE_HAL_VPATH_MSIX_ALARM_ID);
1600
1601		if (status != VXGE_HAL_OK) {
1602			device_printf(vdev->ndev,
1603			    "failed to set msix vectors to vpath\n");
1604			break;
1605		}
1606
1607		vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec);
1608		vxge_hal_vpath_mf_msix_unmask(vpath->handle,
1609		    vpath->msix_vec_alarm);
1610	}
1611
1612	return (status);
1613}
1614
1615/*
1616 * vxge_media_init
1617 * Initializes, adds and sets media
1618 */
1619void
1620vxge_media_init(vxge_dev_t *vdev)
1621{
1622	ifmedia_init(&vdev->media,
1623	    IFM_IMASK, vxge_media_change, vxge_media_status);
1624
1625	/* Add supported media */
1626	ifmedia_add(&vdev->media,
1627	    IFM_ETHER | vdev->ifm_optics | IFM_FDX,
1628	    0, NULL);
1629
1630	/* Set media */
1631	ifmedia_add(&vdev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1632	ifmedia_set(&vdev->media, IFM_ETHER | IFM_AUTO);
1633}
1634
1635/*
1636 * vxge_media_status
1637 * Callback  for interface media settings
1638 */
1639void
1640vxge_media_status(ifnet_t ifp, struct ifmediareq *ifmr)
1641{
1642	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
1643	vxge_hal_device_t *hldev = vdev->devh;
1644
1645	ifmr->ifm_status = IFM_AVALID;
1646	ifmr->ifm_active = IFM_ETHER;
1647
1648	/* set link state */
1649	if (vxge_hal_device_link_state_get(hldev) == VXGE_HAL_LINK_UP) {
1650		ifmr->ifm_status |= IFM_ACTIVE;
1651		ifmr->ifm_active |= vdev->ifm_optics | IFM_FDX;
1652		if_link_state_change(ifp, LINK_STATE_UP);
1653	}
1654}
1655
1656/*
1657 * vxge_media_change
1658 * Media change driver callback
1659 */
1660int
1661vxge_media_change(ifnet_t ifp)
1662{
1663	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
1664	struct ifmedia *ifmediap = &vdev->media;
1665
1666	return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER ? EINVAL : 0);
1667}
1668
1669/*
1670 * Allocate PCI resources
1671 */
1672int
1673vxge_alloc_resources(vxge_dev_t *vdev)
1674{
1675	int err = 0;
1676	vxge_pci_info_t *pci_info = NULL;
1677	vxge_free_resources_e error_level = VXGE_FREE_NONE;
1678
1679	device_t ndev = vdev->ndev;
1680
1681	/* Allocate Buffer for HAL Device Configuration */
1682	vdev->device_config = (vxge_hal_device_config_t *)
1683	    vxge_mem_alloc(sizeof(vxge_hal_device_config_t));
1684
1685	if (!vdev->device_config) {
1686		err = ENOMEM;
1687		error_level = VXGE_DISABLE_PCI_BUSMASTER;
1688		device_printf(vdev->ndev,
1689		    "failed to allocate memory for device config\n");
1690		goto _exit0;
1691	}
1692
1693
1694	pci_info = (vxge_pci_info_t *) vxge_mem_alloc(sizeof(vxge_pci_info_t));
1695	if (!pci_info) {
1696		error_level = VXGE_FREE_DEVICE_CONFIG;
1697		err = ENOMEM;
1698		device_printf(vdev->ndev,
1699		    "failed to allocate memory for pci info\n");
1700		goto _exit0;
1701	}
1702	pci_info->ndev = ndev;
1703	vdev->pdev = pci_info;
1704
1705	err = vxge_alloc_bar_resources(vdev, 0);
1706	if (err != 0) {
1707		error_level = VXGE_FREE_BAR0;
1708		goto _exit0;
1709	}
1710
1711	err = vxge_alloc_bar_resources(vdev, 1);
1712	if (err != 0) {
1713		error_level = VXGE_FREE_BAR1;
1714		goto _exit0;
1715	}
1716
1717	err = vxge_alloc_bar_resources(vdev, 2);
1718	if (err != 0)
1719		error_level = VXGE_FREE_BAR2;
1720
1721_exit0:
1722	if (error_level)
1723		vxge_free_resources(ndev, error_level);
1724
1725	return (err);
1726}
1727
1728/*
1729 * vxge_alloc_bar_resources
1730 * Allocates BAR resources
1731 */
1732int
1733vxge_alloc_bar_resources(vxge_dev_t *vdev, int i)
1734{
1735	int err = 0;
1736	int res_id = 0;
1737	vxge_pci_info_t *pci_info = vdev->pdev;
1738
1739	res_id = PCIR_BAR((i == 0) ? 0 : (i * 2));
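	/*
	 * BAR rid mapping: BAR0 at rid 0, then every other rid (i * 2),
	 * presumably because the remaining BARs are 64-bit and each consumes
	 * two BAR registers (an assumption about the adapter's BAR layout).
	 */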
1740
1741	pci_info->bar_info[i] =
1742	    bus_alloc_resource_any(vdev->ndev,
1743	    SYS_RES_MEMORY, &res_id, RF_ACTIVE);
1744
1745	if (pci_info->bar_info[i] == NULL) {
1746		device_printf(vdev->ndev,
1747		    "failed to allocate memory for bus resources\n");
1748		err = ENOMEM;
1749		goto _exit0;
1750	}
1751
1752	pci_info->reg_map[i] =
1753	    (vxge_bus_res_t *) vxge_mem_alloc(sizeof(vxge_bus_res_t));
1754
1755	if (pci_info->reg_map[i] == NULL) {
1756		device_printf(vdev->ndev,
1757		    "failed to allocate memory bar resources\n");
1758		err = ENOMEM;
1759		goto _exit0;
1760	}
1761
1762	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_tag =
1763	    rman_get_bustag(pci_info->bar_info[i]);
1764
1765	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_handle =
1766	    rman_get_bushandle(pci_info->bar_info[i]);
1767
1768	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bar_start_addr =
1769	    pci_info->bar_info[i];
1770
1771	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_res_len =
1772	    rman_get_size(pci_info->bar_info[i]);
1773
1774_exit0:
1775	return (err);
1776}
1777
1778/*
1779 * vxge_alloc_isr_resources
1780 */
1781int
1782vxge_alloc_isr_resources(vxge_dev_t *vdev)
1783{
1784	int i, err = 0, irq_rid;
1785	int msix_vec_reqd, intr_count, msix_count;
1786
1787	int intr_mode = VXGE_HAL_INTR_MODE_IRQLINE;
1788
1789	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1790		/* MSI-X messages supported by device */
1791		intr_count = pci_msix_count(vdev->ndev);
1792		if (intr_count) {
1793
1794			msix_vec_reqd = 4 * vdev->no_of_vpath;
1795			if (intr_count >= msix_vec_reqd) {
1796				intr_count = msix_vec_reqd;
1797
1798				err = pci_alloc_msix(vdev->ndev, &intr_count);
1799				if (err == 0)
1800					intr_mode = VXGE_HAL_INTR_MODE_MSIX;
1801			}
1802
1803			if ((err != 0) || (intr_count < msix_vec_reqd)) {
1804				device_printf(vdev->ndev, "Unable to allocate "
1805				    "msi/x vectors, switching to INTA mode\n");
1806			}
1807		}
1808	}
1809
1810	err = 0;
1811	vdev->intr_count = 0;
1812	vdev->config.intr_mode = intr_mode;
1813
1814	switch (vdev->config.intr_mode) {
1815	case VXGE_HAL_INTR_MODE_IRQLINE:
1816		vdev->config.isr_info[0].irq_rid = 0;
1817		vdev->config.isr_info[0].irq_res =
1818		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1819		    &vdev->config.isr_info[0].irq_rid,
1820		    (RF_SHAREABLE | RF_ACTIVE));
1821
1822		if (vdev->config.isr_info[0].irq_res == NULL) {
1823			device_printf(vdev->ndev,
1824			    "failed to allocate line interrupt resource\n");
1825			err = ENOMEM;
1826			goto _exit0;
1827		}
1828		vdev->intr_count++;
1829		break;
1830
1831	case VXGE_HAL_INTR_MODE_MSIX:
1832		msix_count = 0;
1833		for (i = 0; i < vdev->no_of_vpath; i++) {
1834			irq_rid = i * 4;
1835
1836			vdev->config.isr_info[msix_count].irq_rid = irq_rid + 2;
1837			vdev->config.isr_info[msix_count].irq_res =
1838			    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1839			    &vdev->config.isr_info[msix_count].irq_rid,
1840			    (RF_SHAREABLE | RF_ACTIVE));
1841
1842			if (vdev->config.isr_info[msix_count].irq_res == NULL) {
1843				device_printf(vdev->ndev,
1844				    "allocating bus resource (rid %d) failed\n",
1845				    vdev->config.isr_info[msix_count].irq_rid);
1846				err = ENOMEM;
1847				goto _exit0;
1848			}
1849
1850			vdev->intr_count++;
1851			err = bus_bind_intr(vdev->ndev,
1852			    vdev->config.isr_info[msix_count].irq_res,
1853			    (i % mp_ncpus));
1854			if (err != 0)
1855				break;
1856
1857			msix_count++;
1858		}
1859
1860		vdev->config.isr_info[msix_count].irq_rid = 3;
1861		vdev->config.isr_info[msix_count].irq_res =
1862		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1863		    &vdev->config.isr_info[msix_count].irq_rid,
1864		    (RF_SHAREABLE | RF_ACTIVE));
1865
1866		if (vdev->config.isr_info[msix_count].irq_res == NULL) {
1867			device_printf(vdev->ndev,
1868			    "allocating bus resource (rid %d) failed\n",
1869			    vdev->config.isr_info[msix_count].irq_rid);
1870			err = ENOMEM;
1871			goto _exit0;
1872		}
1873
1874		vdev->intr_count++;
1875		err = bus_bind_intr(vdev->ndev,
1876		    vdev->config.isr_info[msix_count].irq_res, (i % mp_ncpus));
1877
1878		break;
1879	}
1880
1881	vdev->device_config->intr_mode = vdev->config.intr_mode;
1882
1883_exit0:
1884	return (err);
1885}
1886
1887/*
1888 * vxge_free_resources
1889 * Undo everything done during load/attach
1890 */
1891void
1892vxge_free_resources(device_t ndev, vxge_free_resources_e vxge_free_resource)
1893{
1894	int i;
1895	vxge_dev_t *vdev;
1896
1897	vdev = (vxge_dev_t *) device_get_softc(ndev);
1898
1899	switch (vxge_free_resource) {
1900	case VXGE_FREE_ALL:
1901		for (i = 0; i < vdev->intr_count; i++) {
1902			bus_teardown_intr(ndev,
1903			    vdev->config.isr_info[i].irq_res,
1904			    vdev->config.isr_info[i].irq_handle);
1905		}
1906		/* FALLTHROUGH */
1907
1908	case VXGE_FREE_INTERFACE:
1909		ether_ifdetach(vdev->ifp);
1910		bus_generic_detach(ndev);
1911		if_free(vdev->ifp);
1912		/* FALLTHROUGH */
1913
1914	case VXGE_FREE_MEDIA:
1915		ifmedia_removeall(&vdev->media);
1916		/* FALLTHROUGH */
1917
1918	case VXGE_FREE_MUTEX:
1919		vxge_mutex_destroy(vdev);
1920		/* FALLTHROUGH */
1921
1922	case VXGE_FREE_VPATH:
1923		vxge_mem_free(vdev->vpaths,
1924		    vdev->no_of_vpath * sizeof(vxge_vpath_t));
1925		/* FALLTHROUGH */
1926
1927	case VXGE_FREE_TERMINATE_DEVICE:
1928		if (vdev->devh != NULL) {
1929			vxge_hal_device_private_set(vdev->devh, 0);
1930			vxge_hal_device_terminate(vdev->devh);
1931		}
1932		/* FALLTHROUGH */
1933
1934	case VXGE_FREE_ISR_RESOURCE:
1935		vxge_free_isr_resources(vdev);
1936		/* FALLTHROUGH */
1937
1938	case VXGE_FREE_BAR2:
1939		vxge_free_bar_resources(vdev, 2);
1940		/* FALLTHROUGH */
1941
1942	case VXGE_FREE_BAR1:
1943		vxge_free_bar_resources(vdev, 1);
1944		/* FALLTHROUGH */
1945
1946	case VXGE_FREE_BAR0:
1947		vxge_free_bar_resources(vdev, 0);
1948		/* FALLTHROUGH */
1949
1950	case VXGE_FREE_PCI_INFO:
1951		vxge_mem_free(vdev->pdev, sizeof(vxge_pci_info_t));
1952		/* FALLTHROUGH */
1953
1954	case VXGE_FREE_DEVICE_CONFIG:
1955		vxge_mem_free(vdev->device_config,
1956		    sizeof(vxge_hal_device_config_t));
1957		/* FALLTHROUGH */
1958
1959	case VXGE_DISABLE_PCI_BUSMASTER:
1960		pci_disable_busmaster(ndev);
1961		/* FALLTHROUGH */
1962
1963	case VXGE_FREE_TERMINATE_DRIVER:
1964		if (vxge_dev_ref_count) {
1965			--vxge_dev_ref_count;
1966			if (0 == vxge_dev_ref_count)
1967				vxge_hal_driver_terminate();
1968		}
1969		/* FALLTHROUGH */
1970
1971	default:
1972	case VXGE_FREE_NONE:
1973		break;
1974		/* NOTREACHED */
1975	}
1976}
1977
1978void
1979vxge_free_isr_resources(vxge_dev_t *vdev)
1980{
1981	int i;
1982
1983	switch (vdev->config.intr_mode) {
1984	case VXGE_HAL_INTR_MODE_IRQLINE:
1985		if (vdev->config.isr_info[0].irq_res) {
1986			bus_release_resource(vdev->ndev, SYS_RES_IRQ,
1987			    vdev->config.isr_info[0].irq_rid,
1988			    vdev->config.isr_info[0].irq_res);
1989
1990			vdev->config.isr_info[0].irq_res = NULL;
1991		}
1992		break;
1993
1994	case VXGE_HAL_INTR_MODE_MSIX:
1995		for (i = 0; i < vdev->intr_count; i++) {
1996			if (vdev->config.isr_info[i].irq_res) {
1997				bus_release_resource(vdev->ndev, SYS_RES_IRQ,
1998				    vdev->config.isr_info[i].irq_rid,
1999				    vdev->config.isr_info[i].irq_res);
2000
2001				vdev->config.isr_info[i].irq_res = NULL;
2002			}
2003		}
2004
2005		if (vdev->intr_count)
2006			pci_release_msi(vdev->ndev);
2007
2008		break;
2009	}
2010}
2011
2012void
2013vxge_free_bar_resources(vxge_dev_t *vdev, int i)
2014{
2015	int res_id = 0;
2016	vxge_pci_info_t *pci_info = vdev->pdev;
2017
2018	res_id = PCIR_BAR((i == 0) ? 0 : (i * 2));
2019
2020	if (pci_info->bar_info[i])
2021		bus_release_resource(vdev->ndev, SYS_RES_MEMORY,
2022		    res_id, pci_info->bar_info[i]);
2023
2024	vxge_mem_free(pci_info->reg_map[i], sizeof(vxge_bus_res_t));
2025}
2026
2027/*
2028 * vxge_init_mutex
2029 * Initializes mutexes used in driver
2030 */
2031void
2032vxge_mutex_init(vxge_dev_t *vdev)
2033{
2034	int i;
2035
2036	snprintf(vdev->mtx_drv_name, sizeof(vdev->mtx_drv_name),
2037	    "%s_drv", vdev->ndev_name);
2038
2039	mtx_init(&vdev->mtx_drv, vdev->mtx_drv_name,
2040	    MTX_NETWORK_LOCK, MTX_DEF);
2041
2042	for (i = 0; i < vdev->no_of_vpath; i++) {
2043		snprintf(vdev->vpaths[i].mtx_tx_name,
2044		    sizeof(vdev->vpaths[i].mtx_tx_name), "%s_tx_%d",
2045		    vdev->ndev_name, i);
2046
2047		mtx_init(&vdev->vpaths[i].mtx_tx,
2048		    vdev->vpaths[i].mtx_tx_name, NULL, MTX_DEF);
2049	}
2050}
2051
2052/*
2053 * vxge_mutex_destroy
2054 * Destroys mutexes used in driver
2055 */
2056void
2057vxge_mutex_destroy(vxge_dev_t *vdev)
2058{
2059	int i;
2060
2061	for (i = 0; i < vdev->no_of_vpath; i++)
2062		VXGE_TX_LOCK_DESTROY(&(vdev->vpaths[i]));
2063
2064	VXGE_DRV_LOCK_DESTROY(vdev);
2065}
2066
2067/*
2068 * vxge_rth_config
2069 */
2070vxge_hal_status_e
2071vxge_rth_config(vxge_dev_t *vdev)
2072{
2073	int i;
2074	vxge_hal_vpath_h vpath_handle;
2075	vxge_hal_rth_hash_types_t hash_types;
2076	vxge_hal_status_e status = VXGE_HAL_OK;
2077	u8 mtable[256] = {0};
2078
2079	/* Fill the RTH indirection table (mtable) with the bucket-to-vpath mapping */
2080	vdev->config.rth_bkt_sz = VXGE_DEFAULT_RTH_BUCKET_SIZE;
2081
2082	for (i = 0; i < (1 << vdev->config.rth_bkt_sz); i++)
2083		mtable[i] = i % vdev->no_of_vpath;
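	/*
	 * Worked example (hypothetical values): with rth_bkt_sz = 4 and three
	 * vpaths open, the 16-entry table reads
	 * 0,1,2,0,1,2,0,1,2,0,1,2,0,1,2,0 - receive hash buckets are spread
	 * round-robin across the open vpaths.
	 */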
2084
2085	/* Fill RTH hash types */
2086	hash_types.hash_type_tcpipv4_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV4;
2087	hash_types.hash_type_tcpipv6_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6;
2088	hash_types.hash_type_tcpipv6ex_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6_EX;
2089	hash_types.hash_type_ipv4_en = VXGE_HAL_RING_HASH_TYPE_IPV4;
2090	hash_types.hash_type_ipv6_en = VXGE_HAL_RING_HASH_TYPE_IPV6;
2091	hash_types.hash_type_ipv6ex_en = VXGE_HAL_RING_HASH_TYPE_IPV6_EX;
2092
2093	/* set indirection table, bucket-to-vpath mapping */
2094	status = vxge_hal_vpath_rts_rth_itable_set(vdev->vpath_handles,
2095	    vdev->no_of_vpath, mtable,
2096	    ((u32) (1 << vdev->config.rth_bkt_sz)));
2097
2098	if (status != VXGE_HAL_OK) {
2099		device_printf(vdev->ndev, "rth configuration failed\n");
2100		goto _exit0;
2101	}
2102	for (i = 0; i < vdev->no_of_vpath; i++) {
2103		vpath_handle = vxge_vpath_handle_get(vdev, i);
2104		if (!vpath_handle)
2105			continue;
2106
2107		status = vxge_hal_vpath_rts_rth_set(vpath_handle,
2108		    RTH_ALG_JENKINS,
2109		    &hash_types, vdev->config.rth_bkt_sz, TRUE);
2110		if (status != VXGE_HAL_OK) {
2111			device_printf(vdev->ndev,
2112			    "rth configuration failed for vpath (%d)\n",
2113			    vdev->vpaths[i].vp_id);
2114			break;
2115		}
2116	}
2117
2118_exit0:
2119	return (status);
2120}
2121
2122/*
2123 * vxge_vpath_config
2124 * Sets HAL parameter values from kenv
2125 */
2126void
2127vxge_vpath_config(vxge_dev_t *vdev)
2128{
2129	int i;
2130	u32 no_of_vpath = 0;
2131	vxge_hal_vp_config_t *vp_config;
2132	vxge_hal_device_config_t *device_config = vdev->device_config;
2133
2134	device_config->debug_level = VXGE_TRACE;
2135	device_config->debug_mask = VXGE_COMPONENT_ALL;
2136	device_config->device_poll_millis = VXGE_DEFAULT_DEVICE_POLL_MILLIS;
2137
2138	vdev->config.no_of_vpath =
2139	    min(vdev->config.no_of_vpath, vdev->max_supported_vpath);
2140
2141	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
2142		vp_config = &(device_config->vp_config[i]);
2143		vp_config->fifo.enable = VXGE_HAL_FIFO_DISABLE;
2144		vp_config->ring.enable = VXGE_HAL_RING_DISABLE;
2145	}
2146
2147	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
2148		if (no_of_vpath >= vdev->config.no_of_vpath)
2149			break;
2150
2151		if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
2152			continue;
2153
2154		no_of_vpath++;
2155		vp_config = &(device_config->vp_config[i]);
2156		vp_config->mtu = VXGE_HAL_DEFAULT_MTU;
2157		vp_config->ring.enable = VXGE_HAL_RING_ENABLE;
2158		vp_config->ring.post_mode = VXGE_HAL_RING_POST_MODE_DOORBELL;
2159		vp_config->ring.buffer_mode = VXGE_HAL_RING_RXD_BUFFER_MODE_1;
2160		vp_config->ring.ring_length =
2161		    vxge_ring_length_get(VXGE_HAL_RING_RXD_BUFFER_MODE_1);
2162		vp_config->ring.scatter_mode = VXGE_HAL_RING_SCATTER_MODE_A;
2163		vp_config->rpa_all_vid_en = VXGE_DEFAULT_ALL_VID_ENABLE;
2164		vp_config->rpa_strip_vlan_tag = VXGE_DEFAULT_STRIP_VLAN_TAG;
2165		vp_config->rpa_ucast_all_addr_en =
2166		    VXGE_HAL_VPATH_RPA_UCAST_ALL_ADDR_DISABLE;
2167
2168		vp_config->rti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
2169		vp_config->rti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
2170		vp_config->rti.util_sel =
2171		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
2172
2173		vp_config->rti.uec_a = VXGE_DEFAULT_RTI_RX_UFC_A;
2174		vp_config->rti.uec_b = VXGE_DEFAULT_RTI_RX_UFC_B;
2175		vp_config->rti.uec_c = VXGE_DEFAULT_RTI_RX_UFC_C;
2176		vp_config->rti.uec_d = VXGE_DEFAULT_RTI_RX_UFC_D;
2177
2178		vp_config->rti.urange_a = VXGE_DEFAULT_RTI_RX_URANGE_A;
2179		vp_config->rti.urange_b = VXGE_DEFAULT_RTI_RX_URANGE_B;
2180		vp_config->rti.urange_c = VXGE_DEFAULT_RTI_RX_URANGE_C;
2181
2182		vp_config->rti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
2183		vp_config->rti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;
2184
2185		vp_config->rti.btimer_val =
2186		    (VXGE_DEFAULT_RTI_BTIMER_VAL * 1000) / 272;
2187		vp_config->rti.rtimer_val =
2188		    (VXGE_DEFAULT_RTI_RTIMER_VAL * 1000) / 272;
2189		vp_config->rti.ltimer_val =
2190		    (VXGE_DEFAULT_RTI_LTIMER_VAL * 1000) / 272;
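		/*
		 * The *1000/272 scaling above presumably converts a
		 * microsecond default into the adapter's ~272 ns timer units;
		 * e.g. a hypothetical 250 us setting becomes
		 * (250 * 1000) / 272, roughly 919 device ticks.
		 */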
2191
2192		if ((no_of_vpath > 1) && (VXGE_DEFAULT_CONFIG_MQ_ENABLE == 0))
2193			continue;
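		/*
		 * With multi-queue transmit disabled, only the first vpath is
		 * given a transmit FIFO below; the remaining vpaths carry
		 * receive rings only.
		 */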
2194
2195		vp_config->fifo.enable = VXGE_HAL_FIFO_ENABLE;
2196		vp_config->fifo.max_aligned_frags =
2197		    VXGE_DEFAULT_FIFO_ALIGNED_FRAGS;
2198
2199		vp_config->tti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
2200		vp_config->tti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
2201		vp_config->tti.util_sel =
2202		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
2203
2204		vp_config->tti.uec_a = VXGE_DEFAULT_TTI_TX_UFC_A;
2205		vp_config->tti.uec_b = VXGE_DEFAULT_TTI_TX_UFC_B;
2206		vp_config->tti.uec_c = VXGE_DEFAULT_TTI_TX_UFC_C;
2207		vp_config->tti.uec_d = VXGE_DEFAULT_TTI_TX_UFC_D;
2208
2209		vp_config->tti.urange_a = VXGE_DEFAULT_TTI_TX_URANGE_A;
2210		vp_config->tti.urange_b = VXGE_DEFAULT_TTI_TX_URANGE_B;
2211		vp_config->tti.urange_c = VXGE_DEFAULT_TTI_TX_URANGE_C;
2212
2213		vp_config->tti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
2214		vp_config->tti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;
2215
2216		vp_config->tti.btimer_val =
2217		    (VXGE_DEFAULT_TTI_BTIMER_VAL * 1000) / 272;
2218		vp_config->tti.rtimer_val =
2219		    (VXGE_DEFAULT_TTI_RTIMER_VAL * 1000) / 272;
2220		vp_config->tti.ltimer_val =
2221		    (VXGE_DEFAULT_TTI_LTIMER_VAL * 1000) / 272;
2222	}
2223
2224	vdev->no_of_vpath = no_of_vpath;
2225
2226	if (vdev->no_of_vpath == 1)
2227		vdev->config.tx_steering = 0;
2228
2229	if (vdev->config.rth_enable && (vdev->no_of_vpath > 1)) {
2230		device_config->rth_en = VXGE_HAL_RTH_ENABLE;
2231		device_config->rth_it_type = VXGE_HAL_RTH_IT_TYPE_MULTI_IT;
2232	}
2233
2234	vdev->config.rth_enable = device_config->rth_en;
2235}
2236
2237/*
2238 * vxge_vpath_cb_fn
2239 * Virtual path Callback function
2240 */
2241/* ARGSUSED */
2242static vxge_hal_status_e
2243vxge_vpath_cb_fn(vxge_hal_client_h client_handle, vxge_hal_up_msg_h msgh,
2244    vxge_hal_message_type_e msg_type, vxge_hal_obj_id_t obj_id,
2245    vxge_hal_result_e result, vxge_hal_opaque_handle_t *opaque_handle)
2246{
2247	return (VXGE_HAL_OK);
2248}
2249
2250/*
2251 * vxge_vpath_open
2252 */
2253int
2254vxge_vpath_open(vxge_dev_t *vdev)
2255{
2256	int i, err = EINVAL;
2257	u64 func_id;
2258
2259	vxge_vpath_t *vpath;
2260	vxge_hal_vpath_attr_t vpath_attr;
2261	vxge_hal_status_e status = VXGE_HAL_OK;
2262	struct lro_ctrl *lro = NULL;
2263
2264	bzero(&vpath_attr, sizeof(vxge_hal_vpath_attr_t));
2265
2266	for (i = 0; i < vdev->no_of_vpath; i++) {
2267
2268		vpath = &(vdev->vpaths[i]);
2269		lro = &vpath->lro;
2270
2271		/* Vpath vpath_attr: FIFO */
2272		vpath_attr.vp_id = vpath->vp_id;
2273		vpath_attr.fifo_attr.callback = vxge_tx_compl;
2274		vpath_attr.fifo_attr.txdl_init = vxge_tx_replenish;
2275		vpath_attr.fifo_attr.txdl_term = vxge_tx_term;
2276		vpath_attr.fifo_attr.userdata = vpath;
2277		vpath_attr.fifo_attr.per_txdl_space = sizeof(vxge_txdl_priv_t);
2278
2279		/* Vpath vpath_attr: Ring */
2280		vpath_attr.ring_attr.callback = vxge_rx_compl;
2281		vpath_attr.ring_attr.rxd_init = vxge_rx_replenish;
2282		vpath_attr.ring_attr.rxd_term = vxge_rx_term;
2283		vpath_attr.ring_attr.userdata = vpath;
2284		vpath_attr.ring_attr.per_rxd_space = sizeof(vxge_rxd_priv_t);
2285
2286		err = vxge_dma_tags_create(vpath);
2287		if (err != 0) {
2288			device_printf(vdev->ndev,
2289			    "failed to create dma tags\n");
2290			break;
2291		}
2292#if __FreeBSD_version >= 800000
2293		vpath->br = buf_ring_alloc(VXGE_DEFAULT_BR_SIZE, M_DEVBUF,
2294		    M_WAITOK, &vpath->mtx_tx);
2295		if (vpath->br == NULL) {
2296			err = ENOMEM;
2297			break;
2298		}
2299#endif
2300		status = vxge_hal_vpath_open(vdev->devh, &vpath_attr,
2301		    (vxge_hal_vpath_callback_f) vxge_vpath_cb_fn,
2302		    NULL, &vpath->handle);
2303		if (status != VXGE_HAL_OK) {
2304			device_printf(vdev->ndev,
2305			    "failed to open vpath (%d)\n", vpath->vp_id);
2306			err = EPERM;
2307			break;
2308		}
2309		vpath->is_open = TRUE;
2310		vdev->vpath_handles[i] = vpath->handle;
2311
2312		vpath->tx_ticks = ticks;
2313		vpath->rx_ticks = ticks;
2314
2315		vpath->tti_rtimer_val = VXGE_DEFAULT_TTI_RTIMER_VAL;
2316		vpath->rti_rtimer_val = VXGE_DEFAULT_RTI_RTIMER_VAL;
2317
2318		vpath->tx_intr_coalesce = vdev->config.intr_coalesce;
2319		vpath->rx_intr_coalesce = vdev->config.intr_coalesce;
2320
2321		func_id = vdev->config.hw_info.func_id;
2322
2323		if (vdev->config.low_latency &&
2324		    (vdev->config.bw_info[func_id].priority ==
2325			VXGE_DEFAULT_VPATH_PRIORITY_HIGH)) {
2326			vpath->tx_intr_coalesce = 0;
2327		}
2328
2329		if (vdev->ifp->if_capenable & IFCAP_LRO) {
2330			err = tcp_lro_init(lro);
2331			if (err != 0) {
2332				device_printf(vdev->ndev,
2333				    "LRO Initialization failed!\n");
2334				break;
2335			}
2336			vpath->lro_enable = TRUE;
2337			lro->ifp = vdev->ifp;
2338		}
2339	}
2340
2341	return (err);
2342}
2343
2344void
2345vxge_tso_config(vxge_dev_t *vdev)
2346{
2347	u32 func_id, priority;
2348	vxge_hal_status_e status = VXGE_HAL_OK;
2349
2350	vdev->ifp->if_capabilities |= IFCAP_TSO4;
2351
2352	status = vxge_bw_priority_get(vdev, NULL);
2353	if (status == VXGE_HAL_OK) {
2354
2355		func_id = vdev->config.hw_info.func_id;
2356		priority = vdev->config.bw_info[func_id].priority;
2357
2358		if (priority != VXGE_DEFAULT_VPATH_PRIORITY_HIGH)
2359			vdev->ifp->if_capabilities &= ~IFCAP_TSO4;
2360	}
2361
2362#if __FreeBSD_version >= 800000
2363	if (vdev->ifp->if_capabilities & IFCAP_TSO4)
2364		vdev->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2365#endif
2366
2367}
2368
2369vxge_hal_status_e
2370vxge_bw_priority_get(vxge_dev_t *vdev, vxge_bw_info_t *bw_info)
2371{
2372	u32 priority, bandwidth;
2373	u32 vpath_count;
2374
2375	u64 func_id, func_mode, vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS];
2376	vxge_hal_status_e status = VXGE_HAL_OK;
2377
2378	func_id = vdev->config.hw_info.func_id;
2379	if (bw_info) {
2380		func_id = bw_info->func_id;
2381		func_mode = vdev->config.hw_info.function_mode;
2382		if ((is_single_func(func_mode)) && (func_id > 0))
2383			return (VXGE_HAL_FAIL);
2384	}
2385
2386	if (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)) {
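	/*
	 * Firmware 1.8.0 and newer exposes a per-function bandwidth/priority
	 * query; on older firmware the values are read from the first vpath
	 * owned by the function instead.
	 */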
2387
2388		status = vxge_hal_vf_rx_bw_get(vdev->devh,
2389		    func_id, &bandwidth, &priority);
2390
2391	} else {
2392
2393		status = vxge_hal_get_vpath_list(vdev->devh,
2394		    func_id, vpath_list, &vpath_count);
2395
2396		if (status == VXGE_HAL_OK) {
2397			status = vxge_hal_bw_priority_get(vdev->devh,
2398			    vpath_list[0], &bandwidth, &priority);
2399		}
2400	}
2401
2402	if (status == VXGE_HAL_OK) {
2403		if (bw_info) {
2404			bw_info->priority = priority;
2405			bw_info->bandwidth = bandwidth;
2406		} else {
2407			vdev->config.bw_info[func_id].priority = priority;
2408			vdev->config.bw_info[func_id].bandwidth = bandwidth;
2409		}
2410	}
2411
2412	return (status);
2413}
2414
2415/*
2416 * close vpaths
2417 */
2418void
2419vxge_vpath_close(vxge_dev_t *vdev)
2420{
2421	int i;
2422	vxge_vpath_t *vpath;
2423
2424	for (i = 0; i < vdev->no_of_vpath; i++) {
2425
2426		vpath = &(vdev->vpaths[i]);
2427		if (vpath->handle)
2428			vxge_hal_vpath_close(vpath->handle);
2429
2430#if __FreeBSD_version >= 800000
2431		if (vpath->br != NULL)
2432			buf_ring_free(vpath->br, M_DEVBUF);
2433#endif
2434		/* Free LRO memory */
2435		if (vpath->lro_enable)
2436			tcp_lro_free(&vpath->lro);
2437
2438		if (vpath->dma_tag_rx) {
2439			bus_dmamap_destroy(vpath->dma_tag_rx,
2440			    vpath->extra_dma_map);
2441			bus_dma_tag_destroy(vpath->dma_tag_rx);
2442		}
2443
2444		if (vpath->dma_tag_tx)
2445			bus_dma_tag_destroy(vpath->dma_tag_tx);
2446
2447		vpath->handle = NULL;
2448		vpath->is_open = FALSE;
2449	}
2450}
2451
2452/*
2453 * reset vpaths
2454 */
2455void
2456vxge_vpath_reset(vxge_dev_t *vdev)
2457{
2458	int i;
2459	vxge_hal_vpath_h vpath_handle;
2460	vxge_hal_status_e status = VXGE_HAL_OK;
2461
2462	for (i = 0; i < vdev->no_of_vpath; i++) {
2463		vpath_handle = vxge_vpath_handle_get(vdev, i);
2464		if (!vpath_handle)
2465			continue;
2466
2467		status = vxge_hal_vpath_reset(vpath_handle);
2468		if (status != VXGE_HAL_OK)
2469			device_printf(vdev->ndev,
2470			    "failed to reset vpath :%d\n", i);
2471	}
2472}
2473
2474static inline int
2475vxge_vpath_get(vxge_dev_t *vdev, mbuf_t mhead)
2476{
2477	struct tcphdr *th = NULL;
2478	struct udphdr *uh = NULL;
2479	struct ip *ip = NULL;
2480	struct ip6_hdr *ip6 = NULL;
2481	struct ether_vlan_header *eth = NULL;
2482	void *ulp = NULL;
2483
2484	int ehdrlen, iphlen = 0;
2485	u8 ipproto = 0;
2486	u16 etype, src_port, dst_port;
2487	u16 queue_len, counter = 0;
2488
2489	src_port = dst_port = 0;
2490	queue_len = vdev->no_of_vpath;
2491
2492	eth = mtod(mhead, struct ether_vlan_header *);
2493	if (eth->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2494		etype = ntohs(eth->evl_proto);
2495		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2496	} else {
2497		etype = ntohs(eth->evl_encap_proto);
2498		ehdrlen = ETHER_HDR_LEN;
2499	}
2500
2501	switch (etype) {
2502	case ETHERTYPE_IP:
2503		ip = (struct ip *) (mhead->m_data + ehdrlen);
2504		iphlen = ip->ip_hl << 2;
2505		ipproto = ip->ip_p;
2506		th = (struct tcphdr *) ((caddr_t)ip + iphlen);
2507		uh = (struct udphdr *) ((caddr_t)ip + iphlen);
2508		break;
2509
2510	case ETHERTYPE_IPV6:
2511		ip6 = (struct ip6_hdr *) (mhead->m_data + ehdrlen);
2512		iphlen = sizeof(struct ip6_hdr);
2513		ipproto = ip6->ip6_nxt;
2514
2515		ulp = mtod(mhead, char *) + ehdrlen + iphlen;
2516		th = ((struct tcphdr *) (ulp));
2517		uh = ((struct udphdr *) (ulp));
2518		break;
2519
2520	default:
2521		break;
2522	}
2523
2524	switch (ipproto) {
2525	case IPPROTO_TCP:
2526		src_port = th->th_sport;
2527		dst_port = th->th_dport;
2528		break;
2529
2530	case IPPROTO_UDP:
2531		src_port = uh->uh_sport;
2532		dst_port = uh->uh_dport;
2533		break;
2534
2535	default:
2536		break;
2537	}
2538
2539	counter = (ntohs(src_port) + ntohs(dst_port)) &
2540	    vpath_selector[queue_len - 1];
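	/*
	 * Worked example (hypothetical flow): with 4 vpaths the selector
	 * entry masks the low two bits, so source port 49152 and destination
	 * port 80 give (49152 + 80) & 3 = 0 and the flow lands on vpath 0.
	 */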
2541
2542	if (counter >= queue_len)
2543		counter = queue_len - 1;
2544
2545	return (counter);
2546}
2547
2548static inline vxge_hal_vpath_h
2549vxge_vpath_handle_get(vxge_dev_t *vdev, int i)
2550{
2551	return (vdev->vpaths[i].is_open ? vdev->vpaths[i].handle : NULL);
2552}
2553
2554int
2555vxge_firmware_verify(vxge_dev_t *vdev)
2556{
2557	int err = 0;
2558	u64 active_config;
2559	vxge_hal_status_e status = VXGE_HAL_FAIL;
2560
2561	if (vdev->fw_upgrade) {
2562		status = vxge_firmware_upgrade(vdev);
2563		if (status == VXGE_HAL_OK) {
2564			err = ENXIO;
2565			goto _exit0;
2566		}
2567	}
2568
2569	if ((vdev->config.function_mode != VXGE_DEFAULT_CONFIG_VALUE) &&
2570	    (vdev->config.hw_info.function_mode !=
2571	    (u64) vdev->config.function_mode)) {
2572
2573		status = vxge_func_mode_set(vdev);
2574		if (status == VXGE_HAL_OK)
2575			err = ENXIO;
2576	}
2577
2578	/* l2_switch configuration */
2579	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2580	status = vxge_hal_get_active_config(vdev->devh,
2581	    VXGE_HAL_XMAC_NWIF_ActConfig_L2SwitchEnabled,
2582	    &active_config);
2583
2584	if (status == VXGE_HAL_OK) {
2585		vdev->l2_switch = active_config;
2586		if (vdev->config.l2_switch != VXGE_DEFAULT_CONFIG_VALUE) {
2587			if (vdev->config.l2_switch != active_config) {
2588				status = vxge_l2switch_mode_set(vdev);
2589				if (status == VXGE_HAL_OK)
2590					err = ENXIO;
2591			}
2592		}
2593	}
2594
2595	if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
2596		if (vxge_port_mode_update(vdev) == ENXIO)
2597			err = ENXIO;
2598	}
2599
2600_exit0:
2601	if (err == ENXIO)
2602		device_printf(vdev->ndev, "PLEASE POWER CYCLE THE SYSTEM\n");
2603
2604	return (err);
2605}
2606
2607vxge_hal_status_e
2608vxge_firmware_upgrade(vxge_dev_t *vdev)
2609{
2610	u8 *fw_buffer;
2611	u32 fw_size;
2612	vxge_hal_device_hw_info_t *hw_info;
2613	vxge_hal_status_e status = VXGE_HAL_OK;
2614
2615	hw_info = &vdev->config.hw_info;
2616
2617	fw_size = sizeof(VXGE_FW_ARRAY_NAME);
2618	fw_buffer = (u8 *) VXGE_FW_ARRAY_NAME;
2619
2620	device_printf(vdev->ndev, "Current firmware version : %s (%s)\n",
2621	    hw_info->fw_version.version, hw_info->fw_date.date);
2622
2623	device_printf(vdev->ndev, "Upgrading firmware to %d.%d.%d\n",
2624	    VXGE_MIN_FW_MAJOR_VERSION, VXGE_MIN_FW_MINOR_VERSION,
2625	    VXGE_MIN_FW_BUILD_NUMBER);
2626
2627	/* Call HAL API to upgrade firmware */
2628	status = vxge_hal_mrpcim_fw_upgrade(vdev->pdev,
2629	    (pci_reg_h) vdev->pdev->reg_map[0],
2630	    (u8 *) vdev->pdev->bar_info[0],
2631	    fw_buffer, fw_size);
2632
2633	device_printf(vdev->ndev, "firmware upgrade %s\n",
2634	    (status == VXGE_HAL_OK) ? "successful" : "failed");
2635
2636	return (status);
2637}
2638
2639vxge_hal_status_e
2640vxge_func_mode_set(vxge_dev_t *vdev)
2641{
2642	u64 active_config;
2643	vxge_hal_status_e status = VXGE_HAL_FAIL;
2644
2645	status = vxge_hal_mrpcim_pcie_func_mode_set(vdev->devh,
2646	    vdev->config.function_mode);
2647	device_printf(vdev->ndev,
2648	    "function mode change %s\n",
2649	    (status == VXGE_HAL_OK) ? "successful" : "failed");
2650
2651	if (status == VXGE_HAL_OK) {
2652		vxge_hal_set_fw_api(vdev->devh, 0ULL,
2653		    VXGE_HAL_API_FUNC_MODE_COMMIT,
2654		    0, 0ULL, 0ULL);
2655
2656		vxge_hal_get_active_config(vdev->devh,
2657		    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
2658		    &active_config);
2659
2660		/*
2661		 * If the adapter was in multi-function + dual-port mode and the
2662		 * user switches to single-function, drop to single-port mode too.
2663		 */
2664		if (((is_multi_func(vdev->config.hw_info.function_mode)) &&
2665		    is_single_func(vdev->config.function_mode)) &&
2666		    (active_config == VXGE_HAL_DP_NP_MODE_DUAL_PORT)) {
2667			vdev->config.port_mode =
2668			    VXGE_HAL_DP_NP_MODE_SINGLE_PORT;
2669
2670			status = vxge_port_mode_set(vdev);
2671		}
2672	}
2673	return (status);
2674}
2675
2676vxge_hal_status_e
2677vxge_port_mode_set(vxge_dev_t *vdev)
2678{
2679	vxge_hal_status_e status = VXGE_HAL_FAIL;
2680
2681	status = vxge_hal_set_port_mode(vdev->devh, vdev->config.port_mode);
2682	device_printf(vdev->ndev,
2683	    "port mode change %s\n",
2684	    (status == VXGE_HAL_OK) ? "successful" : "failed");
2685
2686	if (status == VXGE_HAL_OK) {
2687		vxge_hal_set_fw_api(vdev->devh, 0ULL,
2688		    VXGE_HAL_API_FUNC_MODE_COMMIT,
2689		    0, 0ULL, 0ULL);
2690
2691		/* Configure vpath_mapping for active-active mode only */
2692		if (vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) {
2693
2694			status = vxge_hal_config_vpath_map(vdev->devh,
2695			    VXGE_DUAL_PORT_MAP);
2696
2697			device_printf(vdev->ndev, "dual port map change %s\n",
2698			    (status == VXGE_HAL_OK) ? "successful" : "failed");
2699		}
2700	}
2701	return (status);
2702}
2703
2704int
2705vxge_port_mode_update(vxge_dev_t *vdev)
2706{
2707	int err = 0;
2708	u64 active_config;
2709	vxge_hal_status_e status = VXGE_HAL_FAIL;
2710
2711	if ((vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) &&
2712	    is_single_func(vdev->config.hw_info.function_mode)) {
2713
2714		device_printf(vdev->ndev,
2715		    "Adapter in SF mode, dual port mode is not allowed\n");
2716		err = EPERM;
2717		goto _exit0;
2718	}
2719
2720	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2721	status = vxge_hal_get_active_config(vdev->devh,
2722	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
2723	    &active_config);
2724	if (status != VXGE_HAL_OK) {
2725		err = EINVAL;
2726		goto _exit0;
2727	}
2728
2729	vdev->port_mode = active_config;
2730	if (vdev->config.port_mode != VXGE_DEFAULT_CONFIG_VALUE) {
2731		if (vdev->config.port_mode != vdev->port_mode) {
2732			status = vxge_port_mode_set(vdev);
2733			if (status != VXGE_HAL_OK) {
2734				err = EINVAL;
2735				goto _exit0;
2736			}
2737			err = ENXIO;
2738			vdev->port_mode  = vdev->config.port_mode;
2739		}
2740	}
2741
2742	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2743	status = vxge_hal_get_active_config(vdev->devh,
2744	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
2745	    &active_config);
2746	if (status != VXGE_HAL_OK) {
2747		err = EINVAL;
2748		goto _exit0;
2749	}
2750
2751	vdev->port_failure = active_config;
2752
2753	/*
2754	 * active/active mode : set to NoMove
2755	 * active/passive mode: set to Failover-Failback
2756	 */
2757	if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT)
2758		vdev->config.port_failure =
2759		    VXGE_HAL_XMAC_NWIF_OnFailure_NoMove;
2760
2761	else if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_ACTIVE_PASSIVE)
2762		vdev->config.port_failure =
2763		    VXGE_HAL_XMAC_NWIF_OnFailure_OtherPortBackOnRestore;
2764
2765	if ((vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) &&
2766	    (vdev->config.port_failure != vdev->port_failure)) {
2767		status = vxge_port_behavior_on_failure_set(vdev);
2768		if (status == VXGE_HAL_OK)
2769			err = ENXIO;
2770	}
2771
2772_exit0:
2773	return (err);
2774}
2775
2776vxge_hal_status_e
2777vxge_port_mode_get(vxge_dev_t *vdev, vxge_port_info_t *port_info)
2778{
2779	int err = 0;
2780	u64 active_config;
2781	vxge_hal_status_e status = VXGE_HAL_FAIL;
2782
2783	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2784	status = vxge_hal_get_active_config(vdev->devh,
2785	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
2786	    &active_config);
2787
2788	if (status != VXGE_HAL_OK) {
2789		err = ENXIO;
2790		goto _exit0;
2791	}
2792
2793	port_info->port_mode = active_config;
2794
2795	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2796	status = vxge_hal_get_active_config(vdev->devh,
2797	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
2798	    &active_config);
2799	if (status != VXGE_HAL_OK) {
2800		err = ENXIO;
2801		goto _exit0;
2802	}
2803
2804	port_info->port_failure = active_config;
2805
2806_exit0:
2807	return (err);
2808}
2809
2810vxge_hal_status_e
2811vxge_port_behavior_on_failure_set(vxge_dev_t *vdev)
2812{
2813	vxge_hal_status_e status = VXGE_HAL_FAIL;
2814
2815	status = vxge_hal_set_behavior_on_failure(vdev->devh,
2816	    vdev->config.port_failure);
2817
2818	device_printf(vdev->ndev,
2819	    "port behaviour on failure change %s\n",
2820	    (status == VXGE_HAL_OK) ? "successful" : "failed");
2821
2822	if (status == VXGE_HAL_OK)
2823		vxge_hal_set_fw_api(vdev->devh, 0ULL,
2824		    VXGE_HAL_API_FUNC_MODE_COMMIT,
2825		    0, 0ULL, 0ULL);
2826
2827	return (status);
2828}
2829
2830void
2831vxge_active_port_update(vxge_dev_t *vdev)
2832{
2833	u64 active_config;
2834	vxge_hal_status_e status = VXGE_HAL_FAIL;
2835
2836	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2837	status = vxge_hal_get_active_config(vdev->devh,
2838	    VXGE_HAL_XMAC_NWIF_ActConfig_ActivePort,
2839	    &active_config);
2840
2841	if (status == VXGE_HAL_OK)
2842		vdev->active_port = active_config;
2843}
2844
2845vxge_hal_status_e
2846vxge_l2switch_mode_set(vxge_dev_t *vdev)
2847{
2848	vxge_hal_status_e status = VXGE_HAL_FAIL;
2849
2850	status = vxge_hal_set_l2switch_mode(vdev->devh,
2851	    vdev->config.l2_switch);
2852
2853	device_printf(vdev->ndev, "L2 switch %s\n",
2854	    (status == VXGE_HAL_OK) ?
2855	    (vdev->config.l2_switch) ? "enable" : "disable" :
2856	    "change failed");
2857
2858	if (status == VXGE_HAL_OK)
2859		vxge_hal_set_fw_api(vdev->devh, 0ULL,
2860		    VXGE_HAL_API_FUNC_MODE_COMMIT,
2861		    0, 0ULL, 0ULL);
2862
2863	return (status);
2864}
2865
2866/*
2867 * vxge_promisc_set
2868 * Enable/disable promiscuous and all-multicast reception
2869 */
2870void
2871vxge_promisc_set(vxge_dev_t *vdev)
2872{
2873	int i;
2874	ifnet_t ifp;
2875	vxge_hal_vpath_h vpath_handle;
2876
2877	if (!vdev->is_initialized)
2878		return;
2879
2880	ifp = vdev->ifp;
2881
2882	if ((ifp->if_flags & IFF_ALLMULTI) && (!vdev->all_multi_flag)) {
2883		for (i = 0; i < vdev->no_of_vpath; i++) {
2884			vpath_handle = vxge_vpath_handle_get(vdev, i);
2885			if (!vpath_handle)
2886				continue;
2887
2888			vxge_hal_vpath_mcast_enable(vpath_handle);
2889			vdev->all_multi_flag = 1;
2890		}
2891
2892	} else if (!(ifp->if_flags & IFF_ALLMULTI) && (vdev->all_multi_flag)) {
2893		for (i = 0; i < vdev->no_of_vpath; i++) {
2894			vpath_handle = vxge_vpath_handle_get(vdev, i);
2895			if (!vpath_handle)
2896				continue;
2897
2898			vxge_hal_vpath_mcast_disable(vpath_handle);
2899			vdev->all_multi_flag = 0;
2900		}
2901	}
2902	for (i = 0; i < vdev->no_of_vpath; i++) {
2903		vpath_handle = vxge_vpath_handle_get(vdev, i);
2904		if (!vpath_handle)
2905			continue;
2906
2907		if (ifp->if_flags & IFF_PROMISC)
2908			vxge_hal_vpath_promisc_enable(vpath_handle);
2909		else
2910			vxge_hal_vpath_promisc_disable(vpath_handle);
2911	}
2912}
2913
2914/*
2915 * vxge_change_mtu
2916 * Change interface MTU to a requested valid size
2917 */
2918int
2919vxge_change_mtu(vxge_dev_t *vdev, unsigned long new_mtu)
2920{
2921	int err = EINVAL;
2922
2923	if ((new_mtu < VXGE_HAL_MIN_MTU) || (new_mtu > VXGE_HAL_MAX_MTU))
2924		goto _exit0;
2925
2926	(vdev->ifp)->if_mtu = new_mtu;
2927	device_printf(vdev->ndev, "MTU changed to %ld\n", (vdev->ifp)->if_mtu);
2928
2929	if (vdev->is_initialized) {
2930		if_down(vdev->ifp);
2931		vxge_reset(vdev);
2932		if_up(vdev->ifp);
2933	}
2934	err = 0;
2935
2936_exit0:
2937	return (err);
2938}
2939
2940/*
2941 * Creates DMA tags for both Tx and Rx
2942 */
2943int
2944vxge_dma_tags_create(vxge_vpath_t *vpath)
2945{
2946	int err = 0;
2947	bus_size_t max_size, boundary;
2948	vxge_dev_t *vdev = vpath->vdev;
2949	ifnet_t ifp = vdev->ifp;
2950
2951	max_size = ifp->if_mtu +
2952	    VXGE_HAL_MAC_HEADER_MAX_SIZE +
2953	    VXGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN;
2954
2955	VXGE_BUFFER_ALIGN(max_size, 128)
2956	if (max_size <= MCLBYTES)
2957		vdev->rx_mbuf_sz = MCLBYTES;
2958	else
2959		vdev->rx_mbuf_sz =
2960		    (max_size > MJUMPAGESIZE) ? MJUM9BYTES : MJUMPAGESIZE;
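	/*
	 * For example, a standard 1500-byte MTU fits (with the MAC header and
	 * alignment allowance) in a 2 KB cluster, while a hypothetical
	 * 9000-byte MTU exceeds MJUMPAGESIZE (one page on most platforms) and
	 * therefore uses 9 KB jumbo clusters.
	 */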
2961
2962	boundary = (max_size > PAGE_SIZE) ? 0 : PAGE_SIZE;
2963
2964	/* DMA tag for Tx */
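	/*
	 * bus_dma_tag_create() arguments below, in order: parent tag, 1-byte
	 * alignment, PAGE_SIZE boundary, low/high address limits, no address
	 * filter, VXGE_TSO_SIZE maximum mapping size, up to VXGE_MAX_SEGS
	 * segments of at most PAGE_SIZE each, BUS_DMA_ALLOCNOW, no lock
	 * function. The Rx tag below follows the same layout.
	 */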
2965	err = bus_dma_tag_create(
2966	    bus_get_dma_tag(vdev->ndev),
2967	    1,
2968	    PAGE_SIZE,
2969	    BUS_SPACE_MAXADDR,
2970	    BUS_SPACE_MAXADDR,
2971	    NULL,
2972	    NULL,
2973	    VXGE_TSO_SIZE,
2974	    VXGE_MAX_SEGS,
2975	    PAGE_SIZE,
2976	    BUS_DMA_ALLOCNOW,
2977	    NULL,
2978	    NULL,
2979	    &(vpath->dma_tag_tx));
2980	if (err != 0)
2981		goto _exit0;
2982
2983	/* DMA tag for Rx */
2984	err = bus_dma_tag_create(
2985	    bus_get_dma_tag(vdev->ndev),
2986	    1,
2987	    boundary,
2988	    BUS_SPACE_MAXADDR,
2989	    BUS_SPACE_MAXADDR,
2990	    NULL,
2991	    NULL,
2992	    vdev->rx_mbuf_sz,
2993	    1,
2994	    vdev->rx_mbuf_sz,
2995	    BUS_DMA_ALLOCNOW,
2996	    NULL,
2997	    NULL,
2998	    &(vpath->dma_tag_rx));
2999	if (err != 0)
3000		goto _exit1;
3001
3002	/* Create DMA map for this descriptor */
3003	err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT,
3004	    &vpath->extra_dma_map);
3005	if (err == 0)
3006		goto _exit0;
3007
3008	bus_dma_tag_destroy(vpath->dma_tag_rx);
3009
3010_exit1:
3011	bus_dma_tag_destroy(vpath->dma_tag_tx);
3012
3013_exit0:
3014	return (err);
3015}
3016
3017static inline int
3018vxge_dma_mbuf_coalesce(bus_dma_tag_t dma_tag_tx, bus_dmamap_t dma_map,
3019    mbuf_t * m_headp, bus_dma_segment_t * dma_buffers,
3020    int *num_segs)
3021{
3022	int err = 0;
3023	mbuf_t mbuf_pkt = NULL;
3024
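	/*
	 * bus_dmamap_load_mbuf_sg() returns EFBIG when the chain needs more
	 * scatter/gather segments than the tag allows; m_defrag() copies the
	 * chain into as few clusters as possible and the load is retried.
	 */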
3025retry:
3026	err = bus_dmamap_load_mbuf_sg(dma_tag_tx, dma_map, *m_headp,
3027	    dma_buffers, num_segs, BUS_DMA_NOWAIT);
3028	if (err == EFBIG) {
3029		/* try to defrag, too many segments */
3030		mbuf_pkt = m_defrag(*m_headp, M_DONTWAIT);
3031		if (mbuf_pkt == NULL) {
3032			err = ENOBUFS;
3033			goto _exit0;
3034		}
3035		*m_headp = mbuf_pkt;
3036		goto retry;
3037	}
3038
3039_exit0:
3040	return (err);
3041}
3042
3043int
3044vxge_device_hw_info_get(vxge_dev_t *vdev)
3045{
3046	int i, err = ENXIO;
3047	u64 vpath_mask = 0;
3048	u32 max_supported_vpath = 0;
3049	u32 fw_ver_maj_min;
3050	vxge_firmware_upgrade_e fw_option;
3051
3052	vxge_hal_status_e status = VXGE_HAL_OK;
3053	vxge_hal_device_hw_info_t *hw_info;
3054
3055	status = vxge_hal_device_hw_info_get(vdev->pdev,
3056	    (pci_reg_h) vdev->pdev->reg_map[0],
3057	    (u8 *) vdev->pdev->bar_info[0],
3058	    &vdev->config.hw_info);
3059
3060	if (status != VXGE_HAL_OK)
3061		goto _exit0;
3062
3063	hw_info = &vdev->config.hw_info;
3064
3065	vpath_mask = hw_info->vpath_mask;
3066	if (vpath_mask == 0) {
3067		device_printf(vdev->ndev, "No vpaths available in device\n");
3068		goto _exit0;
3069	}
3070
3071	fw_option = vdev->config.fw_option;
3072
3073	/* Check how many vpaths are available */
3074	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
3075		if (!((vpath_mask) & mBIT(i)))
3076			continue;
3077		max_supported_vpath++;
3078	}
3079
3080	vdev->max_supported_vpath = max_supported_vpath;
3081	status = vxge_hal_device_is_privileged(hw_info->host_type,
3082	    hw_info->func_id);
3083	vdev->is_privilaged = (status == VXGE_HAL_OK) ? TRUE : FALSE;
3084
3085	vdev->hw_fw_version = VXGE_FW_VERSION(
3086	    hw_info->fw_version.major,
3087	    hw_info->fw_version.minor,
3088	    hw_info->fw_version.build);
3089
3090	fw_ver_maj_min =
3091	    VXGE_FW_MAJ_MIN_VERSION(hw_info->fw_version.major,
3092	    hw_info->fw_version.minor);
3093
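	/*
	 * An upgrade is attempted when the user forces one or when the
	 * flashed firmware differs from the version carried by the driver;
	 * for 1.8.1 and newer only the major.minor pair is compared, and
	 * firmware older than the supported base cannot be upgraded from the
	 * driver at all.
	 */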
3094	if ((fw_option >= VXGE_FW_UPGRADE_FORCE) ||
3095	    (vdev->hw_fw_version != VXGE_DRV_FW_VERSION)) {
3096
3097		/* For fw_ver 1.8.1 and above ignore build number. */
3098		if ((fw_option == VXGE_FW_UPGRADE_ALL) &&
3099		    ((vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 1)) &&
3100		    (fw_ver_maj_min == VXGE_DRV_FW_MAJ_MIN_VERSION))) {
3101			goto _exit1;
3102		}
3103
3104		if (vdev->hw_fw_version < VXGE_BASE_FW_VERSION) {
3105			device_printf(vdev->ndev,
3106			    "Please upgrade the firmware using vxge_update; "
3107			    "unable to load the driver.\n");
3108			goto _exit0;
3109		}
3110		vdev->fw_upgrade = TRUE;
3111	}
3112
3113_exit1:
3114	err = 0;
3115
3116_exit0:
3117	return (err);
3118}
3119
3120/*
3121 * vxge_device_hw_info_print
3122 * Print device and driver information
3123 */
3124void
3125vxge_device_hw_info_print(vxge_dev_t *vdev)
3126{
3127	u32 i;
3128	device_t ndev;
3129	struct sysctl_ctx_list *ctx;
3130	struct sysctl_oid_list *children;
3131	char pmd_type[2][VXGE_PMD_INFO_LEN];
3132
3133	vxge_hal_device_t *hldev;
3134	vxge_hal_device_hw_info_t *hw_info;
3135	vxge_hal_device_pmd_info_t *pmd_port;
3136
3137	hldev = vdev->devh;
3138	ndev = vdev->ndev;
3139
3140	ctx = device_get_sysctl_ctx(ndev);
3141	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ndev));
3142
3143	hw_info = &(vdev->config.hw_info);
3144
3145	snprintf(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION],
3146	    sizeof(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]),
3147	    "%d.%d.%d.%d", XGELL_VERSION_MAJOR, XGELL_VERSION_MINOR,
3148	    XGELL_VERSION_FIX, XGELL_VERSION_BUILD);
3149
3150	/* Print PCI-e bus type/speed/width info */
3151	snprintf(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO],
3152	    sizeof(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]),
3153	    "x%d", hldev->link_width);
3154
3155	if (hldev->link_width <= VXGE_HAL_PCI_E_LINK_WIDTH_X4)
3156		device_printf(ndev, "For optimal performance an x8 "
3157		    "PCI-Express slot is required.\n");
3158
3159	vxge_null_terminate((char *) hw_info->serial_number,
3160	    sizeof(hw_info->serial_number));
3161
3162	vxge_null_terminate((char *) hw_info->part_number,
3163	    sizeof(hw_info->part_number));
3164
3165	snprintf(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO],
3166	    sizeof(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]),
3167	    "%s", hw_info->serial_number);
3168
3169	snprintf(vdev->config.nic_attr[VXGE_PRINT_PART_NO],
3170	    sizeof(vdev->config.nic_attr[VXGE_PRINT_PART_NO]),
3171	    "%s", hw_info->part_number);
3172
3173	snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION],
3174	    sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]),
3175	    "%s", hw_info->fw_version.version);
3176
3177	snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_DATE],
3178	    sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_DATE]),
3179	    "%s", hw_info->fw_date.date);
3180
3181	pmd_port = &(hw_info->pmd_port0);
3182	for (i = 0; i < hw_info->ports; i++) {
3183
3184		vxge_pmd_port_type_get(vdev, pmd_port->type,
3185		    pmd_type[i], sizeof(pmd_type[i]));
3186
3187		strncpy(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i],
3188		    "vendor=??, sn=??, pn=??, type=??",
3189		    sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]));
3190
3191		vxge_null_terminate(pmd_port->vendor, sizeof(pmd_port->vendor));
3192		if (strlen(pmd_port->vendor) == 0) {
3193			pmd_port = &(hw_info->pmd_port1);
3194			continue;
3195		}
3196
3197		vxge_null_terminate(pmd_port->ser_num,
3198		    sizeof(pmd_port->ser_num));
3199
3200		vxge_null_terminate(pmd_port->part_num,
3201		    sizeof(pmd_port->part_num));
3202
3203		snprintf(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i],
3204		    sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]),
3205		    "vendor=%s, sn=%s, pn=%s, type=%s",
3206		    pmd_port->vendor, pmd_port->ser_num,
3207		    pmd_port->part_num, pmd_type[i]);
3208
3209		pmd_port = &(hw_info->pmd_port1);
3210	}
3211
3212	switch (hw_info->function_mode) {
3213	case VXGE_HAL_PCIE_FUNC_MODE_SF1_VP17:
3214		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3215		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3216		    "%s %d %s", "Single Function - 1 function(s)",
3217		    vdev->max_supported_vpath, "VPath(s)/function");
3218		break;
3219
3220	case VXGE_HAL_PCIE_FUNC_MODE_MF2_VP8:
3221		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3222		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3223		    "%s %d %s", "Multi Function - 2 function(s)",
3224		    vdev->max_supported_vpath, "VPath(s)/function");
3225		break;
3226
3227	case VXGE_HAL_PCIE_FUNC_MODE_MF4_VP4:
3228		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3229		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3230		    "%s %d %s", "Multi Function - 4 function(s)",
3231		    vdev->max_supported_vpath, "VPath(s)/function");
3232		break;
3233
3234	case VXGE_HAL_PCIE_FUNC_MODE_MF8_VP2:
3235		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3236		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3237		    "%s %d %s", "Multi Function - 8 function(s)",
3238		    vdev->max_supported_vpath, "VPath(s)/function");
3239		break;
3240
3241	case VXGE_HAL_PCIE_FUNC_MODE_MF8P_VP2:
3242		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3243		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3244		    "%s %d %s", "Multi Function (DirectIO) - 8 function(s)",
3245		    vdev->max_supported_vpath, "VPath(s)/function");
3246		break;
3247	}
3248
3249	snprintf(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE],
3250	    sizeof(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]),
3251	    "%s", ((vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) ?
3252	    "MSI-X" : "INTA"));
3253
3254	snprintf(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT],
3255	    sizeof(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]),
3256	    "%d", vdev->no_of_vpath);
3257
3258	snprintf(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE],
3259	    sizeof(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]),
3260	    "%lu", vdev->ifp->if_mtu);
3261
3262	snprintf(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE],
3263	    sizeof(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]),
3264	    "%s", ((vdev->config.lro_enable) ? "Enabled" : "Disabled"));
3265
3266	snprintf(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE],
3267	    sizeof(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]),
3268	    "%s", ((vdev->config.rth_enable) ? "Enabled" : "Disabled"));
3269
3270	snprintf(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE],
3271	    sizeof(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]),
3272	    "%s", ((vdev->ifp->if_capenable & IFCAP_TSO4) ?
3273	    "Enabled" : "Disabled"));
3274
3275	snprintf(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE],
3276	    sizeof(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]),
3277	    "%s", ((hw_info->ports == 1) ? "Single Port" : "Dual Port"));
3278
3279	if (vdev->is_privilaged) {
3280
3281		if (hw_info->ports > 1) {
3282
3283			snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE],
3284			    sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]),
3285			    "%s", vxge_port_mode[vdev->port_mode]);
3286
3287			if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3288				snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE],
3289				    sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]),
3290				    "%s", vxge_port_failure[vdev->port_failure]);
3291
3292			vxge_active_port_update(vdev);
3293			snprintf(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT],
3294			    sizeof(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]),
3295			    "%lld", vdev->active_port);
3296		}
3297
3298		if (!is_single_func(hw_info->function_mode)) {
3299			snprintf(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE],
3300			    sizeof(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]),
3301			    "%s", ((vdev->l2_switch) ? "Enabled" : "Disabled"));
3302		}
3303	}
3304
3305	device_printf(ndev, "Driver version\t: %s\n",
3306	    vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]);
3307
3308	device_printf(ndev, "Serial number\t: %s\n",
3309	    vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]);
3310
3311	device_printf(ndev, "Part number\t: %s\n",
3312	    vdev->config.nic_attr[VXGE_PRINT_PART_NO]);
3313
3314	device_printf(ndev, "Firmware version\t: %s\n",
3315	    vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]);
3316
3317	device_printf(ndev, "Firmware date\t: %s\n",
3318	    vdev->config.nic_attr[VXGE_PRINT_FW_DATE]);
3319
3320	device_printf(ndev, "Link width\t: %s\n",
3321	    vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]);
3322
3323	if (vdev->is_privilaged) {
3324		device_printf(ndev, "Function mode\t: %s\n",
3325		    vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]);
3326	}
3327
3328	device_printf(ndev, "Interrupt type\t: %s\n",
3329	    vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]);
3330
3331	device_printf(ndev, "VPath(s) opened\t: %s\n",
3332	    vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]);
3333
3334	device_printf(ndev, "Adapter Type\t: %s\n",
3335	    vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]);
3336
3337	device_printf(ndev, "PMD Port 0\t: %s\n",
3338	    vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0]);
3339
3340	if (hw_info->ports > 1) {
3341		device_printf(ndev, "PMD Port 1\t: %s\n",
3342		    vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1]);
3343
3344		if (vdev->is_privilaged) {
3345			device_printf(ndev, "Port Mode\t: %s\n",
3346			    vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]);
3347
3348			if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3349				device_printf(ndev, "Port Failure\t: %s\n",
3350				    vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]);
3351
3352			device_printf(vdev->ndev, "Active Port\t: %s\n",
3353			    vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]);
3354		}
3355	}
3356
3357	if (vdev->is_privilaged && !is_single_func(hw_info->function_mode)) {
3358		device_printf(vdev->ndev, "L2 Switch\t: %s\n",
3359		    vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]);
3360	}
3361
3362	device_printf(ndev, "MTU is %s\n",
3363	    vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]);
3364
3365	device_printf(ndev, "LRO %s\n",
3366	    vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]);
3367
3368	device_printf(ndev, "RTH %s\n",
3369	    vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]);
3370
3371	device_printf(ndev, "TSO %s\n",
3372	    vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]);
3373
3374	SYSCTL_ADD_STRING(ctx, children,
3375	    OID_AUTO, "Driver version", CTLFLAG_RD,
3376	    &vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION],
3377	    0, "Driver version");
3378
3379	SYSCTL_ADD_STRING(ctx, children,
3380	    OID_AUTO, "Serial number", CTLFLAG_RD,
3381	    &vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO],
3382	    0, "Serial number");
3383
3384	SYSCTL_ADD_STRING(ctx, children,
3385	    OID_AUTO, "Part number", CTLFLAG_RD,
3386	    &vdev->config.nic_attr[VXGE_PRINT_PART_NO],
3387	    0, "Part number");
3388
3389	SYSCTL_ADD_STRING(ctx, children,
3390	    OID_AUTO, "Firmware version", CTLFLAG_RD,
3391	    &vdev->config.nic_attr[VXGE_PRINT_FW_VERSION],
3392	    0, "Firmware version");
3393
3394	SYSCTL_ADD_STRING(ctx, children,
3395	    OID_AUTO, "Firmware date", CTLFLAG_RD,
3396	    &vdev->config.nic_attr[VXGE_PRINT_FW_DATE],
3397	    0, "Firmware date");
3398
3399	SYSCTL_ADD_STRING(ctx, children,
3400	    OID_AUTO, "Link width", CTLFLAG_RD,
3401	    &vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO],
3402	    0, "Link width");
3403
3404	if (vdev->is_privilaged) {
3405		SYSCTL_ADD_STRING(ctx, children,
3406		    OID_AUTO, "Function mode", CTLFLAG_RD,
3407		    &vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3408		    0, "Function mode");
3409	}
3410
3411	SYSCTL_ADD_STRING(ctx, children,
3412	    OID_AUTO, "Interrupt type", CTLFLAG_RD,
3413	    &vdev->config.nic_attr[VXGE_PRINT_INTR_MODE],
3414	    0, "Interrupt type");
3415
3416	SYSCTL_ADD_STRING(ctx, children,
3417	    OID_AUTO, "VPath(s) opened", CTLFLAG_RD,
3418	    &vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT],
3419	    0, "VPath(s) opened");
3420
3421	SYSCTL_ADD_STRING(ctx, children,
3422	    OID_AUTO, "Adapter Type", CTLFLAG_RD,
3423	    &vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE],
3424	    0, "Adapter Type");
3425
3426	SYSCTL_ADD_STRING(ctx, children,
3427	    OID_AUTO, "pmd port 0", CTLFLAG_RD,
3428	    &vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0],
3429	    0, "pmd port");
3430
3431	if (hw_info->ports > 1) {
3432
3433		SYSCTL_ADD_STRING(ctx, children,
3434		    OID_AUTO, "pmd port 1", CTLFLAG_RD,
3435		    &vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1],
3436		    0, "pmd port");
3437
3438		if (vdev->is_privilaged) {
3439			SYSCTL_ADD_STRING(ctx, children,
3440			    OID_AUTO, "Port Mode", CTLFLAG_RD,
3441			    &vdev->config.nic_attr[VXGE_PRINT_PORT_MODE],
3442			    0, "Port Mode");
3443
3444			if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3445				SYSCTL_ADD_STRING(ctx, children,
3446				    OID_AUTO, "Port Failure", CTLFLAG_RD,
3447				    &vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE],
3448				    0, "Port Failure");
3449
3450			SYSCTL_ADD_STRING(ctx, children,
3451			    OID_AUTO, "L2 Switch", CTLFLAG_RD,
3452			    &vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE],
3453			    0, "L2 Switch");
3454		}
3455	}
3456
3457	SYSCTL_ADD_STRING(ctx, children,
3458	    OID_AUTO, "LRO mode", CTLFLAG_RD,
3459	    &vdev->config.nic_attr[VXGE_PRINT_LRO_MODE],
3460	    0, "LRO mode");
3461
3462	SYSCTL_ADD_STRING(ctx, children,
3463	    OID_AUTO, "RTH mode", CTLFLAG_RD,
3464	    &vdev->config.nic_attr[VXGE_PRINT_RTH_MODE],
3465	    0, "RTH mode");
3466
3467	SYSCTL_ADD_STRING(ctx, children,
3468	    OID_AUTO, "TSO mode", CTLFLAG_RD,
3469	    &vdev->config.nic_attr[VXGE_PRINT_TSO_MODE],
3470	    0, "TSO mode");
3471}
3472
3473void
3474vxge_pmd_port_type_get(vxge_dev_t *vdev, u32 port_type,
3475    char *ifm_name, u8 ifm_len)
3476{
3477
3478	vdev->ifm_optics = IFM_UNKNOWN;
3479
3480	switch (port_type) {
3481	case VXGE_HAL_DEVICE_PMD_TYPE_10G_SR:
3482		vdev->ifm_optics = IFM_10G_SR;
3483		strlcpy(ifm_name, "10GbE SR", ifm_len);
3484		break;
3485
3486	case VXGE_HAL_DEVICE_PMD_TYPE_10G_LR:
3487		vdev->ifm_optics = IFM_10G_LR;
3488		strlcpy(ifm_name, "10GbE LR", ifm_len);
3489		break;
3490
3491	case VXGE_HAL_DEVICE_PMD_TYPE_10G_LRM:
3492		vdev->ifm_optics = IFM_10G_LRM;
3493		strlcpy(ifm_name, "10GbE LRM", ifm_len);
3494		break;
3495
3496	case VXGE_HAL_DEVICE_PMD_TYPE_10G_DIRECT:
3497		vdev->ifm_optics = IFM_10G_TWINAX;
3498		strlcpy(ifm_name, "10GbE DA (Direct Attached)", ifm_len);
3499		break;
3500
3501	case VXGE_HAL_DEVICE_PMD_TYPE_10G_CX4:
3502		vdev->ifm_optics = IFM_10G_CX4;
3503		strlcpy(ifm_name, "10GbE CX4", ifm_len);
3504		break;
3505
3506	case VXGE_HAL_DEVICE_PMD_TYPE_10G_BASE_T:
3507#if __FreeBSD_version >= 800000
3508		vdev->ifm_optics = IFM_10G_T;
3509#endif
3510		strlcpy(ifm_name, "10GbE baseT", ifm_len);
3511		break;
3512
3513	case VXGE_HAL_DEVICE_PMD_TYPE_10G_OTHER:
3514		strlcpy(ifm_name, "10GbE Other", ifm_len);
3515		break;
3516
3517	case VXGE_HAL_DEVICE_PMD_TYPE_1G_SX:
3518		vdev->ifm_optics = IFM_1000_SX;
3519		strlcpy(ifm_name, "1GbE SX", ifm_len);
3520		break;
3521
3522	case VXGE_HAL_DEVICE_PMD_TYPE_1G_LX:
3523		vdev->ifm_optics = IFM_1000_LX;
3524		strlcpy(ifm_name, "1GbE LX", ifm_len);
3525		break;
3526
3527	case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX:
3528		vdev->ifm_optics = IFM_1000_CX;
3529		strlcpy(ifm_name, "1GbE CX", ifm_len);
3530		break;
3531
3532	case VXGE_HAL_DEVICE_PMD_TYPE_1G_BASE_T:
3533		vdev->ifm_optics = IFM_1000_T;
3534		strlcpy(ifm_name, "1GbE baseT", ifm_len);
3535		break;
3536
3537	case VXGE_HAL_DEVICE_PMD_TYPE_1G_DIRECT:
3538		strlcpy(ifm_name, "1GbE DA (Direct Attached)",
3539		    ifm_len);
3540		break;
3541
3542	case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX4:
3543		strlcpy(ifm_name, "1GbE CX4", ifm_len);
3544		break;
3545
3546	case VXGE_HAL_DEVICE_PMD_TYPE_1G_OTHER:
3547		strlcpy(ifm_name, "1GbE Other", ifm_len);
3548		break;
3549
3550	default:
3551	case VXGE_HAL_DEVICE_PMD_TYPE_UNKNOWN:
3552		strlcpy(ifm_name, "UNSUP", ifm_len);
3553		break;
3554	}
3555}
3556
3557u32
3558vxge_ring_length_get(u32 buffer_mode)
3559{
3560	return (VXGE_DEFAULT_RING_BLOCK *
3561	    vxge_hal_ring_rxds_per_block_get(buffer_mode));
3562}
3563
3564/*
3565 * Truncates space-padded strings at the first space
3566 * and NUL-terminates them
3567 */
3568static inline void
3569vxge_null_terminate(char *str, size_t len)
3570{
3571	len--;
3572	while (*str && (*str != ' ') && (len != 0))
3573		++str, --len;
3574
3576	if (*str)
3577		*str = '\0';
3578}
3579
3580/*
3581 * vxge_ioctl
3582 * Callback to control the device
3583 */
3584int
3585vxge_ioctl(ifnet_t ifp, u_long command, caddr_t data)
3586{
3587	int mask, err = 0;
3588	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
3589	struct ifreq *ifr = (struct ifreq *) data;
3590
3591	if (!vdev->is_active)
3592		return (EBUSY);
3593
3594	switch (command) {
3595		/* Set/Get ifnet address */
3596	case SIOCSIFADDR:
3597	case SIOCGIFADDR:
3598		ether_ioctl(ifp, command, data);
3599		break;
3600
3601		/* Set Interface MTU */
3602	case SIOCSIFMTU:
3603		err = vxge_change_mtu(vdev, (unsigned long)ifr->ifr_mtu);
3604		break;
3605
3606		/* Set Interface Flags */
3607	case SIOCSIFFLAGS:
3608		VXGE_DRV_LOCK(vdev);
3609		if (ifp->if_flags & IFF_UP) {
3610			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3611				if ((ifp->if_flags ^ vdev->if_flags) &
3612				    (IFF_PROMISC | IFF_ALLMULTI))
3613					vxge_promisc_set(vdev);
3614			} else {
3615				vxge_init_locked(vdev);
3616			}
3617		} else {
3618			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3619				vxge_stop_locked(vdev);
3620		}
3621		vdev->if_flags = ifp->if_flags;
3622		VXGE_DRV_UNLOCK(vdev);
3623		break;
3624
3625		/* Add/delete multicast address */
3626	case SIOCADDMULTI:
3627	case SIOCDELMULTI:
3628		break;
3629
3630		/* Get/Set Interface Media */
3631	case SIOCSIFMEDIA:
3632	case SIOCGIFMEDIA:
3633		err = ifmedia_ioctl(ifp, ifr, &vdev->media, command);
3634		break;
3635
3636		/* Set Capabilities */
3637	case SIOCSIFCAP:
3638		VXGE_DRV_LOCK(vdev);
3639		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3640
3641		if (mask & IFCAP_TXCSUM) {
3642			ifp->if_capenable ^= IFCAP_TXCSUM;
3643			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3644
3645			if ((ifp->if_capenable & IFCAP_TSO) &&
3646			    !(ifp->if_capenable & IFCAP_TXCSUM)) {
3647
3648				ifp->if_capenable &= ~IFCAP_TSO;
3649				ifp->if_hwassist &= ~CSUM_TSO;
3650				if_printf(ifp, "TSO Disabled\n");
3651			}
3652		}
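		/*
		 * TSO depends on Tx checksum offload: clearing IFCAP_TXCSUM
		 * above also forces TSO off, and the IFCAP_TSO4 branch below
		 * re-checks the same dependency before enabling CSUM_TSO.
		 */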
3653		if (mask & IFCAP_RXCSUM)
3654			ifp->if_capenable ^= IFCAP_RXCSUM;
3655
3656		if (mask & IFCAP_TSO4) {
3657			ifp->if_capenable ^= IFCAP_TSO4;
3658
3659			if (ifp->if_capenable & IFCAP_TSO) {
3660				if (ifp->if_capenable & IFCAP_TXCSUM) {
3661					ifp->if_hwassist |= CSUM_TSO;
3662					if_printf(ifp, "TSO Enabled\n");
3663				} else {
3664					ifp->if_capenable &= ~IFCAP_TSO;
3665					ifp->if_hwassist &= ~CSUM_TSO;
3666					if_printf(ifp,
3667					    "Enable tx checksum offload "
3668					    "first.\n");
3669					err = EAGAIN;
3670				}
3671			} else {
3672				ifp->if_hwassist &= ~CSUM_TSO;
3673				if_printf(ifp, "TSO Disabled\n");
3674			}
3675		}
3676		if (mask & IFCAP_LRO)
3677			ifp->if_capenable ^= IFCAP_LRO;
3678
3679		if (mask & IFCAP_VLAN_HWTAGGING)
3680			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3681
3682		if (mask & IFCAP_VLAN_MTU)
3683			ifp->if_capenable ^= IFCAP_VLAN_MTU;
3684
3685		if (mask & IFCAP_VLAN_HWCSUM)
3686			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
3687
3688#if __FreeBSD_version >= 800000
3689		if (mask & IFCAP_VLAN_HWTSO)
3690			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3691#endif
3692
3693#if defined(VLAN_CAPABILITIES)
3694		VLAN_CAPABILITIES(ifp);
3695#endif
3696
3697		VXGE_DRV_UNLOCK(vdev);
3698		break;
3699
3700	case SIOCGPRIVATE_0:
3701		VXGE_DRV_LOCK(vdev);
3702		err = vxge_ioctl_stats(vdev, ifr);
3703		VXGE_DRV_UNLOCK(vdev);
3704		break;
3705
3706	case SIOCGPRIVATE_1:
3707		VXGE_DRV_LOCK(vdev);
3708		err = vxge_ioctl_regs(vdev, ifr);
3709		VXGE_DRV_UNLOCK(vdev);
3710		break;
3711
3712	default:
3713		err = ether_ioctl(ifp, command, data);
3714		break;
3715	}
3716
3717	return (err);
3718}
3719
3720/*
3721 * vxge_ioctl_regs
3722 * IOCTL to get registers
3723 */
3724int
3725vxge_ioctl_regs(vxge_dev_t *vdev, struct ifreq *ifr)
3726{
3727	u64 value = 0x0;
3728	u32 vp_id = 0;
3729	u32 offset, reqd_size = 0;
3730	int i, err = EINVAL;
3731
3732	char *command = (char *) ifr->ifr_data;
3733	void *reg_info = (void *) ifr->ifr_data;
3734
3735	vxge_vpath_t *vpath;
3736	vxge_hal_status_e status = VXGE_HAL_OK;
3737	vxge_hal_mgmt_reg_type_e regs_type;
3738
3739	switch (*command) {
3740	case vxge_hal_mgmt_reg_type_pcicfgmgmt:
3741		if (vdev->is_privilaged) {
3742			reqd_size = sizeof(vxge_hal_pcicfgmgmt_reg_t);
3743			regs_type = vxge_hal_mgmt_reg_type_pcicfgmgmt;
3744		}
3745		break;
3746
3747	case vxge_hal_mgmt_reg_type_mrpcim:
3748		if (vdev->is_privilaged) {
3749			reqd_size = sizeof(vxge_hal_mrpcim_reg_t);
3750			regs_type = vxge_hal_mgmt_reg_type_mrpcim;
3751		}
3752		break;
3753
3754	case vxge_hal_mgmt_reg_type_srpcim:
3755		if (vdev->is_privilaged) {
3756			reqd_size = sizeof(vxge_hal_srpcim_reg_t);
3757			regs_type = vxge_hal_mgmt_reg_type_srpcim;
3758		}
3759		break;
3760
3761	case vxge_hal_mgmt_reg_type_memrepair:
3762		if (vdev->is_privilaged) {
3763			/* reqd_size = sizeof(vxge_hal_memrepair_reg_t); */
3764			regs_type = vxge_hal_mgmt_reg_type_memrepair;
3765		}
3766		break;
3767
3768	case vxge_hal_mgmt_reg_type_legacy:
3769		reqd_size = sizeof(vxge_hal_legacy_reg_t);
3770		regs_type = vxge_hal_mgmt_reg_type_legacy;
3771		break;
3772
3773	case vxge_hal_mgmt_reg_type_toc:
3774		reqd_size = sizeof(vxge_hal_toc_reg_t);
3775		regs_type = vxge_hal_mgmt_reg_type_toc;
3776		break;
3777
3778	case vxge_hal_mgmt_reg_type_common:
3779		reqd_size = sizeof(vxge_hal_common_reg_t);
3780		regs_type = vxge_hal_mgmt_reg_type_common;
3781		break;
3782
3783	case vxge_hal_mgmt_reg_type_vpmgmt:
3784		reqd_size = sizeof(vxge_hal_vpmgmt_reg_t);
3785		regs_type = vxge_hal_mgmt_reg_type_vpmgmt;
3786		vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]);
3787		vp_id = vpath->vp_id;
3788		break;
3789
3790	case vxge_hal_mgmt_reg_type_vpath:
3791		reqd_size = sizeof(vxge_hal_vpath_reg_t);
3792		regs_type = vxge_hal_mgmt_reg_type_vpath;
3793		vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]);
3794		vp_id = vpath->vp_id;
3795		break;
3796
3797	case VXGE_GET_VPATH_COUNT:
3798		*((u32 *) reg_info) = vdev->no_of_vpath;
3799		err = 0;
3800		break;
3801
3802	default:
3803		reqd_size = 0;
3804		break;
3805	}
3806
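	/*
	 * Dump the selected register block 64 bits at a time: the offset
	 * advances in 8-byte steps and each value is copied back into the
	 * caller-supplied buffer, giving userland a raw image of the block.
	 */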
3807	if (reqd_size) {
3808		for (i = 0, offset = 0; offset < reqd_size;
3809		    i++, offset += 0x0008) {
3810			value = 0x0;
3811			status = vxge_hal_mgmt_reg_read(vdev->devh, regs_type,
3812			    vp_id, offset, &value);
3813
3814			err = (status != VXGE_HAL_OK) ? EINVAL : 0;
3815			if (err == EINVAL)
3816				break;
3817
3818			*((u64 *) ((u64 *) reg_info + i)) = value;
3819		}
3820	}
3821	return (err);
3822}
3823
3824/*
3825 * vxge_ioctl_stats
3826 * IOCTL to get statistics
3827 */
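/*
 * Rough userland sketch (illustrative only; assumes an open socket
 * descriptor sock_fd, interface name "vxge0", and that the VXGE_GET_*
 * command values and buffer sizes come from the driver's private headers):
 * the first byte of ifr_data selects the query and the same buffer receives
 * the copied-out result.
 *
 *	struct ifreq ifr;
 *	char buffer[VXGE_STATS_BUFFER_SIZE];
 *
 *	buffer[0] = VXGE_GET_DRIVER_STATS;
 *	strlcpy(ifr.ifr_name, "vxge0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (caddr_t) buffer;
 *	ioctl(sock_fd, SIOCGPRIVATE_0, &ifr);
 */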
3828int
3829vxge_ioctl_stats(vxge_dev_t *vdev, struct ifreq *ifr)
3830{
3831	int i, retsize, err = EINVAL;
3832	u32 bufsize;
3833
3834	vxge_vpath_t *vpath;
3835	vxge_bw_info_t *bw_info;
3836	vxge_port_info_t *port_info;
3837	vxge_drv_stats_t *drv_stat;
3838
3839	char *buffer = NULL;
3840	char *command = (char *) ifr->ifr_data;
3841	vxge_hal_status_e status = VXGE_HAL_OK;
3842
3843	switch (*command) {
3844	case VXGE_GET_PCI_CONF:
3845		bufsize = VXGE_STATS_BUFFER_SIZE;
3846		buffer = (char *) vxge_mem_alloc(bufsize);
3847		if (buffer != NULL) {
3848			status = vxge_hal_aux_pci_config_read(vdev->devh,
3849			    bufsize, buffer, &retsize);
3850			if (status == VXGE_HAL_OK)
3851				err = copyout(buffer, ifr->ifr_data, retsize);
3852			else
3853				device_printf(vdev->ndev,
3854				    "failed pciconfig statistics query\n");
3855
3856			vxge_mem_free(buffer, bufsize);
3857		}
3858		break;
3859
3860	case VXGE_GET_MRPCIM_STATS:
3861		if (!vdev->is_privilaged)
3862			break;
3863
3864		bufsize = VXGE_STATS_BUFFER_SIZE;
3865		buffer = (char *) vxge_mem_alloc(bufsize);
3866		if (buffer != NULL) {
3867			status = vxge_hal_aux_stats_mrpcim_read(vdev->devh,
3868			    bufsize, buffer, &retsize);
3869			if (status == VXGE_HAL_OK)
3870				err = copyout(buffer, ifr->ifr_data, retsize);
3871			else
3872				device_printf(vdev->ndev,
3873				    "failed mrpcim statistics query\n");
3874
3875			vxge_mem_free(buffer, bufsize);
3876		}
3877		break;
3878
3879	case VXGE_GET_DEVICE_STATS:
3880		bufsize = VXGE_STATS_BUFFER_SIZE;
3881		buffer = (char *) vxge_mem_alloc(bufsize);
3882		if (buffer != NULL) {
3883			status = vxge_hal_aux_stats_device_read(vdev->devh,
3884			    bufsize, buffer, &retsize);
3885			if (status == VXGE_HAL_OK)
3886				err = copyout(buffer, ifr->ifr_data, retsize);
3887			else
3888				device_printf(vdev->ndev,
3889				    "failed device statistics query\n");
3890
3891			vxge_mem_free(buffer, bufsize);
3892		}
3893		break;
3894
3895	case VXGE_GET_DEVICE_HWINFO:
3896		bufsize = sizeof(vxge_device_hw_info_t);
3897		buffer = (char *) vxge_mem_alloc(bufsize);
3898		if (buffer != NULL) {
3899			vxge_os_memcpy(
3900			    &(((vxge_device_hw_info_t *) buffer)->hw_info),
3901			    &vdev->config.hw_info,
3902			    sizeof(vxge_hal_device_hw_info_t));
3903
3904			((vxge_device_hw_info_t *) buffer)->port_mode =
3905			    vdev->port_mode;
3906
3907			((vxge_device_hw_info_t *) buffer)->port_failure =
3908			    vdev->port_failure;
3909
3910			err = copyout(buffer, ifr->ifr_data, bufsize);
3911			if (err != 0)
3912				device_printf(vdev->ndev,
3913				    "failed device hardware info query\n");
3914
3915			vxge_mem_free(buffer, bufsize);
3916		}
3917		break;
3918
3919	case VXGE_GET_DRIVER_STATS:
3920		bufsize = sizeof(vxge_drv_stats_t) * vdev->no_of_vpath;
3921		drv_stat = (vxge_drv_stats_t *) vxge_mem_alloc(bufsize);
3922		if (drv_stat != NULL) {
3923			for (i = 0; i < vdev->no_of_vpath; i++) {
3924				vpath = &(vdev->vpaths[i]);
3925
3926				vpath->driver_stats.rx_lro_queued +=
3927				    vpath->lro.lro_queued;
3928
3929				vpath->driver_stats.rx_lro_flushed +=
3930				    vpath->lro.lro_flushed;
3931
3932				vxge_os_memcpy(&drv_stat[i],
3933				    &(vpath->driver_stats),
3934				    sizeof(vxge_drv_stats_t));
3935			}
3936
3937			err = copyout(drv_stat, ifr->ifr_data, bufsize);
3938			if (err != 0)
3939				device_printf(vdev->ndev,
3940				    "failed driver statistics query\n");
3941
3942			vxge_mem_free(drv_stat, bufsize);
3943		}
3944		break;
3945
3946	case VXGE_GET_BANDWIDTH:
3947		bw_info = (vxge_bw_info_t *) ifr->ifr_data;
3948
3949		if ((vdev->config.hw_info.func_id != 0) &&
3950		    (vdev->hw_fw_version < VXGE_FW_VERSION(1, 8, 0)))
3951			break;
3952
3953		if (vdev->config.hw_info.func_id != 0)
3954			bw_info->func_id = vdev->config.hw_info.func_id;
3955
3956		status = vxge_bw_priority_get(vdev, bw_info);
3957		if (status != VXGE_HAL_OK)
3958			break;
3959
3960		err = copyout(bw_info, ifr->ifr_data, sizeof(vxge_bw_info_t));
3961		break;
3962
3963	case VXGE_SET_BANDWIDTH:
3964		if (vdev->is_privilaged)
3965			err = vxge_bw_priority_set(vdev, ifr);
3966		break;
3967
3968	case VXGE_SET_PORT_MODE:
3969		if (vdev->is_privilaged) {
3970			if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
3971				port_info = (vxge_port_info_t *) ifr->ifr_data;
3972				vdev->config.port_mode = port_info->port_mode;
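				/*
				 * Note: ENXIO from vxge_port_mode_update() is
				 * treated as success here; the new port mode
				 * takes effect only after a power cycle.
				 */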
3973				err = vxge_port_mode_update(vdev);
3974				if (err != ENXIO)
3975					err = VXGE_HAL_FAIL;
3976				else {
3977					err = VXGE_HAL_OK;
3978					device_printf(vdev->ndev,
3979					    "PLEASE POWER CYCLE THE SYSTEM\n");
3980				}
3981			}
3982		}
3983		break;
3984
3985	case VXGE_GET_PORT_MODE:
3986		if (vdev->is_privilaged) {
3987			if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
3988				port_info = (vxge_port_info_t *) ifr->ifr_data;
3989				err = vxge_port_mode_get(vdev, port_info);
3990				if (err == VXGE_HAL_OK) {
3991					err = copyout(port_info, ifr->ifr_data,
3992					    sizeof(vxge_port_info_t));
3993				}
3994			}
3995		}
3996		break;
3997
3998	default:
3999		break;
4000	}
4001
4002	return (err);
4003}
4004
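/*
 * vxge_bw_priority_config
 * Applies the configured bandwidth/priority settings to every function
 * during initialization
 */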
4005int
4006vxge_bw_priority_config(vxge_dev_t *vdev)
4007{
4008	u32 i;
4009	int err = EINVAL;
4010
4011	for (i = 0; i < vdev->no_of_func; i++) {
4012		err = vxge_bw_priority_update(vdev, i, TRUE);
4013		if (err != 0)
4014			break;
4015	}
4016
4017	return (err);
4018}
4019
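/*
 * vxge_bw_priority_set
 * Updates bandwidth/priority settings for a single function as requested
 * through the private ioctl
 */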
4020int
4021vxge_bw_priority_set(vxge_dev_t *vdev, struct ifreq *ifr)
4022{
4023	int err;
4024	u32 func_id;
4025	vxge_bw_info_t *bw_info;
4026
4027	bw_info = (vxge_bw_info_t *) ifr->ifr_data;
4028	func_id = bw_info->func_id;
4029
4030	vdev->config.bw_info[func_id].priority = bw_info->priority;
4031	vdev->config.bw_info[func_id].bandwidth = bw_info->bandwidth;
4032
4033	err = vxge_bw_priority_update(vdev, func_id, FALSE);
4034
4035	return (err);
4036}
4037
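/*
 * vxge_bw_priority_update
 * Programs the bandwidth limit and priority for every vpath that belongs
 * to the given function
 */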
4038int
4039vxge_bw_priority_update(vxge_dev_t *vdev, u32 func_id, bool binit)
4040{
4041	u32 i, set = 0;
4042	u32 bandwidth, priority, vpath_count;
4043	u64 vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS];
4044
4045	vxge_hal_device_t *hldev;
4046	vxge_hal_vp_config_t *vp_config;
4047	vxge_hal_status_e status = VXGE_HAL_OK;
4048
4049	hldev = vdev->devh;
4050
4051	status = vxge_hal_get_vpath_list(vdev->devh, func_id,
4052	    vpath_list, &vpath_count);
4053
4054	if (status != VXGE_HAL_OK)
4055		return (status);
4056
4057	for (i = 0; i < vpath_count; i++) {
4058		vp_config = &(hldev->config.vp_config[vpath_list[i]]);
4059
4060		/* Configure Bandwidth */
4061		if (vdev->config.bw_info[func_id].bandwidth !=
4062		    VXGE_HAL_VPATH_BW_LIMIT_DEFAULT) {
4063
4064			set = 1;
4065			bandwidth = vdev->config.bw_info[func_id].bandwidth;
4066			if (bandwidth < VXGE_HAL_VPATH_BW_LIMIT_MIN ||
4067			    bandwidth > VXGE_HAL_VPATH_BW_LIMIT_MAX) {
4068
4069				bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT;
4070			}
4071			vp_config->bandwidth = bandwidth;
4072		}
4073
4074		/*
4075		 * If b/w limiting is enabled on any of the
4076		 * VFs, then for the remaining VFs set the priority to 3
4077		 * and the b/w limit to the maximum (i.e., 10 Gb).
4078		 */
4079		if (vp_config->bandwidth == VXGE_HAL_VPATH_BW_LIMIT_DEFAULT)
4080			vp_config->bandwidth = VXGE_HAL_VPATH_BW_LIMIT_MAX;
4081
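		/*
		 * On initial configuration in low-latency mode, function 0
		 * gets the high vpath priority.
		 */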
4082		if (binit && vdev->config.low_latency) {
4083			if (func_id == 0)
4084				vdev->config.bw_info[func_id].priority =
4085				    VXGE_DEFAULT_VPATH_PRIORITY_HIGH;
4086		}
4087
4088		/* Configure Priority */
4089		if (vdev->config.bw_info[func_id].priority !=
4090		    VXGE_HAL_VPATH_PRIORITY_DEFAULT) {
4091
4092			set = 1;
4093			priority = vdev->config.bw_info[func_id].priority;
4094			if (priority < VXGE_HAL_VPATH_PRIORITY_MIN ||
4095			    priority > VXGE_HAL_VPATH_PRIORITY_MAX) {
4096
4097				priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT;
4098			}
4099			vp_config->priority = priority;
4100
4101		} else if (vdev->config.low_latency) {
4102			set = 1;
4103			vp_config->priority = VXGE_DEFAULT_VPATH_PRIORITY_LOW;
4104		}
4105
4106		if (set == 1) {
4107			status = vxge_hal_rx_bw_priority_set(vdev->devh,
4108			    vpath_list[i]);
4109			if (status != VXGE_HAL_OK)
4110				break;
4111
4112			if (vpath_list[i] < VXGE_HAL_TX_BW_VPATH_LIMIT) {
4113				status = vxge_hal_tx_bw_priority_set(
4114				    vdev->devh, vpath_list[i]);
4115				if (status != VXGE_HAL_OK)
4116					break;
4117			}
4118		}
4119	}
4120
4121	return ((status == VXGE_HAL_OK) ? 0 : EINVAL);
4122}
4123
4124/*
4125 * vxge_intr_coalesce_tx
4126 * Adjusts Tx interrupt coalescing when the interrupt rate falls outside the expected range
4127 * Return Value: Nothing
4128 */
4129void
4130vxge_intr_coalesce_tx(vxge_vpath_t *vpath)
4131{
4132	u32 timer;
4133
4134	if (!vpath->tx_intr_coalesce)
4135		return;
4136
4137	vpath->tx_interrupts++;
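	/*
	 * About every 10ms (hz/100 ticks): if the interrupt count exceeded
	 * the threshold, stretch the TTI restriction timer to throttle Tx
	 * interrupts; otherwise restore it to zero.
	 */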
4138	if (ticks > vpath->tx_ticks + hz/100) {
4139
4140		vpath->tx_ticks = ticks;
4141		timer = vpath->tti_rtimer_val;
4142		if (vpath->tx_interrupts > VXGE_MAX_TX_INTERRUPT_COUNT) {
4143			if (timer != VXGE_TTI_RTIMER_ADAPT_VAL) {
4144				vpath->tti_rtimer_val =
4145				    VXGE_TTI_RTIMER_ADAPT_VAL;
4146
4147				vxge_hal_vpath_dynamic_tti_rtimer_set(
4148				    vpath->handle, vpath->tti_rtimer_val);
4149			}
4150		} else {
4151			if (timer != 0) {
4152				vpath->tti_rtimer_val = 0;
4153				vxge_hal_vpath_dynamic_tti_rtimer_set(
4154				    vpath->handle, vpath->tti_rtimer_val);
4155			}
4156		}
4157		vpath->tx_interrupts = 0;
4158	}
4159}
4160
4161/*
4162 * vxge_intr_coalesce_rx
4163 * Adjusts Rx interrupt coalescing when the interrupt rate falls outside the expected range
4164 * Return Value: Nothing
4165 */
4166void
4167vxge_intr_coalesce_rx(vxge_vpath_t *vpath)
4168{
4169	u32 timer;
4170
4171	if (!vpath->rx_intr_coalesce)
4172		return;
4173
4174	vpath->rx_interrupts++;
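	/*
	 * About every 10ms (hz/100 ticks): if the interrupt count exceeded
	 * the threshold, stretch the RTI restriction timer to throttle Rx
	 * interrupts; otherwise restore it to zero.
	 */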
4175	if (ticks > vpath->rx_ticks + hz/100) {
4176
4177		vpath->rx_ticks = ticks;
4178		timer = vpath->rti_rtimer_val;
4179
4180		if (vpath->rx_interrupts > VXGE_MAX_RX_INTERRUPT_COUNT) {
4181			if (timer != VXGE_RTI_RTIMER_ADAPT_VAL) {
4182				vpath->rti_rtimer_val =
4183				    VXGE_RTI_RTIMER_ADAPT_VAL;
4184
4185				vxge_hal_vpath_dynamic_rti_rtimer_set(
4186				    vpath->handle, vpath->rti_rtimer_val);
4187			}
4188		} else {
4189			if (timer != 0) {
4190				vpath->rti_rtimer_val = 0;
4191				vxge_hal_vpath_dynamic_rti_rtimer_set(
4192				    vpath->handle, vpath->rti_rtimer_val);
4193			}
4194		}
4195		vpath->rx_interrupts = 0;
4196	}
4197}
4198
4199/*
4200 * vxge_methods FreeBSD device interface entry points
4201 */
4202static device_method_t vxge_methods[] = {
4203	DEVMETHOD(device_probe, vxge_probe),
4204	DEVMETHOD(device_attach, vxge_attach),
4205	DEVMETHOD(device_detach, vxge_detach),
4206	DEVMETHOD(device_shutdown, vxge_shutdown),
4207	{0, 0}
4208};
4209
4210static driver_t vxge_driver = {
4211	"vxge", vxge_methods, sizeof(vxge_dev_t),
4212};
4213
4214static devclass_t vxge_devclass;
4215
4216DRIVER_MODULE(vxge, pci, vxge_driver, vxge_devclass, 0, 0);
4217