1142425Snectar/*-
2142425Snectar * Copyright(c) 2002-2011 Exar Corp.
3142425Snectar * All rights reserved.
4142425Snectar *
5142425Snectar * Redistribution and use in source and binary forms, with or without
6142425Snectar * modification are permitted provided the following conditions are met:
7142425Snectar *
8142425Snectar *    1. Redistributions of source code must retain the above copyright notice,
9142425Snectar *       this list of conditions and the following disclaimer.
10142425Snectar *
11142425Snectar *    2. Redistributions in binary form must reproduce the above copyright
12142425Snectar *       notice, this list of conditions and the following disclaimer in the
13142425Snectar *       documentation and/or other materials provided with the distribution.
14142425Snectar *
15142425Snectar *    3. Neither the name of the Exar Corporation nor the names of its
16142425Snectar *       contributors may be used to endorse or promote products derived from
17142425Snectar *       this software without specific prior written permission.
18142425Snectar *
19142425Snectar * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20142425Snectar * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21142425Snectar * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22142425Snectar * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23142425Snectar * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24142425Snectar * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25142425Snectar * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26142425Snectar * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27142425Snectar * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28142425Snectar * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29142425Snectar * POSSIBILITY OF SUCH DAMAGE.
30142425Snectar */
31142425Snectar/*$FreeBSD: stable/10/sys/dev/vxge/vxge.c 314939 2017-03-09 02:59:02Z pfg $*/
32142425Snectar
33142425Snectar#include <dev/vxge/vxge.h>
34142425Snectar
/* Bus/slot key of the last claimed adapter; -1 until the first probe. */
static int vxge_pci_bd_no = -1;
/* Non-zero once the one-time copyright banner has been printed. */
static u32 vxge_drv_copyright = 0;
/* Count of devices sharing the one-time HAL driver initialization. */
static u32 vxge_dev_ref_count = 0;
/* Set when a firmware upgrade requires a reboot before re-attach. */
static u32 vxge_dev_req_reboot = 0;

/*
 * Per-count selector masks (2^k - 1 values).  NOTE(review): meaning of the
 * table inferred from the mask-shaped values; confirm against the steering
 * code that indexes it.
 */
static int vpath_selector[VXGE_HAL_MAX_VIRTUAL_PATHS] = \
{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
42142425Snectar
43142425Snectar/*
44142425Snectar * vxge_probe
45142425Snectar * Probes for x3100 devices
46142425Snectar */
47142425Snectarint
48142425Snectarvxge_probe(device_t ndev)
49142425Snectar{
50142425Snectar	int err = ENXIO;
51142425Snectar
52142425Snectar	u16 pci_bd_no = 0;
53142425Snectar	u16 pci_vendor_id = 0;
54142425Snectar	u16 pci_device_id = 0;
55142425Snectar
56142425Snectar	char adapter_name[64];
57142425Snectar
58142425Snectar	pci_vendor_id = pci_get_vendor(ndev);
59142425Snectar	if (pci_vendor_id != VXGE_PCI_VENDOR_ID)
60142425Snectar		goto _exit0;
61142425Snectar
62142425Snectar	pci_device_id = pci_get_device(ndev);
63142425Snectar
64142425Snectar	if (pci_device_id == VXGE_PCI_DEVICE_ID_TITAN_1) {
65142425Snectar
66142425Snectar		pci_bd_no = (pci_get_bus(ndev) | pci_get_slot(ndev));
67142425Snectar
68142425Snectar		snprintf(adapter_name, sizeof(adapter_name),
69142425Snectar		    VXGE_ADAPTER_NAME, pci_get_revid(ndev));
70142425Snectar		device_set_desc_copy(ndev, adapter_name);
71142425Snectar
72142425Snectar		if (!vxge_drv_copyright) {
73142425Snectar			device_printf(ndev, VXGE_COPYRIGHT);
74142425Snectar			vxge_drv_copyright = 1;
75142425Snectar		}
76142425Snectar
77142425Snectar		if (vxge_dev_req_reboot == 0) {
78142425Snectar			vxge_pci_bd_no = pci_bd_no;
79142425Snectar			err = BUS_PROBE_DEFAULT;
80142425Snectar		} else {
81142425Snectar			if (pci_bd_no != vxge_pci_bd_no) {
82142425Snectar				vxge_pci_bd_no = pci_bd_no;
83142425Snectar				err = BUS_PROBE_DEFAULT;
84142425Snectar			}
85142425Snectar		}
86142425Snectar	}
87142425Snectar
88142425Snectar_exit0:
89142425Snectar	return (err);
90142425Snectar}
91142425Snectar
92142425Snectar/*
93142425Snectar * vxge_attach
94142425Snectar * Connects driver to the system if probe was success @ndev handle
95142425Snectar */
96142425Snectarint
97142425Snectarvxge_attach(device_t ndev)
98142425Snectar{
99142425Snectar	int err = 0;
100142425Snectar	vxge_dev_t *vdev;
101142425Snectar	vxge_hal_device_t *hldev = NULL;
102142425Snectar	vxge_hal_device_attr_t device_attr;
103142425Snectar	vxge_free_resources_e error_level = VXGE_FREE_NONE;
104142425Snectar
105142425Snectar	vxge_hal_status_e status = VXGE_HAL_OK;
106142425Snectar
107142425Snectar	/* Get per-ndev buffer */
108142425Snectar	vdev = (vxge_dev_t *) device_get_softc(ndev);
109	if (!vdev)
110		goto _exit0;
111
112	bzero(vdev, sizeof(vxge_dev_t));
113
114	vdev->ndev = ndev;
115	strlcpy(vdev->ndev_name, "vxge", sizeof(vdev->ndev_name));
116
117	err = vxge_driver_config(vdev);
118	if (err != 0)
119		goto _exit0;
120
121	/* Initialize HAL driver */
122	status = vxge_driver_init(vdev);
123	if (status != VXGE_HAL_OK) {
124		device_printf(vdev->ndev, "Failed to initialize driver\n");
125		goto _exit0;
126	}
127	/* Enable PCI bus-master */
128	pci_enable_busmaster(ndev);
129
130	/* Allocate resources */
131	err = vxge_alloc_resources(vdev);
132	if (err != 0) {
133		device_printf(vdev->ndev, "resource allocation failed\n");
134		goto _exit0;
135	}
136
137	err = vxge_device_hw_info_get(vdev);
138	if (err != 0) {
139		error_level = VXGE_FREE_BAR2;
140		goto _exit0;
141	}
142
143	/* Get firmware default values for Device Configuration */
144	vxge_hal_device_config_default_get(vdev->device_config);
145
146	/* Customize Device Configuration based on User request */
147	vxge_vpath_config(vdev);
148
149	/* Allocate ISR resources */
150	err = vxge_alloc_isr_resources(vdev);
151	if (err != 0) {
152		error_level = VXGE_FREE_ISR_RESOURCE;
153		device_printf(vdev->ndev, "isr resource allocation failed\n");
154		goto _exit0;
155	}
156
157	/* HAL attributes */
158	device_attr.bar0 = (u8 *) vdev->pdev->bar_info[0];
159	device_attr.bar1 = (u8 *) vdev->pdev->bar_info[1];
160	device_attr.bar2 = (u8 *) vdev->pdev->bar_info[2];
161	device_attr.regh0 = (vxge_bus_res_t *) vdev->pdev->reg_map[0];
162	device_attr.regh1 = (vxge_bus_res_t *) vdev->pdev->reg_map[1];
163	device_attr.regh2 = (vxge_bus_res_t *) vdev->pdev->reg_map[2];
164	device_attr.irqh = (pci_irq_h) vdev->config.isr_info[0].irq_handle;
165	device_attr.cfgh = vdev->pdev;
166	device_attr.pdev = vdev->pdev;
167
168	/* Initialize HAL Device */
169	status = vxge_hal_device_initialize((vxge_hal_device_h *) &hldev,
170	    &device_attr, vdev->device_config);
171	if (status != VXGE_HAL_OK) {
172		error_level = VXGE_FREE_ISR_RESOURCE;
173		device_printf(vdev->ndev, "hal device initialization failed\n");
174		goto _exit0;
175	}
176
177	vdev->devh = hldev;
178	vxge_hal_device_private_set(hldev, vdev);
179
180	if (vdev->is_privilaged) {
181		err = vxge_firmware_verify(vdev);
182		if (err != 0) {
183			vxge_dev_req_reboot = 1;
184			error_level = VXGE_FREE_TERMINATE_DEVICE;
185			goto _exit0;
186		}
187	}
188
189	/* Allocate memory for vpath */
190	vdev->vpaths = (vxge_vpath_t *)
191	    vxge_mem_alloc(vdev->no_of_vpath * sizeof(vxge_vpath_t));
192
193	if (vdev->vpaths == NULL) {
194		error_level = VXGE_FREE_TERMINATE_DEVICE;
195		device_printf(vdev->ndev, "vpath memory allocation failed\n");
196		goto _exit0;
197	}
198
199	vdev->no_of_func = 1;
200	if (vdev->is_privilaged) {
201
202		vxge_hal_func_mode_count(vdev->devh,
203		    vdev->config.hw_info.function_mode, &vdev->no_of_func);
204
205		vxge_bw_priority_config(vdev);
206	}
207
208	/* Initialize mutexes */
209	vxge_mutex_init(vdev);
210
211	/* Initialize Media */
212	vxge_media_init(vdev);
213
214	err = vxge_ifp_setup(ndev);
215	if (err != 0) {
216		error_level = VXGE_FREE_MEDIA;
217		device_printf(vdev->ndev, "setting up interface failed\n");
218		goto _exit0;
219	}
220
221	err = vxge_isr_setup(vdev);
222	if (err != 0) {
223		error_level = VXGE_FREE_INTERFACE;
224		device_printf(vdev->ndev,
225		    "failed to associate interrupt handler with device\n");
226		goto _exit0;
227	}
228	vxge_device_hw_info_print(vdev);
229	vdev->is_active = TRUE;
230
231_exit0:
232	if (error_level) {
233		vxge_free_resources(ndev, error_level);
234		err = ENXIO;
235	}
236
237	return (err);
238}
239
240/*
241 * vxge_detach
242 * Detaches driver from the Kernel subsystem
243 */
244int
245vxge_detach(device_t ndev)
246{
247	vxge_dev_t *vdev;
248
249	vdev = (vxge_dev_t *) device_get_softc(ndev);
250	if (vdev->is_active) {
251		vdev->is_active = FALSE;
252		vxge_stop(vdev);
253		vxge_free_resources(ndev, VXGE_FREE_ALL);
254	}
255
256	return (0);
257}
258
259/*
260 * vxge_shutdown
261 * To shutdown device before system shutdown
262 */
263int
264vxge_shutdown(device_t ndev)
265{
266	vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev);
267	vxge_stop(vdev);
268	return (0);
269}
270
271/*
272 * vxge_init
273 * Initialize the interface
274 */
275void
276vxge_init(void *vdev_ptr)
277{
278	vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr;
279
280	VXGE_DRV_LOCK(vdev);
281	vxge_init_locked(vdev);
282	VXGE_DRV_UNLOCK(vdev);
283}
284
/*
 * vxge_init_locked
 * Initialize the interface: open vpaths, program RTH and MTU, enable the
 * HAL device, interrupts and each vpath, then mark the ifnet RUNNING.
 * Caller must hold the driver lock.  On failure, everything brought up
 * here is torn down again via the _exit labels.
 */
void
vxge_init_locked(vxge_dev_t *vdev)
{
	int i, err = EINVAL;
	vxge_hal_device_t *hldev = vdev->devh;
	vxge_hal_status_e status = VXGE_HAL_OK;
	vxge_hal_vpath_h vpath_handle;

	ifnet_t ifp = vdev->ifp;

	/* If device is in running state, initializing is not required */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto _exit0;

	VXGE_DRV_LOCK_ASSERT(vdev);

	/* Opening vpaths */
	err = vxge_vpath_open(vdev);
	if (err != 0)
		goto _exit1;

	/* Program receive-side traffic hashing if enabled in config */
	if (vdev->config.rth_enable) {
		status = vxge_rth_config(vdev);
		if (status != VXGE_HAL_OK)
			goto _exit1;
	}

	/* Validate and program the current MTU on every open vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		/* check initial mtu before enabling the device */
		status = vxge_hal_device_mtu_check(vpath_handle, ifp->if_mtu);
		if (status != VXGE_HAL_OK) {
			/* NOTE(review): %ld assumes if_mtu is long-sized;
			 * confirm against struct ifnet on this branch. */
			device_printf(vdev->ndev,
			    "invalid mtu size %ld specified\n", ifp->if_mtu);
			goto _exit1;
		}

		status = vxge_hal_vpath_mtu_set(vpath_handle, ifp->if_mtu);
		if (status != VXGE_HAL_OK) {
			device_printf(vdev->ndev,
			    "setting mtu in device failed\n");
			goto _exit1;
		}
	}

	/* Enable HAL device */
	status = vxge_hal_device_enable(hldev);
	if (status != VXGE_HAL_OK) {
		device_printf(vdev->ndev, "failed to enable device\n");
		goto _exit1;
	}

	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX)
		vxge_msix_enable(vdev);

	/* Mirror the current checksum capabilities into if_hwassist */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);

	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		/* Enabling mcast for all vpath */
		vxge_hal_vpath_mcast_enable(vpath_handle);

		/* Enabling bcast for all vpath; a failure here is logged
		 * but does not abort initialization */
		status = vxge_hal_vpath_bcast_enable(vpath_handle);
		if (status != VXGE_HAL_OK)
			device_printf(vdev->ndev,
			    "can't enable bcast on vpath (%d)\n", i);
	}

	/* Enable interrupts */
	vxge_hal_device_intr_enable(vdev->devh);

	/* Reset per-vpath driver statistics, then bring each vpath up */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		bzero(&(vdev->vpaths[i].driver_stats),
		    sizeof(vxge_drv_stats_t));
		status = vxge_hal_vpath_enable(vpath_handle);
		if (status != VXGE_HAL_OK)
			goto _exit2;
	}

	/* Fixed settle delay after enabling the vpaths */
	vxge_os_mdelay(1000);

	/* Device is initialized */
	vdev->is_initialized = TRUE;

	/* Now inform the stack we're ready */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	goto _exit0;

_exit2:
	/* vpath enable failed: mask interrupts, disable the device ... */
	vxge_hal_device_intr_disable(vdev->devh);
	vxge_hal_device_disable(hldev);

_exit1:
	/* ... and close whatever vpaths were opened above */
	vxge_vpath_close(vdev);

_exit0:
	return;
}
406
/*
 * vxge_driver_init
 * Initializes HAL driver.  The HAL-wide initialization runs only for the
 * first device; subsequent devices just increment the reference count.
 * Returns VXGE_HAL_OK on success or the HAL error code.
 */
vxge_hal_status_e
vxge_driver_init(vxge_dev_t *vdev)
{
	/* NOTE(review): both structs live on the stack and are handed to
	 * the HAL; this assumes vxge_hal_driver_initialize() copies what
	 * it needs rather than keeping the pointers -- confirm in HAL. */
	vxge_hal_uld_cbs_t uld_callbacks;
	vxge_hal_driver_config_t driver_config;
	vxge_hal_status_e status = VXGE_HAL_OK;

	/* Initialize HAL driver */
	if (!vxge_dev_ref_count) {
		bzero(&uld_callbacks, sizeof(vxge_hal_uld_cbs_t));
		bzero(&driver_config, sizeof(vxge_hal_driver_config_t));

		/* Upper-layer callbacks the HAL invokes on events */
		uld_callbacks.link_up = vxge_link_up;
		uld_callbacks.link_down = vxge_link_down;
		uld_callbacks.crit_err = vxge_crit_error;
		uld_callbacks.sched_timer = NULL;
		uld_callbacks.xpak_alarm_log = NULL;

		status = vxge_hal_driver_initialize(&driver_config,
		    &uld_callbacks);
		if (status != VXGE_HAL_OK) {
			device_printf(vdev->ndev,
			    "failed to initialize driver\n");
			goto _exit0;
		}
	}
	vxge_hal_driver_debug_set(VXGE_TRACE);
	vxge_dev_ref_count++;

_exit0:
	return (status);
}
443
444/*
445 * vxge_driver_config
446 */
447int
448vxge_driver_config(vxge_dev_t *vdev)
449{
450	int i, err = 0;
451	char temp_buffer[30];
452
453	vxge_bw_info_t bw_info;
454
455	VXGE_GET_PARAM("hint.vxge.0.no_of_vpath", vdev->config,
456	    no_of_vpath, VXGE_DEFAULT_USER_HARDCODED);
457
458	if (vdev->config.no_of_vpath == VXGE_DEFAULT_USER_HARDCODED)
459		vdev->config.no_of_vpath = mp_ncpus;
460
461	if (vdev->config.no_of_vpath <= 0) {
462		err = EINVAL;
463		device_printf(vdev->ndev,
464		    "Failed to load driver, \
465		    invalid config : \'no_of_vpath\'\n");
466		goto _exit0;
467	}
468
469	VXGE_GET_PARAM("hint.vxge.0.intr_coalesce", vdev->config,
470	    intr_coalesce, VXGE_DEFAULT_CONFIG_DISABLE);
471
472	VXGE_GET_PARAM("hint.vxge.0.rth_enable", vdev->config,
473	    rth_enable, VXGE_DEFAULT_CONFIG_ENABLE);
474
475	VXGE_GET_PARAM("hint.vxge.0.rth_bkt_sz", vdev->config,
476	    rth_bkt_sz, VXGE_DEFAULT_RTH_BUCKET_SIZE);
477
478	VXGE_GET_PARAM("hint.vxge.0.lro_enable", vdev->config,
479	    lro_enable, VXGE_DEFAULT_CONFIG_ENABLE);
480
481	VXGE_GET_PARAM("hint.vxge.0.tso_enable", vdev->config,
482	    tso_enable, VXGE_DEFAULT_CONFIG_ENABLE);
483
484	VXGE_GET_PARAM("hint.vxge.0.tx_steering", vdev->config,
485	    tx_steering, VXGE_DEFAULT_CONFIG_DISABLE);
486
487	VXGE_GET_PARAM("hint.vxge.0.msix_enable", vdev->config,
488	    intr_mode, VXGE_HAL_INTR_MODE_MSIX);
489
490	VXGE_GET_PARAM("hint.vxge.0.ifqmaxlen", vdev->config,
491	    ifq_maxlen, VXGE_DEFAULT_CONFIG_IFQ_MAXLEN);
492
493	VXGE_GET_PARAM("hint.vxge.0.port_mode", vdev->config,
494	    port_mode, VXGE_DEFAULT_CONFIG_VALUE);
495
496	if (vdev->config.port_mode == VXGE_DEFAULT_USER_HARDCODED)
497		vdev->config.port_mode = VXGE_DEFAULT_CONFIG_VALUE;
498
499	VXGE_GET_PARAM("hint.vxge.0.l2_switch", vdev->config,
500	    l2_switch, VXGE_DEFAULT_CONFIG_VALUE);
501
502	if (vdev->config.l2_switch == VXGE_DEFAULT_USER_HARDCODED)
503		vdev->config.l2_switch = VXGE_DEFAULT_CONFIG_VALUE;
504
505	VXGE_GET_PARAM("hint.vxge.0.fw_upgrade", vdev->config,
506	    fw_option, VXGE_FW_UPGRADE_ALL);
507
508	VXGE_GET_PARAM("hint.vxge.0.low_latency", vdev->config,
509	    low_latency, VXGE_DEFAULT_CONFIG_DISABLE);
510
511	VXGE_GET_PARAM("hint.vxge.0.func_mode", vdev->config,
512	    function_mode, VXGE_DEFAULT_CONFIG_VALUE);
513
514	if (vdev->config.function_mode == VXGE_DEFAULT_USER_HARDCODED)
515		vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE;
516
517	if (!(is_multi_func(vdev->config.function_mode) ||
518	    is_single_func(vdev->config.function_mode)))
519		vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE;
520
521	for (i = 0; i < VXGE_HAL_MAX_FUNCTIONS; i++) {
522
523		bw_info.func_id = i;
524
525		sprintf(temp_buffer, "hint.vxge.0.bandwidth_%d", i);
526		VXGE_GET_PARAM(temp_buffer, bw_info,
527		    bandwidth, VXGE_DEFAULT_USER_HARDCODED);
528
529		if (bw_info.bandwidth == VXGE_DEFAULT_USER_HARDCODED)
530			bw_info.bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT;
531
532		sprintf(temp_buffer, "hint.vxge.0.priority_%d", i);
533		VXGE_GET_PARAM(temp_buffer, bw_info,
534		    priority, VXGE_DEFAULT_USER_HARDCODED);
535
536		if (bw_info.priority == VXGE_DEFAULT_USER_HARDCODED)
537			bw_info.priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT;
538
539		vxge_os_memcpy(&vdev->config.bw_info[i], &bw_info,
540		    sizeof(vxge_bw_info_t));
541	}
542
543_exit0:
544	return (err);
545}
546
547/*
548 * vxge_stop
549 */
550void
551vxge_stop(vxge_dev_t *vdev)
552{
553	VXGE_DRV_LOCK(vdev);
554	vxge_stop_locked(vdev);
555	VXGE_DRV_UNLOCK(vdev);
556}
557
558/*
559 * vxge_stop_locked
560 * Common code for both stop and part of reset.
561 * disables device, interrupts and closes vpaths handle
562 */
563void
564vxge_stop_locked(vxge_dev_t *vdev)
565{
566	u64 adapter_status = 0;
567	vxge_hal_status_e status;
568	vxge_hal_device_t *hldev = vdev->devh;
569	ifnet_t ifp = vdev->ifp;
570
571	VXGE_DRV_LOCK_ASSERT(vdev);
572
573	/* If device is not in "Running" state, return */
574	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
575		return;
576
577	/* Set appropriate flags */
578	vdev->is_initialized = FALSE;
579	hldev->link_state = VXGE_HAL_LINK_NONE;
580	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
581	if_link_state_change(ifp, LINK_STATE_DOWN);
582
583	/* Disable interrupts */
584	vxge_hal_device_intr_disable(hldev);
585
586	/* Disable HAL device */
587	status = vxge_hal_device_disable(hldev);
588	if (status != VXGE_HAL_OK) {
589		vxge_hal_device_status(hldev, &adapter_status);
590		device_printf(vdev->ndev,
591		    "adapter status: 0x%llx\n", adapter_status);
592	}
593
594	/* reset vpaths */
595	vxge_vpath_reset(vdev);
596
597	vxge_os_mdelay(1000);
598
599	/* Close Vpaths */
600	vxge_vpath_close(vdev);
601}
602
603void
604vxge_send(ifnet_t ifp)
605{
606	vxge_vpath_t *vpath;
607	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
608
609	vpath = &(vdev->vpaths[0]);
610
611	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
612		if (VXGE_TX_TRYLOCK(vpath)) {
613			vxge_send_locked(ifp, vpath);
614			VXGE_TX_UNLOCK(vpath);
615		}
616	}
617}
618
619static inline void
620vxge_send_locked(ifnet_t ifp, vxge_vpath_t *vpath)
621{
622	mbuf_t m_head = NULL;
623	vxge_dev_t *vdev = vpath->vdev;
624
625	VXGE_TX_LOCK_ASSERT(vpath);
626
627	if ((!vdev->is_initialized) ||
628	    ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
629	    IFF_DRV_RUNNING))
630		return;
631
632	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
633		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
634		if (m_head == NULL)
635			break;
636
637		if (vxge_xmit(ifp, vpath, &m_head)) {
638			if (m_head == NULL)
639				break;
640
641			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
642			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
643			VXGE_DRV_STATS(vpath, tx_again);
644			break;
645		}
646		/* Send a copy of the frame to the BPF listener */
647		ETHER_BPF_MTAP(ifp, m_head);
648	}
649}
650
651#if __FreeBSD_version >= 800000
652
653int
654vxge_mq_send(ifnet_t ifp, mbuf_t m_head)
655{
656	int i = 0, err = 0;
657
658	vxge_vpath_t *vpath;
659	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
660
661	if (vdev->config.tx_steering) {
662		i = vxge_vpath_get(vdev, m_head);
663	} else if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
664		i = m_head->m_pkthdr.flowid % vdev->no_of_vpath;
665	}
666
667	vpath = &(vdev->vpaths[i]);
668	if (VXGE_TX_TRYLOCK(vpath)) {
669		err = vxge_mq_send_locked(ifp, vpath, m_head);
670		VXGE_TX_UNLOCK(vpath);
671	} else
672		err = drbr_enqueue(ifp, vpath->br, m_head);
673
674	return (err);
675}
676
/*
 * vxge_mq_send_locked
 * Multiqueue transmit for one vpath; caller holds the vpath Tx lock.
 * Enqueues m_head through the vpath buf_ring (to preserve ordering) and
 * drains the ring into the Tx fifo.  Returns 0 or a drbr/xmit errno.
 */
static inline int
vxge_mq_send_locked(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t m_head)
{
	int err = 0;
	mbuf_t next = NULL;
	vxge_dev_t *vdev = vpath->vdev;

	VXGE_TX_LOCK_ASSERT(vpath);

	/* Not initialized / not running: just park the frame in the ring */
	if ((!vdev->is_initialized) ||
	    ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)) {
		err = drbr_enqueue(ifp, vpath->br, m_head);
		goto _exit0;
	}
	/* Pick the next frame: ring backlog first, so ordering is kept */
	if (m_head == NULL) {
		next = drbr_dequeue(ifp, vpath->br);
	} else if (drbr_needs_enqueue(ifp, vpath->br)) {
		if ((err = drbr_enqueue(ifp, vpath->br, m_head)) != 0)
			goto _exit0;
		next = drbr_dequeue(ifp, vpath->br);
	} else
		next = m_head;

	/* Process the queue */
	while (next != NULL) {
		if ((err = vxge_xmit(ifp, vpath, &next)) != 0) {
			if (next == NULL)
				break;

			/* Could not post: requeue and retry on completion */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			err = drbr_enqueue(ifp, vpath->br, next);
			VXGE_DRV_STATS(vpath, tx_again);
			break;
		}
		/* Account the sent frame in the legacy ifnet counters */
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		next = drbr_dequeue(ifp, vpath->br);
	}

_exit0:
	return (err);
}
727
728void
729vxge_mq_qflush(ifnet_t ifp)
730{
731	int i;
732	mbuf_t m_head;
733	vxge_vpath_t *vpath;
734
735	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
736
737	for (i = 0; i < vdev->no_of_vpath; i++) {
738		vpath = &(vdev->vpaths[i]);
739		if (!vpath->handle)
740			continue;
741
742		VXGE_TX_LOCK(vpath);
743		while ((m_head = buf_ring_dequeue_sc(vpath->br)) != NULL)
744			vxge_free_packet(m_head);
745
746		VXGE_TX_UNLOCK(vpath);
747	}
748	if_qflush(ifp);
749}
750#endif
751
/*
 * vxge_xmit
 * Posts one frame to the vpath's Tx fifo: reserves a descriptor, DMA-maps
 * the mbuf chain (coalescing if needed), fills in VLAN/TSO/checksum
 * offload bits and hands the descriptor to the hardware.
 * Returns 0 on success; ENOBUFS on descriptor shortage; a mapping errno
 * otherwise (in which case *m_headp may have been freed).
 */
static inline int
vxge_xmit(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t *m_headp)
{
	int err, num_segs = 0;
	u32 txdl_avail, dma_index, tagged = 0;

	dma_addr_t dma_addr;
	bus_size_t dma_sizes;

	void *dtr_priv;
	vxge_txdl_priv_t *txdl_priv;
	vxge_hal_txdl_h txdlh;
	vxge_hal_status_e status;
	vxge_dev_t *vdev = vpath->vdev;

	VXGE_DRV_STATS(vpath, tx_xmit);

	/* Refuse early when the fifo is nearly out of descriptors */
	txdl_avail = vxge_hal_fifo_free_txdl_count_get(vpath->handle);
	if (txdl_avail < VXGE_TX_LOW_THRESHOLD) {

		VXGE_DRV_STATS(vpath, tx_low_dtr_cnt);
		err = ENOBUFS;
		goto _exit0;
	}

	/* Reserve descriptors */
	status = vxge_hal_fifo_txdl_reserve(vpath->handle, &txdlh, &dtr_priv);
	if (status != VXGE_HAL_OK) {
		VXGE_DRV_STATS(vpath, tx_reserve_failed);
		err = ENOBUFS;
		goto _exit0;
	}

	/* Update Tx private structure for this descriptor */
	txdl_priv = (vxge_txdl_priv_t *) dtr_priv;

	/*
	 * Map the packet for DMA.
	 * Returns number of segments through num_segs.
	 */
	err = vxge_dma_mbuf_coalesce(vpath->dma_tag_tx, txdl_priv->dma_map,
	    m_headp, txdl_priv->dma_buffers, &num_segs);

	/* Track the high-water mark of segments per frame */
	if (vpath->driver_stats.tx_max_frags < num_segs)
		vpath->driver_stats.tx_max_frags = num_segs;

	if (err == ENOMEM) {
		/* Mapping resources exhausted: release the descriptor and
		 * leave the mbuf with the caller for a later retry */
		VXGE_DRV_STATS(vpath, tx_no_dma_setup);
		vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
		goto _exit0;
	} else if (err != 0) {
		/* Unrecoverable mapping error: drop the frame */
		vxge_free_packet(*m_headp);
		VXGE_DRV_STATS(vpath, tx_no_dma_setup);
		vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
		goto _exit0;
	}

	txdl_priv->mbuf_pkt = *m_headp;

	/* Set VLAN tag in descriptor only if this packet has it */
	if ((*m_headp)->m_flags & M_VLANTAG)
		vxge_hal_fifo_txdl_vlan_set(txdlh,
		    (*m_headp)->m_pkthdr.ether_vtag);

	/* Set descriptor buffer for header and each fragment/segment */
	for (dma_index = 0; dma_index < num_segs; dma_index++) {

		dma_sizes = txdl_priv->dma_buffers[dma_index].ds_len;
		dma_addr = htole64(txdl_priv->dma_buffers[dma_index].ds_addr);

		vxge_hal_fifo_txdl_buffer_set(vpath->handle, txdlh, dma_index,
		    dma_addr, dma_sizes);
	}

	/* Pre-write Sync of mapping */
	bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map,
	    BUS_DMASYNC_PREWRITE);

	/* TSO: program the segment size for large-send offload */
	if ((*m_headp)->m_pkthdr.csum_flags & CSUM_TSO) {
		if ((*m_headp)->m_pkthdr.tso_segsz) {
			VXGE_DRV_STATS(vpath, tx_tso);
			vxge_hal_fifo_txdl_lso_set(txdlh,
			    VXGE_HAL_FIFO_LSO_FRM_ENCAP_AUTO,
			    (*m_headp)->m_pkthdr.tso_segsz);
		}
	}

	/* Checksum offload (IPv4/TCP/UDP) when any hwassist bit is on */
	if (ifp->if_hwassist > 0) {
		vxge_hal_fifo_txdl_cksum_set_bits(txdlh,
		    VXGE_HAL_FIFO_TXD_TX_CKO_IPV4_EN |
		    VXGE_HAL_FIFO_TXD_TX_CKO_TCP_EN |
		    VXGE_HAL_FIFO_TXD_TX_CKO_UDP_EN);
	}

	/* Titan-1A with firmware >= 1.8.0 posts descriptors "tagged" */
	if ((vxge_hal_device_check_id(vdev->devh) == VXGE_HAL_CARD_TITAN_1A) &&
	    (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)))
		tagged = 1;

	vxge_hal_fifo_txdl_post(vpath->handle, txdlh, tagged);
	VXGE_DRV_STATS(vpath, tx_posted);

_exit0:
	/* err is 0 here on the success path (from vxge_dma_mbuf_coalesce) */
	return (err);
}
857
858/*
859 * vxge_tx_replenish
860 * Allocate buffers and set them into descriptors for later use
861 */
862/* ARGSUSED */
863vxge_hal_status_e
864vxge_tx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
865    void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen)
866{
867	int err = 0;
868
869	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
870	vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
871
872	err = bus_dmamap_create(vpath->dma_tag_tx, BUS_DMA_NOWAIT,
873	    &txdl_priv->dma_map);
874
875	return ((err == 0) ? VXGE_HAL_OK : VXGE_HAL_FAIL);
876}
877
/*
 * vxge_tx_compl
 * If the interrupt is due to Tx completion, free the sent buffer.
 * Invoked by the HAL with the first completed descriptor; the do/while
 * then walks any further completions on the same vpath.
 */
vxge_hal_status_e
vxge_tx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
    void *dtr_priv, vxge_hal_fifo_tcode_e t_code, void *userdata)
{
	vxge_hal_status_e status = VXGE_HAL_OK;

	vxge_txdl_priv_t *txdl_priv;
	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
	vxge_dev_t *vdev = vpath->vdev;

	ifnet_t ifp = vdev->ifp;

	VXGE_TX_LOCK(vpath);

	/*
	 * For each completed descriptor
	 * Get private structure, free buffer, do unmapping, and free descriptor
	 */

	do {
		VXGE_DRV_STATS(vpath, tx_compl);
		if (t_code != VXGE_HAL_FIFO_T_CODE_OK) {
			/* Hardware reported a Tx error for this descriptor */
			device_printf(vdev->ndev, "tx transfer code %d\n",
			    t_code);

			ifp->if_oerrors++;
			VXGE_DRV_STATS(vpath, tx_tcode);
			vxge_hal_fifo_handle_tcode(vpath_handle, txdlh, t_code);
		}
		/* NOTE(review): counted as a sent packet even on a bad
		 * t_code (only if_oerrors distinguishes) -- intentional? */
		ifp->if_opackets++;
		txdl_priv = (vxge_txdl_priv_t *) dtr_priv;

		/* Release the DMA mapping (map itself is reused) */
		bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map);

		vxge_free_packet(txdl_priv->mbuf_pkt);
		vxge_hal_fifo_txdl_free(vpath->handle, txdlh);

	} while (vxge_hal_fifo_txdl_next_completed(vpath_handle, &txdlh,
	    &dtr_priv, &t_code) == VXGE_HAL_OK);


	/* Descriptors were freed: allow transmit to resume */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	VXGE_TX_UNLOCK(vpath);

	return (status);
}
928
929/* ARGSUSED */
930void
931vxge_tx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
932    void *dtr_priv, vxge_hal_txdl_state_e state,
933    void *userdata, vxge_hal_reopen_e reopen)
934{
935	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
936	vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv;
937
938	if (state != VXGE_HAL_TXDL_STATE_POSTED)
939		return;
940
941	if (txdl_priv != NULL) {
942		bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map,
943		    BUS_DMASYNC_POSTWRITE);
944
945		bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map);
946		bus_dmamap_destroy(vpath->dma_tag_tx, txdl_priv->dma_map);
947		vxge_free_packet(txdl_priv->mbuf_pkt);
948	}
949
950	/* Free the descriptor */
951	vxge_hal_fifo_txdl_free(vpath->handle, txdlh);
952}
953
954/*
955 * vxge_rx_replenish
956 * Allocate buffers and set them into descriptors for later use
957 */
958/* ARGSUSED */
959vxge_hal_status_e
960vxge_rx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
961    void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen)
962{
963	int err = 0;
964	vxge_hal_status_e status = VXGE_HAL_OK;
965
966	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
967	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
968
969	/* Create DMA map for these descriptors */
970	err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT,
971	    &rxd_priv->dma_map);
972	if (err == 0) {
973		if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) {
974			bus_dmamap_destroy(vpath->dma_tag_rx,
975			    rxd_priv->dma_map);
976			status = VXGE_HAL_FAIL;
977		}
978	}
979
980	return (status);
981}
982
/*
 * vxge_rx_compl
 * Receive-completion handler: for each completed descriptor, detach the
 * filled mbuf, attach a fresh buffer, repost the descriptor, and pass the
 * packet (checksum-annotated, optionally via LRO) up the stack.
 */
vxge_hal_status_e
vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
    void *dtr_priv, u8 t_code, void *userdata)
{
	mbuf_t mbuf_up;

	vxge_rxd_priv_t *rxd_priv;
	vxge_hal_ring_rxd_info_t ext_info;
	vxge_hal_status_e status = VXGE_HAL_OK;

	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
	vxge_dev_t *vdev = vpath->vdev;

	struct lro_entry *queued = NULL;
	struct lro_ctrl *lro = &vpath->lro;

	/* get the interface pointer */
	ifnet_t ifp = vdev->ifp;

	do {
		/* Interface stopped mid-stream: repost and bail out */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			vxge_hal_ring_rxd_post(vpath_handle, rxdh);
			status = VXGE_HAL_FAIL;
			break;
		}

		VXGE_DRV_STATS(vpath, rx_compl);
		rxd_priv = (vxge_rxd_priv_t *) dtr_priv;

		/* Gets details of mbuf i.e., packet length */
		vxge_rx_rxd_1b_get(vpath, rxdh, dtr_priv);

		/*
		 * Prepare one buffer to send it to upper layer Since upper
		 * layer frees the buffer do not use rxd_priv->mbuf_pkt.
		 * Meanwhile prepare a new buffer, do mapping, use with the
		 * current descriptor and post descriptor back to ring vpath
		 */
		mbuf_up = rxd_priv->mbuf_pkt;
		if (t_code != VXGE_HAL_RING_RXD_T_CODE_OK) {

			ifp->if_ierrors++;
			VXGE_DRV_STATS(vpath, rx_tcode);
			status = vxge_hal_ring_handle_tcode(vpath_handle,
			    rxdh, t_code);

			/*
			 * If transfer code is not for unknown protocols and
			 * vxge_hal_device_handle_tcode is NOT returned
			 * VXGE_HAL_OK
			 * drop this packet and increment rx_tcode stats
			 */
			if ((status != VXGE_HAL_OK) &&
			    (t_code != VXGE_HAL_RING_T_CODE_L3_PKT_ERR)) {

				vxge_free_packet(mbuf_up);
				vxge_hal_ring_rxd_post(vpath_handle, rxdh);
				continue;
			}
		}

		if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) {
			/*
			 * If unable to allocate buffer, post descriptor back
			 * to vpath for future processing of same packet.
			 */
			vxge_hal_ring_rxd_post(vpath_handle, rxdh);
			continue;
		}

		/* Get the extended information */
		vxge_hal_ring_rxd_1b_info_get(vpath_handle, rxdh, &ext_info);

		/* post descriptor with newly allocated mbuf back to vpath */
		vxge_hal_ring_rxd_post(vpath_handle, rxdh);
		vpath->rxd_posted++;

		/* Ring the doorbell every VXGE_RXD_REPLENISH_COUNT posts */
		if (vpath->rxd_posted % VXGE_RXD_REPLENISH_COUNT == 0)
			vxge_hal_ring_rxd_post_post_db(vpath_handle);

		/*
		 * Set successfully computed checksums in the mbuf.
		 * Leave the rest to the stack to be reverified.
		 */
		vxge_rx_checksum(ext_info, mbuf_up);

#if __FreeBSD_version >= 800000
		/* Tag the mbuf with its receive queue for flow affinity */
		M_HASHTYPE_SET(mbuf_up, M_HASHTYPE_OPAQUE);
		mbuf_up->m_pkthdr.flowid = vpath->vp_index;
#endif
		/* Post-Read sync for buffers */
		bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
		    BUS_DMASYNC_POSTREAD);

		vxge_rx_input(ifp, mbuf_up, vpath);

	} while (vxge_hal_ring_rxd_next_completed(vpath_handle, &rxdh,
	    &dtr_priv, &t_code) == VXGE_HAL_OK);

	/* Flush any outstanding LRO work */
	if (vpath->lro_enable && vpath->lro.lro_cnt) {
		while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
	}

	return (status);
}
1095
1096static inline void
1097vxge_rx_input(ifnet_t ifp, mbuf_t mbuf_up, vxge_vpath_t *vpath)
1098{
1099	if (vpath->lro_enable && vpath->lro.lro_cnt) {
1100		if (tcp_lro_rx(&vpath->lro, mbuf_up, 0) == 0)
1101			return;
1102	}
1103	(*ifp->if_input) (ifp, mbuf_up);
1104}
1105
1106static inline void
1107vxge_rx_checksum(vxge_hal_ring_rxd_info_t ext_info, mbuf_t mbuf_up)
1108{
1109
1110	if (!(ext_info.proto & VXGE_HAL_FRAME_PROTO_IP_FRAG) &&
1111	    (ext_info.proto & VXGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
1112	    ext_info.l3_cksum_valid && ext_info.l4_cksum_valid) {
1113
1114		mbuf_up->m_pkthdr.csum_data = htons(0xffff);
1115
1116		mbuf_up->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1117		mbuf_up->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1118		mbuf_up->m_pkthdr.csum_flags |=
1119		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1120
1121	} else {
1122
1123		if (ext_info.vlan) {
1124			mbuf_up->m_pkthdr.ether_vtag = ext_info.vlan;
1125			mbuf_up->m_flags |= M_VLANTAG;
1126		}
1127	}
1128}
1129
1130/*
1131 * vxge_rx_term During unload terminate and free all descriptors
1132 * @vpath_handle Rx vpath Handle @rxdh Rx Descriptor Handle @state Descriptor
1133 * State @userdata Per-adapter Data @reopen vpath open/reopen option
1134 */
1135/* ARGSUSED */
1136void
1137vxge_rx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
1138    void *dtr_priv, vxge_hal_rxd_state_e state, void *userdata,
1139    vxge_hal_reopen_e reopen)
1140{
1141	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
1142	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1143
1144	if (state != VXGE_HAL_RXD_STATE_POSTED)
1145		return;
1146
1147	if (rxd_priv != NULL) {
1148		bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1149		    BUS_DMASYNC_POSTREAD);
1150		bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);
1151		bus_dmamap_destroy(vpath->dma_tag_rx, rxd_priv->dma_map);
1152
1153		vxge_free_packet(rxd_priv->mbuf_pkt);
1154	}
1155	/* Free the descriptor */
1156	vxge_hal_ring_rxd_free(vpath_handle, rxdh);
1157}
1158
1159/*
1160 * vxge_rx_rxd_1b_get
1161 * Get descriptors of packet to send up
1162 */
1163void
1164vxge_rx_rxd_1b_get(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
1165{
1166	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1167	mbuf_t mbuf_up = rxd_priv->mbuf_pkt;
1168
1169	/* Retrieve data from completed descriptor */
1170	vxge_hal_ring_rxd_1b_get(vpath->handle, rxdh, &rxd_priv->dma_addr[0],
1171	    (u32 *) &rxd_priv->dma_sizes[0]);
1172
1173	/* Update newly created buffer to be sent up with packet length */
1174	mbuf_up->m_len = rxd_priv->dma_sizes[0];
1175	mbuf_up->m_pkthdr.len = rxd_priv->dma_sizes[0];
1176	mbuf_up->m_next = NULL;
1177}
1178
1179/*
1180 * vxge_rx_rxd_1b_set
1181 * Allocates new mbufs to be placed into descriptors
1182 */
1183int
1184vxge_rx_rxd_1b_set(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
1185{
1186	int num_segs, err = 0;
1187
1188	mbuf_t mbuf_pkt;
1189	bus_dmamap_t dma_map;
1190	bus_dma_segment_t dma_buffers[1];
1191	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
1192
1193	vxge_dev_t *vdev = vpath->vdev;
1194
1195	mbuf_pkt = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, vdev->rx_mbuf_sz);
1196	if (!mbuf_pkt) {
1197		err = ENOBUFS;
1198		VXGE_DRV_STATS(vpath, rx_no_buf);
1199		device_printf(vdev->ndev, "out of memory to allocate mbuf\n");
1200		goto _exit0;
1201	}
1202
1203	/* Update mbuf's length, packet length and receive interface */
1204	mbuf_pkt->m_len = vdev->rx_mbuf_sz;
1205	mbuf_pkt->m_pkthdr.len = vdev->rx_mbuf_sz;
1206	mbuf_pkt->m_pkthdr.rcvif = vdev->ifp;
1207
1208	/* Load DMA map */
1209	err = vxge_dma_mbuf_coalesce(vpath->dma_tag_rx, vpath->extra_dma_map,
1210	    &mbuf_pkt, dma_buffers, &num_segs);
1211	if (err != 0) {
1212		VXGE_DRV_STATS(vpath, rx_map_fail);
1213		vxge_free_packet(mbuf_pkt);
1214		goto _exit0;
1215	}
1216
1217	/* Unload DMA map of mbuf in current descriptor */
1218	bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1219	    BUS_DMASYNC_POSTREAD);
1220	bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);
1221
1222	/* Update descriptor private data */
1223	dma_map = rxd_priv->dma_map;
1224	rxd_priv->mbuf_pkt = mbuf_pkt;
1225	rxd_priv->dma_addr[0] = htole64(dma_buffers->ds_addr);
1226	rxd_priv->dma_map = vpath->extra_dma_map;
1227	vpath->extra_dma_map = dma_map;
1228
1229	/* Pre-Read/Write sync */
1230	bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
1231	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1232
1233	/* Set descriptor buffer */
1234	vxge_hal_ring_rxd_1b_set(rxdh, rxd_priv->dma_addr[0], vdev->rx_mbuf_sz);
1235
1236_exit0:
1237	return (err);
1238}
1239
1240/*
1241 * vxge_link_up
1242 * Callback for Link-up indication from HAL
1243 */
1244/* ARGSUSED */
1245void
1246vxge_link_up(vxge_hal_device_h devh, void *userdata)
1247{
1248	int i;
1249	vxge_vpath_t *vpath;
1250	vxge_hal_device_hw_info_t *hw_info;
1251
1252	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1253	hw_info = &vdev->config.hw_info;
1254
1255	ifnet_t ifp = vdev->ifp;
1256
1257	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1258		for (i = 0; i < vdev->no_of_vpath; i++) {
1259			vpath = &(vdev->vpaths[i]);
1260			vxge_hal_vpath_tti_ci_set(vpath->handle);
1261			vxge_hal_vpath_rti_ci_set(vpath->handle);
1262		}
1263	}
1264
1265	if (vdev->is_privilaged && (hw_info->ports > 1)) {
1266		vxge_active_port_update(vdev);
1267		device_printf(vdev->ndev,
1268		    "Active Port : %lld\n", vdev->active_port);
1269	}
1270
1271	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1272	if_link_state_change(ifp, LINK_STATE_UP);
1273}
1274
1275/*
1276 * vxge_link_down
1277 * Callback for Link-down indication from HAL
1278 */
1279/* ARGSUSED */
1280void
1281vxge_link_down(vxge_hal_device_h devh, void *userdata)
1282{
1283	int i;
1284	vxge_vpath_t *vpath;
1285	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1286
1287	ifnet_t ifp = vdev->ifp;
1288
1289	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1290		for (i = 0; i < vdev->no_of_vpath; i++) {
1291			vpath = &(vdev->vpaths[i]);
1292			vxge_hal_vpath_tti_ci_reset(vpath->handle);
1293			vxge_hal_vpath_rti_ci_reset(vpath->handle);
1294		}
1295	}
1296
1297	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1298	if_link_state_change(ifp, LINK_STATE_DOWN);
1299}
1300
1301/*
1302 * vxge_reset
1303 */
1304void
1305vxge_reset(vxge_dev_t *vdev)
1306{
1307	if (!vdev->is_initialized)
1308		return;
1309
1310	VXGE_DRV_LOCK(vdev);
1311	vxge_stop_locked(vdev);
1312	vxge_init_locked(vdev);
1313	VXGE_DRV_UNLOCK(vdev);
1314}
1315
1316/*
1317 * vxge_crit_error
1318 * Callback for Critical error indication from HAL
1319 */
1320/* ARGSUSED */
1321void
1322vxge_crit_error(vxge_hal_device_h devh, void *userdata,
1323    vxge_hal_event_e type, u64 serr_data)
1324{
1325	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
1326	ifnet_t ifp = vdev->ifp;
1327
1328	switch (type) {
1329	case VXGE_HAL_EVENT_SERR:
1330	case VXGE_HAL_EVENT_KDFCCTL:
1331	case VXGE_HAL_EVENT_CRITICAL:
1332		vxge_hal_device_intr_disable(vdev->devh);
1333		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1334		if_link_state_change(ifp, LINK_STATE_DOWN);
1335		break;
1336	default:
1337		break;
1338	}
1339}
1340
1341/*
1342 * vxge_ifp_setup
1343 */
1344int
1345vxge_ifp_setup(device_t ndev)
1346{
1347	ifnet_t ifp;
1348	int i, j, err = 0;
1349
1350	vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev);
1351
1352	for (i = 0, j = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
1353		if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
1354			continue;
1355
1356		if (j >= vdev->no_of_vpath)
1357			break;
1358
1359		vdev->vpaths[j].vp_id = i;
1360		vdev->vpaths[j].vp_index = j;
1361		vdev->vpaths[j].vdev = vdev;
1362		vdev->vpaths[j].is_configured = TRUE;
1363
1364		vxge_os_memcpy((u8 *) vdev->vpaths[j].mac_addr,
1365		    (u8 *) (vdev->config.hw_info.mac_addrs[i]),
1366		    (size_t) ETHER_ADDR_LEN);
1367		j++;
1368	}
1369
1370	/* Get interface ifnet structure for this Ether device */
1371	ifp = if_alloc(IFT_ETHER);
1372	if (ifp == NULL) {
1373		device_printf(vdev->ndev,
1374		    "memory allocation for ifnet failed\n");
1375		err = ENXIO;
1376		goto _exit0;
1377	}
1378	vdev->ifp = ifp;
1379
1380	/* Initialize interface ifnet structure */
1381	if_initname(ifp, device_get_name(ndev), device_get_unit(ndev));
1382
1383	ifp->if_baudrate = VXGE_BAUDRATE;
1384	ifp->if_init = vxge_init;
1385	ifp->if_softc = vdev;
1386	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1387	ifp->if_ioctl = vxge_ioctl;
1388	ifp->if_start = vxge_send;
1389
1390#if __FreeBSD_version >= 800000
1391	ifp->if_transmit = vxge_mq_send;
1392	ifp->if_qflush = vxge_mq_qflush;
1393#endif
1394	ifp->if_snd.ifq_drv_maxlen = max(vdev->config.ifq_maxlen, ifqmaxlen);
1395	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1396	/* IFQ_SET_READY(&ifp->if_snd); */
1397
1398	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1399
1400	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
1401	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1402	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1403
1404	if (vdev->config.tso_enable)
1405		vxge_tso_config(vdev);
1406
1407	if (vdev->config.lro_enable)
1408		ifp->if_capabilities |= IFCAP_LRO;
1409
1410	ifp->if_capenable = ifp->if_capabilities;
1411
1412	strlcpy(vdev->ndev_name, device_get_nameunit(ndev),
1413	    sizeof(vdev->ndev_name));
1414
1415	/* Attach the interface */
1416	ether_ifattach(ifp, vdev->vpaths[0].mac_addr);
1417
1418_exit0:
1419	return (err);
1420}
1421
1422/*
1423 * vxge_isr_setup
1424 * Register isr functions
1425 */
1426int
1427vxge_isr_setup(vxge_dev_t *vdev)
1428{
1429	int i, irq_rid, err = 0;
1430	vxge_vpath_t *vpath;
1431
1432	void *isr_func_arg;
1433	void (*isr_func_ptr) (void *);
1434
1435	switch (vdev->config.intr_mode) {
1436	case VXGE_HAL_INTR_MODE_IRQLINE:
1437		err = bus_setup_intr(vdev->ndev,
1438		    vdev->config.isr_info[0].irq_res,
1439		    (INTR_TYPE_NET | INTR_MPSAFE),
1440		    vxge_isr_filter, vxge_isr_line, vdev,
1441		    &vdev->config.isr_info[0].irq_handle);
1442		break;
1443
1444	case VXGE_HAL_INTR_MODE_MSIX:
1445		for (i = 0; i < vdev->intr_count; i++) {
1446
1447			irq_rid = vdev->config.isr_info[i].irq_rid;
1448			vpath = &vdev->vpaths[irq_rid / 4];
1449
1450			if ((irq_rid % 4) == 2) {
1451				isr_func_ptr = vxge_isr_msix;
1452				isr_func_arg = (void *) vpath;
1453			} else if ((irq_rid % 4) == 3) {
1454				isr_func_ptr = vxge_isr_msix_alarm;
1455				isr_func_arg = (void *) vpath;
1456			} else
1457				break;
1458
1459			err = bus_setup_intr(vdev->ndev,
1460			    vdev->config.isr_info[i].irq_res,
1461			    (INTR_TYPE_NET | INTR_MPSAFE), NULL,
1462			    (void *) isr_func_ptr, (void *) isr_func_arg,
1463			    &vdev->config.isr_info[i].irq_handle);
1464			if (err != 0)
1465				break;
1466		}
1467
1468		if (err != 0) {
1469			/* Teardown interrupt handler */
1470			while (--i > 0)
1471				bus_teardown_intr(vdev->ndev,
1472				    vdev->config.isr_info[i].irq_res,
1473				    vdev->config.isr_info[i].irq_handle);
1474		}
1475		break;
1476	}
1477
1478	return (err);
1479}
1480
1481/*
1482 * vxge_isr_filter
1483 * ISR filter function - filter interrupts from other shared devices
1484 */
1485int
1486vxge_isr_filter(void *handle)
1487{
1488	u64 val64 = 0;
1489	vxge_dev_t *vdev = (vxge_dev_t *) handle;
1490	__hal_device_t *hldev = (__hal_device_t *) vdev->devh;
1491
1492	vxge_hal_common_reg_t *common_reg =
1493	(vxge_hal_common_reg_t *) (hldev->common_reg);
1494
1495	val64 = vxge_os_pio_mem_read64(vdev->pdev, (vdev->devh)->regh0,
1496	    &common_reg->titan_general_int_status);
1497
1498	return ((val64) ? FILTER_SCHEDULE_THREAD : FILTER_STRAY);
1499}
1500
1501/*
1502 * vxge_isr_line
1503 * Interrupt service routine for Line interrupts
1504 */
1505void
1506vxge_isr_line(void *vdev_ptr)
1507{
1508	vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr;
1509
1510	vxge_hal_device_handle_irq(vdev->devh, 0);
1511}
1512
1513void
1514vxge_isr_msix(void *vpath_ptr)
1515{
1516	u32 got_rx = 0;
1517	u32 got_tx = 0;
1518
1519	__hal_virtualpath_t *hal_vpath;
1520	vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr;
1521	vxge_dev_t *vdev = vpath->vdev;
1522	hal_vpath = ((__hal_vpath_handle_t *) vpath->handle)->vpath;
1523
1524	VXGE_DRV_STATS(vpath, isr_msix);
1525	VXGE_HAL_DEVICE_STATS_SW_INFO_TRAFFIC_INTR(vdev->devh);
1526
1527	vxge_hal_vpath_mf_msix_mask(vpath->handle, vpath->msix_vec);
1528
1529	/* processing rx */
1530	vxge_hal_vpath_poll_rx(vpath->handle, &got_rx);
1531
1532	/* processing tx */
1533	if (hal_vpath->vp_config->fifo.enable) {
1534		vxge_intr_coalesce_tx(vpath);
1535		vxge_hal_vpath_poll_tx(vpath->handle, &got_tx);
1536	}
1537
1538	vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec);
1539}
1540
1541void
1542vxge_isr_msix_alarm(void *vpath_ptr)
1543{
1544	int i;
1545	vxge_hal_status_e status = VXGE_HAL_OK;
1546
1547	vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr;
1548	vxge_dev_t *vdev = vpath->vdev;
1549
1550	VXGE_HAL_DEVICE_STATS_SW_INFO_NOT_TRAFFIC_INTR(vdev->devh);
1551
1552	/* Process alarms in each vpath */
1553	for (i = 0; i < vdev->no_of_vpath; i++) {
1554
1555		vpath = &(vdev->vpaths[i]);
1556		vxge_hal_vpath_mf_msix_mask(vpath->handle,
1557		    vpath->msix_vec_alarm);
1558		status = vxge_hal_vpath_alarm_process(vpath->handle, 0);
1559		if ((status == VXGE_HAL_ERR_EVENT_SLOT_FREEZE) ||
1560		    (status == VXGE_HAL_ERR_EVENT_SERR)) {
1561			device_printf(vdev->ndev,
1562			    "processing alarms urecoverable error %x\n",
1563			    status);
1564
1565			/* Stop the driver */
1566			vdev->is_initialized = FALSE;
1567			break;
1568		}
1569		vxge_hal_vpath_mf_msix_unmask(vpath->handle,
1570		    vpath->msix_vec_alarm);
1571	}
1572}
1573
1574/*
1575 * vxge_msix_enable
1576 */
1577vxge_hal_status_e
1578vxge_msix_enable(vxge_dev_t *vdev)
1579{
1580	int i, first_vp_id, msix_id;
1581
1582	vxge_vpath_t *vpath;
1583	vxge_hal_status_e status = VXGE_HAL_OK;
1584
1585	/*
1586	 * Unmasking and Setting MSIX vectors before enabling interrupts
1587	 * tim[] : 0 - Tx ## 1 - Rx ## 2 - UMQ-DMQ ## 0 - BITMAP
1588	 */
1589	int tim[4] = {0, 1, 0, 0};
1590
1591	for (i = 0; i < vdev->no_of_vpath; i++) {
1592
1593		vpath = vdev->vpaths + i;
1594		first_vp_id = vdev->vpaths[0].vp_id;
1595
1596		msix_id = vpath->vp_id * VXGE_HAL_VPATH_MSIX_ACTIVE;
1597		tim[1] = vpath->msix_vec = msix_id + 1;
1598
1599		vpath->msix_vec_alarm = first_vp_id *
1600		    VXGE_HAL_VPATH_MSIX_ACTIVE + VXGE_HAL_VPATH_MSIX_ALARM_ID;
1601
1602		status = vxge_hal_vpath_mf_msix_set(vpath->handle,
1603		    tim, VXGE_HAL_VPATH_MSIX_ALARM_ID);
1604
1605		if (status != VXGE_HAL_OK) {
1606			device_printf(vdev->ndev,
1607			    "failed to set msix vectors to vpath\n");
1608			break;
1609		}
1610
1611		vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec);
1612		vxge_hal_vpath_mf_msix_unmask(vpath->handle,
1613		    vpath->msix_vec_alarm);
1614	}
1615
1616	return (status);
1617}
1618
1619/*
1620 * vxge_media_init
1621 * Initializes, adds and sets media
1622 */
1623void
1624vxge_media_init(vxge_dev_t *vdev)
1625{
1626	ifmedia_init(&vdev->media,
1627	    IFM_IMASK, vxge_media_change, vxge_media_status);
1628
1629	/* Add supported media */
1630	ifmedia_add(&vdev->media,
1631	    IFM_ETHER | vdev->ifm_optics | IFM_FDX,
1632	    0, NULL);
1633
1634	/* Set media */
1635	ifmedia_add(&vdev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1636	ifmedia_set(&vdev->media, IFM_ETHER | IFM_AUTO);
1637}
1638
1639/*
1640 * vxge_media_status
1641 * Callback  for interface media settings
1642 */
1643void
1644vxge_media_status(ifnet_t ifp, struct ifmediareq *ifmr)
1645{
1646	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
1647	vxge_hal_device_t *hldev = vdev->devh;
1648
1649	ifmr->ifm_status = IFM_AVALID;
1650	ifmr->ifm_active = IFM_ETHER;
1651
1652	/* set link state */
1653	if (vxge_hal_device_link_state_get(hldev) == VXGE_HAL_LINK_UP) {
1654		ifmr->ifm_status |= IFM_ACTIVE;
1655		ifmr->ifm_active |= vdev->ifm_optics | IFM_FDX;
1656		if_link_state_change(ifp, LINK_STATE_UP);
1657	}
1658}
1659
1660/*
1661 * vxge_media_change
1662 * Media change driver callback
1663 */
1664int
1665vxge_media_change(ifnet_t ifp)
1666{
1667	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
1668	struct ifmedia *ifmediap = &vdev->media;
1669
1670	return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER ? EINVAL : 0);
1671}
1672
1673/*
1674 * Allocate PCI resources
1675 */
1676int
1677vxge_alloc_resources(vxge_dev_t *vdev)
1678{
1679	int err = 0;
1680	vxge_pci_info_t *pci_info = NULL;
1681	vxge_free_resources_e error_level = VXGE_FREE_NONE;
1682
1683	device_t ndev = vdev->ndev;
1684
1685	/* Allocate Buffer for HAL Device Configuration */
1686	vdev->device_config = (vxge_hal_device_config_t *)
1687	    vxge_mem_alloc(sizeof(vxge_hal_device_config_t));
1688
1689	if (!vdev->device_config) {
1690		err = ENOMEM;
1691		error_level = VXGE_DISABLE_PCI_BUSMASTER;
1692		device_printf(vdev->ndev,
1693		    "failed to allocate memory for device config\n");
1694		goto _exit0;
1695	}
1696
1697
1698	pci_info = (vxge_pci_info_t *) vxge_mem_alloc(sizeof(vxge_pci_info_t));
1699	if (!pci_info) {
1700		error_level = VXGE_FREE_DEVICE_CONFIG;
1701		err = ENOMEM;
1702		device_printf(vdev->ndev,
1703		    "failed to allocate memory for pci info\n");
1704		goto _exit0;
1705	}
1706	pci_info->ndev = ndev;
1707	vdev->pdev = pci_info;
1708
1709	err = vxge_alloc_bar_resources(vdev, 0);
1710	if (err != 0) {
1711		error_level = VXGE_FREE_BAR0;
1712		goto _exit0;
1713	}
1714
1715	err = vxge_alloc_bar_resources(vdev, 1);
1716	if (err != 0) {
1717		error_level = VXGE_FREE_BAR1;
1718		goto _exit0;
1719	}
1720
1721	err = vxge_alloc_bar_resources(vdev, 2);
1722	if (err != 0)
1723		error_level = VXGE_FREE_BAR2;
1724
1725_exit0:
1726	if (error_level)
1727		vxge_free_resources(ndev, error_level);
1728
1729	return (err);
1730}
1731
1732/*
1733 * vxge_alloc_bar_resources
1734 * Allocates BAR resources
1735 */
1736int
1737vxge_alloc_bar_resources(vxge_dev_t *vdev, int i)
1738{
1739	int err = 0;
1740	int res_id = 0;
1741	vxge_pci_info_t *pci_info = vdev->pdev;
1742
1743	res_id = PCIR_BAR((i == 0) ? 0 : (i * 2));
1744
1745	pci_info->bar_info[i] =
1746	    bus_alloc_resource_any(vdev->ndev,
1747	    SYS_RES_MEMORY, &res_id, RF_ACTIVE);
1748
1749	if (pci_info->bar_info[i] == NULL) {
1750		device_printf(vdev->ndev,
1751		    "failed to allocate memory for bus resources\n");
1752		err = ENOMEM;
1753		goto _exit0;
1754	}
1755
1756	pci_info->reg_map[i] =
1757	    (vxge_bus_res_t *) vxge_mem_alloc(sizeof(vxge_bus_res_t));
1758
1759	if (pci_info->reg_map[i] == NULL) {
1760		device_printf(vdev->ndev,
1761		    "failed to allocate memory bar resources\n");
1762		err = ENOMEM;
1763		goto _exit0;
1764	}
1765
1766	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_tag =
1767	    rman_get_bustag(pci_info->bar_info[i]);
1768
1769	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_handle =
1770	    rman_get_bushandle(pci_info->bar_info[i]);
1771
1772	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bar_start_addr =
1773	    pci_info->bar_info[i];
1774
1775	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_res_len =
1776	    rman_get_size(pci_info->bar_info[i]);
1777
1778_exit0:
1779	return (err);
1780}
1781
1782/*
1783 * vxge_alloc_isr_resources
1784 */
1785int
1786vxge_alloc_isr_resources(vxge_dev_t *vdev)
1787{
1788	int i, err = 0, irq_rid;
1789	int msix_vec_reqd, intr_count, msix_count;
1790
1791	int intr_mode = VXGE_HAL_INTR_MODE_IRQLINE;
1792
1793	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
1794		/* MSI-X messages supported by device */
1795		intr_count = pci_msix_count(vdev->ndev);
1796		if (intr_count) {
1797
1798			msix_vec_reqd = 4 * vdev->no_of_vpath;
1799			if (intr_count >= msix_vec_reqd) {
1800				intr_count = msix_vec_reqd;
1801
1802				err = pci_alloc_msix(vdev->ndev, &intr_count);
1803				if (err == 0)
1804					intr_mode = VXGE_HAL_INTR_MODE_MSIX;
1805			}
1806
1807			if ((err != 0) || (intr_count < msix_vec_reqd)) {
1808				device_printf(vdev->ndev, "Unable to allocate "
1809				    "msi/x vectors switching to INTA mode\n");
1810			}
1811		}
1812	}
1813
1814	err = 0;
1815	vdev->intr_count = 0;
1816	vdev->config.intr_mode = intr_mode;
1817
1818	switch (vdev->config.intr_mode) {
1819	case VXGE_HAL_INTR_MODE_IRQLINE:
1820		vdev->config.isr_info[0].irq_rid = 0;
1821		vdev->config.isr_info[0].irq_res =
1822		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1823		    &vdev->config.isr_info[0].irq_rid,
1824		    (RF_SHAREABLE | RF_ACTIVE));
1825
1826		if (vdev->config.isr_info[0].irq_res == NULL) {
1827			device_printf(vdev->ndev,
1828			    "failed to allocate line interrupt resource\n");
1829			err = ENOMEM;
1830			goto _exit0;
1831		}
1832		vdev->intr_count++;
1833		break;
1834
1835	case VXGE_HAL_INTR_MODE_MSIX:
1836		msix_count = 0;
1837		for (i = 0; i < vdev->no_of_vpath; i++) {
1838			irq_rid = i * 4;
1839
1840			vdev->config.isr_info[msix_count].irq_rid = irq_rid + 2;
1841			vdev->config.isr_info[msix_count].irq_res =
1842			    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1843			    &vdev->config.isr_info[msix_count].irq_rid,
1844			    (RF_SHAREABLE | RF_ACTIVE));
1845
1846			if (vdev->config.isr_info[msix_count].irq_res == NULL) {
1847				device_printf(vdev->ndev,
1848				    "allocating bus resource (rid %d) failed\n",
1849				    vdev->config.isr_info[msix_count].irq_rid);
1850				err = ENOMEM;
1851				goto _exit0;
1852			}
1853
1854			vdev->intr_count++;
1855			err = bus_bind_intr(vdev->ndev,
1856			    vdev->config.isr_info[msix_count].irq_res,
1857			    (i % mp_ncpus));
1858			if (err != 0)
1859				break;
1860
1861			msix_count++;
1862		}
1863
1864		vdev->config.isr_info[msix_count].irq_rid = 3;
1865		vdev->config.isr_info[msix_count].irq_res =
1866		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
1867		    &vdev->config.isr_info[msix_count].irq_rid,
1868		    (RF_SHAREABLE | RF_ACTIVE));
1869
1870		if (vdev->config.isr_info[msix_count].irq_res == NULL) {
1871			device_printf(vdev->ndev,
1872			    "allocating bus resource (rid %d) failed\n",
1873			    vdev->config.isr_info[msix_count].irq_rid);
1874			err = ENOMEM;
1875			goto _exit0;
1876		}
1877
1878		vdev->intr_count++;
1879		err = bus_bind_intr(vdev->ndev,
1880		    vdev->config.isr_info[msix_count].irq_res, (i % mp_ncpus));
1881
1882		break;
1883	}
1884
1885	vdev->device_config->intr_mode = vdev->config.intr_mode;
1886
1887_exit0:
1888	return (err);
1889}
1890
1891/*
1892 * vxge_free_resources
1893 * Undo what-all we did during load/attach
1894 */
1895void
1896vxge_free_resources(device_t ndev, vxge_free_resources_e vxge_free_resource)
1897{
1898	int i;
1899	vxge_dev_t *vdev;
1900
1901	vdev = (vxge_dev_t *) device_get_softc(ndev);
1902
1903	switch (vxge_free_resource) {
1904	case VXGE_FREE_ALL:
1905		for (i = 0; i < vdev->intr_count; i++) {
1906			bus_teardown_intr(ndev,
1907			    vdev->config.isr_info[i].irq_res,
1908			    vdev->config.isr_info[i].irq_handle);
1909		}
1910		/* FALLTHROUGH */
1911
1912	case VXGE_FREE_INTERFACE:
1913		ether_ifdetach(vdev->ifp);
1914		bus_generic_detach(ndev);
1915		if_free(vdev->ifp);
1916		/* FALLTHROUGH */
1917
1918	case VXGE_FREE_MEDIA:
1919		ifmedia_removeall(&vdev->media);
1920		/* FALLTHROUGH */
1921
1922	case VXGE_FREE_MUTEX:
1923		vxge_mutex_destroy(vdev);
1924		/* FALLTHROUGH */
1925
1926	case VXGE_FREE_VPATH:
1927		vxge_mem_free(vdev->vpaths,
1928		    vdev->no_of_vpath * sizeof(vxge_vpath_t));
1929		/* FALLTHROUGH */
1930
1931	case VXGE_FREE_TERMINATE_DEVICE:
1932		if (vdev->devh != NULL) {
1933			vxge_hal_device_private_set(vdev->devh, 0);
1934			vxge_hal_device_terminate(vdev->devh);
1935		}
1936		/* FALLTHROUGH */
1937
1938	case VXGE_FREE_ISR_RESOURCE:
1939		vxge_free_isr_resources(vdev);
1940		/* FALLTHROUGH */
1941
1942	case VXGE_FREE_BAR2:
1943		vxge_free_bar_resources(vdev, 2);
1944		/* FALLTHROUGH */
1945
1946	case VXGE_FREE_BAR1:
1947		vxge_free_bar_resources(vdev, 1);
1948		/* FALLTHROUGH */
1949
1950	case VXGE_FREE_BAR0:
1951		vxge_free_bar_resources(vdev, 0);
1952		/* FALLTHROUGH */
1953
1954	case VXGE_FREE_PCI_INFO:
1955		vxge_mem_free(vdev->pdev, sizeof(vxge_pci_info_t));
1956		/* FALLTHROUGH */
1957
1958	case VXGE_FREE_DEVICE_CONFIG:
1959		vxge_mem_free(vdev->device_config,
1960		    sizeof(vxge_hal_device_config_t));
1961		/* FALLTHROUGH */
1962
1963	case VXGE_DISABLE_PCI_BUSMASTER:
1964		pci_disable_busmaster(ndev);
1965		/* FALLTHROUGH */
1966
1967	case VXGE_FREE_TERMINATE_DRIVER:
1968		if (vxge_dev_ref_count) {
1969			--vxge_dev_ref_count;
1970			if (0 == vxge_dev_ref_count)
1971				vxge_hal_driver_terminate();
1972		}
1973		/* FALLTHROUGH */
1974
1975	default:
1976	case VXGE_FREE_NONE:
1977		break;
1978		/* NOTREACHED */
1979	}
1980}
1981
1982void
1983vxge_free_isr_resources(vxge_dev_t *vdev)
1984{
1985	int i;
1986
1987	switch (vdev->config.intr_mode) {
1988	case VXGE_HAL_INTR_MODE_IRQLINE:
1989		if (vdev->config.isr_info[0].irq_res) {
1990			bus_release_resource(vdev->ndev, SYS_RES_IRQ,
1991			    vdev->config.isr_info[0].irq_rid,
1992			    vdev->config.isr_info[0].irq_res);
1993
1994			vdev->config.isr_info[0].irq_res = NULL;
1995		}
1996		break;
1997
1998	case VXGE_HAL_INTR_MODE_MSIX:
1999		for (i = 0; i < vdev->intr_count; i++) {
2000			if (vdev->config.isr_info[i].irq_res) {
2001				bus_release_resource(vdev->ndev, SYS_RES_IRQ,
2002				    vdev->config.isr_info[i].irq_rid,
2003				    vdev->config.isr_info[i].irq_res);
2004
2005				vdev->config.isr_info[i].irq_res = NULL;
2006			}
2007		}
2008
2009		if (vdev->intr_count)
2010			pci_release_msi(vdev->ndev);
2011
2012		break;
2013	}
2014}
2015
2016void
2017vxge_free_bar_resources(vxge_dev_t *vdev, int i)
2018{
2019	int res_id = 0;
2020	vxge_pci_info_t *pci_info = vdev->pdev;
2021
2022	res_id = PCIR_BAR((i == 0) ? 0 : (i * 2));
2023
2024	if (pci_info->bar_info[i])
2025		bus_release_resource(vdev->ndev, SYS_RES_MEMORY,
2026		    res_id, pci_info->bar_info[i]);
2027
2028	vxge_mem_free(pci_info->reg_map[i], sizeof(vxge_bus_res_t));
2029}
2030
2031/*
2032 * vxge_init_mutex
2033 * Initializes mutexes used in driver
2034 */
2035void
2036vxge_mutex_init(vxge_dev_t *vdev)
2037{
2038	int i;
2039
2040	snprintf(vdev->mtx_drv_name, sizeof(vdev->mtx_drv_name),
2041	    "%s_drv", vdev->ndev_name);
2042
2043	mtx_init(&vdev->mtx_drv, vdev->mtx_drv_name,
2044	    MTX_NETWORK_LOCK, MTX_DEF);
2045
2046	for (i = 0; i < vdev->no_of_vpath; i++) {
2047		snprintf(vdev->vpaths[i].mtx_tx_name,
2048		    sizeof(vdev->vpaths[i].mtx_tx_name), "%s_tx_%d",
2049		    vdev->ndev_name, i);
2050
2051		mtx_init(&vdev->vpaths[i].mtx_tx,
2052		    vdev->vpaths[i].mtx_tx_name, NULL, MTX_DEF);
2053	}
2054}
2055
2056/*
2057 * vxge_mutex_destroy
2058 * Destroys mutexes used in driver
2059 */
2060void
2061vxge_mutex_destroy(vxge_dev_t *vdev)
2062{
2063	int i;
2064
2065	for (i = 0; i < vdev->no_of_vpath; i++)
2066		VXGE_TX_LOCK_DESTROY(&(vdev->vpaths[i]));
2067
2068	VXGE_DRV_LOCK_DESTROY(vdev);
2069}
2070
2071/*
2072 * vxge_rth_config
2073 */
2074vxge_hal_status_e
2075vxge_rth_config(vxge_dev_t *vdev)
2076{
2077	int i;
2078	vxge_hal_vpath_h vpath_handle;
2079	vxge_hal_rth_hash_types_t hash_types;
2080	vxge_hal_status_e status = VXGE_HAL_OK;
2081	u8 mtable[256] = {0};
2082
2083	/* Filling matable with bucket-to-vpath mapping */
2084	vdev->config.rth_bkt_sz = VXGE_DEFAULT_RTH_BUCKET_SIZE;
2085
2086	for (i = 0; i < (1 << vdev->config.rth_bkt_sz); i++)
2087		mtable[i] = i % vdev->no_of_vpath;
2088
2089	/* Fill RTH hash types */
2090	hash_types.hash_type_tcpipv4_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV4;
2091	hash_types.hash_type_tcpipv6_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6;
2092	hash_types.hash_type_tcpipv6ex_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6_EX;
2093	hash_types.hash_type_ipv4_en = VXGE_HAL_RING_HASH_TYPE_IPV4;
2094	hash_types.hash_type_ipv6_en = VXGE_HAL_RING_HASH_TYPE_IPV6;
2095	hash_types.hash_type_ipv6ex_en = VXGE_HAL_RING_HASH_TYPE_IPV6_EX;
2096
2097	/* set indirection table, bucket-to-vpath mapping */
2098	status = vxge_hal_vpath_rts_rth_itable_set(vdev->vpath_handles,
2099	    vdev->no_of_vpath, mtable,
2100	    ((u32) (1 << vdev->config.rth_bkt_sz)));
2101
2102	if (status != VXGE_HAL_OK) {
2103		device_printf(vdev->ndev, "rth configuration failed\n");
2104		goto _exit0;
2105	}
2106	for (i = 0; i < vdev->no_of_vpath; i++) {
2107		vpath_handle = vxge_vpath_handle_get(vdev, i);
2108		if (!vpath_handle)
2109			continue;
2110
2111		status = vxge_hal_vpath_rts_rth_set(vpath_handle,
2112		    RTH_ALG_JENKINS,
2113		    &hash_types, vdev->config.rth_bkt_sz, TRUE);
2114		if (status != VXGE_HAL_OK) {
2115			device_printf(vdev->ndev,
2116			    "rth configuration failed for vpath (%d)\n",
2117			    vdev->vpaths[i].vp_id);
2118			break;
2119		}
2120	}
2121
2122_exit0:
2123	return (status);
2124}
2125
2126/*
2127 * vxge_vpath_config
2128 * Sets HAL parameter values from kenv
2129 */
2130void
2131vxge_vpath_config(vxge_dev_t *vdev)
2132{
2133	int i;
2134	u32 no_of_vpath = 0;
2135	vxge_hal_vp_config_t *vp_config;
2136	vxge_hal_device_config_t *device_config = vdev->device_config;
2137
2138	device_config->debug_level = VXGE_TRACE;
2139	device_config->debug_mask = VXGE_COMPONENT_ALL;
2140	device_config->device_poll_millis = VXGE_DEFAULT_DEVICE_POLL_MILLIS;
2141
2142	vdev->config.no_of_vpath =
2143	    min(vdev->config.no_of_vpath, vdev->max_supported_vpath);
2144
2145	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
2146		vp_config = &(device_config->vp_config[i]);
2147		vp_config->fifo.enable = VXGE_HAL_FIFO_DISABLE;
2148		vp_config->ring.enable = VXGE_HAL_RING_DISABLE;
2149	}
2150
2151	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
2152		if (no_of_vpath >= vdev->config.no_of_vpath)
2153			break;
2154
2155		if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
2156			continue;
2157
2158		no_of_vpath++;
2159		vp_config = &(device_config->vp_config[i]);
2160		vp_config->mtu = VXGE_HAL_DEFAULT_MTU;
2161		vp_config->ring.enable = VXGE_HAL_RING_ENABLE;
2162		vp_config->ring.post_mode = VXGE_HAL_RING_POST_MODE_DOORBELL;
2163		vp_config->ring.buffer_mode = VXGE_HAL_RING_RXD_BUFFER_MODE_1;
2164		vp_config->ring.ring_length =
2165		    vxge_ring_length_get(VXGE_HAL_RING_RXD_BUFFER_MODE_1);
2166		vp_config->ring.scatter_mode = VXGE_HAL_RING_SCATTER_MODE_A;
2167		vp_config->rpa_all_vid_en = VXGE_DEFAULT_ALL_VID_ENABLE;
2168		vp_config->rpa_strip_vlan_tag = VXGE_DEFAULT_STRIP_VLAN_TAG;
2169		vp_config->rpa_ucast_all_addr_en =
2170		    VXGE_HAL_VPATH_RPA_UCAST_ALL_ADDR_DISABLE;
2171
2172		vp_config->rti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
2173		vp_config->rti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
2174		vp_config->rti.util_sel =
2175		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
2176
2177		vp_config->rti.uec_a = VXGE_DEFAULT_RTI_RX_UFC_A;
2178		vp_config->rti.uec_b = VXGE_DEFAULT_RTI_RX_UFC_B;
2179		vp_config->rti.uec_c = VXGE_DEFAULT_RTI_RX_UFC_C;
2180		vp_config->rti.uec_d = VXGE_DEFAULT_RTI_RX_UFC_D;
2181
2182		vp_config->rti.urange_a = VXGE_DEFAULT_RTI_RX_URANGE_A;
2183		vp_config->rti.urange_b = VXGE_DEFAULT_RTI_RX_URANGE_B;
2184		vp_config->rti.urange_c = VXGE_DEFAULT_RTI_RX_URANGE_C;
2185
2186		vp_config->rti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
2187		vp_config->rti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;
2188
2189		vp_config->rti.btimer_val =
2190		    (VXGE_DEFAULT_RTI_BTIMER_VAL * 1000) / 272;
2191		vp_config->rti.rtimer_val =
2192		    (VXGE_DEFAULT_RTI_RTIMER_VAL * 1000) / 272;
2193		vp_config->rti.ltimer_val =
2194		    (VXGE_DEFAULT_RTI_LTIMER_VAL * 1000) / 272;
2195
2196		if ((no_of_vpath > 1) && (VXGE_DEFAULT_CONFIG_MQ_ENABLE == 0))
2197			continue;
2198
2199		vp_config->fifo.enable = VXGE_HAL_FIFO_ENABLE;
2200		vp_config->fifo.max_aligned_frags =
2201		    VXGE_DEFAULT_FIFO_ALIGNED_FRAGS;
2202
2203		vp_config->tti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
2204		vp_config->tti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
2205		vp_config->tti.util_sel =
2206		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
2207
2208		vp_config->tti.uec_a = VXGE_DEFAULT_TTI_TX_UFC_A;
2209		vp_config->tti.uec_b = VXGE_DEFAULT_TTI_TX_UFC_B;
2210		vp_config->tti.uec_c = VXGE_DEFAULT_TTI_TX_UFC_C;
2211		vp_config->tti.uec_d = VXGE_DEFAULT_TTI_TX_UFC_D;
2212
2213		vp_config->tti.urange_a = VXGE_DEFAULT_TTI_TX_URANGE_A;
2214		vp_config->tti.urange_b = VXGE_DEFAULT_TTI_TX_URANGE_B;
2215		vp_config->tti.urange_c = VXGE_DEFAULT_TTI_TX_URANGE_C;
2216
2217		vp_config->tti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
2218		vp_config->tti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;
2219
2220		vp_config->tti.btimer_val =
2221		    (VXGE_DEFAULT_TTI_BTIMER_VAL * 1000) / 272;
2222		vp_config->tti.rtimer_val =
2223		    (VXGE_DEFAULT_TTI_RTIMER_VAL * 1000) / 272;
2224		vp_config->tti.ltimer_val =
2225		    (VXGE_DEFAULT_TTI_LTIMER_VAL * 1000) / 272;
2226	}
2227
2228	vdev->no_of_vpath = no_of_vpath;
2229
2230	if (vdev->no_of_vpath == 1)
2231		vdev->config.tx_steering = 0;
2232
2233	if (vdev->config.rth_enable && (vdev->no_of_vpath > 1)) {
2234		device_config->rth_en = VXGE_HAL_RTH_ENABLE;
2235		device_config->rth_it_type = VXGE_HAL_RTH_IT_TYPE_MULTI_IT;
2236	}
2237
2238	vdev->config.rth_enable = device_config->rth_en;
2239}
2240
2241/*
2242 * vxge_vpath_cb_fn
2243 * Virtual path Callback function
2244 */
2245/* ARGSUSED */
static vxge_hal_status_e
vxge_vpath_cb_fn(vxge_hal_client_h client_handle, vxge_hal_up_msg_h msgh,
    vxge_hal_message_type_e msg_type, vxge_hal_obj_id_t obj_id,
    vxge_hal_result_e result, vxge_hal_opaque_handle_t *opaque_handle)
{
	/*
	 * No-op placeholder passed to vxge_hal_vpath_open(); the driver
	 * does not act on HAL up-messages, it only acknowledges them.
	 */
	return (VXGE_HAL_OK);
}
2253
2254/*
2255 * vxge_vpath_open
2256 */
int
vxge_vpath_open(vxge_dev_t *vdev)
{
	/*
	 * Open every configured virtual path: build the HAL fifo/ring
	 * attributes, create per-vpath DMA tags, allocate the Tx buf_ring,
	 * open the HAL vpath and seed per-vpath interrupt-coalescing and
	 * LRO state.
	 *
	 * Returns 0 on success or an errno on the first failure.  On
	 * failure the loop stops immediately; resources created for
	 * earlier vpaths are left for vxge_vpath_close() to release.
	 */
	int i, err = EINVAL;
	u64 func_id;

	vxge_vpath_t *vpath;
	vxge_hal_vpath_attr_t vpath_attr;
	vxge_hal_status_e status = VXGE_HAL_OK;
	struct lro_ctrl *lro = NULL;

	bzero(&vpath_attr, sizeof(vxge_hal_vpath_attr_t));

	for (i = 0; i < vdev->no_of_vpath; i++) {

		vpath = &(vdev->vpaths[i]);
		lro = &vpath->lro;

		/* Vpath vpath_attr: FIFO (Tx) callbacks + per-txdl scratch */
		vpath_attr.vp_id = vpath->vp_id;
		vpath_attr.fifo_attr.callback = vxge_tx_compl;
		vpath_attr.fifo_attr.txdl_init = vxge_tx_replenish;
		vpath_attr.fifo_attr.txdl_term = vxge_tx_term;
		vpath_attr.fifo_attr.userdata = vpath;
		vpath_attr.fifo_attr.per_txdl_space = sizeof(vxge_txdl_priv_t);

		/* Vpath vpath_attr: Ring (Rx) callbacks + per-rxd scratch */
		vpath_attr.ring_attr.callback = vxge_rx_compl;
		vpath_attr.ring_attr.rxd_init = vxge_rx_replenish;
		vpath_attr.ring_attr.rxd_term = vxge_rx_term;
		vpath_attr.ring_attr.userdata = vpath;
		vpath_attr.ring_attr.per_rxd_space = sizeof(vxge_rxd_priv_t);

		err = vxge_dma_tags_create(vpath);
		if (err != 0) {
			device_printf(vdev->ndev,
			    "failed to create dma tags\n");
			break;
		}
#if __FreeBSD_version >= 800000
		vpath->br = buf_ring_alloc(VXGE_DEFAULT_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &vpath->mtx_tx);
		if (vpath->br == NULL) {
			err = ENOMEM;
			break;
		}
#endif
		status = vxge_hal_vpath_open(vdev->devh, &vpath_attr,
		    (vxge_hal_vpath_callback_f) vxge_vpath_cb_fn,
		    NULL, &vpath->handle);
		if (status != VXGE_HAL_OK) {
			device_printf(vdev->ndev,
			    "failed to open vpath (%d)\n", vpath->vp_id);
			err = EPERM;
			break;
		}
		vpath->is_open = TRUE;
		vdev->vpath_handles[i] = vpath->handle;

		/* baseline for adaptive coalescing timers */
		vpath->tx_ticks = ticks;
		vpath->rx_ticks = ticks;

		vpath->tti_rtimer_val = VXGE_DEFAULT_TTI_RTIMER_VAL;
		vpath->rti_rtimer_val = VXGE_DEFAULT_RTI_RTIMER_VAL;

		vpath->tx_intr_coalesce = vdev->config.intr_coalesce;
		vpath->rx_intr_coalesce = vdev->config.intr_coalesce;

		func_id = vdev->config.hw_info.func_id;

		/*
		 * In low-latency mode a high-priority function disables
		 * Tx coalescing so completions are signalled immediately.
		 */
		if (vdev->config.low_latency &&
		    (vdev->config.bw_info[func_id].priority ==
			VXGE_DEFAULT_VPATH_PRIORITY_HIGH)) {
			vpath->tx_intr_coalesce = 0;
		}

		if (vdev->ifp->if_capenable & IFCAP_LRO) {
			err = tcp_lro_init(lro);
			if (err != 0) {
				device_printf(vdev->ndev,
				    "LRO Initialization failed!\n");
				break;
			}
			vpath->lro_enable = TRUE;
			lro->ifp = vdev->ifp;
		}
	}

	return (err);
}
2347
2348void
2349vxge_tso_config(vxge_dev_t *vdev)
2350{
2351	u32 func_id, priority;
2352	vxge_hal_status_e status = VXGE_HAL_OK;
2353
2354	vdev->ifp->if_capabilities |= IFCAP_TSO4;
2355
2356	status = vxge_bw_priority_get(vdev, NULL);
2357	if (status == VXGE_HAL_OK) {
2358
2359		func_id = vdev->config.hw_info.func_id;
2360		priority = vdev->config.bw_info[func_id].priority;
2361
2362		if (priority != VXGE_DEFAULT_VPATH_PRIORITY_HIGH)
2363			vdev->ifp->if_capabilities &= ~IFCAP_TSO4;
2364	}
2365
2366#if __FreeBSD_version >= 800000
2367	if (vdev->ifp->if_capabilities & IFCAP_TSO4)
2368		vdev->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
2369#endif
2370
2371}
2372
vxge_hal_status_e
vxge_bw_priority_get(vxge_dev_t *vdev, vxge_bw_info_t *bw_info)
{
	/*
	 * Fetch bandwidth/priority settings for a function.
	 *
	 * bw_info == NULL: query this device's own function and cache the
	 * result in vdev->config.bw_info[].  bw_info != NULL: query the
	 * function named by bw_info->func_id and return the result through
	 * bw_info (rejected for func_id > 0 in single-function mode).
	 *
	 * Firmware 1.8.0+ exposes a direct per-VF query; older firmware
	 * requires resolving the function's vpath list and querying the
	 * first vpath.
	 */
	u32 priority, bandwidth;
	u32 vpath_count;

	u64 func_id, func_mode, vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS];
	vxge_hal_status_e status = VXGE_HAL_OK;

	func_id = vdev->config.hw_info.func_id;
	if (bw_info) {
		func_id = bw_info->func_id;
		func_mode = vdev->config.hw_info.function_mode;
		if ((is_single_func(func_mode)) && (func_id > 0))
			return (VXGE_HAL_FAIL);
	}

	if (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)) {

		/* direct query supported by FW 1.8.0 and later */
		status = vxge_hal_vf_rx_bw_get(vdev->devh,
		    func_id, &bandwidth, &priority);

	} else {

		/* legacy FW: go through the function's first vpath */
		status = vxge_hal_get_vpath_list(vdev->devh,
		    func_id, vpath_list, &vpath_count);

		if (status == VXGE_HAL_OK) {
			status = vxge_hal_bw_priority_get(vdev->devh,
			    vpath_list[0], &bandwidth, &priority);
		}
	}

	if (status == VXGE_HAL_OK) {
		if (bw_info) {
			bw_info->priority = priority;
			bw_info->bandwidth = bandwidth;
		} else {
			vdev->config.bw_info[func_id].priority = priority;
			vdev->config.bw_info[func_id].bandwidth = bandwidth;
		}
	}

	return (status);
}
2418
2419/*
2420 * close vpaths
2421 */
2422void
2423vxge_vpath_close(vxge_dev_t *vdev)
2424{
2425	int i;
2426	vxge_vpath_t *vpath;
2427
2428	for (i = 0; i < vdev->no_of_vpath; i++) {
2429
2430		vpath = &(vdev->vpaths[i]);
2431		if (vpath->handle)
2432			vxge_hal_vpath_close(vpath->handle);
2433
2434#if __FreeBSD_version >= 800000
2435		if (vpath->br != NULL)
2436			buf_ring_free(vpath->br, M_DEVBUF);
2437#endif
2438		/* Free LRO memory */
2439		if (vpath->lro_enable)
2440			tcp_lro_free(&vpath->lro);
2441
2442		if (vpath->dma_tag_rx) {
2443			bus_dmamap_destroy(vpath->dma_tag_rx,
2444			    vpath->extra_dma_map);
2445			bus_dma_tag_destroy(vpath->dma_tag_rx);
2446		}
2447
2448		if (vpath->dma_tag_tx)
2449			bus_dma_tag_destroy(vpath->dma_tag_tx);
2450
2451		vpath->handle = NULL;
2452		vpath->is_open = FALSE;
2453	}
2454}
2455
2456/*
2457 * reset vpaths
2458 */
2459void
2460vxge_vpath_reset(vxge_dev_t *vdev)
2461{
2462	int i;
2463	vxge_hal_vpath_h vpath_handle;
2464	vxge_hal_status_e status = VXGE_HAL_OK;
2465
2466	for (i = 0; i < vdev->no_of_vpath; i++) {
2467		vpath_handle = vxge_vpath_handle_get(vdev, i);
2468		if (!vpath_handle)
2469			continue;
2470
2471		status = vxge_hal_vpath_reset(vpath_handle);
2472		if (status != VXGE_HAL_OK)
2473			device_printf(vdev->ndev,
2474			    "failed to reset vpath :%d\n", i);
2475	}
2476}
2477
2478static inline int
2479vxge_vpath_get(vxge_dev_t *vdev, mbuf_t mhead)
2480{
2481	struct tcphdr *th = NULL;
2482	struct udphdr *uh = NULL;
2483	struct ip *ip = NULL;
2484	struct ip6_hdr *ip6 = NULL;
2485	struct ether_vlan_header *eth = NULL;
2486	void *ulp = NULL;
2487
2488	int ehdrlen, iphlen = 0;
2489	u8 ipproto = 0;
2490	u16 etype, src_port, dst_port;
2491	u16 queue_len, counter = 0;
2492
2493	src_port = dst_port = 0;
2494	queue_len = vdev->no_of_vpath;
2495
2496	eth = mtod(mhead, struct ether_vlan_header *);
2497	if (eth->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2498		etype = ntohs(eth->evl_proto);
2499		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2500	} else {
2501		etype = ntohs(eth->evl_encap_proto);
2502		ehdrlen = ETHER_HDR_LEN;
2503	}
2504
2505	switch (etype) {
2506	case ETHERTYPE_IP:
2507		ip = (struct ip *) (mhead->m_data + ehdrlen);
2508		iphlen = ip->ip_hl << 2;
2509		ipproto = ip->ip_p;
2510		th = (struct tcphdr *) ((caddr_t)ip + iphlen);
2511		uh = (struct udphdr *) ((caddr_t)ip + iphlen);
2512		break;
2513
2514	case ETHERTYPE_IPV6:
2515		ip6 = (struct ip6_hdr *) (mhead->m_data + ehdrlen);
2516		iphlen = sizeof(struct ip6_hdr);
2517		ipproto = ip6->ip6_nxt;
2518
2519		ulp = mtod(mhead, char *) + iphlen;
2520		th = ((struct tcphdr *) (ulp));
2521		uh = ((struct udphdr *) (ulp));
2522		break;
2523
2524	default:
2525		break;
2526	}
2527
2528	switch (ipproto) {
2529	case IPPROTO_TCP:
2530		src_port = th->th_sport;
2531		dst_port = th->th_dport;
2532		break;
2533
2534	case IPPROTO_UDP:
2535		src_port = uh->uh_sport;
2536		dst_port = uh->uh_dport;
2537		break;
2538
2539	default:
2540		break;
2541	}
2542
2543	counter = (ntohs(src_port) + ntohs(dst_port)) &
2544	    vpath_selector[queue_len - 1];
2545
2546	if (counter >= queue_len)
2547		counter = queue_len - 1;
2548
2549	return (counter);
2550}
2551
2552static inline vxge_hal_vpath_h
2553vxge_vpath_handle_get(vxge_dev_t *vdev, int i)
2554{
2555	return (vdev->vpaths[i].is_open ? vdev->vpaths[i].handle : NULL);
2556}
2557
int
vxge_firmware_verify(vxge_dev_t *vdev)
{
	/*
	 * Verify (and if requested, change) persistent adapter settings:
	 * firmware image, PCIe function mode, L2 switch mode and port
	 * mode.  Any change that took effect requires a power cycle to
	 * activate, so success is reported as ENXIO and a warning is
	 * printed; 0 means nothing needed changing.
	 */
	int err = 0;
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	if (vdev->fw_upgrade) {
		status = vxge_firmware_upgrade(vdev);
		if (status == VXGE_HAL_OK) {
			/* upgraded: power cycle needed, skip other checks */
			err = ENXIO;
			goto _exit0;
		}
	}

	/* function mode: change only if configured and different */
	if ((vdev->config.function_mode != VXGE_DEFAULT_CONFIG_VALUE) &&
	    (vdev->config.hw_info.function_mode !=
	    (u64) vdev->config.function_mode)) {

		status = vxge_func_mode_set(vdev);
		if (status == VXGE_HAL_OK)
			err = ENXIO;
	}

	/* l2_switch configuration */
	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_L2SwitchEnabled,
	    &active_config);

	if (status == VXGE_HAL_OK) {
		vdev->l2_switch = active_config;
		if (vdev->config.l2_switch != VXGE_DEFAULT_CONFIG_VALUE) {
			if (vdev->config.l2_switch != active_config) {
				status = vxge_l2switch_mode_set(vdev);
				if (status == VXGE_HAL_OK)
					err = ENXIO;
			}
		}
	}

	/* port mode/failover settings only apply to dual-port adapters */
	if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
		if (vxge_port_mode_update(vdev) == ENXIO)
			err = ENXIO;
	}

_exit0:
	if (err == ENXIO)
		device_printf(vdev->ndev, "PLEASE POWER CYCLE THE SYSTEM\n");

	return (err);
}
2610
vxge_hal_status_e
vxge_firmware_upgrade(vxge_dev_t *vdev)
{
	/*
	 * Flash the firmware image compiled into the driver
	 * (VXGE_FW_ARRAY_NAME) onto the adapter via the HAL mrpcim
	 * upgrade API.  Logs the old and new versions and the outcome;
	 * the new image only takes effect after a power cycle.
	 */
	u8 *fw_buffer;
	u32 fw_size;
	vxge_hal_device_hw_info_t *hw_info;
	vxge_hal_status_e status = VXGE_HAL_OK;

	hw_info = &vdev->config.hw_info;

	/* image is a byte array baked into the driver binary */
	fw_size = sizeof(VXGE_FW_ARRAY_NAME);
	fw_buffer = (u8 *) VXGE_FW_ARRAY_NAME;

	device_printf(vdev->ndev, "Current firmware version : %s (%s)\n",
	    hw_info->fw_version.version, hw_info->fw_date.date);

	device_printf(vdev->ndev, "Upgrading firmware to %d.%d.%d\n",
	    VXGE_MIN_FW_MAJOR_VERSION, VXGE_MIN_FW_MINOR_VERSION,
	    VXGE_MIN_FW_BUILD_NUMBER);

	/* Call HAL API to upgrade firmware */
	status = vxge_hal_mrpcim_fw_upgrade(vdev->pdev,
	    (pci_reg_h) vdev->pdev->reg_map[0],
	    (u8 *) vdev->pdev->bar_info[0],
	    fw_buffer, fw_size);

	device_printf(vdev->ndev, "firmware upgrade %s\n",
	    (status == VXGE_HAL_OK) ? "successful" : "failed");

	return (status);
}
2642
vxge_hal_status_e
vxge_func_mode_set(vxge_dev_t *vdev)
{
	/*
	 * Program the configured PCIe function mode into the adapter and
	 * commit it to firmware.  When dropping from multi-function to
	 * single-function while the NIC is in dual-port mode, also force
	 * the port mode back to single-port.
	 */
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	status = vxge_hal_mrpcim_pcie_func_mode_set(vdev->devh,
	    vdev->config.function_mode);
	device_printf(vdev->ndev,
	    "function mode change %s\n",
	    (status == VXGE_HAL_OK) ? "successful" : "failed");

	if (status == VXGE_HAL_OK) {
		/* persist the new mode in firmware */
		vxge_hal_set_fw_api(vdev->devh, 0ULL,
		    VXGE_HAL_API_FUNC_MODE_COMMIT,
		    0, 0ULL, 0ULL);

		vxge_hal_get_active_config(vdev->devh,
		    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
		    &active_config);

		/*
		 * If in MF + DP mode
		 * if user changes to SF, change port_mode to single port mode
		 */
		if (((is_multi_func(vdev->config.hw_info.function_mode)) &&
		    is_single_func(vdev->config.function_mode)) &&
		    (active_config == VXGE_HAL_DP_NP_MODE_DUAL_PORT)) {
			vdev->config.port_mode =
			    VXGE_HAL_DP_NP_MODE_SINGLE_PORT;

			status = vxge_port_mode_set(vdev);
		}
	}
	return (status);
}
2679
vxge_hal_status_e
vxge_port_mode_set(vxge_dev_t *vdev)
{
	/*
	 * Program the configured dual/single port mode and commit it to
	 * firmware.  For active-active (dual-port) mode, also install the
	 * dual-port vpath mapping.
	 */
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	status = vxge_hal_set_port_mode(vdev->devh, vdev->config.port_mode);
	device_printf(vdev->ndev,
	    "port mode change %s\n",
	    (status == VXGE_HAL_OK) ? "successful" : "failed");

	if (status == VXGE_HAL_OK) {
		/* persist the new mode in firmware */
		vxge_hal_set_fw_api(vdev->devh, 0ULL,
		    VXGE_HAL_API_FUNC_MODE_COMMIT,
		    0, 0ULL, 0ULL);

		/* Configure vpath_mapping for active-active mode only */
		if (vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) {

			status = vxge_hal_config_vpath_map(vdev->devh,
			    VXGE_DUAL_PORT_MAP);

			device_printf(vdev->ndev, "dual port map change %s\n",
			    (status == VXGE_HAL_OK) ? "successful" : "failed");
		}
	}
	return (status);
}
2707
int
vxge_port_mode_update(vxge_dev_t *vdev)
{
	/*
	 * Reconcile the configured port mode / failure behaviour with the
	 * adapter's active settings.  Returns 0 when nothing changed,
	 * ENXIO when a change was written (power cycle required), EPERM
	 * when dual-port was requested in single-function mode, and
	 * EINVAL on query/set failures.
	 */
	int err = 0;
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	/* dual-port mode needs multi-function operation */
	if ((vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) &&
	    is_single_func(vdev->config.hw_info.function_mode)) {

		device_printf(vdev->ndev,
		    "Adapter in SF mode, dual port mode is not allowed\n");
		err = EPERM;
		goto _exit0;
	}

	/* read the currently active port mode */
	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
	    &active_config);
	if (status != VXGE_HAL_OK) {
		err = EINVAL;
		goto _exit0;
	}

	vdev->port_mode = active_config;
	if (vdev->config.port_mode != VXGE_DEFAULT_CONFIG_VALUE) {
		if (vdev->config.port_mode != vdev->port_mode) {
			status = vxge_port_mode_set(vdev);
			if (status != VXGE_HAL_OK) {
				err = EINVAL;
				goto _exit0;
			}
			/* change written; takes effect after power cycle */
			err = ENXIO;
			vdev->port_mode  = vdev->config.port_mode;
		}
	}

	/* read the currently active behaviour-on-failure setting */
	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
	    &active_config);
	if (status != VXGE_HAL_OK) {
		err = EINVAL;
		goto _exit0;
	}

	vdev->port_failure = active_config;

	/*
	 * active/active mode : set to NoMove
	 * active/passive mode: set to Failover-Failback
	 */
	if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT)
		vdev->config.port_failure =
		    VXGE_HAL_XMAC_NWIF_OnFailure_NoMove;

	else if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_ACTIVE_PASSIVE)
		vdev->config.port_failure =
		    VXGE_HAL_XMAC_NWIF_OnFailure_OtherPortBackOnRestore;

	if ((vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) &&
	    (vdev->config.port_failure != vdev->port_failure)) {
		status = vxge_port_behavior_on_failure_set(vdev);
		if (status == VXGE_HAL_OK)
			err = ENXIO;
	}

_exit0:
	return (err);
}
2779
vxge_hal_status_e
vxge_port_mode_get(vxge_dev_t *vdev, vxge_port_info_t *port_info)
{
	/*
	 * Fill port_info with the adapter's active port mode and
	 * behaviour-on-failure settings.
	 *
	 * NOTE(review): despite the vxge_hal_status_e return type, this
	 * function returns errno-style values (0 or ENXIO) — callers
	 * appear to test for 0/non-zero; confirm before changing.
	 */
	int err = 0;
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
	    &active_config);

	if (status != VXGE_HAL_OK) {
		err = ENXIO;
		goto _exit0;
	}

	port_info->port_mode = active_config;

	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
	    &active_config);
	if (status != VXGE_HAL_OK) {
		err = ENXIO;
		goto _exit0;
	}

	port_info->port_failure = active_config;

_exit0:
	return (err);
}
2813
vxge_hal_status_e
vxge_port_behavior_on_failure_set(vxge_dev_t *vdev)
{
	/*
	 * Program the configured port behaviour-on-failure policy and,
	 * on success, commit it to firmware.
	 */
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	status = vxge_hal_set_behavior_on_failure(vdev->devh,
	    vdev->config.port_failure);

	device_printf(vdev->ndev,
	    "port behaviour on failure change %s\n",
	    (status == VXGE_HAL_OK) ? "successful" : "failed");

	if (status == VXGE_HAL_OK)
		vxge_hal_set_fw_api(vdev->devh, 0ULL,
		    VXGE_HAL_API_FUNC_MODE_COMMIT,
		    0, 0ULL, 0ULL);

	return (status);
}
2833
2834void
2835vxge_active_port_update(vxge_dev_t *vdev)
2836{
2837	u64 active_config;
2838	vxge_hal_status_e status = VXGE_HAL_FAIL;
2839
2840	active_config = VXGE_DEFAULT_CONFIG_VALUE;
2841	status = vxge_hal_get_active_config(vdev->devh,
2842	    VXGE_HAL_XMAC_NWIF_ActConfig_ActivePort,
2843	    &active_config);
2844
2845	if (status == VXGE_HAL_OK)
2846		vdev->active_port = active_config;
2847}
2848
2849vxge_hal_status_e
2850vxge_l2switch_mode_set(vxge_dev_t *vdev)
2851{
2852	vxge_hal_status_e status = VXGE_HAL_FAIL;
2853
2854	status = vxge_hal_set_l2switch_mode(vdev->devh,
2855	    vdev->config.l2_switch);
2856
2857	device_printf(vdev->ndev, "L2 switch %s\n",
2858	    (status == VXGE_HAL_OK) ?
2859	    (vdev->config.l2_switch) ? "enable" : "disable" :
2860	    "change failed");
2861
2862	if (status == VXGE_HAL_OK)
2863		vxge_hal_set_fw_api(vdev->devh, 0ULL,
2864		    VXGE_HAL_API_FUNC_MODE_COMMIT,
2865		    0, 0ULL, 0ULL);
2866
2867	return (status);
2868}
2869
2870/*
2871 * vxge_promisc_set
2872 * Enable Promiscuous Mode
2873 */
2874void
2875vxge_promisc_set(vxge_dev_t *vdev)
2876{
2877	int i;
2878	ifnet_t ifp;
2879	vxge_hal_vpath_h vpath_handle;
2880
2881	if (!vdev->is_initialized)
2882		return;
2883
2884	ifp = vdev->ifp;
2885
2886	for (i = 0; i < vdev->no_of_vpath; i++) {
2887		vpath_handle = vxge_vpath_handle_get(vdev, i);
2888		if (!vpath_handle)
2889			continue;
2890
2891		if (ifp->if_flags & IFF_PROMISC)
2892			vxge_hal_vpath_promisc_enable(vpath_handle);
2893		else
2894			vxge_hal_vpath_promisc_disable(vpath_handle);
2895	}
2896}
2897
2898/*
2899 * vxge_change_mtu
2900 * Change interface MTU to a requested valid size
2901 */
2902int
2903vxge_change_mtu(vxge_dev_t *vdev, unsigned long new_mtu)
2904{
2905	int err = EINVAL;
2906
2907	if ((new_mtu < VXGE_HAL_MIN_MTU) || (new_mtu > VXGE_HAL_MAX_MTU))
2908		goto _exit0;
2909
2910	(vdev->ifp)->if_mtu = new_mtu;
2911	device_printf(vdev->ndev, "MTU changed to %ld\n", (vdev->ifp)->if_mtu);
2912
2913	if (vdev->is_initialized) {
2914		if_down(vdev->ifp);
2915		vxge_reset(vdev);
2916		if_up(vdev->ifp);
2917	}
2918	err = 0;
2919
2920_exit0:
2921	return (err);
2922}
2923
2924/*
2925 * Creates DMA tags for both Tx and Rx
2926 */
/*
 * Creates DMA tags for both Tx and Rx
 */
int
vxge_dma_tags_create(vxge_vpath_t *vpath)
{
	/*
	 * Size the Rx mbuf cluster from the current MTU (MCLBYTES,
	 * MJUMPAGESIZE or MJUM9BYTES), then create the Tx DMA tag
	 * (multi-segment, TSO-sized), the Rx DMA tag (single segment)
	 * and a spare Rx DMA map.  Returns 0 on success or the busdma
	 * errno; on failure everything created so far is torn down.
	 */
	int err = 0;
	bus_size_t max_size, boundary;
	vxge_dev_t *vdev = vpath->vdev;
	ifnet_t ifp = vdev->ifp;

	/* worst-case receive frame: MTU + MAC header + alignment pad */
	max_size = ifp->if_mtu +
	    VXGE_HAL_MAC_HEADER_MAX_SIZE +
	    VXGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN;

	VXGE_BUFFER_ALIGN(max_size, 128)
	if (max_size <= MCLBYTES)
		vdev->rx_mbuf_sz = MCLBYTES;
	else
		vdev->rx_mbuf_sz =
		    (max_size > MJUMPAGESIZE) ? MJUM9BYTES : MJUMPAGESIZE;

	/* boundary 0 = unrestricted, for buffers spanning a page */
	boundary = (max_size > PAGE_SIZE) ? 0 : PAGE_SIZE;

	/* DMA tag for Tx */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(vdev->ndev),
	    1,
	    PAGE_SIZE,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL,
	    NULL,
	    VXGE_TSO_SIZE,
	    VXGE_MAX_SEGS,
	    PAGE_SIZE,
	    BUS_DMA_ALLOCNOW,
	    NULL,
	    NULL,
	    &(vpath->dma_tag_tx));
	if (err != 0)
		goto _exit0;

	/* DMA tag for Rx */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(vdev->ndev),
	    1,
	    boundary,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL,
	    NULL,
	    vdev->rx_mbuf_sz,
	    1,
	    vdev->rx_mbuf_sz,
	    BUS_DMA_ALLOCNOW,
	    NULL,
	    NULL,
	    &(vpath->dma_tag_rx));
	if (err != 0)
		goto _exit1;

	/* Create DMA map for this descriptor */
	err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT,
	    &vpath->extra_dma_map);
	if (err == 0)
		goto _exit0;	/* success: skip the cleanup below */

	bus_dma_tag_destroy(vpath->dma_tag_rx);

_exit1:
	bus_dma_tag_destroy(vpath->dma_tag_tx);

_exit0:
	return (err);
}
3000
3001static inline int
3002vxge_dma_mbuf_coalesce(bus_dma_tag_t dma_tag_tx, bus_dmamap_t dma_map,
3003    mbuf_t * m_headp, bus_dma_segment_t * dma_buffers,
3004    int *num_segs)
3005{
3006	int err = 0;
3007	mbuf_t mbuf_pkt = NULL;
3008
3009retry:
3010	err = bus_dmamap_load_mbuf_sg(dma_tag_tx, dma_map, *m_headp,
3011	    dma_buffers, num_segs, BUS_DMA_NOWAIT);
3012	if (err == EFBIG) {
3013		/* try to defrag, too many segments */
3014		mbuf_pkt = m_defrag(*m_headp, M_NOWAIT);
3015		if (mbuf_pkt == NULL) {
3016			err = ENOBUFS;
3017			goto _exit0;
3018		}
3019		*m_headp = mbuf_pkt;
3020		goto retry;
3021	}
3022
3023_exit0:
3024	return (err);
3025}
3026
int
vxge_device_hw_info_get(vxge_dev_t *vdev)
{
	/*
	 * Read the adapter's hardware info block (vpath mask, firmware
	 * version, function mode), derive the supported vpath count and
	 * privilege level, and decide whether a firmware upgrade is
	 * needed.  Returns 0 on success, ENXIO when the device is
	 * unusable (no vpaths, or firmware too old to upgrade in-driver).
	 */
	int i, err = ENXIO;
	u64 vpath_mask = 0;
	u32 max_supported_vpath = 0;
	u32 fw_ver_maj_min;
	vxge_firmware_upgrade_e fw_option;

	vxge_hal_status_e status = VXGE_HAL_OK;
	vxge_hal_device_hw_info_t *hw_info;

	status = vxge_hal_device_hw_info_get(vdev->pdev,
	    (pci_reg_h) vdev->pdev->reg_map[0],
	    (u8 *) vdev->pdev->bar_info[0],
	    &vdev->config.hw_info);

	if (status != VXGE_HAL_OK)
		goto _exit0;

	hw_info = &vdev->config.hw_info;

	vpath_mask = hw_info->vpath_mask;
	if (vpath_mask == 0) {
		device_printf(vdev->ndev, "No vpaths available in device\n");
		goto _exit0;
	}

	fw_option = vdev->config.fw_option;

	/* Check how many vpaths are available */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & mBIT(i)))
			continue;
		max_supported_vpath++;
	}

	vdev->max_supported_vpath = max_supported_vpath;
	status = vxge_hal_device_is_privileged(hw_info->host_type,
	    hw_info->func_id);
	vdev->is_privilaged = (status == VXGE_HAL_OK) ? TRUE : FALSE;

	vdev->hw_fw_version = VXGE_FW_VERSION(
	    hw_info->fw_version.major,
	    hw_info->fw_version.minor,
	    hw_info->fw_version.build);

	fw_ver_maj_min =
	    VXGE_FW_MAJ_MIN_VERSION(hw_info->fw_version.major,
	    hw_info->fw_version.minor);

	/* upgrade when forced, or when on-card FW differs from driver's */
	if ((fw_option >= VXGE_FW_UPGRADE_FORCE) ||
	    (vdev->hw_fw_version != VXGE_DRV_FW_VERSION)) {

		/* For fw_ver 1.8.1 and above ignore build number. */
		if ((fw_option == VXGE_FW_UPGRADE_ALL) &&
		    ((vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 1)) &&
		    (fw_ver_maj_min == VXGE_DRV_FW_MAJ_MIN_VERSION))) {
			goto _exit1;
		}

		/* FW below the base version cannot be upgraded in-driver */
		if (vdev->hw_fw_version < VXGE_BASE_FW_VERSION) {
			device_printf(vdev->ndev,
			    "Upgrade driver through vxge_update, "
			    "Unable to load the driver.\n");
			goto _exit0;
		}
		vdev->fw_upgrade = TRUE;
	}

_exit1:
	err = 0;

_exit0:
	return (err);
}
3103
3104/*
3105 * vxge_device_hw_info_print
3106 * Print device and driver information
3107 */
3108void
3109vxge_device_hw_info_print(vxge_dev_t *vdev)
3110{
3111	u32 i;
3112	device_t ndev;
3113	struct sysctl_ctx_list *ctx;
3114	struct sysctl_oid_list *children;
3115	char pmd_type[2][VXGE_PMD_INFO_LEN];
3116
3117	vxge_hal_device_t *hldev;
3118	vxge_hal_device_hw_info_t *hw_info;
3119	vxge_hal_device_pmd_info_t *pmd_port;
3120
3121	hldev = vdev->devh;
3122	ndev = vdev->ndev;
3123
3124	ctx = device_get_sysctl_ctx(ndev);
3125	children = SYSCTL_CHILDREN(device_get_sysctl_tree(ndev));
3126
3127	hw_info = &(vdev->config.hw_info);
3128
3129	snprintf(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION],
3130	    sizeof(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]),
3131	    "%d.%d.%d.%d", XGELL_VERSION_MAJOR, XGELL_VERSION_MINOR,
3132	    XGELL_VERSION_FIX, XGELL_VERSION_BUILD);
3133
3134	/* Print PCI-e bus type/speed/width info */
3135	snprintf(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO],
3136	    sizeof(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]),
3137	    "x%d", hldev->link_width);
3138
3139	if (hldev->link_width <= VXGE_HAL_PCI_E_LINK_WIDTH_X4)
3140		device_printf(ndev, "For optimal performance a x8 "
3141		    "PCI-Express slot is required.\n");
3142
3143	vxge_null_terminate((char *) hw_info->serial_number,
3144	    sizeof(hw_info->serial_number));
3145
3146	vxge_null_terminate((char *) hw_info->part_number,
3147	    sizeof(hw_info->part_number));
3148
3149	snprintf(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO],
3150	    sizeof(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]),
3151	    "%s", hw_info->serial_number);
3152
3153	snprintf(vdev->config.nic_attr[VXGE_PRINT_PART_NO],
3154	    sizeof(vdev->config.nic_attr[VXGE_PRINT_PART_NO]),
3155	    "%s", hw_info->part_number);
3156
3157	snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION],
3158	    sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]),
3159	    "%s", hw_info->fw_version.version);
3160
3161	snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_DATE],
3162	    sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_DATE]),
3163	    "%s", hw_info->fw_date.date);
3164
3165	pmd_port = &(hw_info->pmd_port0);
3166	for (i = 0; i < hw_info->ports; i++) {
3167
3168		vxge_pmd_port_type_get(vdev, pmd_port->type,
3169		    pmd_type[i], sizeof(pmd_type[i]));
3170
3171		strncpy(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i],
3172		    "vendor=??, sn=??, pn=??, type=??",
3173		    sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]));
3174
3175		vxge_null_terminate(pmd_port->vendor, sizeof(pmd_port->vendor));
3176		if (strlen(pmd_port->vendor) == 0) {
3177			pmd_port = &(hw_info->pmd_port1);
3178			continue;
3179		}
3180
3181		vxge_null_terminate(pmd_port->ser_num,
3182		    sizeof(pmd_port->ser_num));
3183
3184		vxge_null_terminate(pmd_port->part_num,
3185		    sizeof(pmd_port->part_num));
3186
3187		snprintf(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i],
3188		    sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]),
3189		    "vendor=%s, sn=%s, pn=%s, type=%s",
3190		    pmd_port->vendor, pmd_port->ser_num,
3191		    pmd_port->part_num, pmd_type[i]);
3192
3193		pmd_port = &(hw_info->pmd_port1);
3194	}
3195
3196	switch (hw_info->function_mode) {
3197	case VXGE_HAL_PCIE_FUNC_MODE_SF1_VP17:
3198		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3199		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3200		    "%s %d %s", "Single Function - 1 function(s)",
3201		    vdev->max_supported_vpath, "VPath(s)/function");
3202		break;
3203
3204	case VXGE_HAL_PCIE_FUNC_MODE_MF2_VP8:
3205		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3206		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3207		    "%s %d %s", "Multi Function - 2 function(s)",
3208		    vdev->max_supported_vpath, "VPath(s)/function");
3209		break;
3210
3211	case VXGE_HAL_PCIE_FUNC_MODE_MF4_VP4:
3212		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3213		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3214		    "%s %d %s", "Multi Function - 4 function(s)",
3215		    vdev->max_supported_vpath, "VPath(s)/function");
3216		break;
3217
3218	case VXGE_HAL_PCIE_FUNC_MODE_MF8_VP2:
3219		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3220		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3221		    "%s %d %s", "Multi Function - 8 function(s)",
3222		    vdev->max_supported_vpath, "VPath(s)/function");
3223		break;
3224
3225	case VXGE_HAL_PCIE_FUNC_MODE_MF8P_VP2:
3226		snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3227		    sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]),
3228		    "%s %d %s", "Multi Function (DirectIO) - 8 function(s)",
3229		    vdev->max_supported_vpath, "VPath(s)/function");
3230		break;
3231	}
3232
3233	snprintf(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE],
3234	    sizeof(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]),
3235	    "%s", ((vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) ?
3236	    "MSI-X" : "INTA"));
3237
3238	snprintf(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT],
3239	    sizeof(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]),
3240	    "%d", vdev->no_of_vpath);
3241
3242	snprintf(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE],
3243	    sizeof(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]),
3244	    "%lu", vdev->ifp->if_mtu);
3245
3246	snprintf(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE],
3247	    sizeof(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]),
3248	    "%s", ((vdev->config.lro_enable) ? "Enabled" : "Disabled"));
3249
3250	snprintf(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE],
3251	    sizeof(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]),
3252	    "%s", ((vdev->config.rth_enable) ? "Enabled" : "Disabled"));
3253
3254	snprintf(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE],
3255	    sizeof(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]),
3256	    "%s", ((vdev->ifp->if_capenable & IFCAP_TSO4) ?
3257	    "Enabled" : "Disabled"));
3258
3259	snprintf(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE],
3260	    sizeof(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]),
3261	    "%s", ((hw_info->ports == 1) ? "Single Port" : "Dual Port"));
3262
3263	if (vdev->is_privilaged) {
3264
3265		if (hw_info->ports > 1) {
3266
3267			snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE],
3268			    sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]),
3269			    "%s", vxge_port_mode[vdev->port_mode]);
3270
3271			if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3272				snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE],
3273				    sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]),
3274				    "%s", vxge_port_failure[vdev->port_failure]);
3275
3276			vxge_active_port_update(vdev);
3277			snprintf(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT],
3278			    sizeof(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]),
3279			    "%lld", vdev->active_port);
3280		}
3281
3282		if (!is_single_func(hw_info->function_mode)) {
3283			snprintf(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE],
3284			    sizeof(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]),
3285			    "%s", ((vdev->l2_switch) ? "Enabled" : "Disabled"));
3286		}
3287	}
3288
3289	device_printf(ndev, "Driver version\t: %s\n",
3290	    vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]);
3291
3292	device_printf(ndev, "Serial number\t: %s\n",
3293	    vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]);
3294
3295	device_printf(ndev, "Part number\t: %s\n",
3296	    vdev->config.nic_attr[VXGE_PRINT_PART_NO]);
3297
3298	device_printf(ndev, "Firmware version\t: %s\n",
3299	    vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]);
3300
3301	device_printf(ndev, "Firmware date\t: %s\n",
3302	    vdev->config.nic_attr[VXGE_PRINT_FW_DATE]);
3303
3304	device_printf(ndev, "Link width\t: %s\n",
3305	    vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]);
3306
3307	if (vdev->is_privilaged) {
3308		device_printf(ndev, "Function mode\t: %s\n",
3309		    vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]);
3310	}
3311
3312	device_printf(ndev, "Interrupt type\t: %s\n",
3313	    vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]);
3314
3315	device_printf(ndev, "VPath(s) opened\t: %s\n",
3316	    vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]);
3317
3318	device_printf(ndev, "Adapter Type\t: %s\n",
3319	    vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]);
3320
3321	device_printf(ndev, "PMD Port 0\t: %s\n",
3322	    vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0]);
3323
3324	if (hw_info->ports > 1) {
3325		device_printf(ndev, "PMD Port 1\t: %s\n",
3326		    vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1]);
3327
3328		if (vdev->is_privilaged) {
3329			device_printf(ndev, "Port Mode\t: %s\n",
3330			    vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]);
3331
3332			if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3333				device_printf(ndev, "Port Failure\t: %s\n",
3334				    vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]);
3335
3336			device_printf(vdev->ndev, "Active Port\t: %s\n",
3337			    vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]);
3338		}
3339	}
3340
3341	if (vdev->is_privilaged && !is_single_func(hw_info->function_mode)) {
3342		device_printf(vdev->ndev, "L2 Switch\t: %s\n",
3343		    vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]);
3344	}
3345
3346	device_printf(ndev, "MTU is %s\n",
3347	    vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]);
3348
3349	device_printf(ndev, "LRO %s\n",
3350	    vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]);
3351
3352	device_printf(ndev, "RTH %s\n",
3353	    vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]);
3354
3355	device_printf(ndev, "TSO %s\n",
3356	    vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]);
3357
3358	SYSCTL_ADD_STRING(ctx, children,
3359	    OID_AUTO, "Driver version", CTLFLAG_RD,
3360	    vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION],
3361	    0, "Driver version");
3362
3363	SYSCTL_ADD_STRING(ctx, children,
3364	    OID_AUTO, "Serial number", CTLFLAG_RD,
3365	    vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO],
3366	    0, "Serial number");
3367
3368	SYSCTL_ADD_STRING(ctx, children,
3369	    OID_AUTO, "Part number", CTLFLAG_RD,
3370	    vdev->config.nic_attr[VXGE_PRINT_PART_NO],
3371	    0, "Part number");
3372
3373	SYSCTL_ADD_STRING(ctx, children,
3374	    OID_AUTO, "Firmware version", CTLFLAG_RD,
3375	    vdev->config.nic_attr[VXGE_PRINT_FW_VERSION],
3376	    0, "Firmware version");
3377
3378	SYSCTL_ADD_STRING(ctx, children,
3379	    OID_AUTO, "Firmware date", CTLFLAG_RD,
3380	    vdev->config.nic_attr[VXGE_PRINT_FW_DATE],
3381	    0, "Firmware date");
3382
3383	SYSCTL_ADD_STRING(ctx, children,
3384	    OID_AUTO, "Link width", CTLFLAG_RD,
3385	    vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO],
3386	    0, "Link width");
3387
3388	if (vdev->is_privilaged) {
3389		SYSCTL_ADD_STRING(ctx, children,
3390		    OID_AUTO, "Function mode", CTLFLAG_RD,
3391		    vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE],
3392		    0, "Function mode");
3393	}
3394
3395	SYSCTL_ADD_STRING(ctx, children,
3396	    OID_AUTO, "Interrupt type", CTLFLAG_RD,
3397	    vdev->config.nic_attr[VXGE_PRINT_INTR_MODE],
3398	    0, "Interrupt type");
3399
3400	SYSCTL_ADD_STRING(ctx, children,
3401	    OID_AUTO, "VPath(s) opened", CTLFLAG_RD,
3402	    vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT],
3403	    0, "VPath(s) opened");
3404
3405	SYSCTL_ADD_STRING(ctx, children,
3406	    OID_AUTO, "Adapter Type", CTLFLAG_RD,
3407	    vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE],
3408	    0, "Adapter Type");
3409
3410	SYSCTL_ADD_STRING(ctx, children,
3411	    OID_AUTO, "pmd port 0", CTLFLAG_RD,
3412	    vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0],
3413	    0, "pmd port");
3414
3415	if (hw_info->ports > 1) {
3416
3417		SYSCTL_ADD_STRING(ctx, children,
3418		    OID_AUTO, "pmd port 1", CTLFLAG_RD,
3419		    vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1],
3420		    0, "pmd port");
3421
3422		if (vdev->is_privilaged) {
3423			SYSCTL_ADD_STRING(ctx, children,
3424			    OID_AUTO, "Port Mode", CTLFLAG_RD,
3425			    vdev->config.nic_attr[VXGE_PRINT_PORT_MODE],
3426			    0, "Port Mode");
3427
3428			if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT)
3429				SYSCTL_ADD_STRING(ctx, children,
3430				    OID_AUTO, "Port Failure", CTLFLAG_RD,
3431				    vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE],
3432				    0, "Port Failure");
3433
3434			SYSCTL_ADD_STRING(ctx, children,
3435			    OID_AUTO, "L2 Switch", CTLFLAG_RD,
3436			    vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE],
3437			    0, "L2 Switch");
3438		}
3439	}
3440
3441	SYSCTL_ADD_STRING(ctx, children,
3442	    OID_AUTO, "LRO mode", CTLFLAG_RD,
3443	    vdev->config.nic_attr[VXGE_PRINT_LRO_MODE],
3444	    0, "LRO mode");
3445
3446	SYSCTL_ADD_STRING(ctx, children,
3447	    OID_AUTO, "RTH mode", CTLFLAG_RD,
3448	    vdev->config.nic_attr[VXGE_PRINT_RTH_MODE],
3449	    0, "RTH mode");
3450
3451	SYSCTL_ADD_STRING(ctx, children,
3452	    OID_AUTO, "TSO mode", CTLFLAG_RD,
3453	    vdev->config.nic_attr[VXGE_PRINT_TSO_MODE],
3454	    0, "TSO mode");
3455}
3456
3457void
3458vxge_pmd_port_type_get(vxge_dev_t *vdev, u32 port_type,
3459    char *ifm_name, u8 ifm_len)
3460{
3461
3462	vdev->ifm_optics = IFM_UNKNOWN;
3463
3464	switch (port_type) {
3465	case VXGE_HAL_DEVICE_PMD_TYPE_10G_SR:
3466		vdev->ifm_optics = IFM_10G_SR;
3467		strlcpy(ifm_name, "10GbE SR", ifm_len);
3468		break;
3469
3470	case VXGE_HAL_DEVICE_PMD_TYPE_10G_LR:
3471		vdev->ifm_optics = IFM_10G_LR;
3472		strlcpy(ifm_name, "10GbE LR", ifm_len);
3473		break;
3474
3475	case VXGE_HAL_DEVICE_PMD_TYPE_10G_LRM:
3476		vdev->ifm_optics = IFM_10G_LRM;
3477		strlcpy(ifm_name, "10GbE LRM", ifm_len);
3478		break;
3479
3480	case VXGE_HAL_DEVICE_PMD_TYPE_10G_DIRECT:
3481		vdev->ifm_optics = IFM_10G_TWINAX;
3482		strlcpy(ifm_name, "10GbE DA (Direct Attached)", ifm_len);
3483		break;
3484
3485	case VXGE_HAL_DEVICE_PMD_TYPE_10G_CX4:
3486		vdev->ifm_optics = IFM_10G_CX4;
3487		strlcpy(ifm_name, "10GbE CX4", ifm_len);
3488		break;
3489
3490	case VXGE_HAL_DEVICE_PMD_TYPE_10G_BASE_T:
3491#if __FreeBSD_version >= 800000
3492		vdev->ifm_optics = IFM_10G_T;
3493#endif
3494		strlcpy(ifm_name, "10GbE baseT", ifm_len);
3495		break;
3496
3497	case VXGE_HAL_DEVICE_PMD_TYPE_10G_OTHER:
3498		strlcpy(ifm_name, "10GbE Other", ifm_len);
3499		break;
3500
3501	case VXGE_HAL_DEVICE_PMD_TYPE_1G_SX:
3502		vdev->ifm_optics = IFM_1000_SX;
3503		strlcpy(ifm_name, "1GbE SX", ifm_len);
3504		break;
3505
3506	case VXGE_HAL_DEVICE_PMD_TYPE_1G_LX:
3507		vdev->ifm_optics = IFM_1000_LX;
3508		strlcpy(ifm_name, "1GbE LX", ifm_len);
3509		break;
3510
3511	case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX:
3512		vdev->ifm_optics = IFM_1000_CX;
3513		strlcpy(ifm_name, "1GbE CX", ifm_len);
3514		break;
3515
3516	case VXGE_HAL_DEVICE_PMD_TYPE_1G_BASE_T:
3517		vdev->ifm_optics = IFM_1000_T;
3518		strlcpy(ifm_name, "1GbE baseT", ifm_len);
3519		break;
3520
3521	case VXGE_HAL_DEVICE_PMD_TYPE_1G_DIRECT:
3522		strlcpy(ifm_name, "1GbE DA (Direct Attached)",
3523		    ifm_len);
3524		break;
3525
3526	case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX4:
3527		strlcpy(ifm_name, "1GbE CX4", ifm_len);
3528		break;
3529
3530	case VXGE_HAL_DEVICE_PMD_TYPE_1G_OTHER:
3531		strlcpy(ifm_name, "1GbE Other", ifm_len);
3532		break;
3533
3534	default:
3535	case VXGE_HAL_DEVICE_PMD_TYPE_UNKNOWN:
3536		strlcpy(ifm_name, "UNSUP", ifm_len);
3537		break;
3538	}
3539}
3540
3541u32
3542vxge_ring_length_get(u32 buffer_mode)
3543{
3544	return (VXGE_DEFAULT_RING_BLOCK *
3545	    vxge_hal_ring_rxds_per_block_get(buffer_mode));
3546}
3547
3548/*
3549 * Removes trailing spaces padded
3550 * and NULL terminates strings
3551 */
3552static inline void
3553vxge_null_terminate(char *str, size_t len)
3554{
3555	len--;
3556	while (*str && (*str != ' ') && (len != 0))
3557		++str;
3558
3559	--len;
3560	if (*str)
3561		*str = '\0';
3562}
3563
3564/*
3565 * vxge_ioctl
3566 * Callback to control the device
3567 */
3568int
3569vxge_ioctl(ifnet_t ifp, u_long command, caddr_t data)
3570{
3571	int mask, err = 0;
3572	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
3573	struct ifreq *ifr = (struct ifreq *) data;
3574
3575	if (!vdev->is_active)
3576		return (EBUSY);
3577
3578	switch (command) {
3579		/* Set/Get ifnet address */
3580	case SIOCSIFADDR:
3581	case SIOCGIFADDR:
3582		ether_ioctl(ifp, command, data);
3583		break;
3584
3585		/* Set Interface MTU */
3586	case SIOCSIFMTU:
3587		err = vxge_change_mtu(vdev, (unsigned long)ifr->ifr_mtu);
3588		break;
3589
3590		/* Set Interface Flags */
3591	case SIOCSIFFLAGS:
3592		VXGE_DRV_LOCK(vdev);
3593		if (ifp->if_flags & IFF_UP) {
3594			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3595				if ((ifp->if_flags ^ vdev->if_flags) &
3596				    (IFF_PROMISC | IFF_ALLMULTI))
3597					vxge_promisc_set(vdev);
3598			} else {
3599				vxge_init_locked(vdev);
3600			}
3601		} else {
3602			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3603				vxge_stop_locked(vdev);
3604		}
3605		vdev->if_flags = ifp->if_flags;
3606		VXGE_DRV_UNLOCK(vdev);
3607		break;
3608
3609		/* Add/delete multicast address */
3610	case SIOCADDMULTI:
3611	case SIOCDELMULTI:
3612		break;
3613
3614		/* Get/Set Interface Media */
3615	case SIOCSIFMEDIA:
3616	case SIOCGIFMEDIA:
3617		err = ifmedia_ioctl(ifp, ifr, &vdev->media, command);
3618		break;
3619
3620		/* Set Capabilities */
3621	case SIOCSIFCAP:
3622		VXGE_DRV_LOCK(vdev);
3623		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3624
3625		if (mask & IFCAP_TXCSUM) {
3626			ifp->if_capenable ^= IFCAP_TXCSUM;
3627			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3628
3629			if ((ifp->if_capenable & IFCAP_TSO) &&
3630			    !(ifp->if_capenable & IFCAP_TXCSUM)) {
3631
3632				ifp->if_capenable &= ~IFCAP_TSO;
3633				ifp->if_hwassist &= ~CSUM_TSO;
3634				if_printf(ifp, "TSO Disabled\n");
3635			}
3636		}
3637		if (mask & IFCAP_RXCSUM)
3638			ifp->if_capenable ^= IFCAP_RXCSUM;
3639
3640		if (mask & IFCAP_TSO4) {
3641			ifp->if_capenable ^= IFCAP_TSO4;
3642
3643			if (ifp->if_capenable & IFCAP_TSO) {
3644				if (ifp->if_capenable & IFCAP_TXCSUM) {
3645					ifp->if_hwassist |= CSUM_TSO;
3646					if_printf(ifp, "TSO Enabled\n");
3647				} else {
3648					ifp->if_capenable &= ~IFCAP_TSO;
3649					ifp->if_hwassist &= ~CSUM_TSO;
3650					if_printf(ifp,
3651					    "Enable tx checksum offload \
3652					     first.\n");
3653					err = EAGAIN;
3654				}
3655			} else {
3656				ifp->if_hwassist &= ~CSUM_TSO;
3657				if_printf(ifp, "TSO Disabled\n");
3658			}
3659		}
3660		if (mask & IFCAP_LRO)
3661			ifp->if_capenable ^= IFCAP_LRO;
3662
3663		if (mask & IFCAP_VLAN_HWTAGGING)
3664			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3665
3666		if (mask & IFCAP_VLAN_MTU)
3667			ifp->if_capenable ^= IFCAP_VLAN_MTU;
3668
3669		if (mask & IFCAP_VLAN_HWCSUM)
3670			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
3671
3672#if __FreeBSD_version >= 800000
3673		if (mask & IFCAP_VLAN_HWTSO)
3674			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3675#endif
3676
3677#if defined(VLAN_CAPABILITIES)
3678		VLAN_CAPABILITIES(ifp);
3679#endif
3680
3681		VXGE_DRV_UNLOCK(vdev);
3682		break;
3683
3684	case SIOCGPRIVATE_0:
3685		VXGE_DRV_LOCK(vdev);
3686		err = vxge_ioctl_stats(vdev, ifr);
3687		VXGE_DRV_UNLOCK(vdev);
3688		break;
3689
3690	case SIOCGPRIVATE_1:
3691		VXGE_DRV_LOCK(vdev);
3692		err = vxge_ioctl_regs(vdev, ifr);
3693		VXGE_DRV_UNLOCK(vdev);
3694		break;
3695
3696	default:
3697		err = ether_ioctl(ifp, command, data);
3698		break;
3699	}
3700
3701	return (err);
3702}
3703
3704/*
3705 * vxge_ioctl_regs
3706 * IOCTL to get registers
3707 */
3708int
3709vxge_ioctl_regs(vxge_dev_t *vdev, struct ifreq *ifr)
3710{
3711	u64 value = 0x0;
3712	u32 vp_id = 0;
3713	u32 offset, reqd_size = 0;
3714	int i, err = EINVAL;
3715
3716	char *command = (char *) ifr->ifr_data;
3717	void *reg_info = (void *) ifr->ifr_data;
3718
3719	vxge_vpath_t *vpath;
3720	vxge_hal_status_e status = VXGE_HAL_OK;
3721	vxge_hal_mgmt_reg_type_e regs_type;
3722
3723	switch (*command) {
3724	case vxge_hal_mgmt_reg_type_pcicfgmgmt:
3725		if (vdev->is_privilaged) {
3726			reqd_size = sizeof(vxge_hal_pcicfgmgmt_reg_t);
3727			regs_type = vxge_hal_mgmt_reg_type_pcicfgmgmt;
3728		}
3729		break;
3730
3731	case vxge_hal_mgmt_reg_type_mrpcim:
3732		if (vdev->is_privilaged) {
3733			reqd_size = sizeof(vxge_hal_mrpcim_reg_t);
3734			regs_type = vxge_hal_mgmt_reg_type_mrpcim;
3735		}
3736		break;
3737
3738	case vxge_hal_mgmt_reg_type_srpcim:
3739		if (vdev->is_privilaged) {
3740			reqd_size = sizeof(vxge_hal_srpcim_reg_t);
3741			regs_type = vxge_hal_mgmt_reg_type_srpcim;
3742		}
3743		break;
3744
3745	case vxge_hal_mgmt_reg_type_memrepair:
3746		if (vdev->is_privilaged) {
3747			/* reqd_size = sizeof(vxge_hal_memrepair_reg_t); */
3748			regs_type = vxge_hal_mgmt_reg_type_memrepair;
3749		}
3750		break;
3751
3752	case vxge_hal_mgmt_reg_type_legacy:
3753		reqd_size = sizeof(vxge_hal_legacy_reg_t);
3754		regs_type = vxge_hal_mgmt_reg_type_legacy;
3755		break;
3756
3757	case vxge_hal_mgmt_reg_type_toc:
3758		reqd_size = sizeof(vxge_hal_toc_reg_t);
3759		regs_type = vxge_hal_mgmt_reg_type_toc;
3760		break;
3761
3762	case vxge_hal_mgmt_reg_type_common:
3763		reqd_size = sizeof(vxge_hal_common_reg_t);
3764		regs_type = vxge_hal_mgmt_reg_type_common;
3765		break;
3766
3767	case vxge_hal_mgmt_reg_type_vpmgmt:
3768		reqd_size = sizeof(vxge_hal_vpmgmt_reg_t);
3769		regs_type = vxge_hal_mgmt_reg_type_vpmgmt;
3770		vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]);
3771		vp_id = vpath->vp_id;
3772		break;
3773
3774	case vxge_hal_mgmt_reg_type_vpath:
3775		reqd_size = sizeof(vxge_hal_vpath_reg_t);
3776		regs_type = vxge_hal_mgmt_reg_type_vpath;
3777		vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]);
3778		vp_id = vpath->vp_id;
3779		break;
3780
3781	case VXGE_GET_VPATH_COUNT:
3782		*((u32 *) reg_info) = vdev->no_of_vpath;
3783		err = 0;
3784		break;
3785
3786	default:
3787		reqd_size = 0;
3788		break;
3789	}
3790
3791	if (reqd_size) {
3792		for (i = 0, offset = 0; offset < reqd_size;
3793		    i++, offset += 0x0008) {
3794			value = 0x0;
3795			status = vxge_hal_mgmt_reg_read(vdev->devh, regs_type,
3796			    vp_id, offset, &value);
3797
3798			err = (status != VXGE_HAL_OK) ? EINVAL : 0;
3799			if (err == EINVAL)
3800				break;
3801
3802			*((u64 *) ((u64 *) reg_info + i)) = value;
3803		}
3804	}
3805	return (err);
3806}
3807
3808/*
3809 * vxge_ioctl_stats
3810 * IOCTL to get statistics
3811 */
3812int
3813vxge_ioctl_stats(vxge_dev_t *vdev, struct ifreq *ifr)
3814{
3815	int i, retsize, err = EINVAL;
3816	u32 bufsize;
3817
3818	vxge_vpath_t *vpath;
3819	vxge_bw_info_t *bw_info;
3820	vxge_port_info_t *port_info;
3821	vxge_drv_stats_t *drv_stat;
3822
3823	char *buffer = NULL;
3824	char *command = (char *) ifr->ifr_data;
3825	vxge_hal_status_e status = VXGE_HAL_OK;
3826
3827	switch (*command) {
3828	case VXGE_GET_PCI_CONF:
3829		bufsize = VXGE_STATS_BUFFER_SIZE;
3830		buffer = (char *) vxge_mem_alloc(bufsize);
3831		if (buffer != NULL) {
3832			status = vxge_hal_aux_pci_config_read(vdev->devh,
3833			    bufsize, buffer, &retsize);
3834			if (status == VXGE_HAL_OK)
3835				err = copyout(buffer, ifr->ifr_data, retsize);
3836			else
3837				device_printf(vdev->ndev,
3838				    "failed pciconfig statistics query\n");
3839
3840			vxge_mem_free(buffer, bufsize);
3841		}
3842		break;
3843
3844	case VXGE_GET_MRPCIM_STATS:
3845		if (!vdev->is_privilaged)
3846			break;
3847
3848		bufsize = VXGE_STATS_BUFFER_SIZE;
3849		buffer = (char *) vxge_mem_alloc(bufsize);
3850		if (buffer != NULL) {
3851			status = vxge_hal_aux_stats_mrpcim_read(vdev->devh,
3852			    bufsize, buffer, &retsize);
3853			if (status == VXGE_HAL_OK)
3854				err = copyout(buffer, ifr->ifr_data, retsize);
3855			else
3856				device_printf(vdev->ndev,
3857				    "failed mrpcim statistics query\n");
3858
3859			vxge_mem_free(buffer, bufsize);
3860		}
3861		break;
3862
3863	case VXGE_GET_DEVICE_STATS:
3864		bufsize = VXGE_STATS_BUFFER_SIZE;
3865		buffer = (char *) vxge_mem_alloc(bufsize);
3866		if (buffer != NULL) {
3867			status = vxge_hal_aux_stats_device_read(vdev->devh,
3868			    bufsize, buffer, &retsize);
3869			if (status == VXGE_HAL_OK)
3870				err = copyout(buffer, ifr->ifr_data, retsize);
3871			else
3872				device_printf(vdev->ndev,
3873				    "failed device statistics query\n");
3874
3875			vxge_mem_free(buffer, bufsize);
3876		}
3877		break;
3878
3879	case VXGE_GET_DEVICE_HWINFO:
3880		bufsize = sizeof(vxge_device_hw_info_t);
3881		buffer = (char *) vxge_mem_alloc(bufsize);
3882		if (buffer != NULL) {
3883			vxge_os_memcpy(
3884			    &(((vxge_device_hw_info_t *) buffer)->hw_info),
3885			    &vdev->config.hw_info,
3886			    sizeof(vxge_hal_device_hw_info_t));
3887
3888			((vxge_device_hw_info_t *) buffer)->port_mode =
3889			    vdev->port_mode;
3890
3891			((vxge_device_hw_info_t *) buffer)->port_failure =
3892			    vdev->port_failure;
3893
3894			err = copyout(buffer, ifr->ifr_data, bufsize);
3895			if (err != 0)
3896				device_printf(vdev->ndev,
3897				    "failed device hardware info query\n");
3898
3899			vxge_mem_free(buffer, bufsize);
3900		}
3901		break;
3902
3903	case VXGE_GET_DRIVER_STATS:
3904		bufsize = sizeof(vxge_drv_stats_t) * vdev->no_of_vpath;
3905		drv_stat = (vxge_drv_stats_t *) vxge_mem_alloc(bufsize);
3906		if (drv_stat != NULL) {
3907			for (i = 0; i < vdev->no_of_vpath; i++) {
3908				vpath = &(vdev->vpaths[i]);
3909
3910				vpath->driver_stats.rx_lro_queued +=
3911				    vpath->lro.lro_queued;
3912
3913				vpath->driver_stats.rx_lro_flushed +=
3914				    vpath->lro.lro_flushed;
3915
3916				vxge_os_memcpy(&drv_stat[i],
3917				    &(vpath->driver_stats),
3918				    sizeof(vxge_drv_stats_t));
3919			}
3920
3921			err = copyout(drv_stat, ifr->ifr_data, bufsize);
3922			if (err != 0)
3923				device_printf(vdev->ndev,
3924				    "failed driver statistics query\n");
3925
3926			vxge_mem_free(drv_stat, bufsize);
3927		}
3928		break;
3929
3930	case VXGE_GET_BANDWIDTH:
3931		bw_info = (vxge_bw_info_t *) ifr->ifr_data;
3932
3933		if ((vdev->config.hw_info.func_id != 0) &&
3934		    (vdev->hw_fw_version < VXGE_FW_VERSION(1, 8, 0)))
3935			break;
3936
3937		if (vdev->config.hw_info.func_id != 0)
3938			bw_info->func_id = vdev->config.hw_info.func_id;
3939
3940		status = vxge_bw_priority_get(vdev, bw_info);
3941		if (status != VXGE_HAL_OK)
3942			break;
3943
3944		err = copyout(bw_info, ifr->ifr_data, sizeof(vxge_bw_info_t));
3945		break;
3946
3947	case VXGE_SET_BANDWIDTH:
3948		if (vdev->is_privilaged)
3949			err = vxge_bw_priority_set(vdev, ifr);
3950		break;
3951
3952	case VXGE_SET_PORT_MODE:
3953		if (vdev->is_privilaged) {
3954			if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
3955				port_info = (vxge_port_info_t *) ifr->ifr_data;
3956				vdev->config.port_mode = port_info->port_mode;
3957				err = vxge_port_mode_update(vdev);
3958				if (err != ENXIO)
3959					err = VXGE_HAL_FAIL;
3960				else {
3961					err = VXGE_HAL_OK;
3962					device_printf(vdev->ndev,
3963					    "PLEASE POWER CYCLE THE SYSTEM\n");
3964				}
3965			}
3966		}
3967		break;
3968
3969	case VXGE_GET_PORT_MODE:
3970		if (vdev->is_privilaged) {
3971			if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
3972				port_info = (vxge_port_info_t *) ifr->ifr_data;
3973				err = vxge_port_mode_get(vdev, port_info);
3974				if (err == VXGE_HAL_OK) {
3975					err = copyout(port_info, ifr->ifr_data,
3976					    sizeof(vxge_port_info_t));
3977				}
3978			}
3979		}
3980		break;
3981
3982	default:
3983		break;
3984	}
3985
3986	return (err);
3987}
3988
3989int
3990vxge_bw_priority_config(vxge_dev_t *vdev)
3991{
3992	u32 i;
3993	int err = EINVAL;
3994
3995	for (i = 0; i < vdev->no_of_func; i++) {
3996		err = vxge_bw_priority_update(vdev, i, TRUE);
3997		if (err != 0)
3998			break;
3999	}
4000
4001	return (err);
4002}
4003
4004int
4005vxge_bw_priority_set(vxge_dev_t *vdev, struct ifreq *ifr)
4006{
4007	int err;
4008	u32 func_id;
4009	vxge_bw_info_t *bw_info;
4010
4011	bw_info = (vxge_bw_info_t *) ifr->ifr_data;
4012	func_id = bw_info->func_id;
4013
4014	vdev->config.bw_info[func_id].priority = bw_info->priority;
4015	vdev->config.bw_info[func_id].bandwidth = bw_info->bandwidth;
4016
4017	err = vxge_bw_priority_update(vdev, func_id, FALSE);
4018
4019	return (err);
4020}
4021
/*
 * vxge_bw_priority_update
 * Applies the bandwidth and priority configured for the given
 * function to each of that function's vpaths, then programs the
 * hardware for every vpath whose config changed.
 *
 * binit is TRUE when called at init time (from
 * vxge_bw_priority_config); in low-latency mode that path forces
 * function 0 to high priority.  Returns 0 on success, EINVAL on any
 * HAL failure (vpath list lookup errors return the HAL status).
 */
int
vxge_bw_priority_update(vxge_dev_t *vdev, u32 func_id, bool binit)
{
	u32 i, set = 0;
	u32 bandwidth, priority, vpath_count;
	u64 vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS];

	vxge_hal_device_t *hldev;
	vxge_hal_vp_config_t *vp_config;
	vxge_hal_status_e status = VXGE_HAL_OK;

	hldev = vdev->devh;

	/* Which vpaths belong to this function? */
	status = vxge_hal_get_vpath_list(vdev->devh, func_id,
	    vpath_list, &vpath_count);

	if (status != VXGE_HAL_OK)
		return (status);

	for (i = 0; i < vpath_count; i++) {
		vp_config = &(hldev->config.vp_config[vpath_list[i]]);

		/* Configure Bandwidth (out-of-range values fall back to
		 * the default). */
		if (vdev->config.bw_info[func_id].bandwidth !=
		    VXGE_HAL_VPATH_BW_LIMIT_DEFAULT) {

			set = 1;
			bandwidth = vdev->config.bw_info[func_id].bandwidth;
			if (bandwidth < VXGE_HAL_VPATH_BW_LIMIT_MIN ||
			    bandwidth > VXGE_HAL_VPATH_BW_LIMIT_MAX) {

				bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT;
			}
			vp_config->bandwidth = bandwidth;
		}

		/*
		 * If b/w limiting is enabled on any of the
		 * VFs, then for remaining VFs set the priority to 3
		 * and b/w limiting to max i.e 10 Gb)
		 */
		if (vp_config->bandwidth == VXGE_HAL_VPATH_BW_LIMIT_DEFAULT)
			vp_config->bandwidth = VXGE_HAL_VPATH_BW_LIMIT_MAX;

		/* Low-latency init: function 0 gets high priority. */
		if (binit && vdev->config.low_latency) {
			if (func_id == 0)
				vdev->config.bw_info[func_id].priority =
				    VXGE_DEFAULT_VPATH_PRIORITY_HIGH;
		}

		/* Configure Priority (out-of-range values fall back to
		 * the default). */
		if (vdev->config.bw_info[func_id].priority !=
		    VXGE_HAL_VPATH_PRIORITY_DEFAULT) {

			set = 1;
			priority = vdev->config.bw_info[func_id].priority;
			if (priority < VXGE_HAL_VPATH_PRIORITY_MIN ||
			    priority > VXGE_HAL_VPATH_PRIORITY_MAX) {

				priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT;
			}
			vp_config->priority = priority;

		} else if (vdev->config.low_latency) {
			set = 1;
			vp_config->priority = VXGE_DEFAULT_VPATH_PRIORITY_LOW;
		}

		/* Push the new settings to the hardware.  Tx limiting is
		 * only available on the first VXGE_HAL_TX_BW_VPATH_LIMIT
		 * vpaths. */
		if (set == 1) {
			status = vxge_hal_rx_bw_priority_set(vdev->devh,
			    vpath_list[i]);
			if (status != VXGE_HAL_OK)
				break;

			if (vpath_list[i] < VXGE_HAL_TX_BW_VPATH_LIMIT) {
				status = vxge_hal_tx_bw_priority_set(
				    vdev->devh, vpath_list[i]);
				if (status != VXGE_HAL_OK)
					break;
			}
		}
	}

	return ((status  == VXGE_HAL_OK) ? 0 : EINVAL);
}
4107
4108/*
4109 * vxge_intr_coalesce_tx
4110 * Changes interrupt coalescing if the interrupts are not within a range
4111 * Return Value: Nothing
4112 */
4113void
4114vxge_intr_coalesce_tx(vxge_vpath_t *vpath)
4115{
4116	u32 timer;
4117
4118	if (!vpath->tx_intr_coalesce)
4119		return;
4120
4121	vpath->tx_interrupts++;
4122	if (ticks > vpath->tx_ticks + hz/100) {
4123
4124		vpath->tx_ticks = ticks;
4125		timer = vpath->tti_rtimer_val;
4126		if (vpath->tx_interrupts > VXGE_MAX_TX_INTERRUPT_COUNT) {
4127			if (timer != VXGE_TTI_RTIMER_ADAPT_VAL) {
4128				vpath->tti_rtimer_val =
4129				    VXGE_TTI_RTIMER_ADAPT_VAL;
4130
4131				vxge_hal_vpath_dynamic_tti_rtimer_set(
4132				    vpath->handle, vpath->tti_rtimer_val);
4133			}
4134		} else {
4135			if (timer != 0) {
4136				vpath->tti_rtimer_val = 0;
4137				vxge_hal_vpath_dynamic_tti_rtimer_set(
4138				    vpath->handle, vpath->tti_rtimer_val);
4139			}
4140		}
4141		vpath->tx_interrupts = 0;
4142	}
4143}
4144
4145/*
4146 * vxge_intr_coalesce_rx
4147 * Changes interrupt coalescing if the interrupts are not within a range
4148 * Return Value: Nothing
4149 */
4150void
4151vxge_intr_coalesce_rx(vxge_vpath_t *vpath)
4152{
4153	u32 timer;
4154
4155	if (!vpath->rx_intr_coalesce)
4156		return;
4157
4158	vpath->rx_interrupts++;
4159	if (ticks > vpath->rx_ticks + hz/100) {
4160
4161		vpath->rx_ticks = ticks;
4162		timer = vpath->rti_rtimer_val;
4163
4164		if (vpath->rx_interrupts > VXGE_MAX_RX_INTERRUPT_COUNT) {
4165			if (timer != VXGE_RTI_RTIMER_ADAPT_VAL) {
4166				vpath->rti_rtimer_val =
4167				    VXGE_RTI_RTIMER_ADAPT_VAL;
4168
4169				vxge_hal_vpath_dynamic_rti_rtimer_set(
4170				    vpath->handle, vpath->rti_rtimer_val);
4171			}
4172		} else {
4173			if (timer != 0) {
4174				vpath->rti_rtimer_val = 0;
4175				vxge_hal_vpath_dynamic_rti_rtimer_set(
4176				    vpath->handle, vpath->rti_rtimer_val);
4177			}
4178		}
4179		vpath->rx_interrupts = 0;
4180	}
4181}
4182
4183/*
4184 * vxge_methods FreeBSD device interface entry points
4185 */
4186static device_method_t vxge_methods[] = {
4187	DEVMETHOD(device_probe, vxge_probe),
4188	DEVMETHOD(device_attach, vxge_attach),
4189	DEVMETHOD(device_detach, vxge_detach),
4190	DEVMETHOD(device_shutdown, vxge_shutdown),
4191
4192	DEVMETHOD_END
4193};
4194
4195static driver_t vxge_driver = {
4196	"vxge", vxge_methods, sizeof(vxge_dev_t),
4197};
4198
4199static devclass_t vxge_devclass;
4200
4201DRIVER_MODULE(vxge, pci, vxge_driver, vxge_devclass, 0, 0);
4202