/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/nxge/if_nxge.c 331722 2018-03-29 02:50:57Z eadler $
 */

#include <dev/nxge/if_nxge.h>
#include <dev/nxge/xge-osdep.h>
#include <net/if_arp.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

int       copyright_print       = 0;
int       hal_driver_init_count = 0;
size_t    size                  = sizeof(int);

static inline void xge_flush_txds(xge_hal_channel_h);

/**
 * xge_probe
 * Probes for Xframe devices
 *
 * @dev Device handle
 *
 * Returns
 * BUS_PROBE_DEFAULT if device is supported
 * ENXIO if device is not supported
 */
int
xge_probe(device_t dev)
{
	int  devid    = pci_get_device(dev);
	int  vendorid = pci_get_vendor(dev);
	int  retValue = ENXIO;

	if(vendorid == XGE_PCI_VENDOR_ID) {
	    if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
	        (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
	        if(!copyright_print) {
	            xge_os_printf(XGE_COPYRIGHT);
	            copyright_print = 1;
	        }
	        device_set_desc_copy(dev,
	            "Neterion Xframe 10 Gigabit Ethernet Adapter");
	        retValue = BUS_PROBE_DEFAULT;
	    }
	}

	return retValue;
}

/**
 * xge_init_params
 * Sets HAL parameter values (from kenv).
 *
 * @dconfig Device Configuration
 * @dev Device Handle
 */
void
xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
{
	int qindex, tindex, revision;
	device_t checkdev;
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

	dconfig->mtu                   = XGE_DEFAULT_INITIAL_MTU;
	dconfig->pci_freq_mherz        = XGE_DEFAULT_USER_HARDCODED;
	dconfig->device_poll_millis    = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
	dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
	dconfig->mac.rmac_bcast_en     = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
	dconfig->fifo.alignment_size   = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
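
	/*
	 * The XGE_GET_PARAM* macros below read optional "hw.xge.*" tunables
	 * from the kernel environment (kenv), falling back to the
	 * XGE_DEFAULT_* values.  Hypothetical /boot/loader.conf example:
	 *   hw.xge.enable_tso=1
	 *   hw.xge.buffer_mode=2
	 */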
	XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
	    XGE_DEFAULT_ENABLED_TSO);
	XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
	    XGE_DEFAULT_ENABLED_LRO);
	XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
	    XGE_DEFAULT_ENABLED_MSI);

	XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
	    XGE_DEFAULT_LATENCY_TIMER);
	XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
	    XGE_DEFAULT_MAX_SPLITS_TRANS);
	XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
	    XGE_DEFAULT_MMRB_COUNT);
	XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
	    XGE_DEFAULT_SHARED_SPLITS);
	XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
	    XGE_DEFAULT_ISR_POLLING_CNT);
	XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
	    stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);

	XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
	    XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
	    XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
	    XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
	    XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
	    XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
	XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
	    mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
	XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
	    mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);

	XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
	    XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
	XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
	    XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
	XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
	    XGE_DEFAULT_FIFO_MAX_FRAGS);

	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
	        XGE_DEFAULT_FIFO_QUEUE_INTR);
	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
	        XGE_DEFAULT_FIFO_QUEUE_MAX);
	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
	        qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);

	    for(tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
	        dconfig->fifo.queue[qindex].tti[tindex].enabled  = 1;
	        dconfig->fifo.queue[qindex].configured = 1;

	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
	            urange_a, qindex, tindex,
	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
	            urange_b, qindex, tindex,
	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
	            urange_c, qindex, tindex,
	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
	            ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
	            ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
	            ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
	            ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
	            "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
	            "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
	            "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
	    }
	}

	XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
	    XGE_DEFAULT_RING_MEMBLOCK_SIZE);

	XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
	    XGE_DEFAULT_RING_STRIP_VLAN_TAG);

	XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
	    XGE_DEFAULT_BUFFER_MODE);
	if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
	    (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
	    xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
	    lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
	}

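	/*
	 * 2-buffer mode is implemented on top of the HAL's 3-buffer ring
	 * mode (header/payload split), so the per-queue buffer_mode is
	 * promoted to 3 below when 2-buffer mode was requested.
	 */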
	for(qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
	    dconfig->ring.queue[qindex].max_frm_len  = XGE_HAL_RING_USE_MTU;
	    dconfig->ring.queue[qindex].priority     = 0;
	    dconfig->ring.queue[qindex].configured   = 1;
	    dconfig->ring.queue[qindex].buffer_mode  =
	        (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
	        XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;

	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
	        XGE_DEFAULT_RING_QUEUE_MAX);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
	        qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
	        dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
	        indicate_max_pkts, qindex,
	        XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
	        backoff_interval_us, qindex,
	        XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);

	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
	        timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
	        timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
	        urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
	        urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
	        urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
	}

	if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
	    xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
	    xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
	        (int)(PAGE_SIZE / 32))
	    xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
	    dconfig->fifo.max_frags = (PAGE_SIZE / 32);
	}

	checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
	if(checkdev != NULL) {
	    /* Workaround for revisions <= 0x12 of the AMD 8131 PCI-X bridge */
	    revision = pci_read_config(checkdev,
	        xge_offsetof(xge_hal_pci_config_t, revision), 1);
	    if(revision <= 0x12) {
	        /* Limit mmrb_count to 1k and use three split transactions */
	        dconfig->mmrb_count       = 1;
	        dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
	    }
	}
}

/**
 * xge_rx_buffer_sizes_set
 * Set buffer sizes based on Rx buffer mode
 *
 * @lldev Per-adapter Data
 * @buffer_mode Rx Buffer Mode
 * @mtu Interface MTU
 */
void
xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
{
	int index = 0;
	int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
	int buffer_size = mtu + frame_header;

	xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));

	if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
	    lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;

	lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;

	if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
	    lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;

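	/*
	 * 5-buffer mode: buffer 0 holds the MAC header, buffer 1 the TCP/IP
	 * headers, and the remaining buffers carry the payload in
	 * MJUMPAGESIZE chunks; the final chunk is aligned up to 128 bytes.
	 */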
	if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	    index = 2;
	    buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
	    while(buffer_size > MJUMPAGESIZE) {
	        lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
	        buffer_size -= MJUMPAGESIZE;
	    }
	    XGE_ALIGN_TO(buffer_size, 128);
	    lldev->rxd_mbuf_len[index] = buffer_size;
	    lldev->rxd_mbuf_cnt = index + 1;
	}

	for(index = 0; index < buffer_mode; index++)
	    xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
	        lldev->rxd_mbuf_len[index]);
}

/**
 * xge_buffer_mode_init
 * Init Rx buffer mode
 *
 * @lldev Per-adapter Data
 * @mtu Interface MTU
 */
void
xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
{
	int index = 0, buffer_size = 0;
	xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);

	buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;

	if(lldev->enabled_lro)
	    (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
	else
	    (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;

	lldev->rxd_mbuf_cnt = lldev->buffer_mode;
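	/*
	 * 2-buffer mode rides on the HAL's 3-buffer ring with scatter mode B
	 * (header/payload split); every other mode uses scatter mode A.
	 */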
	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
	    XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
	    ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
	}
	else {
	    XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
	    ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
	}
	xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);

	xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
	    ((lldev->enabled_tso) ? "Enabled":"Disabled"));
	xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
	    ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
	xge_os_printf("%s: Rx %d Buffer Mode Enabled",
	    device_get_nameunit(lldev->device), lldev->buffer_mode);
}

/**
 * xge_driver_initialize
 * Initializes HAL driver (common for all devices)
 *
 * Returns
 * XGE_HAL_OK on success
 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
 */
int
xge_driver_initialize(void)
{
	xge_hal_uld_cbs_t       uld_callbacks;
	xge_hal_driver_config_t driver_config;
	xge_hal_status_e        status = XGE_HAL_OK;

	/* Initialize HAL driver */
	if(!hal_driver_init_count) {
	    xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
	    xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));

	    /*
	     * Initial and maximum size of the queue used to store the events
	     * like Link up/down (xge_hal_event_e)
	     */
	    driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
	    driver_config.queue_size_max     = XGE_HAL_MAX_QUEUE_SIZE_MAX;

	    uld_callbacks.link_up   = xge_callback_link_up;
	    uld_callbacks.link_down = xge_callback_link_down;
	    uld_callbacks.crit_err  = xge_callback_crit_err;
	    uld_callbacks.event     = xge_callback_event;

	    status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
	    if(status != XGE_HAL_OK) {
	        XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
	            xdi_out, status);
	    }
	}
	hal_driver_init_count = hal_driver_init_count + 1;

	xge_hal_driver_debug_module_mask_set(0xffffffff);
	xge_hal_driver_debug_level_set(XGE_TRACE);

xdi_out:
	return status;
}

/**
 * xge_media_init
 * Initializes, adds and sets media
 *
 * @devc Device Handle
 */
void
xge_media_init(device_t devc)
{
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);

	/* Initialize Media */
	ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
	    xge_ifmedia_status);

	/* Add supported media */
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO,    0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR,  0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR,  0, NULL);

	/* Set media */
	ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
}

/**
 * xge_pci_space_save
 * Save PCI configuration space
 *
 * @dev Device Handle
 */
void
xge_pci_space_save(device_t dev)
{
	struct pci_devinfo *dinfo = NULL;

	dinfo = device_get_ivars(dev);
	xge_trace(XGE_TRACE, "Saving PCI configuration space");
	pci_cfg_save(dev, dinfo, 0);
}

/**
 * xge_pci_space_restore
 * Restore saved PCI configuration space
 *
 * @dev Device Handle
 */
void
xge_pci_space_restore(device_t dev)
{
	struct pci_devinfo *dinfo = NULL;

	dinfo = device_get_ivars(dev);
	xge_trace(XGE_TRACE, "Restoring PCI configuration space");
	pci_cfg_restore(dev, dinfo);
}

/**
 * xge_msi_info_save
 * Save MSI info
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_save(xge_lldev_t *lldev)
{
	xge_os_pci_read16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
	    &lldev->msi_info.msi_control);
	xge_os_pci_read32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
	    &lldev->msi_info.msi_lower_address);
	xge_os_pci_read32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
	    &lldev->msi_info.msi_higher_address);
	xge_os_pci_read16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_data),
	    &lldev->msi_info.msi_data);
}

/**
 * xge_msi_info_restore
 * Restore saved MSI info
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_restore(xge_lldev_t *lldev)
{
	/*
	 * If the interface is brought down and up again, traffic fails: the
	 * MSI information was observed to get reset on down.  Restore it here.
	 */
	xge_os_pci_write16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
	    lldev->msi_info.msi_control);

	xge_os_pci_write32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
	    lldev->msi_info.msi_lower_address);

	xge_os_pci_write32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
	    lldev->msi_info.msi_higher_address);

	xge_os_pci_write16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_data),
	    lldev->msi_info.msi_data);
}

/**
 * xge_mutex_init
 * Initializes mutexes used in driver
 *
 * @lldev  Per-adapter Data
 */
void
xge_mutex_init(xge_lldev_t *lldev)
{
	int qindex;

	sprintf(lldev->mtx_name_drv, "%s_drv",
	    device_get_nameunit(lldev->device));
	mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
	    MTX_DEF);

	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
	    sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
	        device_get_nameunit(lldev->device), qindex);
	    mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
	        MTX_DEF);
	}
}

/**
 * xge_mutex_destroy
 * Destroys mutexes used in driver
 *
 * @lldev Per-adapter Data
 */
void
xge_mutex_destroy(xge_lldev_t *lldev)
{
	int qindex;

	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
	    mtx_destroy(&lldev->mtx_tx[qindex]);
	mtx_destroy(&lldev->mtx_drv);
}

/**
 * xge_print_info
 * Print device and driver information
 *
 * @lldev Per-adapter Data
 */
void
xge_print_info(xge_lldev_t *lldev)
{
	device_t dev = lldev->device;
	xge_hal_device_t *hldev = lldev->devh;
	xge_hal_status_e status = XGE_HAL_OK;
	u64 val64 = 0;
	const char *xge_pci_bus_speeds[17] = {
	    "PCI 33MHz Bus",
	    "PCI 66MHz Bus",
	    "PCIX(M1) 66MHz Bus",
	    "PCIX(M1) 100MHz Bus",
	    "PCIX(M1) 133MHz Bus",
	    "PCIX(M2) 133MHz Bus",
	    "PCIX(M2) 200MHz Bus",
	    "PCIX(M2) 266MHz Bus",
	    "PCIX(M1) Reserved",
	    "PCIX(M1) 66MHz Bus (Not Supported)",
	    "PCIX(M1) 100MHz Bus (Not Supported)",
	    "PCIX(M1) 133MHz Bus (Not Supported)",
	    "PCIX(M2) Reserved",
	    "PCIX 533 Reserved",
	    "PCI Basic Mode",
	    "PCIX Basic Mode",
	    "PCI Invalid Mode"
	};

	xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
	    device_get_nameunit(dev),
	    ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
	    hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
	xge_os_printf("%s: Serial Number %s",
	    device_get_nameunit(dev), hldev->vpd_data.serial_num);

	if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
	    status = xge_hal_mgmt_reg_read(hldev, 0,
	        xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
	    if(status != XGE_HAL_OK)
	        xge_trace(XGE_ERR, "Getting bus speed failed");

	    xge_os_printf("%s: Adapter is on %s bit %s",
	        device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
	        (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
	}

	xge_os_printf("%s: Using %s Interrupts",
	    device_get_nameunit(dev),
	    (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
}

/**
 * xge_create_dma_tags
 * Creates DMA tags for both Tx and Rx
 *
 * @dev Device Handle
 *
 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
 */
xge_hal_status_e
xge_create_dma_tags(device_t dev)
{
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
	xge_hal_status_e status = XGE_HAL_FAIL;
	int mtu = (lldev->ifnetp)->if_mtu, maxsize;

	/* DMA tag for Tx */
	status = bus_dma_tag_create(
	    bus_get_dma_tag(dev),                /* Parent                    */
	    PAGE_SIZE,                           /* Alignment                 */
	    0,                                   /* Bounds                    */
	    BUS_SPACE_MAXADDR,                   /* Low Address               */
	    BUS_SPACE_MAXADDR,                   /* High Address              */
	    NULL,                                /* Filter Function           */
	    NULL,                                /* Filter Function Arguments */
	    MCLBYTES * XGE_MAX_SEGS,             /* Maximum Size              */
	    XGE_MAX_SEGS,                        /* Number of Segments        */
	    MCLBYTES,                            /* Maximum Segment Size      */
	    BUS_DMA_ALLOCNOW,                    /* Flags                     */
	    NULL,                                /* Lock Function             */
	    NULL,                                /* Lock Function Arguments   */
	    (&lldev->dma_tag_tx));               /* DMA Tag                   */
	if(status != 0)
	    goto _exit;

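	/*
	 * Pick the smallest mbuf cluster size that holds one Rx buffer: a
	 * standard cluster for frames up to MCLBYTES, a page-sized jumbo
	 * cluster in 5-buffer mode (individual buffers are capped at
	 * MJUMPAGESIZE), and a 9k jumbo cluster otherwise.
	 */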
	maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
	if(maxsize <= MCLBYTES) {
	    maxsize = MCLBYTES;
	}
	else {
	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
	        maxsize = MJUMPAGESIZE;
	    else
	        maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
	}

	/* DMA tag for Rx */
	status = bus_dma_tag_create(
	    bus_get_dma_tag(dev),                /* Parent                    */
	    PAGE_SIZE,                           /* Alignment                 */
	    0,                                   /* Bounds                    */
	    BUS_SPACE_MAXADDR,                   /* Low Address               */
	    BUS_SPACE_MAXADDR,                   /* High Address              */
	    NULL,                                /* Filter Function           */
	    NULL,                                /* Filter Function Arguments */
	    maxsize,                             /* Maximum Size              */
	    1,                                   /* Number of Segments        */
	    maxsize,                             /* Maximum Segment Size      */
	    BUS_DMA_ALLOCNOW,                    /* Flags                     */
	    NULL,                                /* Lock Function             */
	    NULL,                                /* Lock Function Arguments   */
	    (&lldev->dma_tag_rx));               /* DMA Tag                   */
	if(status != 0)
	    goto _exit1;

	status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
	    &lldev->extra_dma_map);
	if(status != 0)
	    goto _exit2;

	status = XGE_HAL_OK;
	goto _exit;

_exit2:
	status = bus_dma_tag_destroy(lldev->dma_tag_rx);
	if(status != 0)
	    xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
_exit1:
	status = bus_dma_tag_destroy(lldev->dma_tag_tx);
	if(status != 0)
	    xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
	status = XGE_HAL_FAIL;
_exit:
	return status;
}

/**
 * xge_confirm_changes
 * Disables and re-enables the interface to apply the requested change
 *
 * @lldev Per-adapter Data
 * @option Identifies the requested change (MTU or LRO setting)
 */
void
xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
{
	if(lldev->initialized == 0) goto _exit1;

	mtx_lock(&lldev->mtx_drv);
	if_down(lldev->ifnetp);
	xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);

	if(option == XGE_SET_MTU)
	    (lldev->ifnetp)->if_mtu = lldev->mtu;
	else
	    xge_buffer_mode_init(lldev, lldev->mtu);

	xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
	if_up(lldev->ifnetp);
	mtx_unlock(&lldev->mtx_drv);
	goto _exit;

_exit1:
	/* Request was to change MTU and device not initialized */
	if(option == XGE_SET_MTU) {
	    (lldev->ifnetp)->if_mtu = lldev->mtu;
	    xge_buffer_mode_init(lldev, lldev->mtu);
	}
_exit:
	return;
}

/**
 * xge_change_lro_status
 * Enable/Disable LRO feature
 *
 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
 *
 * Returns 0 or error number.
 */
static int
xge_change_lro_status(SYSCTL_HANDLER_ARGS)
{
	xge_lldev_t *lldev = (xge_lldev_t *)arg1;
	int request = lldev->enabled_lro, status = XGE_HAL_OK;

	status = sysctl_handle_int(oidp, &request, arg2, req);
	if((status != XGE_HAL_OK) || (!req->newptr))
	    goto _exit;

	if((request < 0) || (request > 1)) {
	    status = EINVAL;
	    goto _exit;
	}

	/* Return if current and requested states are the same */
	if(request == lldev->enabled_lro) {
	    xge_trace(XGE_ERR, "LRO is already %s",
	        ((request) ? "enabled" : "disabled"));
	    goto _exit;
	}

	lldev->enabled_lro = request;
	xge_confirm_changes(lldev, XGE_CHANGE_LRO);
	arg2 = lldev->enabled_lro;

_exit:
	return status;
}
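
/*
 * Example (hypothetical unit number): toggle LRO from userland with
 *   sysctl dev.nxge.0.enable_lro=1
 * assuming the adapter attached as nxge0.  The handler above stops and
 * re-initializes the device to apply the change.
 */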

/**
 * xge_add_sysctl_handlers
 * Registers sysctl parameter value update handlers
 *
 * @lldev Per-adapter data
 */
void
xge_add_sysctl_handlers(xge_lldev_t *lldev)
{
	struct sysctl_ctx_list *context_list =
	    device_get_sysctl_ctx(lldev->device);
	struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);

	SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
	    xge_change_lro_status, "I", "Enable or disable LRO feature");
}

/**
 * xge_attach
 * Connects driver to the system if probe succeeded
 *
 * @dev Device Handle
 */
int
xge_attach(device_t dev)
{
	xge_hal_device_config_t *device_config;
	xge_hal_device_attr_t   attr;
	xge_lldev_t             *lldev;
	xge_hal_device_t        *hldev;
	xge_pci_info_t          *pci_info;
	struct ifnet            *ifnetp;
	int                     rid, rid0, rid1, error;
	int                     msi_count = 0, status = XGE_HAL_OK;
	int                     enable_msi = XGE_HAL_INTR_MODE_IRQLINE;

	device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
	if(!device_config) {
	    XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
	        attach_out_config, ENOMEM);
	}

	lldev = (xge_lldev_t *) device_get_softc(dev);
	if(!lldev) {
	    XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
	}
	lldev->device = dev;

	xge_mutex_init(lldev);

	error = xge_driver_initialize();
	if(error != XGE_HAL_OK) {
	    xge_resources_free(dev, xge_free_mutex);
	    XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
	}

	/* HAL device */
	hldev =
	    (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
	if(!hldev) {
	    xge_resources_free(dev, xge_free_terminate_hal_driver);
	    XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
	        attach_out, ENOMEM);
	}
	lldev->devh = hldev;

	/* Our private structure */
	pci_info =
	    (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
	if(!pci_info) {
	    xge_resources_free(dev, xge_free_hal_device);
	    XGE_EXIT_ON_ERR("Memory allocation for PCI info failed",
	        attach_out, ENOMEM);
	}
	lldev->pdev      = pci_info;
	pci_info->device = dev;

	/* Set bus master */
	pci_enable_busmaster(dev);

	/* Get virtual address for BAR0 */
	rid0 = PCIR_BAR(0);
	pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
	    RF_ACTIVE);
	if(pci_info->regmap0 == NULL) {
	    xge_resources_free(dev, xge_free_pci_info);
	    XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
	        attach_out, ENOMEM);
	}
	attr.bar0 = (char *)pci_info->regmap0;

	pci_info->bar0resource = (xge_bus_resource_t*)
	    xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
	if(pci_info->bar0resource == NULL) {
	    xge_resources_free(dev, xge_free_bar0);
	    XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
	        attach_out, ENOMEM);
	}
	((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
	    rman_get_bustag(pci_info->regmap0);
	((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
	    rman_get_bushandle(pci_info->regmap0);
	((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
	    pci_info->regmap0;

	/* Get virtual address for BAR1 */
	rid1 = PCIR_BAR(2);
	pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
	    RF_ACTIVE);
	if(pci_info->regmap1 == NULL) {
	    xge_resources_free(dev, xge_free_bar0_resource);
	    XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
	        attach_out, ENOMEM);
	}
	attr.bar1 = (char *)pci_info->regmap1;

	pci_info->bar1resource = (xge_bus_resource_t*)
	    xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
	if(pci_info->bar1resource == NULL) {
	    xge_resources_free(dev, xge_free_bar1);
	    XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
	        attach_out, ENOMEM);
	}
	((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
	    rman_get_bustag(pci_info->regmap1);
	((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
	    rman_get_bushandle(pci_info->regmap1);
	((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
	    pci_info->regmap1;

	/* Save PCI config space */
	xge_pci_space_save(dev);

	attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
	attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
	attr.irqh  = lldev->irqhandle;
	attr.cfgh  = pci_info;
	attr.pdev  = pci_info;

	/* Initialize device configuration parameters */
	xge_init_params(device_config, dev);

	rid = 0;
	if(lldev->enabled_msi) {
	    /* Number of MSI messages supported by device */
	    msi_count = pci_msi_count(dev);
	    if(msi_count > 1) {
	        /* Device supports MSI, but the driver uses only one message */
	        if(bootverbose) {
	            xge_trace(XGE_ERR, "MSI count: %d", msi_count);
	            xge_trace(XGE_ERR, "Driver will use only 1 message");
	        }
	        msi_count = 1;
	        error = pci_alloc_msi(dev, &msi_count);
	        if(error == 0) {
	            if(bootverbose)
	                xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
	            enable_msi = XGE_HAL_INTR_MODE_MSI;
	            rid = 1;
	        }
	        else {
	            if(bootverbose)
	                xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
	        }
	    }
	}
	lldev->enabled_msi = enable_msi;

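	/*
	 * SYS_RES_IRQ rid 0 selects the legacy INTx line; rid 1 selects the
	 * first (and only) MSI message allocated above.
	 */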
	/* Allocate resource for irq */
	lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    (RF_SHAREABLE | RF_ACTIVE));
	if(lldev->irq == NULL) {
	    xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
	        ((rid == 0) ? "line interrupt" : "MSI"));
	    if(rid == 1) {
	        error = pci_release_msi(dev);
	        if(error != 0) {
	            xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
	                error);
	            xge_trace(XGE_ERR, "Requires reboot to use MSI again");
	        }
	        xge_trace(XGE_ERR, "Trying line interrupts");
	        rid = 0;
	        lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
	        lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	            (RF_SHAREABLE | RF_ACTIVE));
	    }
	    if(lldev->irq == NULL) {
	        xge_trace(XGE_ERR, "Allocating irq resource failed");
	        xge_resources_free(dev, xge_free_bar1_resource);
	        status = ENOMEM;
	        goto attach_out;
	    }
	}

	device_config->intr_mode = lldev->enabled_msi;
	if(bootverbose) {
	    xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
	        lldev->enabled_msi, msi_count);
	}

	/* Initialize HAL device */
	error = xge_hal_device_initialize(hldev, &attr, device_config);
	if(error != XGE_HAL_OK) {
	    xge_resources_free(dev, xge_free_irq_resource);
	    XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
	        ENXIO);
	}

	xge_hal_device_private_set(hldev, lldev);

	error = xge_interface_setup(dev);
	if(error != 0) {
	    status = error;
	    goto attach_out;
	}

	ifnetp         = lldev->ifnetp;
	ifnetp->if_mtu = device_config->mtu;

	xge_media_init(dev);

	/* Associate interrupt handler with the device */
	if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
	    error = bus_setup_intr(dev, lldev->irq,
	        (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
	        NULL,
#endif
	        xge_isr_msi, lldev, &lldev->irqhandle);
	    xge_msi_info_save(lldev);
	}
	else {
	    error = bus_setup_intr(dev, lldev->irq,
	        (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
	        xge_isr_filter,
#endif
	        xge_isr_line, lldev, &lldev->irqhandle);
	}
	if(error != 0) {
	    xge_resources_free(dev, xge_free_media_interface);
	    XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
	        attach_out, ENXIO);
	}

	xge_print_info(lldev);

	xge_add_sysctl_handlers(lldev);

	xge_buffer_mode_init(lldev, device_config->mtu);

attach_out:
	xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
attach_out_config:
	return status;
}

/**
 * xge_resources_free
 * Undo everything done during load/attach
 *
 * @dev Device Handle
 * @error Identifies how much to undo
 */
void
xge_resources_free(device_t dev, xge_lables_e error)
{
	xge_lldev_t *lldev;
	xge_pci_info_t *pci_info;
	xge_hal_device_t *hldev;
	int rid, status;

	/* LL Device */
	lldev = (xge_lldev_t *) device_get_softc(dev);
	pci_info = lldev->pdev;

	/* HAL Device */
	hldev = lldev->devh;

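	/*
	 * Cases fall through intentionally: each label undoes its own step
	 * and then everything that was set up before it.
	 */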
	switch(error) {
	    case xge_free_all:
	        /* Teardown interrupt handler - device association */
	        bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);

	    case xge_free_media_interface:
	        /* Media */
	        ifmedia_removeall(&lldev->media);

	        /* Detach Ether */
	        ether_ifdetach(lldev->ifnetp);
	        if_free(lldev->ifnetp);

	        xge_hal_device_private_set(hldev, NULL);
	        xge_hal_device_disable(hldev);

	    case xge_free_terminate_hal_device:
	        /* HAL Device */
	        xge_hal_device_terminate(hldev);

	    case xge_free_irq_resource:
	        /* Release IRQ resource */
	        bus_release_resource(dev, SYS_RES_IRQ,
	            ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
	            lldev->irq);

	        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
	            status = pci_release_msi(dev);
	            if(status != 0) {
	                if(bootverbose) {
	                    xge_trace(XGE_ERR,
	                        "pci_release_msi returned %d", status);
	                }
	            }
	        }

	    case xge_free_bar1_resource:
	        /* Restore PCI configuration space */
	        xge_pci_space_restore(dev);

	        /* Free bar1resource */
	        xge_os_free(NULL, pci_info->bar1resource,
	            sizeof(xge_bus_resource_t));

	    case xge_free_bar1:
	        /* Release BAR1 */
	        rid = PCIR_BAR(2);
	        bus_release_resource(dev, SYS_RES_MEMORY, rid,
	            pci_info->regmap1);

	    case xge_free_bar0_resource:
	        /* Free bar0resource */
	        xge_os_free(NULL, pci_info->bar0resource,
	            sizeof(xge_bus_resource_t));

	    case xge_free_bar0:
	        /* Release BAR0 */
	        rid = PCIR_BAR(0);
	        bus_release_resource(dev, SYS_RES_MEMORY, rid,
	            pci_info->regmap0);

	    case xge_free_pci_info:
	        /* Disable Bus Master */
	        pci_disable_busmaster(dev);

	        /* Free pci_info_t */
	        lldev->pdev = NULL;
	        xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));

	    case xge_free_hal_device:
	        /* Free device configuration struct and HAL device */
	        xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));

	    case xge_free_terminate_hal_driver:
	        /* Terminate HAL driver */
	        hal_driver_init_count = hal_driver_init_count - 1;
	        if(!hal_driver_init_count) {
	            xge_hal_driver_terminate();
	        }

	    case xge_free_mutex:
	        xge_mutex_destroy(lldev);
	}
}

/**
 * xge_detach
 * Detaches driver from the kernel subsystem
 *
 * @dev Device Handle
 */
int
xge_detach(device_t dev)
{
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

	if(lldev->in_detach == 0) {
	    lldev->in_detach = 1;
	    xge_stop(lldev);
	    xge_resources_free(dev, xge_free_all);
	}

	return 0;
}

/**
 * xge_shutdown
 * Shuts down the device before system shutdown
 *
 * @dev Device Handle
 */
int
xge_shutdown(device_t dev)
{
	xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
	xge_stop(lldev);

	return 0;
}

/**
 * xge_interface_setup
 * Sets up the network interface
 *
 * @dev Device Handle
 *
 * Returns 0 on success, ENXIO/ENOMEM on failure
 */
int
xge_interface_setup(device_t dev)
{
	u8 mcaddr[ETHER_ADDR_LEN];
	xge_hal_status_e status;
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
	struct ifnet *ifnetp;
	xge_hal_device_t *hldev = lldev->devh;

	/* Get the MAC address of the device */
	status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
	if(status != XGE_HAL_OK) {
	    xge_resources_free(dev, xge_free_terminate_hal_device);
	    XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
	}

	/* Get interface ifnet structure for this Ether device */
	ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
	if(ifnetp == NULL) {
	    xge_resources_free(dev, xge_free_terminate_hal_device);
	    XGE_EXIT_ON_ERR("Allocating ifnet failed", ifsetup_out, ENOMEM);
	}

	/* Initialize interface ifnet structure */
	if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
	ifnetp->if_mtu      = XGE_HAL_DEFAULT_MTU;
	ifnetp->if_baudrate = XGE_BAUDRATE;
	ifnetp->if_init     = xge_init;
	ifnetp->if_softc    = lldev;
	ifnetp->if_flags    = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifnetp->if_ioctl    = xge_ioctl;
	ifnetp->if_start    = xge_send;

	/* TODO: Check and assign optimal value */
	ifnetp->if_snd.ifq_maxlen = ifqmaxlen;

	ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
	    IFCAP_HWCSUM;
	if(lldev->enabled_tso)
	    ifnetp->if_capabilities |= IFCAP_TSO4;
	if(lldev->enabled_lro)
	    ifnetp->if_capabilities |= IFCAP_LRO;

	ifnetp->if_capenable = ifnetp->if_capabilities;

	/* Attach the interface */
	ether_ifattach(ifnetp, mcaddr);

ifsetup_out:
	return status;
}

/**
 * xge_callback_link_up
 * Callback for Link-up indication from HAL
 *
 * @userdata Per-adapter data
 */
void
xge_callback_link_up(void *userdata)
{
	xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
	struct ifnet *ifnetp = lldev->ifnetp;

	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifnetp, LINK_STATE_UP);
}

/**
 * xge_callback_link_down
 * Callback for Link-down indication from HAL
 *
 * @userdata Per-adapter data
 */
void
xge_callback_link_down(void *userdata)
{
	xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
	struct ifnet *ifnetp = lldev->ifnetp;

	ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
	if_link_state_change(ifnetp, LINK_STATE_DOWN);
}

/**
 * xge_callback_crit_err
 * Callback for Critical error indication from HAL
 *
 * @userdata Per-adapter data
 * @type Event type (Enumerated hardware error)
 * @serr_data Hardware status
 */
void
xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
{
	xge_trace(XGE_ERR, "Critical Error");
	xge_reset(userdata);
}

/**
 * xge_callback_event
 * Callback from HAL indicating that some event has been queued
 *
 * @item Queued event item
 */
void
xge_callback_event(xge_queue_item_t *item)
{
	xge_lldev_t      *lldev  = NULL;
	xge_hal_device_t *hldev  = NULL;
	struct ifnet     *ifnetp = NULL;

	hldev  = item->context;
	lldev  = xge_hal_device_private(hldev);
	ifnetp = lldev->ifnetp;

	switch((int)item->event_type) {
	    case XGE_LL_EVENT_TRY_XMIT_AGAIN:
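	        /*
	         * Resume transmission once free Tx descriptors are available;
	         * otherwise re-queue this event and try again later.
	         */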
	        if(lldev->initialized) {
	            if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
	                ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	            }
	            else {
	                xge_queue_produce_context(
	                    xge_hal_device_queue(lldev->devh),
	                    XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
	            }
	        }
	        break;

	    case XGE_LL_EVENT_DEVICE_RESETTING:
	        xge_reset(item->context);
	        break;

	    default:
	        break;
	}
}

/**
 * xge_ifmedia_change
 * Media change driver callback
 *
 * @ifnetp Interface Handle
 *
 * Returns 0 if the media type is Ethernet, EINVAL otherwise
 */
int
xge_ifmedia_change(struct ifnet *ifnetp)
{
	xge_lldev_t    *lldev    = ifnetp->if_softc;
	struct ifmedia *ifmediap = &lldev->media;

	return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL : 0;
}

/**
 * xge_ifmedia_status
 * Media status driver callback
 *
 * @ifnetp Interface Handle
 * @ifmr Interface Media Settings
 */
void
xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
{
	xge_hal_status_e status;
	u64              regvalue;
	xge_lldev_t      *lldev = ifnetp->if_softc;
	xge_hal_device_t *hldev = lldev->devh;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	status = xge_hal_mgmt_reg_read(hldev, 0,
	    xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
	if(status != XGE_HAL_OK) {
	    xge_trace(XGE_TRACE, "Getting adapter status failed");
	    goto _exit;
	}

	if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
	    ifmr->ifm_status |= IFM_ACTIVE;
	    ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
	    if_link_state_change(ifnetp, LINK_STATE_UP);
	}
	else {
	    if_link_state_change(ifnetp, LINK_STATE_DOWN);
	}
_exit:
	return;
}

/**
 * xge_ioctl_stats
 * IOCTL to get statistics
 *
 * @lldev Per-adapter data
 * @ifreqp Interface request
 */
int
xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
{
	xge_hal_status_e status = XGE_HAL_OK;
	char *data = (char *)ifreqp->ifr_data;
	void *info = NULL;
	int retValue = EINVAL;
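
	/*
	 * The first byte of ifr_data selects the query; results are copied
	 * back to user space with copyout(9).
	 */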
	switch(*data) {
	    case XGE_QUERY_STATS:
	        mtx_lock(&lldev->mtx_drv);
	        status = xge_hal_stats_hw(lldev->devh,
	            (xge_hal_stats_hw_info_t **)&info);
	        mtx_unlock(&lldev->mtx_drv);
	        if(status == XGE_HAL_OK) {
	            if(copyout(info, ifreqp->ifr_data,
	                sizeof(xge_hal_stats_hw_info_t)) == 0)
	                retValue = 0;
	        }
	        else {
	            xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
	                status);
	        }
	        break;

	    case XGE_QUERY_PCICONF:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_pci_config(lldev->devh, info,
	                sizeof(xge_hal_pci_config_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_pci_config_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR,
	                    "Getting PCI configuration failed (%d)", status);
	            }
	            xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
	        }
	        break;

	    case XGE_QUERY_DEVSTATS:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_device_stats(lldev->devh, info,
	                sizeof(xge_hal_stats_device_info_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_stats_device_info_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR, "Getting device info failed (%d)",
	                    status);
	            }
	            xge_os_free(NULL, info,
	                sizeof(xge_hal_stats_device_info_t));
	        }
	        break;

	    case XGE_QUERY_SWSTATS:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_sw_stats(lldev->devh, info,
	                sizeof(xge_hal_stats_sw_err_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_stats_sw_err_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR,
	                    "Getting tcode statistics failed (%d)", status);
	            }
	            xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
	        }
	        break;

	    case XGE_QUERY_DRIVERSTATS:
	        if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
	            sizeof(xge_driver_stats_t)) == 0) {
	            retValue = 0;
	        }
	        else {
	            xge_trace(XGE_ERR,
	                "Copyout of driver statistics failed (%d)", status);
	        }
	        break;

	    case XGE_READ_VERSION:
	        info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
	        if(info != NULL) {
	            strcpy(info, XGE_DRIVER_VERSION);
	            if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
	                retValue = 0;
	            xge_os_free(NULL, info, XGE_BUFFER_SIZE);
	        }
	        break;

	    case XGE_QUERY_DEVCONF:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_device_config(lldev->devh, info,
	                sizeof(xge_hal_device_config_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_device_config_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
	                    status);
	            }
	            xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
	        }
	        break;

	    case XGE_QUERY_BUFFER_MODE:
	        if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
	            sizeof(int)) == 0)
	            retValue = 0;
	        break;

	    case XGE_SET_BUFFER_MODE_1:
	    case XGE_SET_BUFFER_MODE_2:
	    case XGE_SET_BUFFER_MODE_5:
	        *data = (*data == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
	        if(copyout(data, ifreqp->ifr_data, sizeof(*data)) == 0)
	            retValue = 0;
	        break;
	    default:
	        xge_trace(XGE_TRACE, "Unknown ioctl command");
	        retValue = ENOTTY;
	        break;
	}
	return retValue;
}

/**
 * xge_ioctl_registers
 * IOCTL to get registers
 *
 * @lldev Per-adapter data
 * @ifreqp Interface request
 */
int
xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
{
	xge_register_t *data = (xge_register_t *)ifreqp->ifr_data;
	xge_hal_status_e status = XGE_HAL_OK;
	int retValue = EINVAL, offset = 0, index = 0;
	u64 val64 = 0;

	/* Reading a register */
	if(strcmp(data->option, "-r") == 0) {
	    data->value = 0x0000;
	    mtx_lock(&lldev->mtx_drv);
	    status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
	        &data->value);
	    mtx_unlock(&lldev->mtx_drv);
	    if(status == XGE_HAL_OK) {
	        if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
	            retValue = 0;
	    }
	}
	/* Writing to a register */
	else if(strcmp(data->option, "-w") == 0) {
	    mtx_lock(&lldev->mtx_drv);
	    status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
	        data->value);
	    if(status == XGE_HAL_OK) {
	        val64 = 0x0000;
	        status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
	            &val64);
	        if(status != XGE_HAL_OK) {
	            xge_trace(XGE_ERR, "Reading back updated register failed");
	        }
	        else {
	            if(val64 != data->value) {
	                xge_trace(XGE_ERR,
	                    "Read and written register values mismatched");
	            }
	            else retValue = 0;
	        }
	    }
	    else {
	        xge_trace(XGE_ERR, "Writing register value failed");
	    }
	    mtx_unlock(&lldev->mtx_drv);
	}
	else {
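	    /*
	     * Neither "-r" nor "-w": dump the whole BAR0 register space,
	     * one 64-bit register at a time.
	     */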
	    mtx_lock(&lldev->mtx_drv);
	    for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
	        index++, offset += 0x0008) {
	        val64 = 0;
	        status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
	        if(status != XGE_HAL_OK) {
	            xge_trace(XGE_ERR, "Getting register value failed");
	            break;
	        }
	        *((u64 *)data + index) = val64;
	        retValue = 0;
	    }
	    mtx_unlock(&lldev->mtx_drv);

	    if(retValue == 0) {
	        if(copyout(data, ifreqp->ifr_data,
	            sizeof(xge_hal_pci_bar0_t)) != 0) {
	            xge_trace(XGE_ERR, "Copyout of register values failed");
	            retValue = EINVAL;
	        }
	    }
	    else {
	        xge_trace(XGE_ERR, "Getting register values failed");
	    }
	}
	return retValue;
}

/**
 * xge_ioctl
 * Callback to control the device - Interface configuration
 *
 * @ifnetp Interface Handle
 * @command Device control command
 * @data Parameters associated with command (if any)
 */
int
xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
{
	struct ifreq   *ifreqp   = (struct ifreq *)data;
	xge_lldev_t    *lldev    = ifnetp->if_softc;
	struct ifmedia *ifmediap = &lldev->media;
	int             retValue = 0, mask = 0;

	if(lldev->in_detach) {
	    return retValue;
	}

	switch(command) {
	    /* Set/Get ifnet address */
	    case SIOCSIFADDR:
	    case SIOCGIFADDR:
	        ether_ioctl(ifnetp, command, data);
	        break;

	    /* Set ifnet MTU */
	    case SIOCSIFMTU:
	        retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
	        break;

	    /* Set ifnet flags */
	    case SIOCSIFFLAGS:
	        if(ifnetp->if_flags & IFF_UP) {
	            /* Interface is marked up */
	            if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
	                xge_init(lldev);
	            }
	            xge_disable_promisc(lldev);
	            xge_enable_promisc(lldev);
	        }
	        else {
	            /* Interface is marked down; stop it if running */
	            if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
	                xge_stop(lldev);
	            }
	        }
	        break;

	    /* Add/delete multicast address */
	    case SIOCADDMULTI:
	    case SIOCDELMULTI:
	        if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
	            xge_setmulti(lldev);
	        }
	        break;

	    /* Set/Get net media */
	    case SIOCSIFMEDIA:
	    case SIOCGIFMEDIA:
	        retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
	        break;

	    /* Set capabilities */
	    case SIOCSIFCAP:
	        mtx_lock(&lldev->mtx_drv);
	        mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
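	        /* mask holds only the capability bits this request toggles */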
	        if(mask & IFCAP_TXCSUM) {
	            if(ifnetp->if_capenable & IFCAP_TXCSUM) {
	                ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
	                ifnetp->if_hwassist &=
	                    ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
	            }
	            else {
	                ifnetp->if_capenable |= IFCAP_TXCSUM;
	                ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	            }
	        }
	        if(mask & IFCAP_TSO4) {
	            if(ifnetp->if_capenable & IFCAP_TSO4) {
	                ifnetp->if_capenable &= ~IFCAP_TSO4;
	                ifnetp->if_hwassist  &= ~CSUM_TSO;

	                xge_os_printf("%s: TSO Disabled",
	                    device_get_nameunit(lldev->device));
	            }
	            else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
	                ifnetp->if_capenable |= IFCAP_TSO4;
	                ifnetp->if_hwassist  |= CSUM_TSO;

	                xge_os_printf("%s: TSO Enabled",
	                    device_get_nameunit(lldev->device));
	            }
	        }

	        mtx_unlock(&lldev->mtx_drv);
	        break;

	    /* Custom IOCTL 0 */
	    case SIOCGPRIVATE_0:
	        retValue = xge_ioctl_stats(lldev, ifreqp);
	        break;

	    /* Custom IOCTL 1 */
	    case SIOCGPRIVATE_1:
	        retValue = xge_ioctl_registers(lldev, ifreqp);
	        break;

	    default:
	        retValue = EINVAL;
	        break;
	}
	return retValue;
}
1706
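/*
 * Usage sketch for the private ioctls above -- a hypothetical userland
 * caller, not part of this driver. The SIOCGPRIVATE_1 path copies the
 * whole BAR0 register block out through ifr_data, so (assuming access to
 * the HAL headers for xge_hal_pci_bar0_t and an AF_INET socket fd) a
 * diagnostic tool would do roughly:
 *
 *	xge_hal_pci_bar0_t regs;
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "nxge0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (caddr_t)&regs;
 *	if(ioctl(fd, SIOCGPRIVATE_1, &ifr) == 0)
 *	    printf("general_int_status: 0x%llx\n",
 *	        (unsigned long long)regs.general_int_status);
 */
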
1707/**
1708 * xge_init
1709 * Initialize the interface
1710 *
1711 * @plldev Per-adapter Data
1712 */
1713void
1714xge_init(void *plldev)
1715{
1716	xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1717
1718	mtx_lock(&lldev->mtx_drv);
1719	xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1720	xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1721	mtx_unlock(&lldev->mtx_drv);
1722}
1723
1724/**
1725 * xge_device_init
1726 * Initialize the interface (called with the driver lock held)
1727 *
1728 * @lldev Per-adapter Data
 * @option Channel open/reopen option
1729 */
1730void
1731xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1732{
1733	struct ifnet     *ifnetp = lldev->ifnetp;
1734	xge_hal_device_t *hldev  = lldev->devh;
1735	struct ifaddr      *ifaddrp;
1736	unsigned char      *macaddr;
1737	struct sockaddr_dl *sockaddrp;
1738	int                 status   = XGE_HAL_OK;
1739
1740	mtx_assert((&lldev->mtx_drv), MA_OWNED);
1741
1742	/* If device is in running state, initializing is not required */
1743	if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1744	    return;
1745
1746	/* Initializing timer */
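	/* A nonzero second argument marks the callout MPSAFE (no Giant needed) */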
1747	callout_init(&lldev->timer, 1);
1748
1749	xge_trace(XGE_TRACE, "Set MTU size");
1750	status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1751	if(status != XGE_HAL_OK) {
1752	    xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1753	    goto _exit;
1754	}
1755
1756	/* Enable HAL device */
1757	xge_hal_device_enable(hldev);
1758
1759	/* Get MAC address and update in HAL */
1760	ifaddrp             = ifnetp->if_addr;
1761	sockaddrp           = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1762	sockaddrp->sdl_type = IFT_ETHER;
1763	sockaddrp->sdl_alen = ifnetp->if_addrlen;
1764	macaddr             = LLADDR(sockaddrp);
1765	xge_trace(XGE_TRACE,
1766	    "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1767	    *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1768	    *(macaddr + 4), *(macaddr + 5));
1769	status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1770	if(status != XGE_HAL_OK)
1771	    xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1772
1773	/* Opening channels */
1774	mtx_unlock(&lldev->mtx_drv);
1775	status = xge_channel_open(lldev, option);
1776	mtx_lock(&lldev->mtx_drv);
1777	if(status != XGE_HAL_OK)
1778	    goto _exit;
1779
1780	/* Set appropriate flags */
1781	ifnetp->if_drv_flags  |=  IFF_DRV_RUNNING;
1782	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1783
1784	/* Checksum capability */
1785	ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1786	    (CSUM_TCP | CSUM_UDP) : 0;
1787
1788	if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1789	    ifnetp->if_hwassist |= CSUM_TSO;
1790
1791	/* Enable interrupts */
1792	xge_hal_device_intr_enable(hldev);
1793
1794	callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1795
1796	/* Enable promiscuous mode, if requested */
1797	xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1798	xge_enable_promisc(lldev);
1799
1800	/* Device is initialized */
1801	lldev->initialized = 1;
1802	xge_os_mdelay(1000);
1803
1804_exit:
1805	return;
1806}
1807
1808/**
1809 * xge_timer
1810 * Timer timeout function to handle link status
1811 *
1812 * @devp Per-adapter Data
1813 */
1814void
1815xge_timer(void *devp)
1816{
1817	xge_lldev_t      *lldev = (xge_lldev_t *)devp;
1818	xge_hal_device_t *hldev = lldev->devh;
1819
1820	/* Poll for changes */
1821	xge_hal_device_poll(hldev);
1822
1823	/* Reset timer */
1824	callout_reset(&lldev->timer, hz, xge_timer, lldev);
1825
1826	return;
1827}
1828
1829/**
1830 * xge_stop
1831 * De-activate the interface
1832 *
1833 * @lldev Per-adapter Data
1834 */
1835void
1836xge_stop(xge_lldev_t *lldev)
1837{
1838	mtx_lock(&lldev->mtx_drv);
1839	xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1840	mtx_unlock(&lldev->mtx_drv);
1841}
1842
1843/**
1844 * xge_isr_filter
1845 * ISR filter function to screen out interrupts from other devices on a shared line
1846 *
1847 * @handle Per-adapter Data
1848 *
1849 * Returns
1850 * FILTER_STRAY if interrupt is from other device
1851 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1852 */
1853int
1854xge_isr_filter(void *handle)
1855{
1856	xge_lldev_t *lldev       = (xge_lldev_t *)handle;
1857	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1858	u16 retValue = FILTER_STRAY;
1859	u64 val64    = 0;
1860
1861	XGE_DRV_STATS(isr_filter);
1862
1863	val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1864	    &bar0->general_int_status);
1865	retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1866
1867	return retValue;
1868}
1869
1870/**
1871 * xge_isr_line
1872 * Interrupt service routine for Line interrupts
1873 *
1874 * @plldev Per-adapter Data
1875 */
1876void
1877xge_isr_line(void *plldev)
1878{
1879	xge_hal_status_e status;
1880	xge_lldev_t      *lldev   = (xge_lldev_t *)plldev;
1881	xge_hal_device_t *hldev   = (xge_hal_device_t *)lldev->devh;
1882	struct ifnet     *ifnetp  = lldev->ifnetp;
1883
1884	XGE_DRV_STATS(isr_line);
1885
1886	if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1887	    status = xge_hal_device_handle_irq(hldev);
1888	    if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1889	        xge_send(ifnetp);
1890	}
1891}
1892
1893/**
1894 * xge_isr_msi
1895 * ISR for Message Signaled Interrupts
1896 *
 * @plldev Per-adapter Data
 */
1897void
1898xge_isr_msi(void *plldev)
1899{
1900	xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1901	XGE_DRV_STATS(isr_msi);
1902	xge_hal_device_continue_irq(lldev->devh);
1903}
1904
1905/**
1906 * xge_rx_open
1907 * Initiate and open all Rx channels
1908 *
1909 * @qid Ring Index
1910 * @lldev Per-adapter Data
1911 * @rflag Channel open/close/reopen flag
1912 *
1913 * Returns 0 or Error Number
1914 */
1915int
1916xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1917{
1918	u64 adapter_status = 0x0;
1919	xge_hal_status_e status = XGE_HAL_FAIL;
1920
1921	xge_hal_channel_attr_t attr = {
1922	    .post_qid      = qid,
1923	    .compl_qid     = 0,
1924	    .callback      = xge_rx_compl,
1925	    .per_dtr_space = sizeof(xge_rx_priv_t),
1926	    .flags         = 0,
1927	    .type          = XGE_HAL_CHANNEL_TYPE_RING,
1928	    .userdata      = lldev,
1929	    .dtr_init      = xge_rx_initial_replenish,
1930	    .dtr_term      = xge_rx_term
1931	};
1932
1933	/* If device is not ready, return */
1934	status = xge_hal_device_status(lldev->devh, &adapter_status);
1935	if(status != XGE_HAL_OK) {
1936	    xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1937	    XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1938	}
1939	else {
1940	    status = xge_hal_channel_open(lldev->devh, &attr,
1941	        &lldev->ring_channel[qid], rflag);
1942	}
1943
1944_exit:
1945	return status;
1946}
1947
1948/**
1949 * xge_tx_open
1950 * Initialize and open all Tx channels
1951 *
1952 * @lldev Per-adapter Data
1953 * @tflag Channel open/close/reopen flag
1954 *
1955 * Returns 0 or Error Number
1956 */
1957int
1958xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1959{
1960	xge_hal_status_e status = XGE_HAL_FAIL;
1961	u64 adapter_status = 0x0;
1962	int qindex, index;
1963
1964	xge_hal_channel_attr_t attr = {
1965	    .compl_qid     = 0,
1966	    .callback      = xge_tx_compl,
1967	    .per_dtr_space = sizeof(xge_tx_priv_t),
1968	    .flags         = 0,
1969	    .type          = XGE_HAL_CHANNEL_TYPE_FIFO,
1970	    .userdata      = lldev,
1971	    .dtr_init      = xge_tx_initial_replenish,
1972	    .dtr_term      = xge_tx_term
1973	};
1974
1975	/* If device is not ready, return */
1976	status = xge_hal_device_status(lldev->devh, &adapter_status);
1977	if(status != XGE_HAL_OK) {
1978	    xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1979	    XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1980	}
1981
1982	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
1983	    attr.post_qid = qindex;
1984	    status = xge_hal_channel_open(lldev->devh, &attr,
1985	        &lldev->fifo_channel[qindex], tflag);
1986	    if(status != XGE_HAL_OK) {
1987	        for(index = 0; index < qindex; index++)
1988	            xge_hal_channel_close(lldev->fifo_channel[index], tflag);
	        goto _exit;
1989	    }
1990	}
1991
1992_exit:
1993	return status;
1994}
1995
1996/**
1997 * xge_enable_msi
1998 * Enables MSI
1999 *
2000 * @lldev Per-adapter Data
2001 */
2002void
2003xge_enable_msi(xge_lldev_t *lldev)
2004{
2005	xge_list_t        *item    = NULL;
2006	xge_hal_device_t  *hldev   = lldev->devh;
2007	xge_hal_channel_t *channel = NULL;
2008	u16 offset = 0, val16 = 0;
2009
2010	xge_os_pci_read16(lldev->pdev, NULL,
2011	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2012
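	/*
	 * Standard PCI MSI capability layout: bit 7 of the message control
	 * word flags 64-bit address capability, which moves the message
	 * data register from config offset 0x48 to 0x4c.
	 */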
2013	/* Update msi_data */
2014	offset = (val16 & 0x80) ? 0x4c : 0x48;
2015	xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2016	if(val16 & 0x1)
2017	    val16 &= 0xfffe;
2018	else
2019	    val16 |= 0x1;
2020	xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2021
2022	/* Update msi_control */
2023	xge_os_pci_read16(lldev->pdev, NULL,
2024	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2025	val16 |= 0x10;
2026	xge_os_pci_write16(lldev->pdev, NULL,
2027	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2028
2029	/* Set TxMAT and RxMAT registers with MSI */
2030	xge_list_for_each(item, &hldev->free_channels) {
2031	    channel = xge_container_of(item, xge_hal_channel_t, item);
2032	    xge_hal_channel_msi_set(channel, 1, (u32)val16);
2033	}
2034}
2035
2036/**
2037 * xge_channel_open
2038 * Open both Tx and Rx channels
2039 *
2040 * @lldev Per-adapter Data
2041 * @option Channel reopen option
2042 */
2043int
2044xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2045{
2046	xge_lro_entry_t *lro_session = NULL;
2047	xge_hal_status_e status   = XGE_HAL_OK;
2048	int index = 0, index2 = 0;
2049
2050	if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2051	    xge_msi_info_restore(lldev);
2052	    xge_enable_msi(lldev);
2053	}
2054
2055_exit2:
2056	status = xge_create_dma_tags(lldev->device);
2057	if(status != XGE_HAL_OK)
2058	    XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2059
2060	/* Open ring (Rx) channel */
2061	for(index = 0; index < XGE_RING_COUNT; index++) {
2062	    status = xge_rx_open(index, lldev, option);
2063	    if(status != XGE_HAL_OK) {
2064	        /*
2065	         * DMA mapping fails on unpatched kernels that cannot
2066	         * allocate physically contiguous memory for jumbo frames.
2067	         * Fall back to 5 buffer mode.
2068	         */
2069	        if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2070	            (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2071	            MJUMPAGESIZE)) {
2072	            /* Close so far opened channels */
2073	            for(index2 = 0; index2 < index; index2++) {
2074	                xge_hal_channel_close(lldev->ring_channel[index2],
2075	                    option);
2076	            }
2077
2078	            /* Destroy DMA tags intended to use for 1 buffer mode */
2079	            if(bus_dmamap_destroy(lldev->dma_tag_rx,
2080	                lldev->extra_dma_map)) {
2081	                xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2082	            }
2083	            if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2084	                xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2085	            if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2086	                xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2087
2088	            /* Switch to 5 buffer mode */
2089	            lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2090	            xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2091
2092	            /* Restart init */
2093	            goto _exit2;
2094	        }
2095	        else {
2096	            XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
2097	                status);
2098	        }
2099	    }
2100	}
2101
2102	if(lldev->enabled_lro) {
2103	    SLIST_INIT(&lldev->lro_free);
2104	    SLIST_INIT(&lldev->lro_active);
2105	    lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2106
2107	    for(index = 0; index < lldev->lro_num; index++) {
2108	        lro_session = (xge_lro_entry_t *)
2109	            xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2110	        if(lro_session == NULL) {
2111	            lldev->lro_num = index;
2112	            break;
2113	        }
2114	        SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2115	    }
2116	}
2117
2118	/* Open FIFO (Tx) channel */
2119	status = xge_tx_open(lldev, option);
2120	if(status != XGE_HAL_OK)
2121	    XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2122
2123	goto _exit;
2124
2125_exit1:
2126	/*
2127	 * Opening an Rx channel failed (index is the failed ring's index) or
2128	 * initialization of LRO failed (index is XGE_RING_COUNT) or
2129	 * opening the Tx channel failed (index is XGE_RING_COUNT)
2130	 */
2131	for(index2 = 0; index2 < index; index2++)
2132	    xge_hal_channel_close(lldev->ring_channel[index2], option);
2133
2134_exit:
2135	return status;
2136}
2137
2138/**
2139 * xge_channel_close
2140 * Close both Tx and Rx channels
2141 *
2142 * @lldev Per-adapter Data
2143 * @option Channel reopen option
2144 *
2145 */
2146void
2147xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2148{
2149	int qindex = 0;
2150
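	/* Give in-flight DMA and interrupts a second to drain before closing */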
2151	DELAY(1000 * 1000);
2152
2153	/* Close FIFO (Tx) channel */
2154	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2155	    xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2156
2157	/* Close Ring (Rx) channels */
2158	for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2159	    xge_hal_channel_close(lldev->ring_channel[qindex], option);
2160
2161	if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2162	    xge_trace(XGE_ERR, "Rx extra map destroy failed");
2163	if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2164	    xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2165	if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2166	    xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2167}
2168
2169/**
2170 * dmamap_cb
2171 * DMA map callback
2172 *
2173 * @arg Parameter passed from dmamap
2174 * @segs Segments
2175 * @nseg Number of segments
2176 * @error Error
2177 */
2178void
2179dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2180{
2181	if(!error) {
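	    /* Callers load single-segment buffers; record the first segment's bus address */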
2182	    *(bus_addr_t *) arg = segs->ds_addr;
2183	}
2184}
2185
2186/**
2187 * xge_reset
2188 * Device Reset
2189 *
2190 * @lldev Per-adapter Data
2191 */
2192void
2193xge_reset(xge_lldev_t *lldev)
2194{
2195	xge_trace(XGE_TRACE, "Resetting the chip");
2196	
2197	/* Reset only if the device has been initialized */
2198	if(lldev->initialized) {
2199	    mtx_lock(&lldev->mtx_drv);
2200	    xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2201	    xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2202	    mtx_unlock(&lldev->mtx_drv);
2203	}
2204
2205	return;
2206}
2207
2208/**
2209 * xge_setmulti
2210 * Set an address as a multicast address
2211 *
2212 * @lldev Per-adapter Data
2213 */
2214void
2215xge_setmulti(xge_lldev_t *lldev)
2216{
2217	struct ifmultiaddr *ifma;
2218	u8                 *lladdr;
2219	xge_hal_device_t   *hldev        = (xge_hal_device_t *)lldev->devh;
2220	struct ifnet       *ifnetp       = lldev->ifnetp;
2221	int                index         = 0;
2222	int                offset        = 1;
2223	int                table_size    = 47;
2224	xge_hal_status_e   status        = XGE_HAL_OK;
2225	u8                 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2226
2227	if((ifnetp->if_flags & IFF_ALLMULTI) && (!lldev->all_multicast)) {
2228	    status = xge_hal_device_mcast_enable(hldev);
2229	    lldev->all_multicast = 1;
2230	}
2231	else if(!(ifnetp->if_flags & IFF_ALLMULTI) && (lldev->all_multicast)) {
2232	    status = xge_hal_device_mcast_disable(hldev);
2233	    lldev->all_multicast = 0;
2234	}
2235
2236	if(status != XGE_HAL_OK) {
2237	    xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2238	    goto _exit;
2239	}
2240
2241	/* Updating address list */
2242	if_maddr_rlock(ifnetp);
2243	index = 0;
2244	TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2245	    if(ifma->ifma_addr->sa_family != AF_LINK) {
2246	        continue;
2247	    }
2248	    lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2249	    index += 1;
2250	}
2251	if_maddr_runlock(ifnetp);
2252
2253	if((!lldev->all_multicast) && (index)) {
2254	    lldev->macaddr_count = (index + 1);
2255	    if(lldev->macaddr_count > table_size) {
2256	        goto _exit;
2257	    }
2258
2259	    /* Clear old addresses */
2260	    for(index = 0; index < 48; index++) {
2261	        xge_hal_device_macaddr_set(hldev, (offset + index),
2262	            initial_addr);
2263	    }
2264	}
2265
2266	/* Add new addresses */
2267	if_maddr_rlock(ifnetp);
2268	index = 0;
2269	TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2270	    if(ifma->ifma_addr->sa_family != AF_LINK) {
2271	        continue;
2272	    }
2273	    lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2274	    xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2275	    index += 1;
2276	}
2277	if_maddr_runlock(ifnetp);
2278
2279_exit:
2280	return;
2281}
2282
2283/**
2284 * xge_enable_promisc
2285 * Enable Promiscuous Mode
2286 *
2287 * @lldev Per-adapter Data
2288 */
2289void
2290xge_enable_promisc(xge_lldev_t *lldev)
2291{
2292	struct ifnet *ifnetp = lldev->ifnetp;
2293	xge_hal_device_t *hldev = lldev->devh;
2294	xge_hal_pci_bar0_t *bar0 = NULL;
2295	u64 val64 = 0;
2296
2297	bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2298
2299	if(ifnetp->if_flags & IFF_PROMISC) {
2300	    xge_hal_device_promisc_enable(lldev->devh);
2301
2302	    /*
2303	     * When operating in promiscuous mode, don't strip the VLAN tag
2304	     */
2305	    val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2306	        &bar0->rx_pa_cfg);
2307	    val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2308	    val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2309	    xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2310	        &bar0->rx_pa_cfg);
2311
2312	    xge_trace(XGE_TRACE, "Promiscuous mode ON");
2313	}
2314}
2315
2316/**
2317 * xge_disable_promisc
2318 * Disable Promiscuous Mode
2319 *
2320 * @lldev Per-adapter Data
2321 */
2322void
2323xge_disable_promisc(xge_lldev_t *lldev)
2324{
2325	xge_hal_device_t *hldev = lldev->devh;
2326	xge_hal_pci_bar0_t *bar0 = NULL;
2327	u64 val64 = 0;
2328
2329	bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2330
2331	xge_hal_device_promisc_disable(lldev->devh);
2332
2333	/*
2334	 * Strip VLAN tag when operating in non-promiscuous mode
2335	 */
2336	val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2337	    &bar0->rx_pa_cfg);
2338	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2339	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2340	xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2341	    &bar0->rx_pa_cfg);
2342
2343	xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2344}
2345
2346/**
2347 * xge_change_mtu
2348 * Change interface MTU to a requested valid size
2349 *
2350 * @lldev Per-adapter Data
2351 * @new_mtu Requested MTU
2352 *
2353 * Returns 0 or Error Number
2354 */
2355int
2356xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2357{
2358	int status = XGE_HAL_OK;
2359
2360	/* Check requested MTU size for boundary */
2361	if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2362	    XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2363	}
2364
2365	lldev->mtu = new_mtu;
2366	xge_confirm_changes(lldev, XGE_SET_MTU);
2367
2368_exit:
2369	return status;
2370}
2371
2372/**
2373 * xge_device_stop
2374 *
2375 * Common code shared by stop and reset: disables the device and its
2376 * interrupts, and closes the channels
2377 *
2378 * @lldev Per-adapter Data
2379 * @option Channel normal/reset option
2380 */
2381void
2382xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2383{
2384	xge_hal_device_t *hldev  = lldev->devh;
2385	struct ifnet     *ifnetp = lldev->ifnetp;
2386	u64               val64  = 0;
2387
2388	mtx_assert((&lldev->mtx_drv), MA_OWNED);
2389
2390	/* If device is not in "Running" state, return */
2391	if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2392	    goto _exit;
2393
2394	/* Set appropriate flags */
2395	ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2396
2397	/* Stop timer */
2398	callout_stop(&lldev->timer);
2399
2400	/* Disable interrupts */
2401	xge_hal_device_intr_disable(hldev);
2402
2403	mtx_unlock(&lldev->mtx_drv);
2404	xge_queue_flush(xge_hal_device_queue(lldev->devh));
2405	mtx_lock(&lldev->mtx_drv);
2406
2407	/* Disable HAL device */
2408	if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2409	    xge_trace(XGE_ERR, "Disabling HAL device failed");
2410	    xge_hal_device_status(hldev, &val64);
2411	    xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2412	}
2413
2414	/* Close Tx and Rx channels */
2415	xge_channel_close(lldev, option);
2416
2417	/* Reset HAL device */
2418	xge_hal_device_reset(hldev);
2419
2420	xge_os_mdelay(1000);
2421	lldev->initialized = 0;
2422
2423	if_link_state_change(ifnetp, LINK_STATE_DOWN);
2424
2425_exit:
2426	return;
2427}
2428
2429/**
2430 * xge_set_mbuf_cflags
2431 * Set checksum flags for the mbuf
2432 *
2433 * @pkt Packet
2434 */
2435void
2436xge_set_mbuf_cflags(mbuf_t pkt)
2437{
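	/*
	 * Tell the stack the hardware already verified both checksums:
	 * IP header checked and valid, TCP/UDP data valid, with 0xffff
	 * as the expected pseudo-header checksum value.
	 */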
2438	pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2439	pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2440	pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2441	pkt->m_pkthdr.csum_data = htons(0xffff);
2442}
2443
2444/**
2445 * xge_lro_flush_sessions
2446 * Flush all active LRO sessions and send accumulated packets to the upper layer
2447 *
2448 * @lldev Per-adapter Data
2449 */
2450void
2451xge_lro_flush_sessions(xge_lldev_t *lldev)
2452{
2453	xge_lro_entry_t *lro_session = NULL;
2454
2455	while(!SLIST_EMPTY(&lldev->lro_active)) {
2456	    lro_session = SLIST_FIRST(&lldev->lro_active);
2457	    SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2458	    xge_lro_flush(lldev, lro_session);
2459	}
2460}
2461
2462/**
2463 * xge_lro_flush
2464 * Flush LRO session. Send accumulated LRO packet to upper layer
2465 *
2466 * @lldev Per-adapter Data
2467 * @lro LRO session to be flushed
2468 */
2469static void
2470xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2471{
2472	struct ip *header_ip;
2473	struct tcphdr *header_tcp;
2474	u32 *ptr;
2475
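	/*
	 * Rewrite the first segment's headers so the coalesced chain looks
	 * like one large TCP segment: total IP length, the latest
	 * ACK/window values and, if negotiated, the latest timestamp.
	 */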
2476	if(lro_session->append_cnt) {
2477	    header_ip = lro_session->lro_header_ip;
2478	    header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2479	    lro_session->m_head->m_pkthdr.len = lro_session->len;
2480	    header_tcp = (struct tcphdr *)(header_ip + 1);
2481	    header_tcp->th_ack = lro_session->ack_seq;
2482	    header_tcp->th_win = lro_session->window;
2483	    if(lro_session->timestamp) {
2484	        ptr = (u32 *)(header_tcp + 1);
2485	        ptr[1] = htonl(lro_session->tsval);
2486	        ptr[2] = lro_session->tsecr;
2487	    }
2488	}
2489
2490	(*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2491	lro_session->m_head = NULL;
2492	lro_session->timestamp = 0;
2493	lro_session->append_cnt = 0;
2494	SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2495}
2496
2497/**
2498 * xge_lro_accumulate
2499 * Accumulate packets to form a large LRO packet based on various conditions
2500 *
2501 * @lldev Per-adapter Data
2502 * @m_head Current Packet
2503 *
2504 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2505 */
2506static int
2507xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2508{
2509	struct ether_header *header_ethernet;
2510	struct ip *header_ip;
2511	struct tcphdr *header_tcp;
2512	u32 seq, *ptr;
2513	struct mbuf *buffer_next, *buffer_tail;
2514	xge_lro_entry_t *lro_session;
2515	xge_hal_status_e status = XGE_HAL_FAIL;
2516	int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2517	int trim;
2518
2519	/* Get Ethernet header */
2520	header_ethernet = mtod(m_head, struct ether_header *);
2521
2522	/* Return if it is not IP packet */
2523	if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
2524	    goto _exit;
2525
2526	/* Get IP header */
2527	header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2528	    (struct ip *)(header_ethernet + 1) :
2529	    mtod(m_head->m_next, struct ip *);
2530
2531	/* Return if it is not TCP packet */
2532	if(header_ip->ip_p != IPPROTO_TCP)
2533	    goto _exit;
2534
2535	/* Return if packet has options */
2536	if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2537	    goto _exit;
2538
2539	/* Return if packet is fragmented */
2540	if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2541	    goto _exit;
2542
2543	/* Get TCP header */
2544	header_tcp = (struct tcphdr *)(header_ip + 1);
2545
2546	/* Return if not ACK or PUSH */
2547	if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2548	    goto _exit;
2549
2550	/* Only timestamp option is handled */
2551	tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2552	tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2553	ptr = (u32 *)(header_tcp + 1);
2554	if(tcp_options != 0) {
2555	    if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2556	        (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2557	        TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2558	        goto _exit;
2559	    }
2560	}
2561
2562	/* Total length of packet (IP) */
2563	ip_len = ntohs(header_ip->ip_len);
2564
2565	/* TCP data size */
2566	tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2567
2568	/* If the frame is padded, trim it */
2569	tot_len = m_head->m_pkthdr.len;
2570	trim = tot_len - (ip_len + ETHER_HDR_LEN);
2571	if(trim != 0) {
2572	    if(trim < 0)
2573	        goto _exit;
2574	    m_adj(m_head, -trim);
2575	    tot_len = m_head->m_pkthdr.len;
2576	}
2577
2578	buffer_next = m_head;
2579	buffer_tail = NULL;
2580	while(buffer_next != NULL) {
2581	    buffer_tail = buffer_next;
2582	    buffer_next = buffer_tail->m_next;
2583	}
2584
2585	/* Total size of only headers */
2586	hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2587
2588	/* Get sequence number */
2589	seq = ntohl(header_tcp->th_seq);
2590
2591	SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2592	    if(lro_session->source_port == header_tcp->th_sport &&
2593	        lro_session->dest_port == header_tcp->th_dport &&
2594	        lro_session->source_ip == header_ip->ip_src.s_addr &&
2595	        lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2596
2597	        /* Unmatched sequence number, flush LRO session */
2598	        if(__predict_false(seq != lro_session->next_seq)) {
2599	            SLIST_REMOVE(&lldev->lro_active, lro_session,
2600	                xge_lro_entry_t, next);
2601	            xge_lro_flush(lldev, lro_session);
2602	            goto _exit;
2603	        }
2604
2605	        /* Handle timestamp option */
2606	        if(tcp_options) {
2607	            u32 tsval = ntohl(*(ptr + 1));
2608	            if(__predict_false(lro_session->tsval > tsval ||
2609	                *(ptr + 2) == 0)) {
2610	                goto _exit;
2611	            }
2612	            lro_session->tsval = tsval;
2613	            lro_session->tsecr = *(ptr + 2);
2614	        }
2615
2616	        lro_session->next_seq += tcp_data_len;
2617	        lro_session->ack_seq = header_tcp->th_ack;
2618	        lro_session->window = header_tcp->th_win;
2619
2620	        /* If TCP data/payload is of 0 size, free mbuf */
2621	        if(tcp_data_len == 0) {
2622	            m_freem(m_head);
2623	            status = XGE_HAL_OK;
2624	            goto _exit;
2625	        }
2626
2627	        lro_session->append_cnt++;
2628	        lro_session->len += tcp_data_len;
2629
2630	        /* Adjust mbuf so m_data points at the payload rather than the headers */
2631	        m_adj(m_head, hlen);
2632
2633	        /* Append this packet to LRO accumulated packet */
2634	        lro_session->m_tail->m_next = m_head;
2635	        lro_session->m_tail = buffer_tail;
2636
2637	        /* Flush if LRO packet is exceeding maximum size */
2638	        if(lro_session->len >
2639	            (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2640	            SLIST_REMOVE(&lldev->lro_active, lro_session,
2641	                xge_lro_entry_t, next);
2642	            xge_lro_flush(lldev, lro_session);
2643	        }
2644	        status = XGE_HAL_OK;
2645	        goto _exit;
2646	    }
2647	}
2648
2649	if(SLIST_EMPTY(&lldev->lro_free))
2650	    goto _exit;
2651
2652	/* Start a new LRO session */
2653	lro_session = SLIST_FIRST(&lldev->lro_free);
2654	SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2655	SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2656	lro_session->source_port = header_tcp->th_sport;
2657	lro_session->dest_port = header_tcp->th_dport;
2658	lro_session->source_ip = header_ip->ip_src.s_addr;
2659	lro_session->dest_ip = header_ip->ip_dst.s_addr;
2660	lro_session->next_seq = seq + tcp_data_len;
2661	lro_session->mss = tcp_data_len;
2662	lro_session->ack_seq = header_tcp->th_ack;
2663	lro_session->window = header_tcp->th_win;
2664
2665	lro_session->lro_header_ip = header_ip;
2666
2667	/* Handle timestamp option */
2668	if(tcp_options) {
2669	    lro_session->timestamp = 1;
2670	    lro_session->tsval = ntohl(*(ptr + 1));
2671	    lro_session->tsecr = *(ptr + 2);
2672	}
2673
2674	lro_session->len = tot_len;
2675	lro_session->m_head = m_head;
2676	lro_session->m_tail = buffer_tail;
2677	status = XGE_HAL_OK;
2678
2679_exit:
2680	return status;
2681}
2682
2683/**
2684 * xge_accumulate_large_rx
2685 * Try to merge the packet into an LRO session; on failure pass it up directly
2686 *
2687 * @lldev Per-adapter Data
2688 * @pkt Current packet
2689 * @pkt_length Packet Length
2690 * @rxd_priv Rx Descriptor Private Data
2691 */
2692void
2693xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2694	xge_rx_priv_t *rxd_priv)
2695{
2696	if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2697	    bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2698	        BUS_DMASYNC_POSTREAD);
2699	    (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2700	}
2701}
2702
2703/**
2704 * xge_rx_compl
2705 * If the interrupt is due to received frame (Rx completion), send it up
2706 *
2707 * @channelh Ring Channel Handle
2708 * @dtr Current Descriptor
2709 * @t_code Transfer Code indicating success or error
2710 * @userdata Per-adapter Data
2711 *
2712 * Returns XGE_HAL_OK or HAL error enums
2713 */
2714xge_hal_status_e
2715xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2716	void *userdata)
2717{
2718	struct ifnet       *ifnetp;
2719	xge_rx_priv_t      *rxd_priv = NULL;
2720	mbuf_t              mbuf_up  = NULL;
2721	xge_hal_status_e    status   = XGE_HAL_OK;
2722	xge_hal_dtr_info_t  ext_info;
2723	int                 index;
2724	u16                 vlan_tag;
2725
2726	/* Get the user data portion */
2727	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2728	if(!lldev) {
2729	    XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2730	}
2731
2732	XGE_DRV_STATS(rx_completions);
2733
2734	/* Get the interface pointer */
2735	ifnetp = lldev->ifnetp;
2736
2737	do {
2738	    XGE_DRV_STATS(rx_desc_compl);
2739
2740	    if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2741	        status = XGE_HAL_FAIL;
2742	        goto _exit;
2743	    }
2744
2745	    if(t_code) {
2746	        xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2747	        XGE_DRV_STATS(rx_tcode);
2748	        xge_hal_device_handle_tcode(channelh, dtr, t_code);
2749	        xge_hal_ring_dtr_post(channelh, dtr);
2750	        continue;
2751	    }
2752
2753	    /* Get the private data for this descriptor*/
2754	    rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2755	        dtr);
2756	    if(!rxd_priv) {
2757	        XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2758	            XGE_HAL_FAIL);
2759	    }
2760
2761	    /*
2762	     * Prepare one buffer to send it to upper layer -- since the upper
2763	     * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2764	     * prepare a new buffer, do mapping, use it in the current
2765	     * descriptor and post descriptor back to ring channel
2766	     */
2767	    mbuf_up = rxd_priv->bufferArray[0];
2768
2769	    /* Gets details of mbuf i.e., packet length */
2770	    xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
2771
2772	    status =
2773	        (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2774	        xge_get_buf(dtr, rxd_priv, lldev, 0) :
2775	        xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2776
2777	    if(status != XGE_HAL_OK) {
2778	        xge_trace(XGE_ERR, "No memory");
2779	        XGE_DRV_STATS(rx_no_buf);
2780
2781	        /*
2782	         * Unable to allocate buffer. Instead of discarding, post
2783	         * descriptor back to channel for future processing of same
2784	         * packet.
2785	         */
2786	        xge_hal_ring_dtr_post(channelh, dtr);
2787	        continue;
2788	    }
2789
2790	    /* Get the extended information */
2791	    xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2792
2793	    /*
2794	     * As we have allocated a new mbuf for this descriptor, post this
2795	     * descriptor with new mbuf back to ring channel
2796	     */
2797	    vlan_tag = ext_info.vlan;
2798	    xge_hal_ring_dtr_post(channelh, dtr);
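	    /*
	     * The hardware validates L3/L4 checksums only for unfragmented
	     * TCP/UDP frames; anything else goes up unmarked so the stack
	     * verifies the checksum itself.
	     */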
2799	    if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2800	        (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2801	        (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2802	        (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2803
2804	        /* set Checksum Flag */
2805	        xge_set_mbuf_cflags(mbuf_up);
2806
2807	        if(lldev->enabled_lro) {
2808	            xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2809	                rxd_priv);
2810	        }
2811	        else {
2812	            /* Post-Read sync for buffers*/
2813	            for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2814	                bus_dmamap_sync(lldev->dma_tag_rx,
2815	                    rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2816	            }
2817	            (*ifnetp->if_input)(ifnetp, mbuf_up);
2818	        }
2819	    }
2820	    else {
2821	        /*
2822	         * Packet with an erroneous checksum; let the upper layer
2823	         * deal with it
2824	         */
2825
2826	        /* Post-Read sync for buffers*/
2827	        for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2828	            bus_dmamap_sync(lldev->dma_tag_rx,
2829	                 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2830	        }
2831
2832	        if(vlan_tag) {
2833	            mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2834	            mbuf_up->m_flags |= M_VLANTAG;
2835	        }
2836
2837	        if(lldev->enabled_lro)
2838	            xge_lro_flush_sessions(lldev);
2839
2840	        (*ifnetp->if_input)(ifnetp, mbuf_up);
2841	    }
2842	} while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2843	    == XGE_HAL_OK);
2844
2845	if(lldev->enabled_lro)
2846	    xge_lro_flush_sessions(lldev);
2847
2848_exit:
2849	return status;
2850}
2851
2852/**
2853 * xge_ring_dtr_get
2854 * Get descriptors
2855 *
2856 * @mbuf_up Packet to send up
2857 * @channelh Ring Channel Handle
2858 * @dtr Descriptor
2859 * @lldev Per-adapter Data
2860 * @rxd_priv Rx Descriptor Private Data
2861 *
2862 * Returns XGE_HAL_OK or HAL error enums
2863 */
2864int
2865xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2866	xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
2867{
2868	mbuf_t           m;
2869	int              pkt_length[5] = {0}, pkt_len = 0;
2870	dma_addr_t       dma_data[5];
2871	int              index;
2872
2873	m = mbuf_up;
2874	pkt_len = 0;
2875
2876	if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2877	    xge_os_memzero(pkt_length, sizeof(pkt_length));
2878
2879	    /*
2880	     * Retrieve data of interest from the completed descriptor -- This
2881	     * returns the packet length
2882	     */
2883	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2884	        xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2885	    }
2886	    else {
2887	        xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
2888	    }
2889
2890	    for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2891	        m->m_len  = pkt_length[index];
2892
2893	        if(index < (lldev->rxd_mbuf_cnt-1)) {
2894	            m->m_next = rxd_priv->bufferArray[index + 1];
2895	            m = m->m_next;
2896	        }
2897	        else {
2898	            m->m_next = NULL;
2899	        }
2900	        pkt_len+=pkt_length[index];
2901	    }
2902
2903	    /*
2904	     * 2 buffer mode is an exceptional case: the data lands in the
2905	     * 3rd buffer, not the 2nd
2906	     */
2907	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2908	        m->m_len = pkt_length[2];
2909	        pkt_len+=pkt_length[2];
2910	    }
2911
2912	    /*
2913	     * Update length of newly created buffer to be sent up with packet
2914	     * length
2915	     */
2916	    mbuf_up->m_pkthdr.len = pkt_len;
2917	}
2918	else {
2919	    /*
2920	     * Retrieve data of interest from the completed descriptor -- This
2921	     * returns the packet length
2922	     */
2923	    xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2924
2925	    /*
2926	     * Update length of newly created buffer to be sent up with packet
2927	     * length
2928	     */
2929	    mbuf_up->m_len =  mbuf_up->m_pkthdr.len = pkt_length[0];
2930	}
2931
2932	return XGE_HAL_OK;
2933}
2934
2935/**
2936 * xge_flush_txds
2937 * Flush Tx descriptors
2938 *
2939 * @channelh Channel handle
2940 */
2941static void inline
2942xge_flush_txds(xge_hal_channel_h channelh)
2943{
2944	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2945	xge_hal_dtr_h tx_dtr;
2946	xge_tx_priv_t *tx_priv;
2947	u8 t_code;
2948
2949	while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2950	    == XGE_HAL_OK) {
2951	    XGE_DRV_STATS(tx_desc_compl);
2952	    if(t_code) {
2953	        xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2954	        XGE_DRV_STATS(tx_tcode);
2955	        xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2956	    }
2957
2958	    tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2959	    bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2960	    m_freem(tx_priv->buffer);
2961	    tx_priv->buffer = NULL;
2962	    xge_hal_fifo_dtr_free(channelh, tx_dtr);
2963	}
2964}
2965
2966/**
2967 * xge_send
2968 * Transmit function
2969 *
2970 * @ifnetp Interface Handle
2971 */
2972void
2973xge_send(struct ifnet *ifnetp)
2974{
2975	int qindex = 0;
2976	xge_lldev_t *lldev = ifnetp->if_softc;
2977
2978	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2979	    if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2980	        XGE_DRV_STATS(tx_lock_fail);
2981	        break;
2982	    }
2983	    xge_send_locked(ifnetp, qindex);
2984	    mtx_unlock(&lldev->mtx_tx[qindex]);
2985	}
2986}
2987
2988static void inline
2989xge_send_locked(struct ifnet *ifnetp, int qindex)
2990{
2991	xge_hal_dtr_h            dtr;
2992	static bus_dma_segment_t segs[XGE_MAX_SEGS];
2993	xge_hal_status_e         status;
2994	unsigned int             max_fragments;
2995	xge_lldev_t              *lldev          = ifnetp->if_softc;
2996	xge_hal_channel_h        channelh        = lldev->fifo_channel[qindex];
2997	mbuf_t                   m_head          = NULL;
2998	mbuf_t                   m_buf           = NULL;
2999	xge_tx_priv_t            *ll_tx_priv     = NULL;
3000	register unsigned int    count           = 0;
3001	unsigned int             nsegs           = 0;
3002	u16                      vlan_tag;
3003
3004	max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3005
3006	/* If device is not initialized, return */
3007	if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3008	    return;
3009
3010	XGE_DRV_STATS(tx_calls);
3011
3012	/*
3013	 * This loop runs once for each packet on the kernel-maintained send
3014	 * queue -- each packet may arrive as an mbuf chain of fragments
3015	 */
3016	for(;;) {
3017	    IF_DEQUEUE(&ifnetp->if_snd, m_head);
3018	    if (m_head == NULL) {
3019	        ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
3020	        return;
3021	    }
3022
3023	    for(m_buf = m_head, count = 0; m_buf != NULL; m_buf = m_buf->m_next) {
3024	        if(m_buf->m_len) count += 1;
3025	    }
3026
3027	    if(count >= max_fragments) {
3028	        m_buf = m_defrag(m_head, M_NOWAIT);
3029	        if(m_buf != NULL) m_head = m_buf;
3030	        XGE_DRV_STATS(tx_defrag);
3031	    }
3032
3033	    /* Reserve descriptors */
3034	    status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3035	    if(status != XGE_HAL_OK) {
3036	        XGE_DRV_STATS(tx_no_txd);
3037	        xge_flush_txds(channelh);
3038		break;
3039	        break;
3040
3041	    vlan_tag =
3042	        (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3043	    xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3044
3045	    /* Update Tx private structure for this descriptor */
3046	    ll_tx_priv         = xge_hal_fifo_dtr_private(dtr);
3047	    ll_tx_priv->buffer = m_head;
3048
3049	    /*
3050	     * Do mapping -- Required DMA tag has been created in xge_init
3051	     * function and DMA maps have already been created in the
3052	     * xgell_tx_replenish function.
3053	     * Returns number of segments through nsegs
3054	     */
3055	    if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3056	        ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3057	        xge_trace(XGE_TRACE, "DMA map load failed");
3058	        XGE_DRV_STATS(tx_map_fail);
	        /* Return the reserved descriptor so it is not leaked */
	        xge_hal_fifo_dtr_free(channelh, dtr);
3059	        break;
3060	    }
3061
3062	    if(lldev->driver_stats.tx_max_frags < nsegs)
3063	        lldev->driver_stats.tx_max_frags = nsegs;
3064
3065	    /* Set descriptor buffer for header and each fragment/segment */
3066	    count = 0;
3067	    do {
3068	        xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3069	            (dma_addr_t)htole64(segs[count].ds_addr),
3070	            segs[count].ds_len);
3071	        count++;
3072	    } while(count < nsegs);
3073
3074	    /* Pre-write Sync of mapping */
3075	    bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3076	        BUS_DMASYNC_PREWRITE);
3077
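	    /* For TSO frames, hand the hardware the MSS so it can segment the payload */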
3078	    if((lldev->enabled_tso) &&
3079	        (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3080	        XGE_DRV_STATS(tx_tso);
3081	        xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
3082	    }
3083
3084	    /* Checksum */
3085	    if(ifnetp->if_hwassist > 0) {
3086	        xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3087	            | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3088	    }
3089
3090	    /* Post descriptor to FIFO channel */
3091	    xge_hal_fifo_dtr_post(channelh, dtr);
3092	    XGE_DRV_STATS(tx_posted);
3093
3094	    /* Send a copy of the packet to the BPF (Berkeley Packet Filter)
3095	     * listener so that tools like tcpdump can see it */
3096	    ETHER_BPF_MTAP(ifnetp, m_head);
3097	}
3098
3099	/* Prepend the packet back to queue */
3100	IF_PREPEND(&ifnetp->if_snd, m_head);
3101	ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3102
3103	xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3104	    XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3105	XGE_DRV_STATS(tx_again);
3106}
3107
3108/**
3109 * xge_get_buf
3110 * Allocates new mbufs to be placed into descriptors
3111 *
3112 * @dtrh Descriptor Handle
3113 * @rxd_priv Rx Descriptor Private Data
3114 * @lldev Per-adapter Data
3115 * @index Buffer Index (if multi-buffer mode)
3116 *
3117 * Returns XGE_HAL_OK or HAL error enums
3118 */
3119int
3120xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3121	xge_lldev_t *lldev, int index)
3122{
3123	register mbuf_t mp            = NULL;
3124	struct          ifnet *ifnetp = lldev->ifnetp;
3125	int             status        = XGE_HAL_OK;
3126	int             buffer_size = 0, cluster_size = 0, count;
3127	bus_dmamap_t    map = rxd_priv->dmainfo[index].dma_map;
3128	bus_dma_segment_t segs[3];
3129
3130	buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3131	    ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3132	    lldev->rxd_mbuf_len[index];
3133
3134	if(buffer_size <= MCLBYTES) {
3135	    cluster_size = MCLBYTES;
3136	    mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3137	}
3138	else {
3139	    cluster_size = MJUMPAGESIZE;
3140	    if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3141	        (buffer_size > MJUMPAGESIZE)) {
3142	        cluster_size = MJUM9BYTES;
3143	    }
3144	    mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
3145	}
3146	if(!mp) {
3147	    xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3148	    status = XGE_HAL_FAIL;
3149	    goto getbuf_out;
3150	}
3151
3152	/* Update mbuf's length, packet length and receive interface */
3153	mp->m_len = mp->m_pkthdr.len = buffer_size;
3154	mp->m_pkthdr.rcvif = ifnetp;
3155
3156	/* Load DMA map */
3157	if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3158	    mp, segs, &count, BUS_DMA_NOWAIT)) {
3159	    XGE_DRV_STATS(rx_map_fail);
3160	    m_freem(mp);
3161	    XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3162	}
3163
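	/*
	 * Map recycling: the new mbuf was loaded into extra_dma_map, so hand
	 * that map to this descriptor and keep the descriptor's old map as
	 * the next spare, avoiding a map create/destroy per frame.
	 */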
3164	/* Update descriptor private data */
3165	rxd_priv->bufferArray[index]         = mp;
3166	rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3167	rxd_priv->dmainfo[index].dma_map     = lldev->extra_dma_map;
3168	lldev->extra_dma_map = map;
3169
3170	/* Pre-Read/Write sync */
3171	/* Post-read sync for the just-consumed mbuf before recycling its map */
3172
3173	/* Unload DMA map of mbuf in current descriptor */
3174	bus_dmamap_unload(lldev->dma_tag_rx, map);
3175
3176	/* Set descriptor buffer */
3177	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3178	    xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3179	        cluster_size);
3180	}
3181
3182getbuf_out:
3183	return status;
3184}
3185
3186/**
3187 * xge_get_buf_3b_5b
3188 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3189 *
3190 * @dtrh Descriptor Handle
3191 * @rxd_priv Rx Descriptor Private Data
3192 * @lldev Per-adapter Data
3193 *
3194 * Returns XGE_HAL_OK or HAL error enums
3195 */
3196int
3197xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3198	xge_lldev_t *lldev)
3199{
3200	bus_addr_t  dma_pointers[5];
3201	int         dma_sizes[5];
3202	int         status = XGE_HAL_OK, index;
3203	int         newindex = 0;
3204
3205	for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3206	    status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3207	    if(status != XGE_HAL_OK) {
3208	        for(newindex = 0; newindex < index; newindex++) {
3209	            m_freem(rxd_priv->bufferArray[newindex]);
3210	        }
3211	        XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
3212	    }
3213	}
3214
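	/*
	 * Every block pointer handed to the HAL must be valid, so unused
	 * slots reuse the previous buffer's address with a dummy 1-byte
	 * length.
	 */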
3215	for(index = 0; index < lldev->buffer_mode; index++) {
3216	    if(lldev->rxd_mbuf_len[index] != 0) {
3217	        dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3218	        dma_sizes[index]    = lldev->rxd_mbuf_len[index];
3219	    }
3220	    else {
3221	        dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3222	        dma_sizes[index]    = 1;
3223	    }
3224	}
3225
3226	/* Assigning second buffer to third pointer in 2 buffer mode */
3227	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3228	    dma_pointers[2] = dma_pointers[1];
3229	    dma_sizes[2]    = dma_sizes[1];
3230	    dma_sizes[1]    = 1;
3231	}
3232
3233	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3234	    xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3235	}
3236	else {
3237	    xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3238	}
3239
3240_exit:
3241	return status;
3242}
3243
3244/**
3245 * xge_tx_compl
3246 * If the interrupt is due to Tx completion, free the sent buffer
3247 *
3248 * @channelh Channel Handle
3249 * @dtr Descriptor
3250 * @t_code Transfer Code indicating success or error
3251 * @userdata Per-adapter Data
3252 *
3253 * Returns XGE_HAL_OK or HAL error enum
3254 */
3255xge_hal_status_e
3256xge_tx_compl(xge_hal_channel_h channelh,
3257	xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3258{
3259	xge_tx_priv_t *ll_tx_priv = NULL;
3260	xge_lldev_t   *lldev  = (xge_lldev_t *)userdata;
3261	struct ifnet  *ifnetp = lldev->ifnetp;
3262	mbuf_t         m_buffer = NULL;
3263	int            qindex   = xge_hal_channel_id(channelh);
3264
3265	mtx_lock(&lldev->mtx_tx[qindex]);
3266
3267	XGE_DRV_STATS(tx_completions);
3268
3269	/*
3270	 * For each completed descriptor: Get private structure, free buffer,
3271	 * do unmapping, and free descriptor
3272	 */
3273	do {
3274	    XGE_DRV_STATS(tx_desc_compl);
3275
3276	    if(t_code) {
3277	        XGE_DRV_STATS(tx_tcode);
3278	        xge_trace(XGE_TRACE, "t_code %d", t_code);
3279	        xge_hal_device_handle_tcode(channelh, dtr, t_code);
3280	    }
3281
3282	    ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3283	    m_buffer   = ll_tx_priv->buffer;
3284	    bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3285	    m_freem(m_buffer);
3286	    ll_tx_priv->buffer = NULL;
3287	    xge_hal_fifo_dtr_free(channelh, dtr);
3288	} while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
3289	    == XGE_HAL_OK);
3290	xge_send_locked(ifnetp, qindex);
3291	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3292
3293	mtx_unlock(&lldev->mtx_tx[qindex]);
3294
3295	return XGE_HAL_OK;
3296}
3297
3298/**
3299 * xge_tx_initial_replenish
3300 * Create a DMA map for each Tx descriptor for later use
3301 *
3302 * @channelh Tx Channel Handle
3303 * @dtrh Descriptor Handle
3304 * @index Descriptor Index
3305 * @userdata Per-adapter Data
3306 * @reopen Channel open/reopen option
3307 *
3308 * Returns XGE_HAL_OK or HAL error enums
3309 */
3310xge_hal_status_e
3311xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3312	int index, void *userdata, xge_hal_channel_reopen_e reopen)
3313{
3314	xge_tx_priv_t *txd_priv = NULL;
3315	int            status   = XGE_HAL_OK;
3316
3317	/* Get the user data portion from channel handle */
3318	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3319	if(lldev == NULL) {
3320	    XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3321	        XGE_HAL_FAIL);
3322	}
3323
3324	/* Get the private data */
3325	txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3326	if(txd_priv == NULL) {
3327	    XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3328	        XGE_HAL_FAIL);
3329	}
3330
3331	/* Create DMA map for this descriptor */
3332	if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3333	    &txd_priv->dma_map)) {
3334	    XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3335	        txinit_out, XGE_HAL_FAIL);
3336	}
3337
3338txinit_out:
3339	return status;
3340}
3341
3342/**
3343 * xge_rx_initial_replenish
3344 * Initially allocate buffers and set them into descriptors for later use
3345 *
3346 * @channelh Rx Channel Handle
3347 * @dtrh Descriptor Handle
3348 * @index Ring Index
3349 * @userdata Per-adapter Data
3350 * @reopen Channel open/reopen option
3351 *
3352 * Returns XGE_HAL_OK or HAL error enums
3353 */
3354xge_hal_status_e
3355xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3356	int index, void *userdata, xge_hal_channel_reopen_e reopen)
3357{
3358	xge_rx_priv_t  *rxd_priv = NULL;
3359	int             status   = XGE_HAL_OK;
3360	int             index1 = 0, index2 = 0;
3361
3362	/* Get the user data portion from channel handle */
3363	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3364	if(lldev == NULL) {
3365	    XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3366	        XGE_HAL_FAIL);
3367	}
3368
3369	/* Get the private data */
3370	rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3371	if(rxd_priv == NULL) {
3372	    XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
3373	        XGE_HAL_FAIL);
3374	}
3375
3376	rxd_priv->bufferArray = xge_os_malloc(NULL,
3377	        (sizeof(mbuf_t) * lldev->rxd_mbuf_cnt));
3378
3379	if(rxd_priv->bufferArray == NULL) {
3380	    XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3381	        XGE_HAL_FAIL);
3382	}
3383
3384	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3385	    /* Create DMA map for these descriptors*/
3386	    if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3387	        &rxd_priv->dmainfo[0].dma_map)) {
3388	        XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3389	            rxinit_err_out, XGE_HAL_FAIL);
3390	    }
3391	    /* Get a buffer, attach it to this descriptor */
3392	    status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
3393	}
3394	else {
3395	    for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3396	        /* Create DMA map for this descriptor */
3397	        if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3398	            &rxd_priv->dmainfo[index1].dma_map)) {
3399	            for(index2 = index1 - 1; index2 >= 0; index2--) {
3400	                bus_dmamap_destroy(lldev->dma_tag_rx,
3401	                    rxd_priv->dmainfo[index2].dma_map);
3402	            }
3403	            XGE_EXIT_ON_ERR(
3404	                "Jumbo DMA map creation for Rx descriptor failed",
3405	                rxinit_err_out, XGE_HAL_FAIL);
3406	        }
3407	    }
3408	    status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3409	}
3410
3411	if(status != XGE_HAL_OK) {
3412	    for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3413	        bus_dmamap_destroy(lldev->dma_tag_rx,
3414	            rxd_priv->dmainfo[index1].dma_map);
3415	    }
3416	    goto rxinit_err_out;
3417	}
3418	else {
3419	    goto rxinit_out;
3420	}
3421
3422rxinit_err_out:
3423	xge_os_free(NULL, rxd_priv->bufferArray,
3424	    (sizeof(mbuf_t) * lldev->rxd_mbuf_cnt));
3425rxinit_out:
3426	return status;
3427}
3428
3429/**
3430 * xge_rx_term
3431 * During unload, terminate and free all descriptors
3432 *
3433 * @channelh Rx Channel Handle
3434 * @dtrh Rx Descriptor Handle
3435 * @state Descriptor State
3436 * @userdata Per-adapter Data
3437 * @reopen Channel open/reopen option
3438 */
3439void
3440xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3441	xge_hal_dtr_state_e state, void *userdata,
3442	xge_hal_channel_reopen_e reopen)
3443{
3444	xge_rx_priv_t *rxd_priv = NULL;
3445	xge_lldev_t   *lldev    = NULL;
3446	int            index = 0;
3447
3448	/* Descriptor state is not "Posted" */
3449	if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3450
3451	/* Get the user data portion */
3452	lldev = xge_hal_channel_userdata(channelh);
3453
3454	/* Get the private data */
3455	rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3456
3457	for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3458	    if(rxd_priv->dmainfo[index].dma_map != NULL) {
3459	        bus_dmamap_sync(lldev->dma_tag_rx,
3460	            rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3461	        bus_dmamap_unload(lldev->dma_tag_rx,
3462	            rxd_priv->dmainfo[index].dma_map);
3463	        if(rxd_priv->bufferArray[index] != NULL)
3464	            m_free(rxd_priv->bufferArray[index]);
3465	        bus_dmamap_destroy(lldev->dma_tag_rx,
3466	            rxd_priv->dmainfo[index].dma_map);
3467	    }
3468	}
3469	xge_os_free(NULL, rxd_priv->bufferArray,
3470	    (sizeof(mbuf_t) * lldev->rxd_mbuf_cnt));
3471
3472	/* Free the descriptor */
3473	xge_hal_ring_dtr_free(channelh, dtrh);
3474
3475rxterm_out:
3476	return;
3477}
3478
3479/**
3480 * xge_tx_term
3481 * During unload, terminate and free all descriptors
3482 *
3483 * @channelh Tx Channel Handle
3484 * @dtr Tx Descriptor Handle
3485 * @state Descriptor State
3486 * @userdata Per-adapter Data
3487 * @reopen Channel open/reopen option
3488 */
3489void
3490xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3491	xge_hal_dtr_state_e state, void *userdata,
3492	xge_hal_channel_reopen_e reopen)
3493{
3494	xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3495	xge_lldev_t   *lldev      = (xge_lldev_t *)userdata;
3496
3497	/* Destroy DMA map */
3498	bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3499}
3500
3501/**
3502 * xge_methods
3503 *
3504 * FreeBSD device interface entry points
3505 */
3506static device_method_t xge_methods[] = {
3507	DEVMETHOD(device_probe,     xge_probe),
3508	DEVMETHOD(device_attach,    xge_attach),
3509	DEVMETHOD(device_detach,    xge_detach),
3510	DEVMETHOD(device_shutdown,  xge_shutdown),
3511
3512	DEVMETHOD_END
3513};
3514
3515static driver_t xge_driver = {
3516	"nxge",
3517	xge_methods,
3518	sizeof(xge_lldev_t),
3519};
3520static devclass_t xge_devclass;
3521DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
3522
3523