if_nxge.c revision 332280
/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/nxge/if_nxge.c 332280 2018-04-08 15:35:57Z brooks $
 */

#include <dev/nxge/if_nxge.h>
#include <dev/nxge/xge-osdep.h>
#include <net/if_arp.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

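/*
 * Print the copyright banner only once; reference-count HAL driver
 * initialization so it is torn down with the last adapter.
 */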
int       copyright_print       = 0;
int       hal_driver_init_count = 0;
size_t    size                  = sizeof(int);

static inline void xge_flush_txds(xge_hal_channel_h);

/**
 * xge_probe
 * Probes for Xframe devices
 *
 * @dev Device handle
 *
 * Returns
 * BUS_PROBE_DEFAULT if device is supported
 * ENXIO if device is not supported
 */
int
xge_probe(device_t dev)
{
	int  devid    = pci_get_device(dev);
	int  vendorid = pci_get_vendor(dev);
	int  retValue = ENXIO;

	if(vendorid == XGE_PCI_VENDOR_ID) {
	    if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
	        (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
	        if(!copyright_print) {
	            xge_os_printf(XGE_COPYRIGHT);
	            copyright_print = 1;
	        }
	        device_set_desc_copy(dev,
	            "Neterion Xframe 10 Gigabit Ethernet Adapter");
	        retValue = BUS_PROBE_DEFAULT;
	    }
	}

	return retValue;
}

/**
 * xge_init_params
 * Sets HAL parameter values (from kenv).
 *
 * @dconfig Device Configuration
 * @dev Device Handle
 */
void
xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
{
	int qindex, tindex, revision;
	device_t checkdev;
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

	dconfig->mtu                   = XGE_DEFAULT_INITIAL_MTU;
	dconfig->pci_freq_mherz        = XGE_DEFAULT_USER_HARDCODED;
	dconfig->device_poll_millis    = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
	dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
	dconfig->mac.rmac_bcast_en     = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
	dconfig->fifo.alignment_size   = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;

	XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
	    XGE_DEFAULT_ENABLED_TSO);
	XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
	    XGE_DEFAULT_ENABLED_LRO);
	XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
	    XGE_DEFAULT_ENABLED_MSI);

	XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
	    XGE_DEFAULT_LATENCY_TIMER);
	XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
	    XGE_DEFAULT_MAX_SPLITS_TRANS);
	XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
	    XGE_DEFAULT_MMRB_COUNT);
	XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
	    XGE_DEFAULT_SHARED_SPLITS);
	XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
	    XGE_DEFAULT_ISR_POLLING_CNT);
	XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
	    stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);

	XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
	    XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
	    XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
	    XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
	    XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
	    XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
	XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
	    mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
	XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
	    mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);

	XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
	    XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
	XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
	    XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
	XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
	    XGE_DEFAULT_FIFO_MAX_FRAGS);

	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
	        XGE_DEFAULT_FIFO_QUEUE_INTR);
	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
	        XGE_DEFAULT_FIFO_QUEUE_MAX);
	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
	        qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);

	    for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
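	        /* Enable and configure every TTI (Tx interrupt) scheme
	         * for this queue. */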
	        dconfig->fifo.queue[qindex].tti[tindex].enabled  = 1;
	        dconfig->fifo.queue[qindex].configured = 1;

	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
	            urange_a, qindex, tindex,
	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
	            urange_b, qindex, tindex,
	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
	            urange_c, qindex, tindex,
	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
	            ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
	            ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
	            ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
	            ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
	            "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
	            "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
	            "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
	    }
	}

	XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
	    XGE_DEFAULT_RING_MEMBLOCK_SIZE);

	XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
	    XGE_DEFAULT_RING_STRIP_VLAN_TAG);

	XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
	    XGE_DEFAULT_BUFFER_MODE);
	if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
	    (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
	    xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
	    lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
	}

	for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
	    dconfig->ring.queue[qindex].max_frm_len  = XGE_HAL_RING_USE_MTU;
	    dconfig->ring.queue[qindex].priority     = 0;
	    dconfig->ring.queue[qindex].configured   = 1;
	    dconfig->ring.queue[qindex].buffer_mode  =
	        (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
	        XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;

	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
	        XGE_DEFAULT_RING_QUEUE_MAX);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
	        qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
	        dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
	        indicate_max_pkts, qindex,
	        XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
	        backoff_interval_us, qindex,
	        XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);

	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
	        timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
	        timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
	        urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
	        urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
	        urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
	}

	if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
	    xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags);
	    xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
	        (int)(PAGE_SIZE / 32));
	    xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32));
	    dconfig->fifo.max_frags = (PAGE_SIZE / 32);
	}

	checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
	if(checkdev != NULL) {
	    /* Check for revision 0x12 or below */
	    revision = pci_read_config(checkdev,
	        xge_offsetof(xge_hal_pci_config_t, revision), 1);
	    if(revision <= 0x12) {
	        /* AMD 8131 workaround: set mmrb_count to 1k and use three
	         * split transactions */
	        dconfig->mmrb_count       = 1;
	        dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
	    }
	}
}

/**
 * xge_rx_buffer_sizes_set
 * Set buffer sizes based on Rx buffer mode
 *
 * @lldev Per-adapter Data
 * @buffer_mode Rx Buffer Mode
 * @mtu Interface MTU
 */
void
xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
{
	int index = 0;
	int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
	int buffer_size = mtu + frame_header;

	xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));

	if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
	    lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;

	lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;

	if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
	    lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;

	if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
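	    /* Mode 5: split the payload across page-sized jumbo mbufs;
	     * the last chunk is aligned to a 128-byte boundary. */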
	    index = 2;
	    buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
	    while(buffer_size > MJUMPAGESIZE) {
	        lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
	        buffer_size -= MJUMPAGESIZE;
	    }
	    XGE_ALIGN_TO(buffer_size, 128);
	    lldev->rxd_mbuf_len[index] = buffer_size;
	    lldev->rxd_mbuf_cnt = index + 1;
	}

	for(index = 0; index < buffer_mode; index++)
	    xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
	        lldev->rxd_mbuf_len[index]);
}

/**
 * xge_buffer_mode_init
 * Init Rx buffer mode
 *
 * @lldev Per-adapter Data
 * @mtu Interface MTU
 */
void
xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
{
	int index = 0, buffer_size = 0;
	xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);

	buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;

	if(lldev->enabled_lro)
	    (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
	else
	    (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;

	lldev->rxd_mbuf_cnt = lldev->buffer_mode;
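	/* 2-buffer mode is realized as the HAL's 3-buffer mode with
	 * scatter mode B; map the requested mode accordingly. */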
	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
	    XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
	    ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
	}
	else {
	    XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
	    ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
	}
	xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);

	xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
	    ((lldev->enabled_tso) ? "Enabled":"Disabled"));
	xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
	    ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
	xge_os_printf("%s: Rx %d Buffer Mode Enabled",
	    device_get_nameunit(lldev->device), lldev->buffer_mode);
}

/**
 * xge_driver_initialize
 * Initializes HAL driver (common for all devices)
 *
 * Returns
 * XGE_HAL_OK if success
 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
 */
int
xge_driver_initialize(void)
{
	xge_hal_uld_cbs_t       uld_callbacks;
	xge_hal_driver_config_t driver_config;
	xge_hal_status_e        status = XGE_HAL_OK;

	/* Initialize HAL driver */
	if(!hal_driver_init_count) {
	    xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
	    xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));

	    /*
	     * Initial and maximum size of the queue used to store the events
	     * like Link up/down (xge_hal_event_e)
	     */
	    driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
	    driver_config.queue_size_max     = XGE_HAL_MAX_QUEUE_SIZE_MAX;

	    uld_callbacks.link_up   = xge_callback_link_up;
	    uld_callbacks.link_down = xge_callback_link_down;
	    uld_callbacks.crit_err  = xge_callback_crit_err;
	    uld_callbacks.event     = xge_callback_event;

	    status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
	    if(status != XGE_HAL_OK) {
	        XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
	            xdi_out, status);
	    }
	}
	hal_driver_init_count = hal_driver_init_count + 1;

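	/* Enable full HAL debug tracing for all modules. */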
	xge_hal_driver_debug_module_mask_set(0xffffffff);
	xge_hal_driver_debug_level_set(XGE_TRACE);

xdi_out:
	return status;
}

/**
 * xge_media_init
 * Initializes, adds and sets media
 *
 * @devc Device Handle
 */
void
xge_media_init(device_t devc)
{
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);

	/* Initialize Media */
	ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
	    xge_ifmedia_status);

	/* Add supported media */
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO,    0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR,  0, NULL);
	ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR,  0, NULL);

	/* Set media */
	ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
}

/**
 * xge_pci_space_save
 * Save PCI configuration space
 *
 * @dev Device Handle
 */
void
xge_pci_space_save(device_t dev)
{
	struct pci_devinfo *dinfo = NULL;

	dinfo = device_get_ivars(dev);
	xge_trace(XGE_TRACE, "Saving PCI configuration space");
	pci_cfg_save(dev, dinfo, 0);
}

/**
 * xge_pci_space_restore
 * Restore saved PCI configuration space
 *
 * @dev Device Handle
 */
void
xge_pci_space_restore(device_t dev)
{
	struct pci_devinfo *dinfo = NULL;

	dinfo = device_get_ivars(dev);
	xge_trace(XGE_TRACE, "Restoring PCI configuration space");
	pci_cfg_restore(dev, dinfo);
}

/**
 * xge_msi_info_save
 * Save MSI info
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_save(xge_lldev_t *lldev)
{
	xge_os_pci_read16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
	    &lldev->msi_info.msi_control);
	xge_os_pci_read32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
	    &lldev->msi_info.msi_lower_address);
	xge_os_pci_read32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
	    &lldev->msi_info.msi_higher_address);
	xge_os_pci_read16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_data),
	    &lldev->msi_info.msi_data);
}

/**
 * xge_msi_info_restore
 * Restore saved MSI info
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_restore(xge_lldev_t *lldev)
{
	/*
	 * If the interface is brought down and up, traffic fails. The MSI
	 * information was observed to get reset on down, so restore it here.
	 */
	xge_os_pci_write16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
	    lldev->msi_info.msi_control);

	xge_os_pci_write32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
	    lldev->msi_info.msi_lower_address);

	xge_os_pci_write32(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
	    lldev->msi_info.msi_higher_address);

	xge_os_pci_write16(lldev->pdev, NULL,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_data),
	    lldev->msi_info.msi_data);
}

/**
 * xge_mutex_init
 * Initializes mutexes used in driver
 *
 * @lldev  Per-adapter Data
 */
void
xge_mutex_init(xge_lldev_t *lldev)
{
	int qindex;

	sprintf(lldev->mtx_name_drv, "%s_drv",
	    device_get_nameunit(lldev->device));
	mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
	    MTX_DEF);

	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
	    sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
	        device_get_nameunit(lldev->device), qindex);
	    mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
	        MTX_DEF);
	}
}

/**
 * xge_mutex_destroy
 * Destroys mutexes used in driver
 *
 * @lldev Per-adapter Data
 */
void
xge_mutex_destroy(xge_lldev_t *lldev)
{
	int qindex;

	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
	    mtx_destroy(&lldev->mtx_tx[qindex]);
	mtx_destroy(&lldev->mtx_drv);
}

/**
 * xge_print_info
 * Print device and driver information
 *
 * @lldev Per-adapter Data
 */
void
xge_print_info(xge_lldev_t *lldev)
{
	device_t dev = lldev->device;
	xge_hal_device_t *hldev = lldev->devh;
	xge_hal_status_e status = XGE_HAL_OK;
	u64 val64 = 0;
	const char *xge_pci_bus_speeds[17] = {
	    "PCI 33MHz Bus",
	    "PCI 66MHz Bus",
	    "PCIX(M1) 66MHz Bus",
	    "PCIX(M1) 100MHz Bus",
	    "PCIX(M1) 133MHz Bus",
	    "PCIX(M2) 133MHz Bus",
	    "PCIX(M2) 200MHz Bus",
	    "PCIX(M2) 266MHz Bus",
	    "PCIX(M1) Reserved",
	    "PCIX(M1) 66MHz Bus (Not Supported)",
	    "PCIX(M1) 100MHz Bus (Not Supported)",
	    "PCIX(M1) 133MHz Bus (Not Supported)",
	    "PCIX(M2) Reserved",
	    "PCIX 533 Reserved",
	    "PCI Basic Mode",
	    "PCIX Basic Mode",
	    "PCI Invalid Mode"
	};

	xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
	    device_get_nameunit(dev),
	    ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
	    hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
	xge_os_printf("%s: Serial Number %s",
	    device_get_nameunit(dev), hldev->vpd_data.serial_num);

	if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
	    status = xge_hal_mgmt_reg_read(hldev, 0,
	        xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
	    if(status != XGE_HAL_OK)
	        xge_trace(XGE_ERR, "Getting bus speed failed");

	    xge_os_printf("%s: Adapter is on %s bit %s",
	        device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
	        (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
	}

	xge_os_printf("%s: Using %s Interrupts",
	    device_get_nameunit(dev),
	    (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
}

/**
 * xge_create_dma_tags
 * Creates DMA tags for both Tx and Rx
 *
 * @dev Device Handle
 *
 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
 */
xge_hal_status_e
xge_create_dma_tags(device_t dev)
{
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
	xge_hal_status_e status = XGE_HAL_FAIL;
	int mtu = (lldev->ifnetp)->if_mtu, maxsize;

	/* DMA tag for Tx */
	status = bus_dma_tag_create(
	    bus_get_dma_tag(dev),                /* Parent                    */
	    PAGE_SIZE,                           /* Alignment                 */
	    0,                                   /* Bounds                    */
	    BUS_SPACE_MAXADDR,                   /* Low Address               */
	    BUS_SPACE_MAXADDR,                   /* High Address              */
	    NULL,                                /* Filter Function           */
	    NULL,                                /* Filter Function Arguments */
	    MCLBYTES * XGE_MAX_SEGS,             /* Maximum Size              */
	    XGE_MAX_SEGS,                        /* Number of Segments        */
	    MCLBYTES,                            /* Maximum Segment Size      */
	    BUS_DMA_ALLOCNOW,                    /* Flags                     */
	    NULL,                                /* Lock Function             */
	    NULL,                                /* Lock Function Arguments   */
	    (&lldev->dma_tag_tx));               /* DMA Tag                   */
	if(status != 0)
	    goto _exit;

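	/* Pick an mbuf class big enough for one Rx buffer: a cluster, a
	 * page-sized jumbo, or a 9k jumbo (mode 5 splits frames across
	 * page-sized jumbos). */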
	maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
	if(maxsize <= MCLBYTES) {
	    maxsize = MCLBYTES;
	}
	else {
	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
	        maxsize = MJUMPAGESIZE;
	    else
	        maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
	}

	/* DMA tag for Rx */
	status = bus_dma_tag_create(
	    bus_get_dma_tag(dev),                /* Parent                    */
	    PAGE_SIZE,                           /* Alignment                 */
	    0,                                   /* Bounds                    */
	    BUS_SPACE_MAXADDR,                   /* Low Address               */
	    BUS_SPACE_MAXADDR,                   /* High Address              */
	    NULL,                                /* Filter Function           */
	    NULL,                                /* Filter Function Arguments */
	    maxsize,                             /* Maximum Size              */
	    1,                                   /* Number of Segments        */
	    maxsize,                             /* Maximum Segment Size      */
	    BUS_DMA_ALLOCNOW,                    /* Flags                     */
	    NULL,                                /* Lock Function             */
	    NULL,                                /* Lock Function Arguments   */
	    (&lldev->dma_tag_rx));               /* DMA Tag                   */
	if(status != 0)
	    goto _exit1;

	status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
	    &lldev->extra_dma_map);
	if(status != 0)
	    goto _exit2;

	status = XGE_HAL_OK;
	goto _exit;

_exit2:
	status = bus_dma_tag_destroy(lldev->dma_tag_rx);
	if(status != 0)
	    xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
_exit1:
	status = bus_dma_tag_destroy(lldev->dma_tag_tx);
	if(status != 0)
	    xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
	status = XGE_HAL_FAIL;
_exit:
	return status;
}

/**
 * xge_confirm_changes
 * Disables and enables the interface to apply the requested change
 *
 * @lldev Per-adapter Data
 * @option The change being applied (XGE_SET_MTU or another xge_option_e)
 */
void
xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
{
	if(lldev->initialized == 0) goto _exit1;

	mtx_lock(&lldev->mtx_drv);
	if_down(lldev->ifnetp);
	xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);

	if(option == XGE_SET_MTU)
	    (lldev->ifnetp)->if_mtu = lldev->mtu;
	else
	    xge_buffer_mode_init(lldev, lldev->mtu);

	xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
	if_up(lldev->ifnetp);
	mtx_unlock(&lldev->mtx_drv);
	goto _exit;

_exit1:
	/* Request was to change MTU and device not initialized */
	if(option == XGE_SET_MTU) {
	    (lldev->ifnetp)->if_mtu = lldev->mtu;
	    xge_buffer_mode_init(lldev, lldev->mtu);
	}
_exit:
	return;
}

/**
 * xge_change_lro_status
 * Enable/Disable LRO feature
 *
 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
 *
 * Returns 0 or error number.
 */
static int
xge_change_lro_status(SYSCTL_HANDLER_ARGS)
{
	xge_lldev_t *lldev = (xge_lldev_t *)arg1;
	int request = lldev->enabled_lro, status = XGE_HAL_OK;

	status = sysctl_handle_int(oidp, &request, arg2, req);
	if((status != XGE_HAL_OK) || (!req->newptr))
	    goto _exit;

	if((request < 0) || (request > 1)) {
	    status = EINVAL;
	    goto _exit;
	}

	/* Return if current and requested states are the same */
	if(request == lldev->enabled_lro) {
	    xge_trace(XGE_ERR, "LRO is already %s",
	        ((request) ? "enabled" : "disabled"));
	    goto _exit;
	}

	lldev->enabled_lro = request;
	xge_confirm_changes(lldev, XGE_CHANGE_LRO);
	arg2 = lldev->enabled_lro;

_exit:
	return status;
}

/**
 * xge_add_sysctl_handlers
 * Registers sysctl parameter value update handlers
 *
 * @lldev Per-adapter data
 */
void
xge_add_sysctl_handlers(xge_lldev_t *lldev)
{
	struct sysctl_ctx_list *context_list =
	    device_get_sysctl_ctx(lldev->device);
	struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);

	SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
	    xge_change_lro_status, "I", "Enable or disable LRO feature");
}

/**
 * xge_attach
 * Connects the driver to the system after a successful probe
 *
 * @dev Device Handle
 */
int
xge_attach(device_t dev)
{
	xge_hal_device_config_t *device_config;
	xge_hal_device_attr_t   attr;
	xge_lldev_t             *lldev;
	xge_hal_device_t        *hldev;
	xge_pci_info_t          *pci_info;
	struct ifnet            *ifnetp;
	int                     rid, rid0, rid1, error;
	int                     msi_count = 0, status = XGE_HAL_OK;
	int                     enable_msi = XGE_HAL_INTR_MODE_IRQLINE;

	device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
	if(!device_config) {
	    XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
	        attach_out_config, ENOMEM);
	}

	lldev = (xge_lldev_t *) device_get_softc(dev);
	if(!lldev) {
	    XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
	}
	lldev->device = dev;

	xge_mutex_init(lldev);

	error = xge_driver_initialize();
	if(error != XGE_HAL_OK) {
	    xge_resources_free(dev, xge_free_mutex);
	    XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
	}

	/* HAL device */
	hldev =
	    (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
	if(!hldev) {
	    xge_resources_free(dev, xge_free_terminate_hal_driver);
	    XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
	        attach_out, ENOMEM);
	}
	lldev->devh = hldev;

	/* Our private structure */
	pci_info =
	    (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
	if(!pci_info) {
	    xge_resources_free(dev, xge_free_hal_device);
	    XGE_EXIT_ON_ERR("Memory allocation for PCI info failed",
	        attach_out, ENOMEM);
	}
	lldev->pdev      = pci_info;
	pci_info->device = dev;

	/* Set bus master */
	pci_enable_busmaster(dev);

	/* Get virtual address for BAR0 */
	rid0 = PCIR_BAR(0);
	pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
	    RF_ACTIVE);
	if(pci_info->regmap0 == NULL) {
	    xge_resources_free(dev, xge_free_pci_info);
	    XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
	        attach_out, ENOMEM);
	}
	attr.bar0 = (char *)pci_info->regmap0;

	pci_info->bar0resource = (xge_bus_resource_t*)
	    xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
	if(pci_info->bar0resource == NULL) {
	    xge_resources_free(dev, xge_free_bar0);
	    XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
	        attach_out, ENOMEM);
	}
	((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
	    rman_get_bustag(pci_info->regmap0);
	((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
	    rman_get_bushandle(pci_info->regmap0);
	((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
	    pci_info->regmap0;

	/* Get virtual address for BAR1 */
	rid1 = PCIR_BAR(2);
	pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
	    RF_ACTIVE);
	if(pci_info->regmap1 == NULL) {
	    xge_resources_free(dev, xge_free_bar0_resource);
	    XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
	        attach_out, ENOMEM);
	}
	attr.bar1 = (char *)pci_info->regmap1;

	pci_info->bar1resource = (xge_bus_resource_t*)
	    xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
	if(pci_info->bar1resource == NULL) {
	    xge_resources_free(dev, xge_free_bar1);
	    XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
	        attach_out, ENOMEM);
	}
	((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
	    rman_get_bustag(pci_info->regmap1);
	((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
	    rman_get_bushandle(pci_info->regmap1);
	((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
	    pci_info->regmap1;

	/* Save PCI config space */
	xge_pci_space_save(dev);

	attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
	attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
	attr.irqh  = lldev->irqhandle;
	attr.cfgh  = pci_info;
	attr.pdev  = pci_info;

	/* Initialize device configuration parameters */
	xge_init_params(device_config, dev);

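	/* rid 0 selects the INTx IRQ resource; it becomes 1 below if an
	 * MSI vector is allocated. */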
	rid = 0;
	if(lldev->enabled_msi) {
	    /* Number of MSI messages supported by device */
	    msi_count = pci_msi_count(dev);
	    if(msi_count > 1) {
	        /* Device supports MSI */
	        if(bootverbose) {
	            xge_trace(XGE_ERR, "MSI count: %d", msi_count);
	            xge_trace(XGE_ERR, "Driver will use only 1 message");
	        }
	        msi_count = 1;
	        error = pci_alloc_msi(dev, &msi_count);
	        if(error == 0) {
	            if(bootverbose)
	                xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
	            enable_msi = XGE_HAL_INTR_MODE_MSI;
	            rid = 1;
	        }
	        else {
	            if(bootverbose)
	                xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
	        }
	    }
	}
	lldev->enabled_msi = enable_msi;

	/* Allocate resource for irq */
	lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    (RF_SHAREABLE | RF_ACTIVE));
	if(lldev->irq == NULL) {
	    xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
	        ((rid == 0) ? "line interrupt" : "MSI"));
	    if(rid == 1) {
	        error = pci_release_msi(dev);
	        if(error != 0) {
	            xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
	                error);
	            xge_trace(XGE_ERR, "Requires reboot to use MSI again");
	        }
	        xge_trace(XGE_ERR, "Trying line interrupts");
	        rid = 0;
	        lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
	        lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	            (RF_SHAREABLE | RF_ACTIVE));
	    }
	    if(lldev->irq == NULL) {
	        xge_trace(XGE_ERR, "Allocating irq resource failed");
	        xge_resources_free(dev, xge_free_bar1_resource);
	        status = ENOMEM;
	        goto attach_out;
	    }
	}

	device_config->intr_mode = lldev->enabled_msi;
	if(bootverbose) {
	    xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
	        lldev->enabled_msi, msi_count);
	}

	/* Initialize HAL device */
	error = xge_hal_device_initialize(hldev, &attr, device_config);
	if(error != XGE_HAL_OK) {
	    xge_resources_free(dev, xge_free_irq_resource);
	    XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
	        ENXIO);
	}

	xge_hal_device_private_set(hldev, lldev);

	error = xge_interface_setup(dev);
	if(error != 0) {
	    status = error;
	    goto attach_out;
	}

	ifnetp         = lldev->ifnetp;
	ifnetp->if_mtu = device_config->mtu;

	xge_media_init(dev);

	/* Associate interrupt handler with the device */
	if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
	    error = bus_setup_intr(dev, lldev->irq,
	        (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
	        NULL,
#endif
	        xge_isr_msi, lldev, &lldev->irqhandle);
	    xge_msi_info_save(lldev);
	}
	else {
	    error = bus_setup_intr(dev, lldev->irq,
	        (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
	        xge_isr_filter,
#endif
	        xge_isr_line, lldev, &lldev->irqhandle);
	}
	if(error != 0) {
	    xge_resources_free(dev, xge_free_media_interface);
	    XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
	        attach_out, ENXIO);
	}

	xge_print_info(lldev);

	xge_add_sysctl_handlers(lldev);

	xge_buffer_mode_init(lldev, device_config->mtu);

attach_out:
	xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
attach_out_config:
	return status;
}

/**
 * xge_resources_free
 * Undo everything done during load/attach
 *
 * @dev Device Handle
 * @error Identifies how much to undo
 */
void
xge_resources_free(device_t dev, xge_lables_e error)
{
	xge_lldev_t *lldev;
	xge_pci_info_t *pci_info;
	xge_hal_device_t *hldev;
	int rid, status;

	/* LL Device */
	lldev = (xge_lldev_t *) device_get_softc(dev);
	pci_info = lldev->pdev;

	/* HAL Device */
	hldev = lldev->devh;

	switch(error) {
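	    /*
	     * Cases fall through intentionally: each label releases the
	     * resource it names and everything acquired before it.
	     */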
	    case xge_free_all:
	        /* Teardown interrupt handler - device association */
	        bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);

	    case xge_free_media_interface:
	        /* Media */
	        ifmedia_removeall(&lldev->media);

	        /* Detach Ether */
	        ether_ifdetach(lldev->ifnetp);
	        if_free(lldev->ifnetp);

	        xge_hal_device_private_set(hldev, NULL);
	        xge_hal_device_disable(hldev);

	    case xge_free_terminate_hal_device:
	        /* HAL Device */
	        xge_hal_device_terminate(hldev);

	    case xge_free_irq_resource:
	        /* Release IRQ resource */
	        bus_release_resource(dev, SYS_RES_IRQ,
	            ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
	            lldev->irq);

	        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
	            status = pci_release_msi(dev);
	            if(status != 0) {
	                if(bootverbose) {
	                    xge_trace(XGE_ERR,
	                        "pci_release_msi returned %d", status);
	                }
	            }
	        }

	    case xge_free_bar1_resource:
	        /* Restore PCI configuration space */
	        xge_pci_space_restore(dev);

	        /* Free bar1resource */
	        xge_os_free(NULL, pci_info->bar1resource,
	            sizeof(xge_bus_resource_t));

	    case xge_free_bar1:
	        /* Release BAR1 */
	        rid = PCIR_BAR(2);
	        bus_release_resource(dev, SYS_RES_MEMORY, rid,
	            pci_info->regmap1);

	    case xge_free_bar0_resource:
	        /* Free bar0resource */
	        xge_os_free(NULL, pci_info->bar0resource,
	            sizeof(xge_bus_resource_t));

	    case xge_free_bar0:
	        /* Release BAR0 */
	        rid = PCIR_BAR(0);
	        bus_release_resource(dev, SYS_RES_MEMORY, rid,
	            pci_info->regmap0);

	    case xge_free_pci_info:
	        /* Disable Bus Master */
	        pci_disable_busmaster(dev);

	        /* Free pci_info_t */
	        lldev->pdev = NULL;
	        xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));

	    case xge_free_hal_device:
	        /* Free device configuration struct and HAL device */
	        xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));

	    case xge_free_terminate_hal_driver:
	        /* Terminate HAL driver */
	        hal_driver_init_count = hal_driver_init_count - 1;
	        if(!hal_driver_init_count) {
	            xge_hal_driver_terminate();
	        }

	    case xge_free_mutex:
	        xge_mutex_destroy(lldev);
	}
}

/**
 * xge_detach
 * Detaches driver from the kernel subsystem
 *
 * @dev Device Handle
 */
int
xge_detach(device_t dev)
{
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

	if(lldev->in_detach == 0) {
	    lldev->in_detach = 1;
	    xge_stop(lldev);
	    xge_resources_free(dev, xge_free_all);
	}

	return 0;
}

/**
 * xge_shutdown
 * Shuts the device down before system shutdown
 *
 * @dev Device Handle
 */
int
xge_shutdown(device_t dev)
{
	xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
	xge_stop(lldev);

	return 0;
}

/**
 * xge_interface_setup
 * Setup interface
 *
 * @dev Device Handle
 *
 * Returns 0 on success, ENXIO/ENOMEM on failure
 */
int
xge_interface_setup(device_t dev)
{
	u8 mcaddr[ETHER_ADDR_LEN];
	xge_hal_status_e status;
	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
	struct ifnet *ifnetp;
	xge_hal_device_t *hldev = lldev->devh;

	/* Get the MAC address of the device */
	status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
	if(status != XGE_HAL_OK) {
	    xge_resources_free(dev, xge_free_terminate_hal_device);
	    XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
	}

	/* Get interface ifnet structure for this Ether device */
	ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
	if(ifnetp == NULL) {
	    xge_resources_free(dev, xge_free_terminate_hal_device);
	    XGE_EXIT_ON_ERR("Allocating ifnet failed", ifsetup_out, ENOMEM);
	}

	/* Initialize interface ifnet structure */
	if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
	ifnetp->if_mtu      = XGE_HAL_DEFAULT_MTU;
	ifnetp->if_baudrate = XGE_BAUDRATE;
	ifnetp->if_init     = xge_init;
	ifnetp->if_softc    = lldev;
	ifnetp->if_flags    = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifnetp->if_ioctl    = xge_ioctl;
	ifnetp->if_start    = xge_send;

	/* TODO: Check and assign optimal value */
	ifnetp->if_snd.ifq_maxlen = ifqmaxlen;

	ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
	    IFCAP_HWCSUM;
	if(lldev->enabled_tso)
	    ifnetp->if_capabilities |= IFCAP_TSO4;
	if(lldev->enabled_lro)
	    ifnetp->if_capabilities |= IFCAP_LRO;

	ifnetp->if_capenable = ifnetp->if_capabilities;

	/* Attach the interface */
	ether_ifattach(ifnetp, mcaddr);

ifsetup_out:
	return status;
}

/**
 * xge_callback_link_up
 * Callback for Link-up indication from HAL
 *
 * @userdata Per-adapter data
 */
void
xge_callback_link_up(void *userdata)
{
	xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
	struct ifnet *ifnetp = lldev->ifnetp;

	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifnetp, LINK_STATE_UP);
}

/**
 * xge_callback_link_down
 * Callback for Link-down indication from HAL
 *
 * @userdata Per-adapter data
 */
void
xge_callback_link_down(void *userdata)
{
	xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
	struct ifnet *ifnetp = lldev->ifnetp;

	ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
	if_link_state_change(ifnetp, LINK_STATE_DOWN);
}

/**
 * xge_callback_crit_err
 * Callback for Critical error indication from HAL
 *
 * @userdata Per-adapter data
 * @type Event type (Enumerated hardware error)
 * @serr_data Hardware status
 */
void
xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
{
	xge_trace(XGE_ERR, "Critical Error");
	xge_reset(userdata);
}

/**
 * xge_callback_event
 * Callback from HAL indicating that some event has been queued
 *
 * @item Queued event item
 */
void
xge_callback_event(xge_queue_item_t *item)
{
	xge_lldev_t      *lldev  = NULL;
	xge_hal_device_t *hldev  = NULL;
	struct ifnet     *ifnetp = NULL;

	hldev  = item->context;
	lldev  = xge_hal_device_private(hldev);
	ifnetp = lldev->ifnetp;

	switch((int)item->event_type) {
	    case XGE_LL_EVENT_TRY_XMIT_AGAIN:
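	        /*
	         * Clear OACTIVE when Tx descriptors are available again;
	         * otherwise requeue the event and retry later.
	         */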
	        if(lldev->initialized) {
	            if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
	                ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	            }
	            else {
	                xge_queue_produce_context(
	                    xge_hal_device_queue(lldev->devh),
	                    XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
	            }
	        }
	        break;

	    case XGE_LL_EVENT_DEVICE_RESETTING:
	        xge_reset(item->context);
	        break;

	    default:
	        break;
	}
}

/**
 * xge_ifmedia_change
 * Media change driver callback
 *
 * @ifnetp Interface Handle
 *
 * Returns 0 if media is Ether else EINVAL
 */
int
xge_ifmedia_change(struct ifnet *ifnetp)
{
	xge_lldev_t    *lldev    = ifnetp->if_softc;
	struct ifmedia *ifmediap = &lldev->media;

	return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ?  EINVAL:0;
}

/**
 * xge_ifmedia_status
 * Media status driver callback
 *
 * @ifnetp Interface Handle
 * @ifmr Interface Media Settings
 */
void
xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
{
	xge_hal_status_e status;
	u64              regvalue;
	xge_lldev_t      *lldev = ifnetp->if_softc;
	xge_hal_device_t *hldev = lldev->devh;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	status = xge_hal_mgmt_reg_read(hldev, 0,
	    xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
	if(status != XGE_HAL_OK) {
	    xge_trace(XGE_TRACE, "Getting adapter status failed");
	    goto _exit;
	}

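	/* The link is up when neither a local nor a remote RMAC fault is
	 * flagged in the adapter status. */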
	if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
	    ifmr->ifm_status |= IFM_ACTIVE;
	    ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
	    if_link_state_change(ifnetp, LINK_STATE_UP);
	}
	else {
	    if_link_state_change(ifnetp, LINK_STATE_DOWN);
	}
_exit:
	return;
}

/**
 * xge_ioctl_stats
 * IOCTL to get statistics
 *
 * @lldev Per-adapter data
 * @ifreqp Interface request
 */
int
xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
{
	xge_hal_status_e status = XGE_HAL_OK;
	char cmd, mode;
	void *info = NULL;
	int retValue;

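	/* The first byte of the user buffer selects the sub-command. */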
	cmd = retValue = fubyte(ifreqp->ifr_data);
	if (retValue == -1)
	    return (EFAULT);

	retValue = EINVAL;
	switch(cmd) {
	    case XGE_QUERY_STATS:
	        mtx_lock(&lldev->mtx_drv);
	        status = xge_hal_stats_hw(lldev->devh,
	            (xge_hal_stats_hw_info_t **)&info);
	        mtx_unlock(&lldev->mtx_drv);
	        if(status == XGE_HAL_OK) {
	            if(copyout(info, ifreqp->ifr_data,
	                sizeof(xge_hal_stats_hw_info_t)) == 0)
	                retValue = 0;
	        }
	        else {
	            xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
	                status);
	        }
	        break;

	    case XGE_QUERY_PCICONF:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_pci_config(lldev->devh, info,
	                sizeof(xge_hal_pci_config_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_pci_config_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR,
	                    "Getting PCI configuration failed (%d)", status);
	            }
	            xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
	        }
	        break;

	    case XGE_QUERY_DEVSTATS:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_device_stats(lldev->devh, info,
	                sizeof(xge_hal_stats_device_info_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_stats_device_info_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR, "Getting device info failed (%d)",
	                    status);
	            }
	            xge_os_free(NULL, info,
	                sizeof(xge_hal_stats_device_info_t));
	        }
	        break;

	    case XGE_QUERY_SWSTATS:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_sw_stats(lldev->devh, info,
	                sizeof(xge_hal_stats_sw_err_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_stats_sw_err_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR,
	                    "Getting tcode statistics failed (%d)", status);
	            }
	            xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
	        }
	        break;

	    case XGE_QUERY_DRIVERSTATS:
	        if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
	            sizeof(xge_driver_stats_t)) == 0) {
	            retValue = 0;
	        }
	        else {
	            xge_trace(XGE_ERR,
	                "Copyout of driver statistics failed (%d)", status);
	        }
	        break;

	    case XGE_READ_VERSION:
	        info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
	        if(info != NULL) {
	            strcpy(info, XGE_DRIVER_VERSION);
	            if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
	                retValue = 0;
	            xge_os_free(NULL, info, XGE_BUFFER_SIZE);
	        }
	        break;

	    case XGE_QUERY_DEVCONF:
	        info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
	        if(info != NULL) {
	            mtx_lock(&lldev->mtx_drv);
	            status = xge_hal_mgmt_device_config(lldev->devh, info,
	                sizeof(xge_hal_device_config_t));
	            mtx_unlock(&lldev->mtx_drv);
	            if(status == XGE_HAL_OK) {
	                if(copyout(info, ifreqp->ifr_data,
	                    sizeof(xge_hal_device_config_t)) == 0)
	                    retValue = 0;
	            }
	            else {
	                xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
	                    status);
	            }
	            xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
	        }
	        break;

	    case XGE_QUERY_BUFFER_MODE:
	        if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
	            sizeof(int)) == 0)
	            retValue = 0;
	        break;

	    case XGE_SET_BUFFER_MODE_1:
	    case XGE_SET_BUFFER_MODE_2:
	    case XGE_SET_BUFFER_MODE_5:
	        mode = (cmd == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
	        if(copyout(&mode, ifreqp->ifr_data, sizeof(mode)) == 0)
	            retValue = 0;
	        break;
	    default:
	        xge_trace(XGE_TRACE, "Unknown ioctl command");
	        retValue = ENOTTY;
	        break;
	}
	return retValue;
}

/**
 * xge_ioctl_registers
 * IOCTL to get registers
 *
 * @lldev Per-adapter data
 * @ifreqp Interface request
 */
int
xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
{
	xge_register_t tmpdata;
	xge_register_t *data;
	u64 *regs = NULL;
	xge_hal_status_e status = XGE_HAL_OK;
	int retValue = EINVAL, offset = 0, index = 0;
	int error;
	u64 val64 = 0;

	error = copyin(ifreqp->ifr_data, &tmpdata, sizeof(tmpdata));
	if (error != 0)
	    return (error);
	data = &tmpdata;

	/* Reading a register */
	if(strcmp(data->option, "-r") == 0) {
	    data->value = 0x0000;
	    mtx_lock(&lldev->mtx_drv);
	    status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
	        &data->value);
	    mtx_unlock(&lldev->mtx_drv);
	    if(status == XGE_HAL_OK) {
	        if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
	            retValue = 0;
	    }
	}
	/* Writing to a register */
	else if(strcmp(data->option, "-w") == 0) {
	    mtx_lock(&lldev->mtx_drv);
	    status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
	        data->value);
	    if(status == XGE_HAL_OK) {
	        val64 = 0x0000;
	        status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
	            &val64);
	        if(status != XGE_HAL_OK) {
	            xge_trace(XGE_ERR, "Reading back updated register failed");
	        }
	        else {
	            if(val64 != data->value) {
	                xge_trace(XGE_ERR,
	                    "Read and written register values mismatched");
	            }
	            else retValue = 0;
	        }
	    }
	    else {
	        xge_trace(XGE_ERR, "Writing register value failed");
	    }
	    mtx_unlock(&lldev->mtx_drv);
	}
	else {
	    /*
	     * Dump the whole BAR0 register space. tmpdata holds only a
	     * single xge_register_t, so collect the values into a buffer
	     * big enough for all registers before copying it out.
	     */
	    regs = xge_os_malloc(NULL, sizeof(xge_hal_pci_bar0_t));
	    if(regs == NULL)
	        return ENOMEM;
	    mtx_lock(&lldev->mtx_drv);
	    for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
	        index++, offset += 0x0008) {
	        val64 = 0;
	        status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
	        if(status != XGE_HAL_OK) {
	            xge_trace(XGE_ERR, "Getting register value failed");
	            break;
	        }
	        regs[index] = val64;
	        retValue = 0;
	    }
	    mtx_unlock(&lldev->mtx_drv);

	    if(retValue == 0) {
	        if(copyout(regs, ifreqp->ifr_data,
	            sizeof(xge_hal_pci_bar0_t)) != 0) {
	            xge_trace(XGE_ERR, "Copyout of register values failed");
	            retValue = EINVAL;
	        }
	    }
	    else {
	        xge_trace(XGE_ERR, "Getting register values failed");
	    }
	    xge_os_free(NULL, regs, sizeof(xge_hal_pci_bar0_t));
	}
	return retValue;
}

/**
 * xge_ioctl
 * Callback to control the device - Interface configuration
 *
 * @ifnetp Interface Handle
 * @command Device control command
 * @data Parameters associated with command (if any)
 */
int
xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
{
	struct ifreq   *ifreqp   = (struct ifreq *)data;
	xge_lldev_t    *lldev    = ifnetp->if_softc;
	struct ifmedia *ifmediap = &lldev->media;
	int             retValue = 0, mask = 0;

	if(lldev->in_detach) {
	    return retValue;
	}

	switch(command) {
	    /* Set/Get ifnet address */
	    case SIOCSIFADDR:
	    case SIOCGIFADDR:
	        ether_ioctl(ifnetp, command, data);
	        break;

	    /* Set ifnet MTU */
	    case SIOCSIFMTU:
	        retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
	        break;

	    /* Set ifnet flags */
	    case SIOCSIFFLAGS:
	        if(ifnetp->if_flags & IFF_UP) {
	            /* Link status is UP */
	            if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
	                xge_init(lldev);
	            }
	            xge_disable_promisc(lldev);
	            xge_enable_promisc(lldev);
	        }
	        else {
	            /* Link status is DOWN */
	            /* If device is running, stop it */
	            if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
	                xge_stop(lldev);
	            }
	        }
	        break;

	    /* Add/delete multicast address */
	    case SIOCADDMULTI:
	    case SIOCDELMULTI:
	        if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
	            xge_setmulti(lldev);
	        }
	        break;

	    /* Set/Get net media */
	    case SIOCSIFMEDIA:
	    case SIOCGIFMEDIA:
	        retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
	        break;

	    /* Set capabilities */
	    case SIOCSIFCAP:
	        mtx_lock(&lldev->mtx_drv);
	        mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1670	        if(mask & IFCAP_TXCSUM) {
1671	            if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1672	                ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1673	                ifnetp->if_hwassist &=
1674	                    ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1675	            }
1676	            else {
1677	                ifnetp->if_capenable |= IFCAP_TXCSUM;
1678	                ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1679	            }
1680	        }
1681	        if(mask & IFCAP_TSO4) {
1682	            if(ifnetp->if_capenable & IFCAP_TSO4) {
1683	                ifnetp->if_capenable &= ~IFCAP_TSO4;
1684	                ifnetp->if_hwassist  &= ~CSUM_TSO;
1685
1686	                xge_os_printf("%s: TSO Disabled",
1687	                    device_get_nameunit(lldev->device));
1688	            }
1689	            else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1690	                ifnetp->if_capenable |= IFCAP_TSO4;
1691	                ifnetp->if_hwassist  |= CSUM_TSO;
1692
1693	                xge_os_printf("%s: TSO Enabled",
1694	                    device_get_nameunit(lldev->device));
1695	            }
1696	        }
1697
1698	        mtx_unlock(&lldev->mtx_drv);
1699	        break;
1700
1701	    /* Custom IOCTL 0 */
1702	    case SIOCGPRIVATE_0:
1703	        retValue = xge_ioctl_stats(lldev, ifreqp);
1704	        break;
1705
1706	    /* Custom IOCTL 1 */
1707	    case SIOCGPRIVATE_1:
1708	        retValue = xge_ioctl_registers(lldev, ifreqp);
1709	        break;
1710
1711	    default:
1712	        retValue = EINVAL;
1713	        break;
1714	}
1715	return retValue;
1716}
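
/*
 * A userland sketch of driving the private ioctls above; this is a
 * hypothetical standalone tool, not part of the driver. It assumes,
 * per the register-dump branch of xge_ioctl_registers(), that handing
 * SIOCGPRIVATE_1 a zeroed buffer of sizeof(xge_hal_pci_bar0_t) via
 * ifr_data returns the full register set; the exact request encoding
 * copied in ahead of the dump is driver-private.
 *
 *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *     struct ifreq ifr;
 *     xge_hal_pci_bar0_t regs;
 *
 *     memset(&ifr, 0, sizeof(ifr));
 *     memset(&regs, 0, sizeof(regs));
 *     strlcpy(ifr.ifr_name, "nxge0", sizeof(ifr.ifr_name));
 *     ifr.ifr_data = (caddr_t)&regs;
 *     if(ioctl(fd, SIOCGPRIVATE_1, &ifr) == -1)
 *         err(1, "SIOCGPRIVATE_1");
 */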
1717
1718/**
1719 * xge_init
1720 * Initialize the interface
1721 *
1722 * @plldev Per-adapter Data
1723 */
1724void
1725xge_init(void *plldev)
1726{
1727	xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1728
1729	mtx_lock(&lldev->mtx_drv);
1730	xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1731	xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1732	mtx_unlock(&lldev->mtx_drv);
1733}
1734
1735/**
1736 * xge_device_init
1737 * Initialize the interface (called by holding lock)
1738 *
1739 * @lldev Per-adapter Data
 * @option Channel open/reopen option
1740 */
1741void
1742xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1743{
1744	struct ifnet     *ifnetp = lldev->ifnetp;
1745	xge_hal_device_t *hldev  = lldev->devh;
1746	struct ifaddr      *ifaddrp;
1747	unsigned char      *macaddr;
1748	struct sockaddr_dl *sockaddrp;
1749	int                 status   = XGE_HAL_OK;
1750
1751	mtx_assert((&lldev->mtx_drv), MA_OWNED);
1752
1753	/* If the device is already running, initialization is not required */
1754	if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1755	    return;
1756
1757	/* Initializing timer */
1758	callout_init(&lldev->timer, 1);
1759
1760	xge_trace(XGE_TRACE, "Set MTU size");
1761	status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1762	if(status != XGE_HAL_OK) {
1763	    xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1764	    goto _exit;
1765	}
1766
1767	/* Enable HAL device */
1768	xge_hal_device_enable(hldev);
1769
1770	/* Get MAC address and update in HAL */
1771	ifaddrp             = ifnetp->if_addr;
1772	sockaddrp           = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1773	sockaddrp->sdl_type = IFT_ETHER;
1774	sockaddrp->sdl_alen = ifnetp->if_addrlen;
1775	macaddr             = LLADDR(sockaddrp);
1776	xge_trace(XGE_TRACE,
1777	    "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1778	    *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1779	    *(macaddr + 4), *(macaddr + 5));
1780	status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1781	if(status != XGE_HAL_OK)
1782	    xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1783
1784	/* Opening channels */
1785	mtx_unlock(&lldev->mtx_drv);
1786	status = xge_channel_open(lldev, option);
1787	mtx_lock(&lldev->mtx_drv);
1788	if(status != XGE_HAL_OK)
1789	    goto _exit;
1790
1791	/* Set appropriate flags */
1792	ifnetp->if_drv_flags  |=  IFF_DRV_RUNNING;
1793	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1794
1795	/* Checksum capability */
1796	ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1797	    (CSUM_TCP | CSUM_UDP) : 0;
1798
1799	if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1800	    ifnetp->if_hwassist |= CSUM_TSO;
1801
1802	/* Enable interrupts */
1803	xge_hal_device_intr_enable(hldev);
1804
1805	callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1806
1807	/* Enable promiscuous mode, if the interface requested it */
1808	xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1809	xge_enable_promisc(lldev);
1810
1811	/* Device is initialized */
1812	lldev->initialized = 1;
1813	xge_os_mdelay(1000);
1814
1815_exit:
1816	return;
1817}
1818
1819/**
1820 * xge_timer
1821 * Timer timeout function to handle link status
1822 *
1823 * @devp Per-adapter Data
1824 */
1825void
1826xge_timer(void *devp)
1827{
1828	xge_lldev_t      *lldev = (xge_lldev_t *)devp;
1829	xge_hal_device_t *hldev = lldev->devh;
1830
1831	/* Poll for changes */
1832	xge_hal_device_poll(hldev);
1833
1834	/* Reset timer */
1835	callout_reset(&lldev->timer, hz, xge_timer, lldev);
1836
1837	return;
1838}
1839
1840/**
1841 * xge_stop
1842 * De-activate the interface
1843 *
1844 * @lldev Per-adapter Data
1845 */
1846void
1847xge_stop(xge_lldev_t *lldev)
1848{
1849	mtx_lock(&lldev->mtx_drv);
1850	xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1851	mtx_unlock(&lldev->mtx_drv);
1852}
1853
1854/**
1855 * xge_isr_filter
1856 * ISR filter function - to filter interrupts from other devices (shared)
1857 *
1858 * @handle Per-adapter Data
1859 *
1860 * Returns
1861 * FILTER_STRAY if interrupt is from other device
1862 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1863 */
1864int
1865xge_isr_filter(void *handle)
1866{
1867	xge_lldev_t *lldev       = (xge_lldev_t *)handle;
1868	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1869	u16 retValue = FILTER_STRAY;
1870	u64 val64    = 0;
1871
1872	XGE_DRV_STATS(isr_filter);
1873
1874	val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1875	    &bar0->general_int_status);
1876	retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1877
1878	return retValue;
1879}
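
/*
 * A minimal sketch of how a filter/handler pair like this is typically
 * registered from a FreeBSD attach routine; names such as irq_resource
 * and intr_cookie are illustrative, and the driver's actual attach
 * code may differ:
 *
 *     void *intr_cookie;
 *
 *     if(bus_setup_intr(dev, irq_resource, INTR_TYPE_NET | INTR_MPSAFE,
 *         xge_isr_filter, xge_isr_line, lldev, &intr_cookie) != 0)
 *         device_printf(dev, "failed to set up interrupt\n");
 *
 * The filter runs in primary interrupt context and only decides
 * ownership; returning FILTER_SCHEDULE_THREAD makes xge_isr_line run
 * in the interrupt thread.
 */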
1880
1881/**
1882 * xge_isr_line
1883 * Interrupt service routine for Line interrupts
1884 *
1885 * @plldev Per-adapter Data
1886 */
1887void
1888xge_isr_line(void *plldev)
1889{
1890	xge_hal_status_e status;
1891	xge_lldev_t      *lldev   = (xge_lldev_t *)plldev;
1892	xge_hal_device_t *hldev   = (xge_hal_device_t *)lldev->devh;
1893	struct ifnet     *ifnetp  = lldev->ifnetp;
1894
1895	XGE_DRV_STATS(isr_line);
1896
1897	if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1898	    status = xge_hal_device_handle_irq(hldev);
1899	    if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1900	        xge_send(ifnetp);
1901	}
1902}
1903
1904/**
1905 * xge_isr_msi
1906 * Interrupt service routine for Message Signaled Interrupts
 *
 * @plldev Per-adapter Data
1907 */
1908void
1909xge_isr_msi(void *plldev)
1910{
1911	xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1912	XGE_DRV_STATS(isr_msi);
1913	xge_hal_device_continue_irq(lldev->devh);
1914}
1915
1916/**
1917 * xge_rx_open
1918 * Initiate and open all Rx channels
1919 *
1920 * @qid Ring Index
1921 * @lldev Per-adapter Data
1922 * @rflag Channel open/close/reopen flag
1923 *
1924 * Returns 0 or Error Number
1925 */
1926int
1927xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1928{
1929	u64 adapter_status = 0x0;
1930	xge_hal_status_e status = XGE_HAL_FAIL;
1931
1932	xge_hal_channel_attr_t attr = {
1933	    .post_qid      = qid,
1934	    .compl_qid     = 0,
1935	    .callback      = xge_rx_compl,
1936	    .per_dtr_space = sizeof(xge_rx_priv_t),
1937	    .flags         = 0,
1938	    .type          = XGE_HAL_CHANNEL_TYPE_RING,
1939	    .userdata      = lldev,
1940	    .dtr_init      = xge_rx_initial_replenish,
1941	    .dtr_term      = xge_rx_term
1942	};
1943
1944	/* If device is not ready, return */
1945	status = xge_hal_device_status(lldev->devh, &adapter_status);
1946	if(status != XGE_HAL_OK) {
1947	    xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1948	    XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1949	}
1950	else {
1951	    status = xge_hal_channel_open(lldev->devh, &attr,
1952	        &lldev->ring_channel[qid], rflag);
1953	}
1954
1955_exit:
1956	return status;
1957}
1958
1959/**
1960 * xge_tx_open
1961 * Initialize and open all Tx channels
1962 *
1963 * @lldev Per-adapter Data
1964 * @tflag Channel open/close/reopen flag
1965 *
1966 * Returns 0 or Error Number
1967 */
1968int
1969xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1970{
1971	xge_hal_status_e status = XGE_HAL_FAIL;
1972	u64 adapter_status = 0x0;
1973	int qindex, index;
1974
1975	xge_hal_channel_attr_t attr = {
1976	    .compl_qid     = 0,
1977	    .callback      = xge_tx_compl,
1978	    .per_dtr_space = sizeof(xge_tx_priv_t),
1979	    .flags         = 0,
1980	    .type          = XGE_HAL_CHANNEL_TYPE_FIFO,
1981	    .userdata      = lldev,
1982	    .dtr_init      = xge_tx_initial_replenish,
1983	    .dtr_term      = xge_tx_term
1984	};
1985
1986	/* If device is not ready, return */
1987	status = xge_hal_device_status(lldev->devh, &adapter_status);
1988	if(status != XGE_HAL_OK) {
1989	    xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1990	    XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1991	}
1992
1993	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
1994	    attr.post_qid = qindex;
1995	    status = xge_hal_channel_open(lldev->devh, &attr,
1996	        &lldev->fifo_channel[qindex], tflag);
1997	    if(status != XGE_HAL_OK) {
1998	        for(index = 0; index < qindex; index++)
1999	            xge_hal_channel_close(lldev->fifo_channel[index], tflag);
	        break; /* Preserve the error; don't try the remaining FIFOs */
2000	    }
2001	}
2002
2003_exit:
2004	return status;
2005}
2006
2007/**
2008 * xge_enable_msi
2009 * Enables MSI
2010 *
2011 * @lldev Per-adapter Data
2012 */
2013void
2014xge_enable_msi(xge_lldev_t *lldev)
2015{
2016	xge_list_t        *item    = NULL;
2017	xge_hal_device_t  *hldev   = lldev->devh;
2018	xge_hal_channel_t *channel = NULL;
2019	u16 offset = 0, val16 = 0;
2020
2021	xge_os_pci_read16(lldev->pdev, NULL,
2022	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2023
2024	/* Update msi_data */
2025	offset = (val16 & 0x80) ? 0x4c : 0x48;
2026	xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2027	if(val16 & 0x1)
2028	    val16 &= 0xfffe;
2029	else
2030	    val16 |= 0x1;
2031	xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2032
2033	/* Update msi_control */
2034	xge_os_pci_read16(lldev->pdev, NULL,
2035	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2036	val16 |= 0x10;
2037	xge_os_pci_write16(lldev->pdev, NULL,
2038	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2039
2040	/* Set TxMAT and RxMAT registers with MSI */
2041	xge_list_for_each(item, &hldev->free_channels) {
2042	    channel = xge_container_of(item, xge_hal_channel_t, item);
2043	    xge_hal_channel_msi_set(channel, 1, (u32)val16);
2044	}
2045}
2046
2047/**
2048 * xge_channel_open
2049 * Open both Tx and Rx channels
2050 *
2051 * @lldev Per-adapter Data
2052 * @option Channel reopen option
2053 */
2054int
2055xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2056{
2057	xge_lro_entry_t *lro_session = NULL;
2058	xge_hal_status_e status   = XGE_HAL_OK;
2059	int index = 0, index2 = 0;
2060
2061	if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2062	    xge_msi_info_restore(lldev);
2063	    xge_enable_msi(lldev);
2064	}
2065
2066_exit2:
2067	status = xge_create_dma_tags(lldev->device);
2068	if(status != XGE_HAL_OK)
2069	    XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2070
2071	/* Open ring (Rx) channel */
2072	for(index = 0; index < XGE_RING_COUNT; index++) {
2073	    status = xge_rx_open(index, lldev, option);
2074	    if(status != XGE_HAL_OK) {
2075	        /*
2076	         * DMA mapping can fail on kernels that cannot allocate
2077	         * physically contiguous memory for jumbo frames.
2078	         * Fall back to 5-buffer mode.
2079	         */
2080	        if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2081	            (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2082	            MJUMPAGESIZE)) {
2083	            /* Close so far opened channels */
2084	            for(index2 = 0; index2 < index; index2++) {
2085	                xge_hal_channel_close(lldev->ring_channel[index2],
2086	                    option);
2087	            }
2088
2089	            /* Destroy the DMA tags that were created for 1-buffer mode */
2090	            if(bus_dmamap_destroy(lldev->dma_tag_rx,
2091	                lldev->extra_dma_map)) {
2092	                xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2093	            }
2094	            if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2095	                xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2096	            if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2097	                xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2098
2099	            /* Switch to 5 buffer mode */
2100	            lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2101	            xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2102
2103	            /* Restart init */
2104	            goto _exit2;
2105	        }
2106	        else {
2107	            XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
2108	                status);
2109	        }
2110	    }
2111	}
2112
2113	if(lldev->enabled_lro) {
2114	    SLIST_INIT(&lldev->lro_free);
2115	    SLIST_INIT(&lldev->lro_active);
2116	    lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2117
2118	    for(index = 0; index < lldev->lro_num; index++) {
2119	        lro_session = (xge_lro_entry_t *)
2120	            xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2121	        if(lro_session == NULL) {
2122	            lldev->lro_num = index;
2123	            break;
2124	        }
2125	        SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2126	    }
2127	}
2128
2129	/* Open FIFO (Tx) channel */
2130	status = xge_tx_open(lldev, option);
2131	if(status != XGE_HAL_OK)
2132	    XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2133
2134	goto _exit;
2135
2136_exit1:
2137	/*
2138	 * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2139	 * Initialization of LRO failed (index is XGE_RING_COUNT)
2140	 * Opening Tx channel failed    (index is XGE_RING_COUNT)
2141	 */
2142	for(index2 = 0; index2 < index; index2++)
2143	    xge_hal_channel_close(lldev->ring_channel[index2], option);
2144
2145_exit:
2146	return status;
2147}
2148
2149/**
2150 * xge_channel_close
2151 * Close both Tx and Rx channels
2152 *
2153 * @lldev Per-adapter Data
2154 * @option Channel reopen option
2155 *
2156 */
2157void
2158xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2159{
2160	int qindex = 0;
2161
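	/*
	 * One-second settle delay, presumably to let in-flight DMA and
	 * interrupts quiesce before the channels are torn down.
	 */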
2162	DELAY(1000 * 1000);
2163
2164	/* Close FIFO (Tx) channel */
2165	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2166	    xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2167
2168	/* Close Ring (Rx) channels */
2169	for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2170	    xge_hal_channel_close(lldev->ring_channel[qindex], option);
2171
2172	if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2173	    xge_trace(XGE_ERR, "Rx extra map destroy failed");
2174	if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2175	    xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2176	if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2177	    xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2178}
2179
2180/**
2181 * dmamap_cb
2182 * DMA map callback
2183 *
2184 * @arg Parameter passed from dmamap
2185 * @segs Segments
2186 * @nseg Number of segments
2187 * @error Error
2188 */
2189void
2190dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2191{
2192	if(!error) {
2193	    *(bus_addr_t *) arg = segs->ds_addr;
2194	}
2195}
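
/*
 * A sketch of the single-segment load this callback serves; dma_tag,
 * dma_map, vaddr and size are illustrative names:
 *
 *     bus_addr_t paddr = 0;
 *
 *     if(bus_dmamap_load(dma_tag, dma_map, vaddr, size, dmamap_cb,
 *         &paddr, BUS_DMA_NOWAIT) == 0) {
 *         // paddr now holds the bus address of the first segment
 *     }
 */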
2196
2197/**
2198 * xge_reset
2199 * Device Reset
2200 *
2201 * @lldev Per-adapter Data
2202 */
2203void
2204xge_reset(xge_lldev_t *lldev)
2205{
2206	xge_trace(XGE_TRACE, "Resetting the chip");
2207
2208	/* If the device is not initialized, return */
2209	if(lldev->initialized) {
2210	    mtx_lock(&lldev->mtx_drv);
2211	    xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2212	    xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2213	    mtx_unlock(&lldev->mtx_drv);
2214	}
2215
2216	return;
2217}
2218
2219/**
2220 * xge_setmulti
2221 * Program the device's multicast address list
2222 *
2223 * @lldev Per-adapter Data
2224 */
2225void
2226xge_setmulti(xge_lldev_t *lldev)
2227{
2228	struct ifmultiaddr *ifma;
2229	u8                 *lladdr;
2230	xge_hal_device_t   *hldev        = (xge_hal_device_t *)lldev->devh;
2231	struct ifnet       *ifnetp       = lldev->ifnetp;
2232	int                index         = 0;
2233	int                offset        = 1;
2234	int                table_size    = 47;
2235	xge_hal_status_e   status        = XGE_HAL_OK;
2236	u8                 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2237
2238	if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2239	    status = xge_hal_device_mcast_enable(hldev);
2240	    lldev->all_multicast = 1;
2241	}
2242	else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2243	    status = xge_hal_device_mcast_disable(hldev);
2244	    lldev->all_multicast = 0;
2245	}
2246
2247	if(status != XGE_HAL_OK) {
2248	    xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2249	    goto _exit;
2250	}
2251
2252	/* Updating address list */
2253	if_maddr_rlock(ifnetp);
2254	index = 0;
2255	TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2256	    if(ifma->ifma_addr->sa_family != AF_LINK) {
2257	        continue;
2258	    }
2259	    lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2260	    index += 1;
2261	}
2262	if_maddr_runlock(ifnetp);
2263
2264	if((!lldev->all_multicast) && (index)) {
2265	    lldev->macaddr_count = (index + 1);
2266	    if(lldev->macaddr_count > table_size) {
2267	        goto _exit;
2268	    }
2269
2270	    /* Clear old addresses */
2271	    for(index = 0; index < 48; index++) {
2272	        xge_hal_device_macaddr_set(hldev, (offset + index),
2273	            initial_addr);
2274	    }
2275	}
2276
2277	/* Add new addresses */
2278	if_maddr_rlock(ifnetp);
2279	index = 0;
2280	TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2281	    if(ifma->ifma_addr->sa_family != AF_LINK) {
2282	        continue;
2283	    }
2284	    lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2285	    xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2286	    index += 1;
2287	}
2288	if_maddr_runlock(ifnetp);
2289
2290_exit:
2291	return;
2292}
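
/*
 * Note: entry 0 of the device's MAC address table holds the primary
 * unicast address (programmed in xge_device_init()), which is why the
 * multicast entries above start at offset 1 and at most table_size (47)
 * additional addresses are accepted.
 */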
2293
2294/**
2295 * xge_enable_promisc
2296 * Enable Promiscuous Mode
2297 *
2298 * @lldev Per-adapter Data
2299 */
2300void
2301xge_enable_promisc(xge_lldev_t *lldev)
2302{
2303	struct ifnet *ifnetp = lldev->ifnetp;
2304	xge_hal_device_t *hldev = lldev->devh;
2305	xge_hal_pci_bar0_t *bar0 = NULL;
2306	u64 val64 = 0;
2307
2308	bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2309
2310	if(ifnetp->if_flags & IFF_PROMISC) {
2311	    xge_hal_device_promisc_enable(lldev->devh);
2312
2313	    /*
2314	     * When operating in promiscuous mode, don't strip the VLAN tag
2315	     */
2316	    val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2317	        &bar0->rx_pa_cfg);
2318	    val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2319	    val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2320	    xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2321	        &bar0->rx_pa_cfg);
2322
2323	    xge_trace(XGE_TRACE, "Promiscuous mode ON");
2324	}
2325}
2326
2327/**
2328 * xge_disable_promisc
2329 * Disable Promiscuous Mode
2330 *
2331 * @lldev Per-adapter Data
2332 */
2333void
2334xge_disable_promisc(xge_lldev_t *lldev)
2335{
2336	xge_hal_device_t *hldev = lldev->devh;
2337	xge_hal_pci_bar0_t *bar0 = NULL;
2338	u64 val64 = 0;
2339
2340	bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2341
2342	xge_hal_device_promisc_disable(lldev->devh);
2343
2344	/*
2345	 * Strip VLAN tag when operating in non-promiscuous mode
2346	 */
2347	val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2348	    &bar0->rx_pa_cfg);
2349	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2350	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2351	xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2352	    &bar0->rx_pa_cfg);
2353
2354	xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2355}
2356
2357/**
2358 * xge_change_mtu
2359 * Change interface MTU to a requested valid size
2360 *
2361 * @lldev Per-adapter Data
2362 * @new_mtu Requested MTU
2363 *
2364 * Returns 0 or Error Number
2365 */
2366int
2367xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2368{
2369	int status = XGE_HAL_OK;
2370
2371	/* Check requested MTU size for boundary */
2372	if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2373	    XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2374	}
2375
2376	lldev->mtu = new_mtu;
2377	xge_confirm_changes(lldev, XGE_SET_MTU);
2378
2379_exit:
2380	return status;
2381}
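
/*
 * From userland this path is reached through the standard MTU ioctl:
 * e.g. "ifconfig nxge0 mtu 9000" issues SIOCSIFMTU, which xge_ioctl()
 * forwards here. Out-of-range values are rejected with EINVAL by the
 * HAL bounds check above before any reconfiguration happens.
 */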
2382
2383/**
2384 * xge_device_stop
2385 *
2386 * Common code for both stop and part of reset. Disables device, interrupts and
2387 * closes channels
2388 *
2389 * @lldev Per-adapter Data
2390 * @option Channel normal/reset option
2391 */
2392void
2393xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2394{
2395	xge_hal_device_t *hldev  = lldev->devh;
2396	struct ifnet     *ifnetp = lldev->ifnetp;
2397	u64               val64  = 0;
2398
2399	mtx_assert((&lldev->mtx_drv), MA_OWNED);
2400
2401	/* If device is not in "Running" state, return */
2402	if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2403	    goto _exit;
2404
2405	/* Set appropriate flags */
2406	ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2407
2408	/* Stop timer */
2409	callout_stop(&lldev->timer);
2410
2411	/* Disable interrupts */
2412	xge_hal_device_intr_disable(hldev);
2413
2414	mtx_unlock(&lldev->mtx_drv);
2415	xge_queue_flush(xge_hal_device_queue(lldev->devh));
2416	mtx_lock(&lldev->mtx_drv);
2417
2418	/* Disable HAL device */
2419	if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2420	    xge_trace(XGE_ERR, "Disabling HAL device failed");
2421	    xge_hal_device_status(hldev, &val64);
2422	    xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2423	}
2424
2425	/* Close Tx and Rx channels */
2426	xge_channel_close(lldev, option);
2427
2428	/* Reset HAL device */
2429	xge_hal_device_reset(hldev);
2430
2431	xge_os_mdelay(1000);
2432	lldev->initialized = 0;
2433
2434	if_link_state_change(ifnetp, LINK_STATE_DOWN);
2435
2436_exit:
2437	return;
2438}
2439
2440/**
2441 * xge_set_mbuf_cflags
2442 * Set checksum flags on the mbuf
2443 *
2444 * @pkt Packet
2445 */
2446void
2447xge_set_mbuf_cflags(mbuf_t pkt)
2448{
2449	pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2450	pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2451	pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2452	pkt->m_pkthdr.csum_data = htons(0xffff);
2453}
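
/*
 * With CSUM_DATA_VALID | CSUM_PSEUDO_HDR set and csum_data 0xffff, the
 * stack treats the TCP/UDP checksum as fully verified by hardware, and
 * CSUM_IP_CHECKED | CSUM_IP_VALID does the same for the IP header
 * checksum, so no software verification is performed.
 */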
2454
2455/**
2456 * xge_lro_flush_sessions
2457 * Flush LRO session and send accumulated LRO packet to upper layer
2458 *
2459 * @lldev Per-adapter Data
2460 */
2461void
2462xge_lro_flush_sessions(xge_lldev_t *lldev)
2463{
2464	xge_lro_entry_t *lro_session = NULL;
2465
2466	while(!SLIST_EMPTY(&lldev->lro_active)) {
2467	    lro_session = SLIST_FIRST(&lldev->lro_active);
2468	    SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2469	    xge_lro_flush(lldev, lro_session);
2470	}
2471}
2472
2473/**
2474 * xge_lro_flush
2475 * Flush LRO session. Send accumulated LRO packet to upper layer
2476 *
2477 * @lldev Per-adapter Data
2478 * @lro LRO session to be flushed
2479 */
2480static void
2481xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2482{
2483	struct ip *header_ip;
2484	struct tcphdr *header_tcp;
2485	u32 *ptr;
2486
2487	if(lro_session->append_cnt) {
2488	    header_ip = lro_session->lro_header_ip;
2489	    header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2490	    lro_session->m_head->m_pkthdr.len = lro_session->len;
2491	    header_tcp = (struct tcphdr *)(header_ip + 1);
2492	    header_tcp->th_ack = lro_session->ack_seq;
2493	    header_tcp->th_win = lro_session->window;
2494	    if(lro_session->timestamp) {
2495	        ptr = (u32 *)(header_tcp + 1);
2496	        ptr[1] = htonl(lro_session->tsval);
2497	        ptr[2] = lro_session->tsecr;
2498	    }
2499	}
2500
2501	(*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2502	lro_session->m_head = NULL;
2503	lro_session->timestamp = 0;
2504	lro_session->append_cnt = 0;
2505	SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2506}
2507
2508/**
2509 * xge_lro_accumulate
2510 * Accumulate packets to form a large LRO packet based on various conditions
2511 *
2512 * @lldev Per-adapter Data
2513 * @m_head Current Packet
2514 *
2515 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2516 */
2517static int
2518xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2519{
2520	struct ether_header *header_ethernet;
2521	struct ip *header_ip;
2522	struct tcphdr *header_tcp;
2523	u32 seq, *ptr;
2524	struct mbuf *buffer_next, *buffer_tail;
2525	xge_lro_entry_t *lro_session;
2526	xge_hal_status_e status = XGE_HAL_FAIL;
2527	int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2528	int trim;
2529
2530	/* Get Ethernet header */
2531	header_ethernet = mtod(m_head, struct ether_header *);
2532
2533	/* Return if it is not an IP packet */
2534	if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
2535	    goto _exit;
2536
2537	/* Get IP header */
2538	header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2539	    (struct ip *)(header_ethernet + 1) :
2540	    mtod(m_head->m_next, struct ip *);
2541
2542	/* Return if it is not a TCP packet */
2543	if(header_ip->ip_p != IPPROTO_TCP)
2544	    goto _exit;
2545
2546	/* Return if packet has options */
2547	if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2548	    goto _exit;
2549
2550	/* Return if packet is fragmented */
2551	if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2552	    goto _exit;
2553
2554	/* Get TCP header */
2555	header_tcp = (struct tcphdr *)(header_ip + 1);
2556
2557	/* Return if any flag other than ACK or PUSH is set */
2558	if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2559	    goto _exit;
2560
2561	/* Only timestamp option is handled */
2562	tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2563	tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2564	ptr = (u32 *)(header_tcp + 1);
2565	if(tcp_options != 0) {
2566	    if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2567	        (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2568	        TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2569	        goto _exit;
2570	    }
2571	}
2572
2573	/* Total length of packet (IP) */
2574	ip_len = ntohs(header_ip->ip_len);
2575
2576	/* TCP data size */
2577	tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2578
2579	/* If the frame is padded, trim it */
2580	tot_len = m_head->m_pkthdr.len;
2581	trim = tot_len - (ip_len + ETHER_HDR_LEN);
2582	if(trim != 0) {
2583	    if(trim < 0)
2584	        goto _exit;
2585	    m_adj(m_head, -trim);
2586	    tot_len = m_head->m_pkthdr.len;
2587	}
2588
2589	buffer_next = m_head;
2590	buffer_tail = NULL;
2591	while(buffer_next != NULL) {
2592	    buffer_tail = buffer_next;
2593	    buffer_next = buffer_tail->m_next;
2594	}
2595
2596	/* Total size of only headers */
2597	hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
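	    /*
	     * Worked example: for a full 1500-byte IP datagram with no TCP
	     * options, tcp_data_len = 1500 - 20 - 20 = 1460, so
	     * hlen = 1500 + 14 - 1460 = 54 (14 Ethernet + 20 IP + 20 TCP).
	     */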
2598
2599	/* Get sequence number */
2600	seq = ntohl(header_tcp->th_seq);
2601
2602	SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2603	    if(lro_session->source_port == header_tcp->th_sport &&
2604	        lro_session->dest_port == header_tcp->th_dport &&
2605	        lro_session->source_ip == header_ip->ip_src.s_addr &&
2606	        lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2607
2608	        /* Unmatched sequence number, flush LRO session */
2609	        if(__predict_false(seq != lro_session->next_seq)) {
2610	            SLIST_REMOVE(&lldev->lro_active, lro_session,
2611	                xge_lro_entry_t, next);
2612	            xge_lro_flush(lldev, lro_session);
2613	            goto _exit;
2614	        }
2615
2616	        /* Handle timestamp option */
2617	        if(tcp_options) {
2618	            u32 tsval = ntohl(*(ptr + 1));
2619	            if(__predict_false(lro_session->tsval > tsval ||
2620	                *(ptr + 2) == 0)) {
2621	                goto _exit;
2622	            }
2623	            lro_session->tsval = tsval;
2624	            lro_session->tsecr = *(ptr + 2);
2625	        }
2626
2627	        lro_session->next_seq += tcp_data_len;
2628	        lro_session->ack_seq = header_tcp->th_ack;
2629	        lro_session->window = header_tcp->th_win;
2630
2631	        /* If the TCP payload is zero-length, free the mbuf */
2632	        if(tcp_data_len == 0) {
2633	            m_freem(m_head);
2634	            status = XGE_HAL_OK;
2635	            goto _exit;
2636	        }
2637
2638	        lro_session->append_cnt++;
2639	        lro_session->len += tcp_data_len;
2640
2641	        /* Adjust mbuf so m_data points to the payload, not the headers */
2642	        m_adj(m_head, hlen);
2643
2644	        /* Append this packet to LRO accumulated packet */
2645	        lro_session->m_tail->m_next = m_head;
2646	        lro_session->m_tail = buffer_tail;
2647
2648	        /* Flush if LRO packet is exceeding maximum size */
2649	        if(lro_session->len >
2650	            (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2651	            SLIST_REMOVE(&lldev->lro_active, lro_session,
2652	                xge_lro_entry_t, next);
2653	            xge_lro_flush(lldev, lro_session);
2654	        }
2655	        status = XGE_HAL_OK;
2656	        goto _exit;
2657	    }
2658	}
2659
2660	if(SLIST_EMPTY(&lldev->lro_free))
2661	    goto _exit;
2662
2663	/* Start a new LRO session */
2664	lro_session = SLIST_FIRST(&lldev->lro_free);
2665	SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2666	SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2667	lro_session->source_port = header_tcp->th_sport;
2668	lro_session->dest_port = header_tcp->th_dport;
2669	lro_session->source_ip = header_ip->ip_src.s_addr;
2670	lro_session->dest_ip = header_ip->ip_dst.s_addr;
2671	lro_session->next_seq = seq + tcp_data_len;
2672	lro_session->mss = tcp_data_len;
2673	lro_session->ack_seq = header_tcp->th_ack;
2674	lro_session->window = header_tcp->th_win;
2675
2676	lro_session->lro_header_ip = header_ip;
2677
2678	/* Handle timestamp option */
2679	if(tcp_options) {
2680	    lro_session->timestamp = 1;
2681	    lro_session->tsval = ntohl(*(ptr + 1));
2682	    lro_session->tsecr = *(ptr + 2);
2683	}
2684
2685	lro_session->len = tot_len;
2686	lro_session->m_head = m_head;
2687	lro_session->m_tail = buffer_tail;
2688	status = XGE_HAL_OK;
2689
2690_exit:
2691	return status;
2692}
2693
2694/**
2695 * xge_accumulate_large_rx
2696 * Accumulate packets to form a large LRO packet based on various conditions
2697 *
2698 * @lldev Per-adapter Data
2699 * @pkt Current packet
2700 * @pkt_length Packet Length
2701 * @rxd_priv Rx Descriptor Private Data
2702 */
2703void
2704xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2705	xge_rx_priv_t *rxd_priv)
2706{
2707	if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2708	    bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2709	        BUS_DMASYNC_POSTREAD);
2710	    (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2711	}
2712}
2713
2714/**
2715 * xge_rx_compl
2716 * If the interrupt is due to received frame (Rx completion), send it up
2717 *
2718 * @channelh Ring Channel Handle
2719 * @dtr Current Descriptor
2720 * @t_code Transfer Code indicating success or error
2721 * @userdata Per-adapter Data
2722 *
2723 * Returns XGE_HAL_OK or HAL error enums
2724 */
2725xge_hal_status_e
2726xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2727	void *userdata)
2728{
2729	struct ifnet       *ifnetp;
2730	xge_rx_priv_t      *rxd_priv = NULL;
2731	mbuf_t              mbuf_up  = NULL;
2732	xge_hal_status_e    status   = XGE_HAL_OK;
2733	xge_hal_dtr_info_t  ext_info;
2734	int                 index;
2735	u16                 vlan_tag;
2736
2737	/* Get the user data portion */
2738	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2739	if(!lldev) {
2740	    XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2741	}
2742
2743	XGE_DRV_STATS(rx_completions);
2744
2745	    /* Get the interface pointer */
2746	ifnetp = lldev->ifnetp;
2747
2748	do {
2749	    XGE_DRV_STATS(rx_desc_compl);
2750
2751	    if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2752	        status = XGE_HAL_FAIL;
2753	        goto _exit;
2754	    }
2755
2756	    if(t_code) {
2757	        xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2758	        XGE_DRV_STATS(rx_tcode);
2759	        xge_hal_device_handle_tcode(channelh, dtr, t_code);
2760	        xge_hal_ring_dtr_post(channelh,dtr);
2761	        continue;
2762	    }
2763
2764	    /* Get the private data for this descriptor */
2765	    rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2766	        dtr);
2767	    if(!rxd_priv) {
2768	        XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2769	            XGE_HAL_FAIL);
2770	    }
2771
2772	    /*
2773	     * Prepare one buffer to send it to upper layer -- since the upper
2774	     * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2775	     * prepare a new buffer, do mapping, use it in the current
2776	     * descriptor and post descriptor back to ring channel
2777	     */
2778	    mbuf_up = rxd_priv->bufferArray[0];
2779
2780	    /* Get the mbuf details, i.e., the packet length */
2781	    xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
2782
2783	    status =
2784	        (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2785	        xge_get_buf(dtr, rxd_priv, lldev, 0) :
2786	        xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2787
2788	    if(status != XGE_HAL_OK) {
2789	        xge_trace(XGE_ERR, "No memory");
2790	        XGE_DRV_STATS(rx_no_buf);
2791
2792	        /*
2793	         * Unable to allocate buffer. Instead of discarding, post
2794	         * descriptor back to channel for future processing of same
2795	         * packet.
2796	         */
2797	        xge_hal_ring_dtr_post(channelh, dtr);
2798	        continue;
2799	    }
2800
2801	    /* Get the extended information */
2802	    xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2803
2804	    /*
2805	     * As we have allocated a new mbuf for this descriptor, post this
2806	     * descriptor with new mbuf back to ring channel
2807	     */
2808	    vlan_tag = ext_info.vlan;
2809	    xge_hal_ring_dtr_post(channelh, dtr);
2810	    if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2811	        (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2812	        (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2813	        (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2814
2815	        /* set Checksum Flag */
2816	        xge_set_mbuf_cflags(mbuf_up);
2817
2818	        if(lldev->enabled_lro) {
2819	            xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2820	                rxd_priv);
2821	        }
2822	        else {
2823	            /* Post-read sync for each buffer's map */
2824	            for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2825	                bus_dmamap_sync(lldev->dma_tag_rx,
2826	                    rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
2827	            }
2828	            (*ifnetp->if_input)(ifnetp, mbuf_up);
2829	        }
2830	    }
2831	    else {
2832	        /*
2833	         * Packet with an erroneous checksum, or a non-TCP/UDP frame;
2834	         * let the upper layer deal with it
2835	         */
2836
2837	        /* Post-read sync for each buffer's map */
2838	        for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2839	            bus_dmamap_sync(lldev->dma_tag_rx,
2840	                 rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
2841	        }
2842
2843	        if(vlan_tag) {
2844	            mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2845	            mbuf_up->m_flags |= M_VLANTAG;
2846	        }
2847
2848	        if(lldev->enabled_lro)
2849	            xge_lro_flush_sessions(lldev);
2850
2851	        (*ifnetp->if_input)(ifnetp, mbuf_up);
2852	    }
2853	} while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2854	    == XGE_HAL_OK);
2855
2856	if(lldev->enabled_lro)
2857	    xge_lro_flush_sessions(lldev);
2858
2859_exit:
2860	return status;
2861}
2862
2863/**
2864 * xge_ring_dtr_get
2865 * Get descriptors
2866 *
2867 * @mbuf_up Packet to send up
2868 * @channelh Ring Channel Handle
2869 * @dtr Descriptor
2870 * @lldev Per-adapter Data
2871 * @rxd_priv Rx Descriptor Private Data
2872 *
2873 * Returns XGE_HAL_OK or HAL error enums
2874 */
2875int
2876xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2877	xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
2878{
2879	mbuf_t           m;
2880	int              pkt_length[5] = {0}, pkt_len = 0;
2881	dma_addr_t       dma_data[5];
2882	int              index;
2883
2884	m = mbuf_up;
2885	pkt_len = 0;
2886
2887	if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2888	    xge_os_memzero(pkt_length, sizeof(pkt_length));
2889
2890	    /*
2891	     * Retrieve data of interest from the completed descriptor -- This
2892	     * returns the packet length
2893	     */
2894	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2895	        xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2896	    }
2897	    else {
2898	        xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
2899	    }
2900
2901	    for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2902	        m->m_len  = pkt_length[index];
2903
2904	        if(index < (lldev->rxd_mbuf_cnt-1)) {
2905	            m->m_next = rxd_priv->bufferArray[index + 1];
2906	            m = m->m_next;
2907	        }
2908	        else {
2909	            m->m_next = NULL;
2910	        }
2911	        pkt_len+=pkt_length[index];
2912	    }
2913
2914	    /*
2915	     * 2-buffer mode is a special case: the data lands in the 3rd
2916	     * buffer, not in the 2nd
2917	     */
2918	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2919	        m->m_len = pkt_length[2];
2920	        pkt_len+=pkt_length[2];
2921	    }
2922
2923	    /*
2924	     * Update length of newly created buffer to be sent up with packet
2925	     * length
2926	     */
2927	    mbuf_up->m_pkthdr.len = pkt_len;
2928	}
2929	else {
2930	    /*
2931	     * Retrieve data of interest from the completed descriptor -- This
2932	     * returns the packet length
2933	     */
2934	    xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data[0], &pkt_length[0]);
2935
2936	    /*
2937	     * Update length of newly created buffer to be sent up with packet
2938	     * length
2939	     */
2940	    mbuf_up->m_len =  mbuf_up->m_pkthdr.len = pkt_length[0];
2941	}
2942
2943	return XGE_HAL_OK;
2944}
2945
2946/**
2947 * xge_flush_txds
2948 * Flush Tx descriptors
2949 *
2950 * @channelh Channel handle
2951 */
2952static void inline
2953xge_flush_txds(xge_hal_channel_h channelh)
2954{
2955	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2956	xge_hal_dtr_h tx_dtr;
2957	xge_tx_priv_t *tx_priv;
2958	u8 t_code;
2959
2960	while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2961	    == XGE_HAL_OK) {
2962	    XGE_DRV_STATS(tx_desc_compl);
2963	    if(t_code) {
2964	        xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2965	        XGE_DRV_STATS(tx_tcode);
2966	        xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2967	    }
2968
2969	    tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2970	    bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2971	    m_freem(tx_priv->buffer);
2972	    tx_priv->buffer = NULL;
2973	    xge_hal_fifo_dtr_free(channelh, tx_dtr);
2974	}
2975}
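
/*
 * xge_send_locked() also calls this directly when descriptor
 * reservation fails, so completed descriptors are reclaimed inline
 * instead of waiting for the next Tx completion interrupt.
 */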
2976
2977/**
2978 * xge_send
2979 * Transmit function
2980 *
2981 * @ifnetp Interface Handle
2982 */
2983void
2984xge_send(struct ifnet *ifnetp)
2985{
2986	int qindex = 0;
2987	xge_lldev_t *lldev = ifnetp->if_softc;
2988
2989	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2990	    if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2991	        XGE_DRV_STATS(tx_lock_fail);
2992	        break;
2993	    }
2994	    xge_send_locked(ifnetp, qindex);
2995	    mtx_unlock(&lldev->mtx_tx[qindex]);
2996	}
2997}
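
/*
 * mtx_trylock keeps the transmit path from blocking: if a queue's Tx
 * lock is already held, its owner is presumably draining that queue,
 * so this caller gives up (counted as tx_lock_fail) and relies on the
 * completion path to resume transmission.
 */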
2998
2999static void inline
3000xge_send_locked(struct ifnet *ifnetp, int qindex)
3001{
3002	xge_hal_dtr_h            dtr;
3003	static bus_dma_segment_t segs[XGE_MAX_SEGS];
3004	xge_hal_status_e         status;
3005	unsigned int             max_fragments;
3006	xge_lldev_t              *lldev          = ifnetp->if_softc;
3007	xge_hal_channel_h        channelh        = lldev->fifo_channel[qindex];
3008	mbuf_t                   m_head          = NULL;
3009	mbuf_t                   m_buf           = NULL;
3010	xge_tx_priv_t            *ll_tx_priv     = NULL;
3011	register unsigned int    count           = 0;
3012	unsigned int             nsegs           = 0;
3013	u16                      vlan_tag;
3014
3015	max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3016
3017	/* If device is not initialized, return */
3018	if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3019	    return;
3020
3021	XGE_DRV_STATS(tx_calls);
3022
3023	/*
3024	 * This loop will be executed for each packet in the kernel maintained
3025	 * queue -- each packet can be with fragments as an mbuf chain
3026	 */
3027	for(;;) {
3028	    IF_DEQUEUE(&ifnetp->if_snd, m_head);
3029	    if (m_head == NULL) {
3030		ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
3031		return;
3032	    }
3033
3034	    for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3035	        if(m_buf->m_len) count += 1;
3036	    }
3037
3038	    if(count >= max_fragments) {
3039	        m_buf = m_defrag(m_head, M_NOWAIT);
3040	        if(m_buf != NULL) m_head = m_buf;
3041	        XGE_DRV_STATS(tx_defrag);
3042	    }
3043
3044	    /* Reserve descriptors */
3045	    status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3046	    if(status != XGE_HAL_OK) {
3047	        XGE_DRV_STATS(tx_no_txd);
3048	        xge_flush_txds(channelh);
3049		break;
3050	    }
3051
3052	    vlan_tag =
3053	        (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3054	    xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3055
3056	    /* Update Tx private structure for this descriptor */
3057	    ll_tx_priv         = xge_hal_fifo_dtr_private(dtr);
3058	    ll_tx_priv->buffer = m_head;
3059
3060	    /*
3061	     * Do mapping -- the required DMA tag was created during
3062	     * initialization (see xge_create_dma_tags) and the per-descriptor
3063	     * DMA maps were created in xge_tx_initial_replenish.
3064	     * Returns number of segments through nsegs
3065	     */
3066	    if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3067	        ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3068	        xge_trace(XGE_TRACE, "DMA map load failed");
3069	        XGE_DRV_STATS(tx_map_fail);
	        /* Return the reserved, unposted descriptor to the free pool */
	        xge_hal_fifo_dtr_free(channelh, dtr);
3070		break;
3071	    }
3072
3073	    if(lldev->driver_stats.tx_max_frags < nsegs)
3074	        lldev->driver_stats.tx_max_frags = nsegs;
3075
3076	    /* Set descriptor buffer for header and each fragment/segment */
3077	    count = 0;
3078	    do {
3079	        xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3080	            (dma_addr_t)htole64(segs[count].ds_addr),
3081	            segs[count].ds_len);
3082	        count++;
3083	    } while(count < nsegs);
3084
3085	    /* Pre-write Sync of mapping */
3086	    bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3087	        BUS_DMASYNC_PREWRITE);
3088
3089	    if((lldev->enabled_tso) &&
3090	        (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3091	        XGE_DRV_STATS(tx_tso);
3092	        xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
3093	    }
3094
3095	    /* Checksum */
3096	    if(ifnetp->if_hwassist > 0) {
3097	        xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3098	            | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3099	    }
3100
3101	    /* Post descriptor to FIFO channel */
3102	    xge_hal_fifo_dtr_post(channelh, dtr);
3103	    XGE_DRV_STATS(tx_posted);
3104
3105	    /* Hand a copy of the packet to any BPF (Berkeley Packet Filter)
3106	     * listeners so that tools like tcpdump can observe it */
3107	    ETHER_BPF_MTAP(ifnetp, m_head);
3108	}
3109
3110	/* Prepend the packet back to queue */
3111	IF_PREPEND(&ifnetp->if_snd, m_head);
3112	ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3113
3114	xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3115	    XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3116	XGE_DRV_STATS(tx_again);
3117}
3118
3119/**
3120 * xge_get_buf
3121 * Allocates new mbufs to be placed into descriptors
3122 *
3123 * @dtrh Descriptor Handle
3124 * @rxd_priv Rx Descriptor Private Data
3125 * @lldev Per-adapter Data
3126 * @index Buffer Index (if multi-buffer mode)
3127 *
3128 * Returns XGE_HAL_OK or HAL error enums
3129 */
3130int
3131xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3132	xge_lldev_t *lldev, int index)
3133{
3134	register mbuf_t mp            = NULL;
3135	struct          ifnet *ifnetp = lldev->ifnetp;
3136	int             status        = XGE_HAL_OK;
3137	int             buffer_size = 0, cluster_size = 0, count;
3138	bus_dmamap_t    map = rxd_priv->dmainfo[index].dma_map;
3139	bus_dma_segment_t segs[3];
3140
3141	buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3142	    ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3143	    lldev->rxd_mbuf_len[index];
3144
3145	if(buffer_size <= MCLBYTES) {
3146	    cluster_size = MCLBYTES;
3147	    mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3148	}
3149	else {
3150	    cluster_size = MJUMPAGESIZE;
3151	    if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3152	        (buffer_size > MJUMPAGESIZE)) {
3153	        cluster_size = MJUM9BYTES;
3154	    }
3155	    mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
3156	}
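	/*
	 * Example sizing: in 1-buffer mode with the default 1500-byte MTU,
	 * buffer_size (MTU plus the maximum MAC header) fits in a standard
	 * 2KB cluster (MCLBYTES), while a 9000-byte jumbo MTU exceeds
	 * MJUMPAGESIZE (typically the page size) and therefore takes a 9KB
	 * MJUM9BYTES cluster.
	 */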
3157	if(!mp) {
3158	    xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3159	    status = XGE_HAL_FAIL;
3160	    goto getbuf_out;
3161	}
3162
3163	/* Update mbuf's length, packet length and receive interface */
3164	mp->m_len = mp->m_pkthdr.len = buffer_size;
3165	mp->m_pkthdr.rcvif = ifnetp;
3166
3167	/* Load DMA map */
3168	if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3169	    mp, segs, &count, BUS_DMA_NOWAIT)) {
3170	    XGE_DRV_STATS(rx_map_fail);
3171	    m_freem(mp);
3172	    XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3173	}
3174
3175	/* Update descriptor private data */
3176	rxd_priv->bufferArray[index]         = mp;
3177	rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3178	rxd_priv->dmainfo[index].dma_map     = lldev->extra_dma_map;
3179	lldev->extra_dma_map = map;
3180
3181	/* Post-read sync of the old map before it is unloaded */
3182	bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3183
3184	/* Unload DMA map of mbuf in current descriptor */
3185	bus_dmamap_unload(lldev->dma_tag_rx, map);
3186
3187	/* Set descriptor buffer */
3188	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3189	    xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3190	        cluster_size);
3191	}
3192
3193getbuf_out:
3194	return status;
3195}
3196
3197/**
3198 * xge_get_buf_3b_5b
3199 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3200 *
3201 * @dtrh Descriptor Handle
3202 * @rxd_priv Rx Descriptor Private Data
3203 * @lldev Per-adapter Data
3204 *
3205 * Returns XGE_HAL_OK or HAL error enums
3206 */
3207int
3208xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3209	xge_lldev_t *lldev)
3210{
3211	bus_addr_t  dma_pointers[5];
3212	int         dma_sizes[5];
3213	int         status = XGE_HAL_OK, index;
3214	int         newindex = 0;
3215
3216	for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3217	    status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3218	    if(status != XGE_HAL_OK) {
3219	        for(newindex = 0; newindex < index; newindex++) {
3220	            m_freem(rxd_priv->bufferArray[newindex]);
3221	        }
3222	        XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
3223	    }
3224	}
3225
3226	for(index = 0; index < lldev->buffer_mode; index++) {
3227	    if(lldev->rxd_mbuf_len[index] != 0) {
3228	        dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3229	        dma_sizes[index]    = lldev->rxd_mbuf_len[index];
3230	    }
3231	    else {
3232	        dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3233	        dma_sizes[index]    = 1;
3234	    }
3235	}
3236
3237	/* Assign the second buffer to the third pointer in 2-buffer mode */
3238	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3239	    dma_pointers[2] = dma_pointers[1];
3240	    dma_sizes[2]    = dma_sizes[1];
3241	    dma_sizes[1]    = 1;
3242	}
3243
3244	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3245	    xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3246	}
3247	else {
3248	    xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3249	}
3250
3251_exit:
3252	return status;
3253}
3254
3255/**
3256 * xge_tx_compl
3257 * If the interrupt is due to Tx completion, free the sent buffer
3258 *
3259 * @channelh Channel Handle
3260 * @dtr Descriptor
3261 * @t_code Transfer Code indicating success or error
3262 * @userdata Per-adapter Data
3263 *
3264 * Returns XGE_HAL_OK or HAL error enum
3265 */
3266xge_hal_status_e
3267xge_tx_compl(xge_hal_channel_h channelh,
3268	xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3269{
3270	xge_tx_priv_t *ll_tx_priv = NULL;
3271	xge_lldev_t   *lldev  = (xge_lldev_t *)userdata;
3272	struct ifnet  *ifnetp = lldev->ifnetp;
3273	mbuf_t         m_buffer = NULL;
3274	int            qindex   = xge_hal_channel_id(channelh);
3275
3276	mtx_lock(&lldev->mtx_tx[qindex]);
3277
3278	XGE_DRV_STATS(tx_completions);
3279
3280	/*
3281	 * For each completed descriptor: Get private structure, free buffer,
3282	 * do unmapping, and free descriptor
3283	 */
3284	do {
3285	    XGE_DRV_STATS(tx_desc_compl);
3286
3287	    if(t_code) {
3288	        XGE_DRV_STATS(tx_tcode);
3289	        xge_trace(XGE_TRACE, "t_code %d", t_code);
3290	        xge_hal_device_handle_tcode(channelh, dtr, t_code);
3291	    }
3292
3293	    ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3294	    m_buffer   = ll_tx_priv->buffer;
3295	    bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3296	    m_freem(m_buffer);
3297	    ll_tx_priv->buffer = NULL;
3298	    xge_hal_fifo_dtr_free(channelh, dtr);
3299	} while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
3300	    == XGE_HAL_OK);
3301	xge_send_locked(ifnetp, qindex);
3302	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3303
3304	mtx_unlock(&lldev->mtx_tx[qindex]);
3305
3306	return XGE_HAL_OK;
3307}
3308
3309/**
3310 * xge_tx_initial_replenish
3311 * Initially allocate buffers and set them into descriptors for later use
3312 *
3313 * @channelh Tx Channel Handle
3314 * @dtrh Descriptor Handle
3315 * @index Descriptor Index
3316 * @userdata Per-adapter Data
3317 * @reopen Channel open/reopen option
3318 *
3319 * Returns XGE_HAL_OK or HAL error enums
3320 */
3321xge_hal_status_e
3322xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3323	int index, void *userdata, xge_hal_channel_reopen_e reopen)
3324{
3325	xge_tx_priv_t *txd_priv = NULL;
3326	int            status   = XGE_HAL_OK;
3327
3328	/* Get the user data portion from channel handle */
3329	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3330	if(lldev == NULL) {
3331	    XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3332	        XGE_HAL_FAIL);
3333	}
3334
3335	/* Get the private data */
3336	txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3337	if(txd_priv == NULL) {
3338	    XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3339	        XGE_HAL_FAIL);
3340	}
3341
3342	/* Create DMA map for this descriptor */
3343	if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3344	    &txd_priv->dma_map)) {
3345	    XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3346	        txinit_out, XGE_HAL_FAIL);
3347	}
3348
3349txinit_out:
3350	return status;
3351}
3352
3353/**
3354 * xge_rx_initial_replenish
3355 * Initially allocate buffers and set them into descriptors for later use
3356 *
3357 * @channelh Rx Channel Handle
3358 * @dtrh Descriptor Handle
3359 * @index Ring Index
3360 * @userdata Per-adapter Data
3361 * @reopen Channel open/reopen option
3362 *
3363 * Returns XGE_HAL_OK or HAL error enums
3364 */
3365xge_hal_status_e
3366xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3367	int index, void *userdata, xge_hal_channel_reopen_e reopen)
3368{
3369	xge_rx_priv_t  *rxd_priv = NULL;
3370	int             status   = XGE_HAL_OK;
3371	int             index1 = 0, index2 = 0;
3372
3373	/* Get the user data portion from channel handle */
3374	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3375	if(lldev == NULL) {
3376	    XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3377	        XGE_HAL_FAIL);
3378	}
3379
3380	/* Get the private data */
3381	rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3382	if(rxd_priv == NULL) {
3383	    XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
3384	        XGE_HAL_FAIL);
3385	}
3386
3387	rxd_priv->bufferArray = xge_os_malloc(NULL,
3388	        (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3389
3390	if(rxd_priv->bufferArray == NULL) {
3391	    XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3392	        XGE_HAL_FAIL);
3393	}
3394
3395	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3396	    /* Create DMA map for these descriptors*/
3397	    if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3398	        &rxd_priv->dmainfo[0].dma_map)) {
3399	        XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3400	            rxinit_err_out, XGE_HAL_FAIL);
3401	    }
3402	    /* Get a buffer, attach it to this descriptor */
3403	    status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
3404	}
3405	else {
3406	    for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3407	        /* Create DMA map for this descriptor */
3408	        if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3409	            &rxd_priv->dmainfo[index1].dma_map)) {
3410	            for(index2 = index1 - 1; index2 >= 0; index2--) {
3411	                bus_dmamap_destroy(lldev->dma_tag_rx,
3412	                    rxd_priv->dmainfo[index2].dma_map);
3413	            }
3414	            XGE_EXIT_ON_ERR(
3415	                "Jumbo DMA map creation for Rx descriptor failed",
3416	                rxinit_err_out, XGE_HAL_FAIL);
3417	        }
3418	    }
3419	    status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3420	}
3421
3422	if(status != XGE_HAL_OK) {
3423	    for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3424	        bus_dmamap_destroy(lldev->dma_tag_rx,
3425	            rxd_priv->dmainfo[index1].dma_map);
3426	    }
3427	    goto rxinit_err_out;
3428	}
3429	else {
3430	    goto rxinit_out;
3431	}
3432
3433rxinit_err_out:
3434	xge_os_free(NULL, rxd_priv->bufferArray,
3435	    (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3436rxinit_out:
3437	return status;
3438}
3439
3440/**
3441 * xge_rx_term
3442 * During unload terminate and free all descriptors
3443 *
3444 * @channelh Rx Channel Handle
3445 * @dtrh Rx Descriptor Handle
3446 * @state Descriptor State
3447 * @userdata Per-adapter Data
3448 * @reopen Channel open/reopen option
3449 */
3450void
3451xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3452	xge_hal_dtr_state_e state, void *userdata,
3453	xge_hal_channel_reopen_e reopen)
3454{
3455	xge_rx_priv_t *rxd_priv = NULL;
3456	xge_lldev_t   *lldev    = NULL;
3457	int            index = 0;
3458
3459	/* Descriptor state is not "Posted" */
3460	if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3461
3462	/* Get the user data portion */
3463	lldev = xge_hal_channel_userdata(channelh);
3464
3465	/* Get the private data */
3466	rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3467
3468	for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3469	    if(rxd_priv->dmainfo[index].dma_map != NULL) {
3470	        bus_dmamap_sync(lldev->dma_tag_rx,
3471	            rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3472	        bus_dmamap_unload(lldev->dma_tag_rx,
3473	            rxd_priv->dmainfo[index].dma_map);
3474	        if(rxd_priv->bufferArray[index] != NULL)
3475	            m_free(rxd_priv->bufferArray[index]);
3476	        bus_dmamap_destroy(lldev->dma_tag_rx,
3477	            rxd_priv->dmainfo[index].dma_map);
3478	    }
3479	}
3480	xge_os_free(NULL, rxd_priv->bufferArray,
3481	    (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3482
3483	/* Free the descriptor */
3484	xge_hal_ring_dtr_free(channelh, dtrh);
3485
3486rxterm_out:
3487	return;
3488}
3489
3490/**
3491 * xge_tx_term
3492 * During unload terminate and free all descriptors
3493 *
3494 * @channelh Tx Channel Handle
3495 * @dtr Tx Descriptor Handle
3496 * @state Descriptor State
3497 * @userdata Per-adapter Data
3498 * @reopen Channel open/reopen option
3499 */
3500void
3501xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3502	xge_hal_dtr_state_e state, void *userdata,
3503	xge_hal_channel_reopen_e reopen)
3504{
3505	xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3506	xge_lldev_t   *lldev      = (xge_lldev_t *)userdata;
3507
3508	/* Destroy DMA map */
3509	bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3510}
3511
3512/**
3513 * xge_methods
3514 *
3515 * FreeBSD device interface entry points
3516 */
3517static device_method_t xge_methods[] = {
3518	DEVMETHOD(device_probe,     xge_probe),
3519	DEVMETHOD(device_attach,    xge_attach),
3520	DEVMETHOD(device_detach,    xge_detach),
3521	DEVMETHOD(device_shutdown,  xge_shutdown),
3522
3523	DEVMETHOD_END
3524};
3525
3526static driver_t xge_driver = {
3527	"nxge",
3528	xge_methods,
3529	sizeof(xge_lldev_t),
3530};
3531static devclass_t xge_devclass;
3532DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
3533
3534