1/*-
2 * Copyright (c) 2002-2007 Neterion, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: stable/11/sys/dev/nxge/if_nxge.c 333367 2018-05-08 16:56:14Z sbruno $
27 */
28
29#include <dev/nxge/if_nxge.h>
30#include <dev/nxge/xge-osdep.h>
31#include <net/if_arp.h>
32#include <sys/types.h>
33#include <net/if.h>
34#include <net/if_var.h>
35#include <net/if_vlan_var.h>
36
37int       copyright_print       = 0;
38int       hal_driver_init_count = 0;
39size_t    size                  = sizeof(int);
40
41static void inline xge_flush_txds(xge_hal_channel_h);
42
43/**
44 * xge_probe
45 * Probes for Xframe devices
46 *
47 * @dev Device handle
48 *
49 * Returns
50 * BUS_PROBE_DEFAULT if device is supported
51 * ENXIO if device is not supported
52 */
53int
54xge_probe(device_t dev)
55{
56	int  devid    = pci_get_device(dev);
57	int  vendorid = pci_get_vendor(dev);
58	int  retValue = ENXIO;
59
60	if(vendorid == XGE_PCI_VENDOR_ID) {
61	    if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
62	        (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
63	        if(!copyright_print) {
64	            xge_os_printf(XGE_COPYRIGHT);
65	            copyright_print = 1;
66	        }
67	        device_set_desc_copy(dev,
68	            "Neterion Xframe 10 Gigabit Ethernet Adapter");
69	        retValue = BUS_PROBE_DEFAULT;
70	    }
71	}
72
73	return retValue;
74}
75
76/**
77 * xge_init_params
78 * Sets HAL parameter values (from kenv).
79 *
80 * @dconfig Device Configuration
81 * @dev Device Handle
82 */
83void
84xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
85{
86	int qindex, tindex, revision;
87	device_t checkdev;
88	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
89
90	dconfig->mtu                   = XGE_DEFAULT_INITIAL_MTU;
91	dconfig->pci_freq_mherz        = XGE_DEFAULT_USER_HARDCODED;
92	dconfig->device_poll_millis    = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
93	dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
94	dconfig->mac.rmac_bcast_en     = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
95	dconfig->fifo.alignment_size   = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
96
97	XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
98	    XGE_DEFAULT_ENABLED_TSO);
99	XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
100	    XGE_DEFAULT_ENABLED_LRO);
101	XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
102	    XGE_DEFAULT_ENABLED_MSI);
103
104	XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
105	    XGE_DEFAULT_LATENCY_TIMER);
106	XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
107	    XGE_DEFAULT_MAX_SPLITS_TRANS);
108	XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
109	    XGE_DEFAULT_MMRB_COUNT);
110	XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
111	    XGE_DEFAULT_SHARED_SPLITS);
112	XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
113	    XGE_DEFAULT_ISR_POLLING_CNT);
114	XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
115	    stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
116
117	XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
118	    XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
119	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
120	    XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
121	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
122	    XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
123	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
124	    XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
125	XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
126	    XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
127	XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
128	    mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
129	XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
130	    mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
131
132	XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
133	    XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
134	XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
135	    XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
136	XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
137	    XGE_DEFAULT_FIFO_MAX_FRAGS);
138
139	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
140	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
141	        XGE_DEFAULT_FIFO_QUEUE_INTR);
142	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
143	        XGE_DEFAULT_FIFO_QUEUE_MAX);
144	    XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
145	        qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);
146
147	    for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
148	        dconfig->fifo.queue[qindex].tti[tindex].enabled  = 1;
149	        dconfig->fifo.queue[qindex].configured = 1;
150
151	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
152	            urange_a, qindex, tindex,
153	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
154	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
155	            urange_b, qindex, tindex,
156	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
157	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
158	            urange_c, qindex, tindex,
159	            XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
160	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
161	            ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
162	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
163	            ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
164	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
165	            ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
166	        XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
167	            ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
168	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
169	            "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
170	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
171	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
172	            "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
173	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
174	        XGE_GET_PARAM_FIFO_QUEUE_TTI(
175	            "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
176	            tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
177	    }
178	}
179
180	XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
181	    XGE_DEFAULT_RING_MEMBLOCK_SIZE);
182
183	XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
184	    XGE_DEFAULT_RING_STRIP_VLAN_TAG);
185
186	XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
187	    XGE_DEFAULT_BUFFER_MODE);
188	if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
189	    (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
190	    xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
191	    lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
192	}
193
194	for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
195	    dconfig->ring.queue[qindex].max_frm_len  = XGE_HAL_RING_USE_MTU;
196	    dconfig->ring.queue[qindex].priority     = 0;
197	    dconfig->ring.queue[qindex].configured   = 1;
198	    dconfig->ring.queue[qindex].buffer_mode  =
199	        (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
200	        XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;
201
202	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
203	        XGE_DEFAULT_RING_QUEUE_MAX);
204	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
205	        qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
206	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
207	        dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
208	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
209	        indicate_max_pkts, qindex,
210	        XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
211	    XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
212	        backoff_interval_us, qindex,
213	        XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
214
215	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
216	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
217	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
218	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
219	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
220	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
221	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
222	        qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
223	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
224	        timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
225	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
226	        timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
227	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
228	        urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
229	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
230	        urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
231	    XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
232	        urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
233	}
234
235	if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
236	    xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
237	    xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
238	        (int)(PAGE_SIZE / 32))
239	    xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
240	    dconfig->fifo.max_frags = (PAGE_SIZE / 32);
241	}
242
243	checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
244	if(checkdev != NULL) {
245	    /* Check Revision for 0x12 */
246	    revision = pci_read_config(checkdev,
247	        xge_offsetof(xge_hal_pci_config_t, revision), 1);
248	    if(revision <= 0x12) {
249	        /* Set mmrb_count to 1k and max splits = 2 */
250	        dconfig->mmrb_count       = 1;
251	        dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
252	    }
253	}
254}
255
256/**
257 * xge_buffer_sizes_set
258 * Set buffer sizes based on Rx buffer mode
259 *
260 * @lldev Per-adapter Data
261 * @buffer_mode Rx Buffer Mode
262 */
263void
264xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
265{
266	int index = 0;
267	int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
268	int buffer_size = mtu + frame_header;
269
270	xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
271
272	if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
273	    lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;
274
275	lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;
276
277	if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
278	    lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
279
280	if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
281	    index = 2;
282	    buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
283	    while(buffer_size > MJUMPAGESIZE) {
284	        lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
285	        buffer_size -= MJUMPAGESIZE;
286	    }
287	    XGE_ALIGN_TO(buffer_size, 128);
288	    lldev->rxd_mbuf_len[index] = buffer_size;
289	    lldev->rxd_mbuf_cnt = index + 1;
290	}
291
292	for(index = 0; index < buffer_mode; index++)
293	    xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
294	        lldev->rxd_mbuf_len[index]);
295}
296
297/**
298 * xge_buffer_mode_init
299 * Init Rx buffer mode
300 *
301 * @lldev Per-adapter Data
302 * @mtu Interface MTU
303 */
304void
305xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
306{
307	int index = 0, buffer_size = 0;
308	xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);
309
310	buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
311
312	if(lldev->enabled_lro)
313	    (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
314	else
315	    (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;
316
317	lldev->rxd_mbuf_cnt = lldev->buffer_mode;
318	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
319	    XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
320	    ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
321	}
322	else {
323	    XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
324	    ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
325	}
326	xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);
327
328	xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
329	    ((lldev->enabled_tso) ? "Enabled":"Disabled"));
330	xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
331	    ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
332	xge_os_printf("%s: Rx %d Buffer Mode Enabled",
333	    device_get_nameunit(lldev->device), lldev->buffer_mode);
334}
335
336/**
337 * xge_driver_initialize
338 * Initializes HAL driver (common for all devices)
339 *
340 * Returns
341 * XGE_HAL_OK if success
342 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
343 */
344int
345xge_driver_initialize(void)
346{
347	xge_hal_uld_cbs_t       uld_callbacks;
348	xge_hal_driver_config_t driver_config;
349	xge_hal_status_e        status = XGE_HAL_OK;
350
351	/* Initialize HAL driver */
352	if(!hal_driver_init_count) {
353	    xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
354	    xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));
355
356	    /*
357	     * Initial and maximum size of the queue used to store the events
358	     * like Link up/down (xge_hal_event_e)
359	     */
360	    driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
361	    driver_config.queue_size_max     = XGE_HAL_MAX_QUEUE_SIZE_MAX;
362
363	    uld_callbacks.link_up   = xge_callback_link_up;
364	    uld_callbacks.link_down = xge_callback_link_down;
365	    uld_callbacks.crit_err  = xge_callback_crit_err;
366	    uld_callbacks.event     = xge_callback_event;
367
368	    status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
369	    if(status != XGE_HAL_OK) {
370	        XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
371	            xdi_out, status);
372	    }
373	}
374	hal_driver_init_count = hal_driver_init_count + 1;
375
376	xge_hal_driver_debug_module_mask_set(0xffffffff);
377	xge_hal_driver_debug_level_set(XGE_TRACE);
378
379xdi_out:
380	return status;
381}
382
383/**
384 * xge_media_init
385 * Initializes, adds and sets media
386 *
387 * @devc Device Handle
388 */
389void
390xge_media_init(device_t devc)
391{
392	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
393
394	/* Initialize Media */
395	ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
396	    xge_ifmedia_status);
397
398	/* Add supported media */
399	ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
400	ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
401	ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO,    0, NULL);
402	ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR,  0, NULL);
403	ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR,  0, NULL);
404
405	/* Set media */
406	ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
407}
408
409/**
410 * xge_pci_space_save
411 * Save PCI configuration space
412 *
413 * @dev Device Handle
414 */
415void
416xge_pci_space_save(device_t dev)
417{
418	struct pci_devinfo *dinfo = NULL;
419
420	dinfo = device_get_ivars(dev);
421	xge_trace(XGE_TRACE, "Saving PCI configuration space");
422	pci_cfg_save(dev, dinfo, 0);
423}
424
425/**
426 * xge_pci_space_restore
427 * Restore saved PCI configuration space
428 *
429 * @dev Device Handle
430 */
431void
432xge_pci_space_restore(device_t dev)
433{
434	struct pci_devinfo *dinfo = NULL;
435
436	dinfo = device_get_ivars(dev);
437	xge_trace(XGE_TRACE, "Restoring PCI configuration space");
438	pci_cfg_restore(dev, dinfo);
439}
440
441/**
442 * xge_msi_info_save
443 * Save MSI info
444 *
445 * @lldev Per-adapter Data
446 */
447void
448xge_msi_info_save(xge_lldev_t * lldev)
449{
450	xge_os_pci_read16(lldev->pdev, NULL,
451	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
452	    &lldev->msi_info.msi_control);
453	xge_os_pci_read32(lldev->pdev, NULL,
454	    xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
455	    &lldev->msi_info.msi_lower_address);
456	xge_os_pci_read32(lldev->pdev, NULL,
457	    xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
458	    &lldev->msi_info.msi_higher_address);
459	xge_os_pci_read16(lldev->pdev, NULL,
460	    xge_offsetof(xge_hal_pci_config_le_t, msi_data),
461	    &lldev->msi_info.msi_data);
462}
463
464/**
465 * xge_msi_info_restore
466 * Restore saved MSI info
467 *
468 * @dev Device Handle
469 */
470void
471xge_msi_info_restore(xge_lldev_t *lldev)
472{
473	/*
474	 * If interface is made down and up, traffic fails. It was observed that
475	 * MSI information were getting reset on down. Restoring them.
476	 */
477	xge_os_pci_write16(lldev->pdev, NULL,
478	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
479	    lldev->msi_info.msi_control);
480
481	xge_os_pci_write32(lldev->pdev, NULL,
482	    xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
483	    lldev->msi_info.msi_lower_address);
484
485	xge_os_pci_write32(lldev->pdev, NULL,
486	    xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
487	    lldev->msi_info.msi_higher_address);
488
489	xge_os_pci_write16(lldev->pdev, NULL,
490	    xge_offsetof(xge_hal_pci_config_le_t, msi_data),
491	    lldev->msi_info.msi_data);
492}
493
494/**
495 * xge_init_mutex
496 * Initializes mutexes used in driver
497 *
498 * @lldev  Per-adapter Data
499 */
500void
501xge_mutex_init(xge_lldev_t *lldev)
502{
503	int qindex;
504
505	sprintf(lldev->mtx_name_drv, "%s_drv",
506	    device_get_nameunit(lldev->device));
507	mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
508	    MTX_DEF);
509
510	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
511	    sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
512	        device_get_nameunit(lldev->device), qindex);
513	    mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
514	        MTX_DEF);
515	}
516}
517
518/**
519 * xge_mutex_destroy
520 * Destroys mutexes used in driver
521 *
522 * @lldev Per-adapter Data
523 */
524void
525xge_mutex_destroy(xge_lldev_t *lldev)
526{
527	int qindex;
528
529	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
530	    mtx_destroy(&lldev->mtx_tx[qindex]);
531	mtx_destroy(&lldev->mtx_drv);
532}
533
534/**
535 * xge_print_info
536 * Print device and driver information
537 *
538 * @lldev Per-adapter Data
539 */
540void
541xge_print_info(xge_lldev_t *lldev)
542{
543	device_t dev = lldev->device;
544	xge_hal_device_t *hldev = lldev->devh;
545	xge_hal_status_e status = XGE_HAL_OK;
546	u64 val64 = 0;
547	const char *xge_pci_bus_speeds[17] = {
548	    "PCI 33MHz Bus",
549	    "PCI 66MHz Bus",
550	    "PCIX(M1) 66MHz Bus",
551	    "PCIX(M1) 100MHz Bus",
552	    "PCIX(M1) 133MHz Bus",
553	    "PCIX(M2) 133MHz Bus",
554	    "PCIX(M2) 200MHz Bus",
555	    "PCIX(M2) 266MHz Bus",
556	    "PCIX(M1) Reserved",
557	    "PCIX(M1) 66MHz Bus (Not Supported)",
558	    "PCIX(M1) 100MHz Bus (Not Supported)",
559	    "PCIX(M1) 133MHz Bus (Not Supported)",
560	    "PCIX(M2) Reserved",
561	    "PCIX 533 Reserved",
562	    "PCI Basic Mode",
563	    "PCIX Basic Mode",
564	    "PCI Invalid Mode"
565	};
566
567	xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
568	    device_get_nameunit(dev),
569	    ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
570	    hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
571	xge_os_printf("%s: Serial Number %s",
572	    device_get_nameunit(dev), hldev->vpd_data.serial_num);
573
574	if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
575	    status = xge_hal_mgmt_reg_read(hldev, 0,
576	        xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
577	    if(status != XGE_HAL_OK)
578	        xge_trace(XGE_ERR, "Error for getting bus speed");
579
580	    xge_os_printf("%s: Adapter is on %s bit %s",
581	        device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
582	        (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
583	}
584
585	xge_os_printf("%s: Using %s Interrupts",
586	    device_get_nameunit(dev),
587	    (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
588}
589
590/**
591 * xge_create_dma_tags
592 * Creates DMA tags for both Tx and Rx
593 *
594 * @dev Device Handle
595 *
596 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
597 */
598xge_hal_status_e
599xge_create_dma_tags(device_t dev)
600{
601	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
602	xge_hal_status_e status = XGE_HAL_FAIL;
603	int mtu = (lldev->ifnetp)->if_mtu, maxsize;
604
605	/* DMA tag for Tx */
606	status = bus_dma_tag_create(
607	    bus_get_dma_tag(dev),                /* Parent                    */
608	    PAGE_SIZE,                           /* Alignment                 */
609	    0,                                   /* Bounds                    */
610	    BUS_SPACE_MAXADDR,                   /* Low Address               */
611	    BUS_SPACE_MAXADDR,                   /* High Address              */
612	    NULL,                                /* Filter Function           */
613	    NULL,                                /* Filter Function Arguments */
614	    MCLBYTES * XGE_MAX_SEGS,             /* Maximum Size              */
615	    XGE_MAX_SEGS,                        /* Number of Segments        */
616	    MCLBYTES,                            /* Maximum Segment Size      */
617	    BUS_DMA_ALLOCNOW,                    /* Flags                     */
618	    NULL,                                /* Lock Function             */
619	    NULL,                                /* Lock Function Arguments   */
620	    (&lldev->dma_tag_tx));               /* DMA Tag                   */
621	if(status != 0)
622	    goto _exit;
623
624	maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
625	if(maxsize <= MCLBYTES) {
626	    maxsize = MCLBYTES;
627	}
628	else {
629	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
630	        maxsize = MJUMPAGESIZE;
631	    else
632	        maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
633	}
634
635	/* DMA tag for Rx */
636	status = bus_dma_tag_create(
637	    bus_get_dma_tag(dev),                /* Parent                    */
638	    PAGE_SIZE,                           /* Alignment                 */
639	    0,                                   /* Bounds                    */
640	    BUS_SPACE_MAXADDR,                   /* Low Address               */
641	    BUS_SPACE_MAXADDR,                   /* High Address              */
642	    NULL,                                /* Filter Function           */
643	    NULL,                                /* Filter Function Arguments */
644	    maxsize,                             /* Maximum Size              */
645	    1,                                   /* Number of Segments        */
646	    maxsize,                             /* Maximum Segment Size      */
647	    BUS_DMA_ALLOCNOW,                    /* Flags                     */
648	    NULL,                                /* Lock Function             */
649	    NULL,                                /* Lock Function Arguments   */
650	    (&lldev->dma_tag_rx));               /* DMA Tag                   */
651	if(status != 0)
652	    goto _exit1;
653
654	status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
655	    &lldev->extra_dma_map);
656	if(status != 0)
657	    goto _exit2;
658
659	status = XGE_HAL_OK;
660	goto _exit;
661
662_exit2:
663	status = bus_dma_tag_destroy(lldev->dma_tag_rx);
664	if(status != 0)
665	    xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
666_exit1:
667	status = bus_dma_tag_destroy(lldev->dma_tag_tx);
668	if(status != 0)
669	    xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
670	status = XGE_HAL_FAIL;
671_exit:
672	return status;
673}
674
675/**
676 * xge_confirm_changes
677 * Disables and Enables interface to apply requested change
678 *
679 * @lldev Per-adapter Data
680 * @mtu_set Is it called for changing MTU? (Yes: 1, No: 0)
681 *
682 * Returns 0 or Error Number
683 */
684void
685xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
686{
687	if(lldev->initialized == 0) goto _exit1;
688
689	mtx_lock(&lldev->mtx_drv);
690	if_down(lldev->ifnetp);
691	xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
692
693	if(option == XGE_SET_MTU)
694	    (lldev->ifnetp)->if_mtu = lldev->mtu;
695	else
696	    xge_buffer_mode_init(lldev, lldev->mtu);
697
698	xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
699	if_up(lldev->ifnetp);
700	mtx_unlock(&lldev->mtx_drv);
701	goto _exit;
702
703_exit1:
704	/* Request was to change MTU and device not initialized */
705	if(option == XGE_SET_MTU) {
706	    (lldev->ifnetp)->if_mtu = lldev->mtu;
707	    xge_buffer_mode_init(lldev, lldev->mtu);
708	}
709_exit:
710	return;
711}
712
713/**
714 * xge_change_lro_status
715 * Enable/Disable LRO feature
716 *
717 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
718 *
719 * Returns 0 or error number.
720 */
721static int
722xge_change_lro_status(SYSCTL_HANDLER_ARGS)
723{
724	xge_lldev_t *lldev = (xge_lldev_t *)arg1;
725	int request = lldev->enabled_lro, status = XGE_HAL_OK;
726
727	status = sysctl_handle_int(oidp, &request, arg2, req);
728	if((status != XGE_HAL_OK) || (!req->newptr))
729	    goto _exit;
730
731	if((request < 0) || (request > 1)) {
732	    status = EINVAL;
733	    goto _exit;
734	}
735
736	/* Return if current and requested states are same */
737	if(request == lldev->enabled_lro){
738	    xge_trace(XGE_ERR, "LRO is already %s",
739	        ((request) ? "enabled" : "disabled"));
740	    goto _exit;
741	}
742
743	lldev->enabled_lro = request;
744	xge_confirm_changes(lldev, XGE_CHANGE_LRO);
745	arg2 = lldev->enabled_lro;
746
747_exit:
748	return status;
749}
750
751/**
752 * xge_add_sysctl_handlers
753 * Registers sysctl parameter value update handlers
754 *
755 * @lldev Per-adapter data
756 */
757void
758xge_add_sysctl_handlers(xge_lldev_t *lldev)
759{
760	struct sysctl_ctx_list *context_list =
761	    device_get_sysctl_ctx(lldev->device);
762	struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);
763
764	SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
765	    "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
766	    xge_change_lro_status, "I", "Enable or disable LRO feature");
767}
768
769/**
770 * xge_attach
771 * Connects driver to the system if probe was success
772 *
773 * @dev Device Handle
774 */
775int
776xge_attach(device_t dev)
777{
778	xge_hal_device_config_t *device_config;
779	xge_hal_device_attr_t   attr;
780	xge_lldev_t             *lldev;
781	xge_hal_device_t        *hldev;
782	xge_pci_info_t          *pci_info;
783	struct ifnet            *ifnetp;
784	int                     rid, rid0, rid1, error;
785	int                     msi_count = 0, status = XGE_HAL_OK;
786	int                     enable_msi = XGE_HAL_INTR_MODE_IRQLINE;
787
788	device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
789	if(!device_config) {
790	    XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
791	        attach_out_config, ENOMEM);
792	}
793
794	lldev = (xge_lldev_t *) device_get_softc(dev);
795	if(!lldev) {
796	    XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
797	}
798	lldev->device = dev;
799
800	xge_mutex_init(lldev);
801
802	error = xge_driver_initialize();
803	if(error != XGE_HAL_OK) {
804	    xge_resources_free(dev, xge_free_mutex);
805	    XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
806	}
807
808	/* HAL device */
809	hldev =
810	    (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
811	if(!hldev) {
812	    xge_resources_free(dev, xge_free_terminate_hal_driver);
813	    XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
814	        attach_out, ENOMEM);
815	}
816	lldev->devh = hldev;
817
818	/* Our private structure */
819	pci_info =
820	    (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
821	if(!pci_info) {
822	    xge_resources_free(dev, xge_free_hal_device);
823	    XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
824	        attach_out, ENOMEM);
825	}
826	lldev->pdev      = pci_info;
827	pci_info->device = dev;
828
829	/* Set bus master */
830	pci_enable_busmaster(dev);
831
832	/* Get virtual address for BAR0 */
833	rid0 = PCIR_BAR(0);
834	pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
835	    RF_ACTIVE);
836	if(pci_info->regmap0 == NULL) {
837	    xge_resources_free(dev, xge_free_pci_info);
838	    XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
839	        attach_out, ENOMEM);
840	}
841	attr.bar0 = (char *)pci_info->regmap0;
842
843	pci_info->bar0resource = (xge_bus_resource_t*)
844	    xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
845	if(pci_info->bar0resource == NULL) {
846	    xge_resources_free(dev, xge_free_bar0);
847	    XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
848	        attach_out, ENOMEM);
849	}
850	((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
851	    rman_get_bustag(pci_info->regmap0);
852	((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
853	    rman_get_bushandle(pci_info->regmap0);
854	((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
855	    pci_info->regmap0;
856
857	/* Get virtual address for BAR1 */
858	rid1 = PCIR_BAR(2);
859	pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
860	    RF_ACTIVE);
861	if(pci_info->regmap1 == NULL) {
862	    xge_resources_free(dev, xge_free_bar0_resource);
863	    XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
864	        attach_out, ENOMEM);
865	}
866	attr.bar1 = (char *)pci_info->regmap1;
867
868	pci_info->bar1resource = (xge_bus_resource_t*)
869	    xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
870	if(pci_info->bar1resource == NULL) {
871	    xge_resources_free(dev, xge_free_bar1);
872	    XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
873	        attach_out, ENOMEM);
874	}
875	((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
876	    rman_get_bustag(pci_info->regmap1);
877	((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
878	    rman_get_bushandle(pci_info->regmap1);
879	((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
880	    pci_info->regmap1;
881
882	/* Save PCI config space */
883	xge_pci_space_save(dev);
884
885	attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
886	attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
887	attr.irqh  = lldev->irqhandle;
888	attr.cfgh  = pci_info;
889	attr.pdev  = pci_info;
890
891	/* Initialize device configuration parameters */
892	xge_init_params(device_config, dev);
893
894	rid = 0;
895	if(lldev->enabled_msi) {
896	    /* Number of MSI messages supported by device */
897	    msi_count = pci_msi_count(dev);
898	    if(msi_count > 1) {
899	        /* Device supports MSI */
900	        if(bootverbose) {
901	            xge_trace(XGE_ERR, "MSI count: %d", msi_count);
902	            xge_trace(XGE_ERR, "Now, driver supporting 1 message");
903	        }
904	        msi_count = 1;
905	        error = pci_alloc_msi(dev, &msi_count);
906	        if(error == 0) {
907	            if(bootverbose)
908	                xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
909	            enable_msi = XGE_HAL_INTR_MODE_MSI;
910	            rid = 1;
911	        }
912	        else {
913	            if(bootverbose)
914	                xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
915	        }
916	    }
917	}
918	lldev->enabled_msi = enable_msi;
919
920	/* Allocate resource for irq */
921	lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
922	    (RF_SHAREABLE | RF_ACTIVE));
923	if(lldev->irq == NULL) {
924	    xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
925	        ((rid == 0) ? "line interrupt" : "MSI"));
926	    if(rid == 1) {
927	        error = pci_release_msi(dev);
928	        if(error != 0) {
929	            xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
930	                error);
931	            xge_trace(XGE_ERR, "Requires reboot to use MSI again");
932	        }
933	        xge_trace(XGE_ERR, "Trying line interrupts");
934	        rid = 0;
935	        lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
936	        lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
937	            (RF_SHAREABLE | RF_ACTIVE));
938	    }
939	    if(lldev->irq == NULL) {
940	        xge_trace(XGE_ERR, "Allocating irq resource failed");
941	        xge_resources_free(dev, xge_free_bar1_resource);
942	        status = ENOMEM;
943	        goto attach_out;
944	    }
945	}
946
947	device_config->intr_mode = lldev->enabled_msi;
948	if(bootverbose) {
949	    xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
950	        lldev->enabled_msi, msi_count);
951	}
952
953	/* Initialize HAL device */
954	error = xge_hal_device_initialize(hldev, &attr, device_config);
955	if(error != XGE_HAL_OK) {
956	    xge_resources_free(dev, xge_free_irq_resource);
957	    XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
958	        ENXIO);
959	}
960
961	xge_hal_device_private_set(hldev, lldev);
962
963	error = xge_interface_setup(dev);
964	if(error != 0) {
965	    status = error;
966	    goto attach_out;
967	}
968
969	ifnetp         = lldev->ifnetp;
970	ifnetp->if_mtu = device_config->mtu;
971
972	xge_media_init(dev);
973
974	/* Associate interrupt handler with the device */
975	if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
976	    error = bus_setup_intr(dev, lldev->irq,
977	        (INTR_TYPE_NET | INTR_MPSAFE),
978#if __FreeBSD_version > 700030
979	        NULL,
980#endif
981	        xge_isr_msi, lldev, &lldev->irqhandle);
982	    xge_msi_info_save(lldev);
983	}
984	else {
985	    error = bus_setup_intr(dev, lldev->irq,
986	        (INTR_TYPE_NET | INTR_MPSAFE),
987#if __FreeBSD_version > 700030
988	        xge_isr_filter,
989#endif
990	        xge_isr_line, lldev, &lldev->irqhandle);
991	}
992	if(error != 0) {
993	    xge_resources_free(dev, xge_free_media_interface);
994	    XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
995	        attach_out, ENXIO);
996	}
997
998	xge_print_info(lldev);
999
1000	xge_add_sysctl_handlers(lldev);
1001
1002	xge_buffer_mode_init(lldev, device_config->mtu);
1003
1004attach_out:
1005	xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
1006attach_out_config:
1007	gone_in_dev(dev, 12, "nxge(4) driver");
1008	return status;
1009}
1010
1011/**
1012 * xge_resources_free
1013 * Undo what-all we did during load/attach
1014 *
1015 * @dev Device Handle
1016 * @error Identifies what-all to undo
1017 */
1018void
1019xge_resources_free(device_t dev, xge_lables_e error)
1020{
1021	xge_lldev_t *lldev;
1022	xge_pci_info_t *pci_info;
1023	xge_hal_device_t *hldev;
1024	int rid, status;
1025
1026	/* LL Device */
1027	lldev = (xge_lldev_t *) device_get_softc(dev);
1028	pci_info = lldev->pdev;
1029
1030	/* HAL Device */
1031	hldev = lldev->devh;
1032
1033	switch(error) {
1034	    case xge_free_all:
1035	        /* Teardown interrupt handler - device association */
1036	        bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
1037
1038	    case xge_free_media_interface:
1039	        /* Media */
1040	        ifmedia_removeall(&lldev->media);
1041
1042	        /* Detach Ether */
1043	        ether_ifdetach(lldev->ifnetp);
1044	        if_free(lldev->ifnetp);
1045
1046	        xge_hal_device_private_set(hldev, NULL);
1047	        xge_hal_device_disable(hldev);
1048
1049	    case xge_free_terminate_hal_device:
1050	        /* HAL Device */
1051	        xge_hal_device_terminate(hldev);
1052
1053	    case xge_free_irq_resource:
1054	        /* Release IRQ resource */
1055	        bus_release_resource(dev, SYS_RES_IRQ,
1056	            ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
1057	            lldev->irq);
1058
1059	        if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
1060	            status = pci_release_msi(dev);
1061	            if(status != 0) {
1062	                if(bootverbose) {
1063	                    xge_trace(XGE_ERR,
1064	                        "pci_release_msi returned %d", status);
1065	                }
1066	            }
1067	        }
1068
1069	    case xge_free_bar1_resource:
1070	        /* Restore PCI configuration space */
1071	        xge_pci_space_restore(dev);
1072
1073	        /* Free bar1resource */
1074	        xge_os_free(NULL, pci_info->bar1resource,
1075	            sizeof(xge_bus_resource_t));
1076
1077	    case xge_free_bar1:
1078	        /* Release BAR1 */
1079	        rid = PCIR_BAR(2);
1080	        bus_release_resource(dev, SYS_RES_MEMORY, rid,
1081	            pci_info->regmap1);
1082
1083	    case xge_free_bar0_resource:
1084	        /* Free bar0resource */
1085	        xge_os_free(NULL, pci_info->bar0resource,
1086	            sizeof(xge_bus_resource_t));
1087
1088	    case xge_free_bar0:
1089	        /* Release BAR0 */
1090	        rid = PCIR_BAR(0);
1091	        bus_release_resource(dev, SYS_RES_MEMORY, rid,
1092	            pci_info->regmap0);
1093
1094	    case xge_free_pci_info:
1095	        /* Disable Bus Master */
1096	        pci_disable_busmaster(dev);
1097
1098	        /* Free pci_info_t */
1099	        lldev->pdev = NULL;
1100	        xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
1101
1102	    case xge_free_hal_device:
1103	        /* Free device configuration struct and HAL device */
1104	        xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
1105
1106	    case xge_free_terminate_hal_driver:
1107	        /* Terminate HAL driver */
1108	        hal_driver_init_count = hal_driver_init_count - 1;
1109	        if(!hal_driver_init_count) {
1110	            xge_hal_driver_terminate();
1111	        }
1112
1113	    case xge_free_mutex:
1114	        xge_mutex_destroy(lldev);
1115	}
1116}
1117
1118/**
1119 * xge_detach
1120 * Detaches driver from the Kernel subsystem
1121 *
1122 * @dev Device Handle
1123 */
1124int
1125xge_detach(device_t dev)
1126{
1127	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1128
1129	if(lldev->in_detach == 0) {
1130	    lldev->in_detach = 1;
1131	    xge_stop(lldev);
1132	    xge_resources_free(dev, xge_free_all);
1133	}
1134
1135	return 0;
1136}
1137
1138/**
1139 * xge_shutdown
1140 * To shutdown device before system shutdown
1141 *
1142 * @dev Device Handle
1143 */
1144int
1145xge_shutdown(device_t dev)
1146{
1147	xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1148	xge_stop(lldev);
1149
1150	return 0;
1151}
1152
1153/**
1154 * xge_interface_setup
1155 * Setup interface
1156 *
1157 * @dev Device Handle
1158 *
1159 * Returns 0 on success, ENXIO/ENOMEM on failure
1160 */
1161int
1162xge_interface_setup(device_t dev)
1163{
1164	u8 mcaddr[ETHER_ADDR_LEN];
1165	xge_hal_status_e status;
1166	xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1167	struct ifnet *ifnetp;
1168	xge_hal_device_t *hldev = lldev->devh;
1169
1170	/* Get the MAC address of the device */
1171	status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
1172	if(status != XGE_HAL_OK) {
1173	    xge_resources_free(dev, xge_free_terminate_hal_device);
1174	    XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
1175	}
1176
1177	/* Get interface ifnet structure for this Ether device */
1178	ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
1179	if(ifnetp == NULL) {
1180	    xge_resources_free(dev, xge_free_terminate_hal_device);
1181	    XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
1182	}
1183
1184	/* Initialize interface ifnet structure */
1185	if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
1186	ifnetp->if_mtu      = XGE_HAL_DEFAULT_MTU;
1187	ifnetp->if_baudrate = XGE_BAUDRATE;
1188	ifnetp->if_init     = xge_init;
1189	ifnetp->if_softc    = lldev;
1190	ifnetp->if_flags    = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1191	ifnetp->if_ioctl    = xge_ioctl;
1192	ifnetp->if_start    = xge_send;
1193
1194	/* TODO: Check and assign optimal value */
1195	ifnetp->if_snd.ifq_maxlen = ifqmaxlen;
1196
1197	ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
1198	    IFCAP_HWCSUM;
1199	if(lldev->enabled_tso)
1200	    ifnetp->if_capabilities |= IFCAP_TSO4;
1201	if(lldev->enabled_lro)
1202	    ifnetp->if_capabilities |= IFCAP_LRO;
1203
1204	ifnetp->if_capenable = ifnetp->if_capabilities;
1205
1206	/* Attach the interface */
1207	ether_ifattach(ifnetp, mcaddr);
1208
1209ifsetup_out:
1210	return status;
1211}
1212
1213/**
1214 * xge_callback_link_up
1215 * Callback for Link-up indication from HAL
1216 *
1217 * @userdata Per-adapter data
1218 */
1219void
1220xge_callback_link_up(void *userdata)
1221{
1222	xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
1223	struct ifnet *ifnetp = lldev->ifnetp;
1224
1225	ifnetp->if_flags  &= ~IFF_DRV_OACTIVE;
1226	if_link_state_change(ifnetp, LINK_STATE_UP);
1227}
1228
1229/**
1230 * xge_callback_link_down
1231 * Callback for Link-down indication from HAL
1232 *
1233 * @userdata Per-adapter data
1234 */
1235void
1236xge_callback_link_down(void *userdata)
1237{
1238	xge_lldev_t  *lldev  = (xge_lldev_t *)userdata;
1239	struct ifnet *ifnetp = lldev->ifnetp;
1240
1241	ifnetp->if_flags  |= IFF_DRV_OACTIVE;
1242	if_link_state_change(ifnetp, LINK_STATE_DOWN);
1243}
1244
1245/**
1246 * xge_callback_crit_err
1247 * Callback for Critical error indication from HAL
1248 *
1249 * @userdata Per-adapter data
1250 * @type Event type (Enumerated hardware error)
1251 * @serr_data Hardware status
1252 */
1253void
1254xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1255{
1256	xge_trace(XGE_ERR, "Critical Error");
1257	xge_reset(userdata);
1258}
1259
1260/**
1261 * xge_callback_event
1262 * Callback from HAL indicating that some event has been queued
1263 *
1264 * @item Queued event item
1265 */
1266void
1267xge_callback_event(xge_queue_item_t *item)
1268{
1269	xge_lldev_t      *lldev  = NULL;
1270	xge_hal_device_t *hldev  = NULL;
1271	struct ifnet     *ifnetp = NULL;
1272
1273	hldev  = item->context;
1274	lldev  = xge_hal_device_private(hldev);
1275	ifnetp = lldev->ifnetp;
1276
1277	switch((int)item->event_type) {
1278	    case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1279	        if(lldev->initialized) {
1280	            if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
1281	                ifnetp->if_flags  &= ~IFF_DRV_OACTIVE;
1282	            }
1283	            else {
1284	                xge_queue_produce_context(
1285	                    xge_hal_device_queue(lldev->devh),
1286	                    XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1287	            }
1288	        }
1289	        break;
1290
1291	    case XGE_LL_EVENT_DEVICE_RESETTING:
1292	        xge_reset(item->context);
1293	        break;
1294
1295	    default:
1296	        break;
1297	}
1298}
1299
1300/**
1301 * xge_ifmedia_change
1302 * Media change driver callback
1303 *
1304 * @ifnetp Interface Handle
1305 *
1306 * Returns 0 if media is Ether else EINVAL
1307 */
1308int
1309xge_ifmedia_change(struct ifnet *ifnetp)
1310{
1311	xge_lldev_t    *lldev    = ifnetp->if_softc;
1312	struct ifmedia *ifmediap = &lldev->media;
1313
1314	return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ?  EINVAL:0;
1315}
1316
1317/**
1318 * xge_ifmedia_status
1319 * Media status driver callback
1320 *
1321 * @ifnetp Interface Handle
1322 * @ifmr Interface Media Settings
1323 */
1324void
1325xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1326{
1327	xge_hal_status_e status;
1328	u64              regvalue;
1329	xge_lldev_t      *lldev = ifnetp->if_softc;
1330	xge_hal_device_t *hldev = lldev->devh;
1331
1332	ifmr->ifm_status = IFM_AVALID;
1333	ifmr->ifm_active = IFM_ETHER;
1334
1335	status = xge_hal_mgmt_reg_read(hldev, 0,
1336	    xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1337	if(status != XGE_HAL_OK) {
1338	    xge_trace(XGE_TRACE, "Getting adapter status failed");
1339	    goto _exit;
1340	}
1341
1342	if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1343	    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1344	    ifmr->ifm_status |= IFM_ACTIVE;
1345	    ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1346	    if_link_state_change(ifnetp, LINK_STATE_UP);
1347	}
1348	else {
1349	    if_link_state_change(ifnetp, LINK_STATE_DOWN);
1350	}
1351_exit:
1352	return;
1353}
1354
1355/**
1356 * xge_ioctl_stats
1357 * IOCTL to get statistics
1358 *
1359 * @lldev Per-adapter data
1360 * @ifreqp Interface request
1361 */
1362int
1363xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1364{
1365	xge_hal_status_e status = XGE_HAL_OK;
1366	char cmd, mode;
1367	void *info = NULL;
1368	int retValue;
1369
1370	cmd = retValue = fubyte(ifr_data_get_ptr(ifreqp));
1371	if (retValue == -1)
1372		return (EFAULT);
1373
1374	retValue = EINVAL;
1375	switch(cmd) {
1376	    case XGE_QUERY_STATS:
1377	        mtx_lock(&lldev->mtx_drv);
1378	        status = xge_hal_stats_hw(lldev->devh,
1379	            (xge_hal_stats_hw_info_t **)&info);
1380	        mtx_unlock(&lldev->mtx_drv);
1381	        if(status == XGE_HAL_OK) {
1382	            if(copyout(info, ifr_data_get_ptr(ifreqp),
1383	                sizeof(xge_hal_stats_hw_info_t)) == 0)
1384	                retValue = 0;
1385	        }
1386	        else {
1387	            xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1388	                status);
1389	        }
1390	        break;
1391
1392	    case XGE_QUERY_PCICONF:
1393	        info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1394	        if(info != NULL) {
1395	            mtx_lock(&lldev->mtx_drv);
1396	            status = xge_hal_mgmt_pci_config(lldev->devh, info,
1397	                sizeof(xge_hal_pci_config_t));
1398	            mtx_unlock(&lldev->mtx_drv);
1399	            if(status == XGE_HAL_OK) {
1400	                if(copyout(info, ifr_data_get_ptr(ifreqp),
1401	                    sizeof(xge_hal_pci_config_t)) == 0)
1402	                    retValue = 0;
1403	            }
1404	            else {
1405	                xge_trace(XGE_ERR,
1406	                    "Getting PCI configuration failed (%d)", status);
1407	            }
1408	            xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1409	        }
1410	        break;
1411
1412	    case XGE_QUERY_DEVSTATS:
1413	        info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1414	        if(info != NULL) {
1415	            mtx_lock(&lldev->mtx_drv);
1416	            status =xge_hal_mgmt_device_stats(lldev->devh, info,
1417	                sizeof(xge_hal_stats_device_info_t));
1418	            mtx_unlock(&lldev->mtx_drv);
1419	            if(status == XGE_HAL_OK) {
1420	                if(copyout(info, ifr_data_get_ptr(ifreqp),
1421	                    sizeof(xge_hal_stats_device_info_t)) == 0)
1422	                    retValue = 0;
1423	            }
1424	            else {
1425	                xge_trace(XGE_ERR, "Getting device info failed (%d)",
1426	                    status);
1427	            }
1428	            xge_os_free(NULL, info,
1429	                sizeof(xge_hal_stats_device_info_t));
1430	        }
1431	        break;
1432
1433	    case XGE_QUERY_SWSTATS:
1434	        info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1435	        if(info != NULL) {
1436	            mtx_lock(&lldev->mtx_drv);
1437	            status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1438	                sizeof(xge_hal_stats_sw_err_t));
1439	            mtx_unlock(&lldev->mtx_drv);
1440	            if(status == XGE_HAL_OK) {
1441	                if(copyout(info, ifr_data_get_ptr(ifreqp),
1442	                    sizeof(xge_hal_stats_sw_err_t)) == 0)
1443	                    retValue = 0;
1444	            }
1445	            else {
1446	                xge_trace(XGE_ERR,
1447	                    "Getting tcode statistics failed (%d)", status);
1448	            }
1449	            xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
1450	        }
1451	        break;
1452
1453	    case XGE_QUERY_DRIVERSTATS:
1454		if(copyout(&lldev->driver_stats, ifr_data_get_ptr(ifreqp),
1455	            sizeof(xge_driver_stats_t)) == 0) {
1456	            retValue = 0;
1457	        }
1458	        else {
1459	            xge_trace(XGE_ERR,
1460	                "Copyout of driver statistics failed (%d)", status);
1461	        }
1462	        break;
1463
1464	    case XGE_READ_VERSION:
1465	        info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
1466	        if(info != NULL) {
1467	            strcpy(info, XGE_DRIVER_VERSION);
1468	            if(copyout(info, ifr_data_get_ptr(ifreqp),
1469			XGE_BUFFER_SIZE) == 0)
1470	                retValue = 0;
1471	            xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1472	        }
1473	        break;
1474
1475	    case XGE_QUERY_DEVCONF:
1476	        info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1477	        if(info != NULL) {
1478	            mtx_lock(&lldev->mtx_drv);
1479	            status = xge_hal_mgmt_device_config(lldev->devh, info,
1480	                sizeof(xge_hal_device_config_t));
1481	            mtx_unlock(&lldev->mtx_drv);
1482	            if(status == XGE_HAL_OK) {
1483	                if(copyout(info, ifr_data_get_ptr(ifreqp),
1484	                    sizeof(xge_hal_device_config_t)) == 0)
1485	                    retValue = 0;
1486	            }
1487	            else {
1488	                xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1489	                    status);
1490	            }
1491	            xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1492	        }
1493	        break;
1494
1495	    case XGE_QUERY_BUFFER_MODE:
1496	        if(copyout(&lldev->buffer_mode, ifr_data_get_ptr(ifreqp),
1497	            sizeof(int)) == 0)
1498	            retValue = 0;
1499	        break;
1500
1501	    case XGE_SET_BUFFER_MODE_1:
1502	    case XGE_SET_BUFFER_MODE_2:
1503	    case XGE_SET_BUFFER_MODE_5:
1504	        mode = (cmd == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
1505	        if(copyout(&mode, ifr_data_get_ptr(ifreqp), sizeof(mode)) == 0)
1506	            retValue = 0;
1507	        break;
1508	    default:
1509	        xge_trace(XGE_TRACE, "Nothing is matching");
1510	        retValue = ENOTTY;
1511	        break;
1512	}
1513	return retValue;
1514}
1515
1516/**
1517 * xge_ioctl_registers
1518 * IOCTL to get registers
1519 *
1520 * @lldev Per-adapter data
1521 * @ifreqp Interface request
1522 */
1523int
1524xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1525{
1526	xge_register_t tmpdata;
1527	xge_register_t *data;
1528	xge_hal_status_e status = XGE_HAL_OK;
1529	int retValue = EINVAL, offset = 0, index = 0;
1530	int error;
1531	u64 val64 = 0;
1532
1533	error = copyin(ifr_data_get_ptr(ifreqp), &tmpdata, sizeof(tmpdata));
1534	if (error != 0)
1535		return (error);
1536	data = &tmpdata;
1537
1538	/* Reading a register */
1539	if(strcmp(data->option, "-r") == 0) {
1540	    data->value = 0x0000;
1541	    mtx_lock(&lldev->mtx_drv);
1542	    status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1543	        &data->value);
1544	    mtx_unlock(&lldev->mtx_drv);
1545	    if(status == XGE_HAL_OK) {
1546	        if(copyout(data, ifr_data_get_ptr(ifreqp),
1547		    sizeof(xge_register_t)) == 0)
1548	            retValue = 0;
1549	    }
1550	}
1551	/* Writing to a register */
1552	else if(strcmp(data->option, "-w") == 0) {
1553	    mtx_lock(&lldev->mtx_drv);
1554	    status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
1555	        data->value);
1556	    if(status == XGE_HAL_OK) {
1557	        val64 = 0x0000;
1558	        status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1559	            &val64);
1560	        if(status != XGE_HAL_OK) {
1561	            xge_trace(XGE_ERR, "Reading back updated register failed");
1562	        }
1563	        else {
1564	            if(val64 != data->value) {
1565	                xge_trace(XGE_ERR,
1566	                    "Read and written register values mismatched");
1567	            }
1568	            else retValue = 0;
1569	        }
1570	    }
1571	    else {
1572	        xge_trace(XGE_ERR, "Getting register value failed");
1573	    }
1574	    mtx_unlock(&lldev->mtx_drv);
1575	}
1576	else {
1577	    mtx_lock(&lldev->mtx_drv);
1578	    for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1579	        index++, offset += 0x0008) {
1580	        val64 = 0;
1581	        status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1582	        if(status != XGE_HAL_OK) {
1583	            xge_trace(XGE_ERR, "Getting register value failed");
1584	            break;
1585	        }
1586	        *((u64 *)((u64 *)data + index)) = val64;
1587	        retValue = 0;
1588	    }
1589	    mtx_unlock(&lldev->mtx_drv);
1590
1591	    if(retValue == 0) {
1592	        if(copyout(data, ifr_data_get_ptr(ifreqp),
1593	            sizeof(xge_hal_pci_bar0_t)) != 0) {
1594	            xge_trace(XGE_ERR, "Copyout of register values failed");
1595	            retValue = EINVAL;
1596	        }
1597	    }
1598	    else {
1599	        xge_trace(XGE_ERR, "Getting register values failed");
1600	    }
1601	}
1602	return retValue;
1603}
1604
1605/**
1606 * xge_ioctl
1607 * Callback to control the device - Interface configuration
1608 *
1609 * @ifnetp Interface Handle
1610 * @command Device control command
1611 * @data Parameters associated with command (if any)
1612 */
1613int
1614xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1615{
1616	struct ifreq   *ifreqp   = (struct ifreq *)data;
1617	xge_lldev_t    *lldev    = ifnetp->if_softc;
1618	struct ifmedia *ifmediap = &lldev->media;
1619	int             retValue = 0, mask = 0;
1620
1621	if(lldev->in_detach) {
1622	    return retValue;
1623	}
1624
1625	switch(command) {
1626	    /* Set/Get ifnet address */
1627	    case SIOCSIFADDR:
1628	    case SIOCGIFADDR:
1629	        ether_ioctl(ifnetp, command, data);
1630	        break;
1631
1632	    /* Set ifnet MTU */
1633	    case SIOCSIFMTU:
1634	        retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1635	        break;
1636
1637	    /* Set ifnet flags */
1638	    case SIOCSIFFLAGS:
1639	        if(ifnetp->if_flags & IFF_UP) {
1640	            /* Link status is UP */
1641	            if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
1642	                xge_init(lldev);
1643	            }
1644	            xge_disable_promisc(lldev);
1645	            xge_enable_promisc(lldev);
1646	        }
1647	        else {
1648	            /* Link status is DOWN */
1649	            /* If device is in running, make it down */
1650	            if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1651	                xge_stop(lldev);
1652	            }
1653	        }
1654	        break;
1655
1656	    /* Add/delete multicast address */
1657	    case SIOCADDMULTI:
1658	    case SIOCDELMULTI:
1659	        if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1660	            xge_setmulti(lldev);
1661	        }
1662	        break;
1663
1664	    /* Set/Get net media */
1665	    case SIOCSIFMEDIA:
1666	    case SIOCGIFMEDIA:
1667	        retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1668	        break;
1669
1670	    /* Set capabilities */
1671	    case SIOCSIFCAP:
1672	        mtx_lock(&lldev->mtx_drv);
1673	        mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1674	        if(mask & IFCAP_TXCSUM) {
1675	            if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1676	                ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1677	                ifnetp->if_hwassist &=
1678	                    ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1679	            }
1680	            else {
1681	                ifnetp->if_capenable |= IFCAP_TXCSUM;
1682	                ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1683	            }
1684	        }
1685	        if(mask & IFCAP_TSO4) {
1686	            if(ifnetp->if_capenable & IFCAP_TSO4) {
1687	                ifnetp->if_capenable &= ~IFCAP_TSO4;
1688	                ifnetp->if_hwassist  &= ~CSUM_TSO;
1689
1690	                xge_os_printf("%s: TSO Disabled",
1691	                    device_get_nameunit(lldev->device));
1692	            }
1693	            else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1694	                ifnetp->if_capenable |= IFCAP_TSO4;
1695	                ifnetp->if_hwassist  |= CSUM_TSO;
1696
1697	                xge_os_printf("%s: TSO Enabled",
1698	                    device_get_nameunit(lldev->device));
1699	            }
1700	        }
1701
1702	        mtx_unlock(&lldev->mtx_drv);
1703	        break;
1704
1705	    /* Custom IOCTL 0 */
1706	    case SIOCGPRIVATE_0:
1707	        retValue = xge_ioctl_stats(lldev, ifreqp);
1708	        break;
1709
1710	    /* Custom IOCTL 1 */
1711	    case SIOCGPRIVATE_1:
1712	        retValue = xge_ioctl_registers(lldev, ifreqp);
1713	        break;
1714
1715	    default:
1716	        retValue = EINVAL;
1717	        break;
1718	}
1719	return retValue;
1720}
1721
1722/**
1723 * xge_init
1724 * Initialize the interface
1725 *
1726 * @plldev Per-adapter Data
1727 */
1728void
1729xge_init(void *plldev)
1730{
1731	xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1732
1733	mtx_lock(&lldev->mtx_drv);
1734	xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1735	xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1736	mtx_unlock(&lldev->mtx_drv);
1737}
1738
1739/**
1740 * xge_device_init
1741 * Initialize the interface (called by holding lock)
1742 *
1743 * @pdevin Per-adapter Data
1744 */
1745void
1746xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1747{
1748	struct ifnet     *ifnetp = lldev->ifnetp;
1749	xge_hal_device_t *hldev  = lldev->devh;
1750	struct ifaddr      *ifaddrp;
1751	unsigned char      *macaddr;
1752	struct sockaddr_dl *sockaddrp;
1753	int                 status   = XGE_HAL_OK;
1754
1755	mtx_assert((&lldev->mtx_drv), MA_OWNED);
1756
1757	/* If device is in running state, initializing is not required */
1758	if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1759	    return;
1760
1761	/* Initializing timer */
1762	callout_init(&lldev->timer, 1);
1763
1764	xge_trace(XGE_TRACE, "Set MTU size");
1765	status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1766	if(status != XGE_HAL_OK) {
1767	    xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1768	    goto _exit;
1769	}
1770
1771	/* Enable HAL device */
1772	xge_hal_device_enable(hldev);
1773
1774	/* Get MAC address and update in HAL */
1775	ifaddrp             = ifnetp->if_addr;
1776	sockaddrp           = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1777	sockaddrp->sdl_type = IFT_ETHER;
1778	sockaddrp->sdl_alen = ifnetp->if_addrlen;
1779	macaddr             = LLADDR(sockaddrp);
1780	xge_trace(XGE_TRACE,
1781	    "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1782	    *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1783	    *(macaddr + 4), *(macaddr + 5));
1784	status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1785	if(status != XGE_HAL_OK)
1786	    xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1787
1788	/* Opening channels */
1789	mtx_unlock(&lldev->mtx_drv);
1790	status = xge_channel_open(lldev, option);
1791	mtx_lock(&lldev->mtx_drv);
1792	if(status != XGE_HAL_OK)
1793	    goto _exit;
1794
1795	/* Set appropriate flags */
1796	ifnetp->if_drv_flags  |=  IFF_DRV_RUNNING;
1797	ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1798
1799	/* Checksum capability */
1800	ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1801	    (CSUM_TCP | CSUM_UDP) : 0;
1802
1803	if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1804	    ifnetp->if_hwassist |= CSUM_TSO;
1805
1806	/* Enable interrupts */
1807	xge_hal_device_intr_enable(hldev);
1808
1809	callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1810
1811	/* Disable promiscuous mode */
1812	xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1813	xge_enable_promisc(lldev);
1814
1815	/* Device is initialized */
1816	lldev->initialized = 1;
1817	xge_os_mdelay(1000);
1818
1819_exit:
1820	return;
1821}
1822
1823/**
1824 * xge_timer
1825 * Timer timeout function to handle link status
1826 *
1827 * @devp Per-adapter Data
1828 */
1829void
1830xge_timer(void *devp)
1831{
1832	xge_lldev_t      *lldev = (xge_lldev_t *)devp;
1833	xge_hal_device_t *hldev = lldev->devh;
1834
1835	/* Poll for changes */
1836	xge_hal_device_poll(hldev);
1837
1838	/* Reset timer */
1839	callout_reset(&lldev->timer, hz, xge_timer, lldev);
1840
1841	return;
1842}
1843
1844/**
1845 * xge_stop
1846 * De-activate the interface
1847 *
1848 * @lldev Per-adater Data
1849 */
1850void
1851xge_stop(xge_lldev_t *lldev)
1852{
1853	mtx_lock(&lldev->mtx_drv);
1854	xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1855	mtx_unlock(&lldev->mtx_drv);
1856}
1857
1858/**
1859 * xge_isr_filter
1860 * ISR filter function - to filter interrupts from other devices (shared)
1861 *
1862 * @handle Per-adapter Data
1863 *
1864 * Returns
1865 * FILTER_STRAY if interrupt is from other device
1866 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1867 */
1868int
1869xge_isr_filter(void *handle)
1870{
1871	xge_lldev_t *lldev       = (xge_lldev_t *)handle;
1872	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1873	u16 retValue = FILTER_STRAY;
1874	u64 val64    = 0;
1875
1876	XGE_DRV_STATS(isr_filter);
1877
1878	val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1879	    &bar0->general_int_status);
1880	retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1881
1882	return retValue;
1883}
1884
1885/**
1886 * xge_isr_line
1887 * Interrupt service routine for Line interrupts
1888 *
1889 * @plldev Per-adapter Data
1890 */
1891void
1892xge_isr_line(void *plldev)
1893{
1894	xge_hal_status_e status;
1895	xge_lldev_t      *lldev   = (xge_lldev_t *)plldev;
1896	xge_hal_device_t *hldev   = (xge_hal_device_t *)lldev->devh;
1897	struct ifnet     *ifnetp  = lldev->ifnetp;
1898
1899	XGE_DRV_STATS(isr_line);
1900
1901	if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1902	    status = xge_hal_device_handle_irq(hldev);
1903	    if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1904	        xge_send(ifnetp);
1905	}
1906}
1907
1908/*
1909 * xge_isr_msi
1910 * ISR for Message signaled interrupts
1911 */
1912void
1913xge_isr_msi(void *plldev)
1914{
1915	xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1916	XGE_DRV_STATS(isr_msi);
1917	xge_hal_device_continue_irq(lldev->devh);
1918}
1919
1920/**
1921 * xge_rx_open
1922 * Initiate and open all Rx channels
1923 *
1924 * @qid Ring Index
1925 * @lldev Per-adapter Data
1926 * @rflag Channel open/close/reopen flag
1927 *
1928 * Returns 0 or Error Number
1929 */
1930int
1931xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1932{
1933	u64 adapter_status = 0x0;
1934	xge_hal_status_e status = XGE_HAL_FAIL;
1935
1936	xge_hal_channel_attr_t attr = {
1937	    .post_qid      = qid,
1938	    .compl_qid     = 0,
1939	    .callback      = xge_rx_compl,
1940	    .per_dtr_space = sizeof(xge_rx_priv_t),
1941	    .flags         = 0,
1942	    .type          = XGE_HAL_CHANNEL_TYPE_RING,
1943	    .userdata      = lldev,
1944	    .dtr_init      = xge_rx_initial_replenish,
1945	    .dtr_term      = xge_rx_term
1946	};
1947
1948	/* If device is not ready, return */
1949	status = xge_hal_device_status(lldev->devh, &adapter_status);
1950	if(status != XGE_HAL_OK) {
1951	    xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1952	    XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1953	}
1954	else {
1955	    status = xge_hal_channel_open(lldev->devh, &attr,
1956	        &lldev->ring_channel[qid], rflag);
1957	}
1958
1959_exit:
1960	return status;
1961}
1962
1963/**
1964 * xge_tx_open
1965 * Initialize and open all Tx channels
1966 *
1967 * @lldev Per-adapter Data
1968 * @tflag Channel open/close/reopen flag
1969 *
1970 * Returns 0 or Error Number
1971 */
1972int
1973xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1974{
1975	xge_hal_status_e status = XGE_HAL_FAIL;
1976	u64 adapter_status = 0x0;
1977	int qindex, index;
1978
1979	xge_hal_channel_attr_t attr = {
1980	    .compl_qid     = 0,
1981	    .callback      = xge_tx_compl,
1982	    .per_dtr_space = sizeof(xge_tx_priv_t),
1983	    .flags         = 0,
1984	    .type          = XGE_HAL_CHANNEL_TYPE_FIFO,
1985	    .userdata      = lldev,
1986	    .dtr_init      = xge_tx_initial_replenish,
1987	    .dtr_term      = xge_tx_term
1988	};
1989
1990	/* If device is not ready, return */
1991	status = xge_hal_device_status(lldev->devh, &adapter_status);
1992	if(status != XGE_HAL_OK) {
1993	    xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1994	    XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1995	}
1996
1997	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
1998	    attr.post_qid = qindex,
1999	    status = xge_hal_channel_open(lldev->devh, &attr,
2000	        &lldev->fifo_channel[qindex], tflag);
2001	    if(status != XGE_HAL_OK) {
2002	        for(index = 0; index < qindex; index++)
2003	            xge_hal_channel_close(lldev->fifo_channel[index], tflag);
2004	    }
2005	}
2006
2007_exit:
2008	return status;
2009}
2010
2011/**
2012 * xge_enable_msi
2013 * Enables MSI
2014 *
2015 * @lldev Per-adapter Data
2016 */
2017void
2018xge_enable_msi(xge_lldev_t *lldev)
2019{
2020	xge_list_t        *item    = NULL;
2021	xge_hal_device_t  *hldev   = lldev->devh;
2022	xge_hal_channel_t *channel = NULL;
2023	u16 offset = 0, val16 = 0;
2024
2025	xge_os_pci_read16(lldev->pdev, NULL,
2026	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2027
2028	/* Update msi_data */
2029	offset = (val16 & 0x80) ? 0x4c : 0x48;
2030	xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2031	if(val16 & 0x1)
2032	    val16 &= 0xfffe;
2033	else
2034	    val16 |= 0x1;
2035	xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2036
2037	/* Update msi_control */
2038	xge_os_pci_read16(lldev->pdev, NULL,
2039	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2040	val16 |= 0x10;
2041	xge_os_pci_write16(lldev->pdev, NULL,
2042	    xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2043
2044	/* Set TxMAT and RxMAT registers with MSI */
2045	xge_list_for_each(item, &hldev->free_channels) {
2046	    channel = xge_container_of(item, xge_hal_channel_t, item);
2047	    xge_hal_channel_msi_set(channel, 1, (u32)val16);
2048	}
2049}
2050
2051/**
2052 * xge_channel_open
2053 * Open both Tx and Rx channels
2054 *
2055 * @lldev Per-adapter Data
2056 * @option Channel reopen option
2057 */
2058int
2059xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2060{
2061	xge_lro_entry_t *lro_session = NULL;
2062	xge_hal_status_e status   = XGE_HAL_OK;
2063	int index = 0, index2 = 0;
2064
2065	if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2066	    xge_msi_info_restore(lldev);
2067	    xge_enable_msi(lldev);
2068	}
2069
2070_exit2:
2071	status = xge_create_dma_tags(lldev->device);
2072	if(status != XGE_HAL_OK)
2073	    XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2074
2075	/* Open ring (Rx) channel */
2076	for(index = 0; index < XGE_RING_COUNT; index++) {
2077	    status = xge_rx_open(index, lldev, option);
2078	    if(status != XGE_HAL_OK) {
2079	        /*
2080	         * DMA mapping fails in the unpatched Kernel which can't
2081	         * allocate contiguous memory for Jumbo frames.
2082	         * Try using 5 buffer mode.
2083	         */
2084	        if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2085	            (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2086	            MJUMPAGESIZE)) {
2087	            /* Close so far opened channels */
2088	            for(index2 = 0; index2 < index; index2++) {
2089	                xge_hal_channel_close(lldev->ring_channel[index2],
2090	                    option);
2091	            }
2092
2093	            /* Destroy DMA tags intended to use for 1 buffer mode */
2094	            if(bus_dmamap_destroy(lldev->dma_tag_rx,
2095	                lldev->extra_dma_map)) {
2096	                xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2097	            }
2098	            if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2099	                xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2100	            if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2101	                xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2102
2103	            /* Switch to 5 buffer mode */
2104	            lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2105	            xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2106
2107	            /* Restart init */
2108	            goto _exit2;
2109	        }
2110	        else {
2111	            XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
2112	                status);
2113	        }
2114	    }
2115	}
2116
2117	if(lldev->enabled_lro) {
2118	    SLIST_INIT(&lldev->lro_free);
2119	    SLIST_INIT(&lldev->lro_active);
2120	    lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2121
2122	    for(index = 0; index < lldev->lro_num; index++) {
2123	        lro_session = (xge_lro_entry_t *)
2124	            xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2125	        if(lro_session == NULL) {
2126	            lldev->lro_num = index;
2127	            break;
2128	        }
2129	        SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2130	    }
2131	}
2132
2133	/* Open FIFO (Tx) channel */
2134	status = xge_tx_open(lldev, option);
2135	if(status != XGE_HAL_OK)
2136	    XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2137
2138	goto _exit;
2139
2140_exit1:
2141	/*
2142	 * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2143	 * Initialization of LRO failed (index is XGE_RING_COUNT)
2144	 * Opening Tx channel failed    (index is XGE_RING_COUNT)
2145	 */
2146	for(index2 = 0; index2 < index; index2++)
2147	    xge_hal_channel_close(lldev->ring_channel[index2], option);
2148
2149_exit:
2150	return status;
2151}
2152
2153/**
2154 * xge_channel_close
2155 * Close both Tx and Rx channels
2156 *
2157 * @lldev Per-adapter Data
2158 * @option Channel reopen option
2159 *
2160 */
2161void
2162xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2163{
2164	int qindex = 0;
2165
2166	DELAY(1000 * 1000);
2167
2168	/* Close FIFO (Tx) channel */
2169	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2170	    xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2171
2172	/* Close Ring (Rx) channels */
2173	for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2174	    xge_hal_channel_close(lldev->ring_channel[qindex], option);
2175
2176	if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2177	    xge_trace(XGE_ERR, "Rx extra map destroy failed");
2178	if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2179	    xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2180	if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2181	    xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2182}
2183
2184/**
2185 * dmamap_cb
2186 * DMA map callback
2187 *
2188 * @arg Parameter passed from dmamap
2189 * @segs Segments
2190 * @nseg Number of segments
2191 * @error Error
2192 */
2193void
2194dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2195{
2196	if(!error) {
2197	    *(bus_addr_t *) arg = segs->ds_addr;
2198	}
2199}
2200
2201/**
2202 * xge_reset
2203 * Device Reset
2204 *
2205 * @lldev Per-adapter Data
2206 */
2207void
2208xge_reset(xge_lldev_t *lldev)
2209{
2210	xge_trace(XGE_TRACE, "Reseting the chip");
2211
2212	/* If the device is not initialized, return */
2213	if(lldev->initialized) {
2214	    mtx_lock(&lldev->mtx_drv);
2215	    xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2216	    xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2217	    mtx_unlock(&lldev->mtx_drv);
2218	}
2219
2220	return;
2221}
2222
2223/**
2224 * xge_setmulti
2225 * Set an address as a multicast address
2226 *
2227 * @lldev Per-adapter Data
2228 */
2229void
2230xge_setmulti(xge_lldev_t *lldev)
2231{
2232	struct ifmultiaddr *ifma;
2233	u8                 *lladdr;
2234	xge_hal_device_t   *hldev        = (xge_hal_device_t *)lldev->devh;
2235	struct ifnet       *ifnetp       = lldev->ifnetp;
2236	int                index         = 0;
2237	int                offset        = 1;
2238	int                table_size    = 47;
2239	xge_hal_status_e   status        = XGE_HAL_OK;
2240	u8                 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2241
2242	if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2243	    status = xge_hal_device_mcast_enable(hldev);
2244	    lldev->all_multicast = 1;
2245	}
2246	else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2247	    status = xge_hal_device_mcast_disable(hldev);
2248	    lldev->all_multicast = 0;
2249	}
2250
2251	if(status != XGE_HAL_OK) {
2252	    xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2253	    goto _exit;
2254	}
2255
2256	/* Updating address list */
2257	if_maddr_rlock(ifnetp);
2258	index = 0;
2259	TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2260	    if(ifma->ifma_addr->sa_family != AF_LINK) {
2261	        continue;
2262	    }
2263	    lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2264	    index += 1;
2265	}
2266	if_maddr_runlock(ifnetp);
2267
2268	if((!lldev->all_multicast) && (index)) {
2269	    lldev->macaddr_count = (index + 1);
2270	    if(lldev->macaddr_count > table_size) {
2271	        goto _exit;
2272	    }
2273
2274	    /* Clear old addresses */
2275	    for(index = 0; index < 48; index++) {
2276	        xge_hal_device_macaddr_set(hldev, (offset + index),
2277	            initial_addr);
2278	    }
2279	}
2280
2281	/* Add new addresses */
2282	if_maddr_rlock(ifnetp);
2283	index = 0;
2284	TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2285	    if(ifma->ifma_addr->sa_family != AF_LINK) {
2286	        continue;
2287	    }
2288	    lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2289	    xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2290	    index += 1;
2291	}
2292	if_maddr_runlock(ifnetp);
2293
2294_exit:
2295	return;
2296}
2297
2298/**
2299 * xge_enable_promisc
2300 * Enable Promiscuous Mode
2301 *
2302 * @lldev Per-adapter Data
2303 */
2304void
2305xge_enable_promisc(xge_lldev_t *lldev)
2306{
2307	struct ifnet *ifnetp = lldev->ifnetp;
2308	xge_hal_device_t *hldev = lldev->devh;
2309	xge_hal_pci_bar0_t *bar0 = NULL;
2310	u64 val64 = 0;
2311
2312	bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2313
2314	if(ifnetp->if_flags & IFF_PROMISC) {
2315	    xge_hal_device_promisc_enable(lldev->devh);
2316
2317	    /*
2318	     * When operating in promiscuous mode, don't strip the VLAN tag
2319	     */
2320	    val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2321	        &bar0->rx_pa_cfg);
2322	    val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2323	    val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2324	    xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2325	        &bar0->rx_pa_cfg);
2326
2327	    xge_trace(XGE_TRACE, "Promiscuous mode ON");
2328	}
2329}
2330
2331/**
2332 * xge_disable_promisc
2333 * Disable Promiscuous Mode
2334 *
2335 * @lldev Per-adapter Data
2336 */
2337void
2338xge_disable_promisc(xge_lldev_t *lldev)
2339{
2340	xge_hal_device_t *hldev = lldev->devh;
2341	xge_hal_pci_bar0_t *bar0 = NULL;
2342	u64 val64 = 0;
2343
2344	bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2345
2346	xge_hal_device_promisc_disable(lldev->devh);
2347
2348	/*
2349	 * Strip VLAN tag when operating in non-promiscuous mode
2350	 */
2351	val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2352	    &bar0->rx_pa_cfg);
2353	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2354	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2355	xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2356	    &bar0->rx_pa_cfg);
2357
2358	xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2359}
2360
2361/**
2362 * xge_change_mtu
2363 * Change interface MTU to a requested valid size
2364 *
2365 * @lldev Per-adapter Data
2366 * @NewMtu Requested MTU
2367 *
2368 * Returns 0 or Error Number
2369 */
2370int
2371xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2372{
2373	int status = XGE_HAL_OK;
2374
2375	/* Check requested MTU size for boundary */
2376	if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2377	    XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2378	}
2379
2380	lldev->mtu = new_mtu;
2381	xge_confirm_changes(lldev, XGE_SET_MTU);
2382
2383_exit:
2384	return status;
2385}
2386
2387/**
2388 * xge_device_stop
2389 *
2390 * Common code for both stop and part of reset. Disables device, interrupts and
2391 * closes channels
2392 *
2393 * @dev Device Handle
2394 * @option Channel normal/reset option
2395 */
2396void
2397xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2398{
2399	xge_hal_device_t *hldev  = lldev->devh;
2400	struct ifnet     *ifnetp = lldev->ifnetp;
2401	u64               val64  = 0;
2402
2403	mtx_assert((&lldev->mtx_drv), MA_OWNED);
2404
2405	/* If device is not in "Running" state, return */
2406	if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2407	    goto _exit;
2408
2409	/* Set appropriate flags */
2410	ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2411
2412	/* Stop timer */
2413	callout_stop(&lldev->timer);
2414
2415	/* Disable interrupts */
2416	xge_hal_device_intr_disable(hldev);
2417
2418	mtx_unlock(&lldev->mtx_drv);
2419	xge_queue_flush(xge_hal_device_queue(lldev->devh));
2420	mtx_lock(&lldev->mtx_drv);
2421
2422	/* Disable HAL device */
2423	if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2424	    xge_trace(XGE_ERR, "Disabling HAL device failed");
2425	    xge_hal_device_status(hldev, &val64);
2426	    xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2427	}
2428
2429	/* Close Tx and Rx channels */
2430	xge_channel_close(lldev, option);
2431
2432	/* Reset HAL device */
2433	xge_hal_device_reset(hldev);
2434
2435	xge_os_mdelay(1000);
2436	lldev->initialized = 0;
2437
2438	if_link_state_change(ifnetp, LINK_STATE_DOWN);
2439
2440_exit:
2441	return;
2442}
2443
2444/**
2445 * xge_set_mbuf_cflags
2446 * set checksum flag for the mbuf
2447 *
2448 * @pkt Packet
2449 */
2450void
2451xge_set_mbuf_cflags(mbuf_t pkt)
2452{
2453	pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2454	pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2455	pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2456	pkt->m_pkthdr.csum_data = htons(0xffff);
2457}
2458
2459/**
2460 * xge_lro_flush_sessions
2461 * Flush LRO session and send accumulated LRO packet to upper layer
2462 *
2463 * @lldev Per-adapter Data
2464 */
2465void
2466xge_lro_flush_sessions(xge_lldev_t *lldev)
2467{
2468	xge_lro_entry_t *lro_session = NULL;
2469
2470	while(!SLIST_EMPTY(&lldev->lro_active)) {
2471	    lro_session = SLIST_FIRST(&lldev->lro_active);
2472	    SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2473	    xge_lro_flush(lldev, lro_session);
2474	}
2475}
2476
2477/**
2478 * xge_lro_flush
2479 * Flush LRO session. Send accumulated LRO packet to upper layer
2480 *
2481 * @lldev Per-adapter Data
2482 * @lro LRO session to be flushed
2483 */
2484static void
2485xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2486{
2487	struct ip *header_ip;
2488	struct tcphdr *header_tcp;
2489	u32 *ptr;
2490
2491	if(lro_session->append_cnt) {
2492	    header_ip = lro_session->lro_header_ip;
2493	    header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2494	    lro_session->m_head->m_pkthdr.len = lro_session->len;
2495	    header_tcp = (struct tcphdr *)(header_ip + 1);
2496	    header_tcp->th_ack = lro_session->ack_seq;
2497	    header_tcp->th_win = lro_session->window;
2498	    if(lro_session->timestamp) {
2499	        ptr = (u32 *)(header_tcp + 1);
2500	        ptr[1] = htonl(lro_session->tsval);
2501	        ptr[2] = lro_session->tsecr;
2502	    }
2503	}
2504
2505	(*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2506	lro_session->m_head = NULL;
2507	lro_session->timestamp = 0;
2508	lro_session->append_cnt = 0;
2509	SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2510}
2511
2512/**
2513 * xge_lro_accumulate
2514 * Accumulate packets to form a large LRO packet based on various conditions
2515 *
2516 * @lldev Per-adapter Data
2517 * @m_head Current Packet
2518 *
2519 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2520 */
2521static int
2522xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2523{
2524	struct ether_header *header_ethernet;
2525	struct ip *header_ip;
2526	struct tcphdr *header_tcp;
2527	u32 seq, *ptr;
2528	struct mbuf *buffer_next, *buffer_tail;
2529	xge_lro_entry_t *lro_session;
2530	xge_hal_status_e status = XGE_HAL_FAIL;
2531	int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2532	int trim;
2533
2534	/* Get Ethernet header */
2535	header_ethernet = mtod(m_head, struct ether_header *);
2536
2537	/* Return if it is not IP packet */
2538	if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
2539	    goto _exit;
2540
2541	/* Get IP header */
2542	header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2543	    (struct ip *)(header_ethernet + 1) :
2544	    mtod(m_head->m_next, struct ip *);
2545
2546	/* Return if it is not TCP packet */
2547	if(header_ip->ip_p != IPPROTO_TCP)
2548	    goto _exit;
2549
2550	/* Return if packet has options */
2551	if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2552	    goto _exit;
2553
2554	/* Return if packet is fragmented */
2555	if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2556	    goto _exit;
2557
2558	/* Get TCP header */
2559	header_tcp = (struct tcphdr *)(header_ip + 1);
2560
2561	/* Return if not ACK or PUSH */
2562	if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2563	    goto _exit;
2564
2565	/* Only timestamp option is handled */
2566	tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2567	tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2568	ptr = (u32 *)(header_tcp + 1);
2569	if(tcp_options != 0) {
2570	    if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2571	        (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2572	        TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2573	        goto _exit;
2574	    }
2575	}
2576
2577	/* Total length of packet (IP) */
2578	ip_len = ntohs(header_ip->ip_len);
2579
2580	/* TCP data size */
2581	tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2582
2583	/* If the frame is padded, trim it */
2584	tot_len = m_head->m_pkthdr.len;
2585	trim = tot_len - (ip_len + ETHER_HDR_LEN);
2586	if(trim != 0) {
2587	    if(trim < 0)
2588	        goto _exit;
2589	    m_adj(m_head, -trim);
2590	    tot_len = m_head->m_pkthdr.len;
2591	}
2592
2593	buffer_next = m_head;
2594	buffer_tail = NULL;
2595	while(buffer_next != NULL) {
2596	    buffer_tail = buffer_next;
2597	    buffer_next = buffer_tail->m_next;
2598	}
2599
2600	/* Total size of only headers */
2601	hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2602
2603	/* Get sequence number */
2604	seq = ntohl(header_tcp->th_seq);
2605
2606	SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2607	    if(lro_session->source_port == header_tcp->th_sport &&
2608	        lro_session->dest_port == header_tcp->th_dport &&
2609	        lro_session->source_ip == header_ip->ip_src.s_addr &&
2610	        lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2611
2612	        /* Unmatched sequence number, flush LRO session */
2613	        if(__predict_false(seq != lro_session->next_seq)) {
2614	            SLIST_REMOVE(&lldev->lro_active, lro_session,
2615	                xge_lro_entry_t, next);
2616	            xge_lro_flush(lldev, lro_session);
2617	            goto _exit;
2618	        }
2619
2620	        /* Handle timestamp option */
2621	        if(tcp_options) {
2622	            u32 tsval = ntohl(*(ptr + 1));
2623	            if(__predict_false(lro_session->tsval > tsval ||
2624	                *(ptr + 2) == 0)) {
2625	                goto _exit;
2626	            }
2627	            lro_session->tsval = tsval;
2628	            lro_session->tsecr = *(ptr + 2);
2629	        }
2630
2631	        lro_session->next_seq += tcp_data_len;
2632	        lro_session->ack_seq = header_tcp->th_ack;
2633	        lro_session->window = header_tcp->th_win;
2634
2635	        /* If TCP data/payload is of 0 size, free mbuf */
2636	        if(tcp_data_len == 0) {
2637	            m_freem(m_head);
2638	            status = XGE_HAL_OK;
2639	            goto _exit;
2640	        }
2641
2642	        lro_session->append_cnt++;
2643	        lro_session->len += tcp_data_len;
2644
2645	        /* Adjust mbuf so that m_data points to payload than headers */
2646	        m_adj(m_head, hlen);
2647
2648	        /* Append this packet to LRO accumulated packet */
2649	        lro_session->m_tail->m_next = m_head;
2650	        lro_session->m_tail = buffer_tail;
2651
2652	        /* Flush if LRO packet is exceeding maximum size */
2653	        if(lro_session->len >
2654	            (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2655	            SLIST_REMOVE(&lldev->lro_active, lro_session,
2656	                xge_lro_entry_t, next);
2657	            xge_lro_flush(lldev, lro_session);
2658	        }
2659	        status = XGE_HAL_OK;
2660	        goto _exit;
2661	    }
2662	}
2663
2664	if(SLIST_EMPTY(&lldev->lro_free))
2665	    goto _exit;
2666
2667	/* Start a new LRO session */
2668	lro_session = SLIST_FIRST(&lldev->lro_free);
2669	SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2670	SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2671	lro_session->source_port = header_tcp->th_sport;
2672	lro_session->dest_port = header_tcp->th_dport;
2673	lro_session->source_ip = header_ip->ip_src.s_addr;
2674	lro_session->dest_ip = header_ip->ip_dst.s_addr;
2675	lro_session->next_seq = seq + tcp_data_len;
2676	lro_session->mss = tcp_data_len;
2677	lro_session->ack_seq = header_tcp->th_ack;
2678	lro_session->window = header_tcp->th_win;
2679
2680	lro_session->lro_header_ip = header_ip;
2681
2682	/* Handle timestamp option */
2683	if(tcp_options) {
2684	    lro_session->timestamp = 1;
2685	    lro_session->tsval = ntohl(*(ptr + 1));
2686	    lro_session->tsecr = *(ptr + 2);
2687	}
2688
2689	lro_session->len = tot_len;
2690	lro_session->m_head = m_head;
2691	lro_session->m_tail = buffer_tail;
2692	status = XGE_HAL_OK;
2693
2694_exit:
2695	return status;
2696}
2697
2698/**
2699 * xge_accumulate_large_rx
2700 * Accumulate packets to form a large LRO packet based on various conditions
2701 *
2702 * @lldev Per-adapter Data
2703 * @pkt Current packet
2704 * @pkt_length Packet Length
2705 * @rxd_priv Rx Descriptor Private Data
2706 */
2707void
2708xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2709	xge_rx_priv_t *rxd_priv)
2710{
2711	if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2712	    bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2713	        BUS_DMASYNC_POSTREAD);
2714	    (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2715	}
2716}
2717
2718/**
2719 * xge_rx_compl
2720 * If the interrupt is due to received frame (Rx completion), send it up
2721 *
2722 * @channelh Ring Channel Handle
2723 * @dtr Current Descriptor
2724 * @t_code Transfer Code indicating success or error
2725 * @userdata Per-adapter Data
2726 *
2727 * Returns XGE_HAL_OK or HAL error enums
2728 */
2729xge_hal_status_e
2730xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2731	void *userdata)
2732{
2733	struct ifnet       *ifnetp;
2734	xge_rx_priv_t      *rxd_priv = NULL;
2735	mbuf_t              mbuf_up  = NULL;
2736	xge_hal_status_e    status   = XGE_HAL_OK;
2737	xge_hal_dtr_info_t  ext_info;
2738	int                 index;
2739	u16                 vlan_tag;
2740
2741	/*get the user data portion*/
2742	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2743	if(!lldev) {
2744	    XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2745	}
2746
2747	XGE_DRV_STATS(rx_completions);
2748
2749	/* get the interface pointer */
2750	ifnetp = lldev->ifnetp;
2751
2752	do {
2753	    XGE_DRV_STATS(rx_desc_compl);
2754
2755	    if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2756	        status = XGE_HAL_FAIL;
2757	        goto _exit;
2758	    }
2759
2760	    if(t_code) {
2761	        xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2762	        XGE_DRV_STATS(rx_tcode);
2763	        xge_hal_device_handle_tcode(channelh, dtr, t_code);
2764	        xge_hal_ring_dtr_post(channelh,dtr);
2765	        continue;
2766	    }
2767
2768	    /* Get the private data for this descriptor*/
2769	    rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2770	        dtr);
2771	    if(!rxd_priv) {
2772	        XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2773	            XGE_HAL_FAIL);
2774	    }
2775
2776	    /*
2777	     * Prepare one buffer to send it to upper layer -- since the upper
2778	     * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2779	     * prepare a new buffer, do mapping, use it in the current
2780	     * descriptor and post descriptor back to ring channel
2781	     */
2782	    mbuf_up = rxd_priv->bufferArray[0];
2783
2784	    /* Gets details of mbuf i.e., packet length */
2785	    xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
2786
2787	    status =
2788	        (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2789	        xge_get_buf(dtr, rxd_priv, lldev, 0) :
2790	        xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2791
2792	    if(status != XGE_HAL_OK) {
2793	        xge_trace(XGE_ERR, "No memory");
2794	        XGE_DRV_STATS(rx_no_buf);
2795
2796	        /*
2797	         * Unable to allocate buffer. Instead of discarding, post
2798	         * descriptor back to channel for future processing of same
2799	         * packet.
2800	         */
2801	        xge_hal_ring_dtr_post(channelh, dtr);
2802	        continue;
2803	    }
2804
2805	    /* Get the extended information */
2806	    xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2807
2808	    /*
2809	     * As we have allocated a new mbuf for this descriptor, post this
2810	     * descriptor with new mbuf back to ring channel
2811	     */
2812	    vlan_tag = ext_info.vlan;
2813	    xge_hal_ring_dtr_post(channelh, dtr);
2814	    if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2815	        (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2816	        (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2817	        (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2818
2819	        /* set Checksum Flag */
2820	        xge_set_mbuf_cflags(mbuf_up);
2821
2822	        if(lldev->enabled_lro) {
2823	            xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2824	                rxd_priv);
2825	        }
2826	        else {
2827	            /* Post-Read sync for buffers*/
2828	            for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2829	                bus_dmamap_sync(lldev->dma_tag_rx,
2830	                    rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2831	            }
2832	            (*ifnetp->if_input)(ifnetp, mbuf_up);
2833	        }
2834	    }
2835	    else {
2836	        /*
2837	         * Packet with erroneous checksum , let the upper layer deal
2838	         * with it
2839	         */
2840
2841	        /* Post-Read sync for buffers*/
2842	        for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2843	            bus_dmamap_sync(lldev->dma_tag_rx,
2844	                 rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
2845	        }
2846
2847	        if(vlan_tag) {
2848	            mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2849	            mbuf_up->m_flags |= M_VLANTAG;
2850	        }
2851
2852	        if(lldev->enabled_lro)
2853	            xge_lro_flush_sessions(lldev);
2854
2855	        (*ifnetp->if_input)(ifnetp, mbuf_up);
2856	    }
2857	} while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2858	    == XGE_HAL_OK);
2859
2860	if(lldev->enabled_lro)
2861	    xge_lro_flush_sessions(lldev);
2862
2863_exit:
2864	return status;
2865}
2866
2867/**
2868 * xge_ring_dtr_get
2869 * Get descriptors
2870 *
2871 * @mbuf_up Packet to send up
2872 * @channelh Ring Channel Handle
2873 * @dtr Descriptor
2874 * @lldev Per-adapter Data
2875 * @rxd_priv Rx Descriptor Private Data
2876 *
2877 * Returns XGE_HAL_OK or HAL error enums
2878 */
2879int
2880xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2881	xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
2882{
2883	mbuf_t           m;
2884	int              pkt_length[5]={0,0}, pkt_len=0;
2885	dma_addr_t       dma_data[5];
2886	int              index;
2887
2888	m = mbuf_up;
2889	pkt_len = 0;
2890
2891	if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2892	    xge_os_memzero(pkt_length, sizeof(pkt_length));
2893
2894	    /*
2895	     * Retrieve data of interest from the completed descriptor -- This
2896	     * returns the packet length
2897	     */
2898	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2899	        xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2900	    }
2901	    else {
2902	        xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
2903	    }
2904
2905	    for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2906	        m->m_len  = pkt_length[index];
2907
2908	        if(index < (lldev->rxd_mbuf_cnt-1)) {
2909	            m->m_next = rxd_priv->bufferArray[index + 1];
2910	            m = m->m_next;
2911	        }
2912	        else {
2913	            m->m_next = NULL;
2914	        }
2915	        pkt_len+=pkt_length[index];
2916	    }
2917
2918	    /*
2919	     * Since 2 buffer mode is an exceptional case where data is in 3rd
2920	     * buffer but not in 2nd buffer
2921	     */
2922	    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2923	        m->m_len = pkt_length[2];
2924	        pkt_len+=pkt_length[2];
2925	    }
2926
2927	    /*
2928	     * Update length of newly created buffer to be sent up with packet
2929	     * length
2930	     */
2931	    mbuf_up->m_pkthdr.len = pkt_len;
2932	}
2933	else {
2934	    /*
2935	     * Retrieve data of interest from the completed descriptor -- This
2936	     * returns the packet length
2937	     */
2938	    xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2939
2940	    /*
2941	     * Update length of newly created buffer to be sent up with packet
2942	     * length
2943	     */
2944	    mbuf_up->m_len =  mbuf_up->m_pkthdr.len = pkt_length[0];
2945	}
2946
2947	return XGE_HAL_OK;
2948}
2949
2950/**
2951 * xge_flush_txds
2952 * Flush Tx descriptors
2953 *
2954 * @channelh Channel handle
2955 */
2956static void inline
2957xge_flush_txds(xge_hal_channel_h channelh)
2958{
2959	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2960	xge_hal_dtr_h tx_dtr;
2961	xge_tx_priv_t *tx_priv;
2962	u8 t_code;
2963
2964	while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2965	    == XGE_HAL_OK) {
2966	    XGE_DRV_STATS(tx_desc_compl);
2967	    if(t_code) {
2968	        xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2969	        XGE_DRV_STATS(tx_tcode);
2970	        xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2971	    }
2972
2973	    tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2974	    bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2975	    m_freem(tx_priv->buffer);
2976	    tx_priv->buffer = NULL;
2977	    xge_hal_fifo_dtr_free(channelh, tx_dtr);
2978	}
2979}
2980
2981/**
2982 * xge_send
2983 * Transmit function
2984 *
2985 * @ifnetp Interface Handle
2986 */
2987void
2988xge_send(struct ifnet *ifnetp)
2989{
2990	int qindex = 0;
2991	xge_lldev_t *lldev = ifnetp->if_softc;
2992
2993	for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2994	    if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2995	        XGE_DRV_STATS(tx_lock_fail);
2996	        break;
2997	    }
2998	    xge_send_locked(ifnetp, qindex);
2999	    mtx_unlock(&lldev->mtx_tx[qindex]);
3000	}
3001}
3002
3003static void inline
3004xge_send_locked(struct ifnet *ifnetp, int qindex)
3005{
3006	xge_hal_dtr_h            dtr;
3007	static bus_dma_segment_t segs[XGE_MAX_SEGS];
3008	xge_hal_status_e         status;
3009	unsigned int             max_fragments;
3010	xge_lldev_t              *lldev          = ifnetp->if_softc;
3011	xge_hal_channel_h        channelh        = lldev->fifo_channel[qindex];
3012	mbuf_t                   m_head          = NULL;
3013	mbuf_t                   m_buf           = NULL;
3014	xge_tx_priv_t            *ll_tx_priv     = NULL;
3015	register unsigned int    count           = 0;
3016	unsigned int             nsegs           = 0;
3017	u16                      vlan_tag;
3018
3019	max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3020
3021	/* If device is not initialized, return */
3022	if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3023	    return;
3024
3025	XGE_DRV_STATS(tx_calls);
3026
3027	/*
3028	 * This loop will be executed for each packet in the kernel maintained
3029	 * queue -- each packet can be with fragments as an mbuf chain
3030	 */
3031	for(;;) {
3032	    IF_DEQUEUE(&ifnetp->if_snd, m_head);
3033	    if (m_head == NULL) {
3034		ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
3035		return;
3036	    }
3037
3038	    for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
3039	        if(m_buf->m_len) count += 1;
3040	    }
3041
3042	    if(count >= max_fragments) {
3043	        m_buf = m_defrag(m_head, M_NOWAIT);
3044	        if(m_buf != NULL) m_head = m_buf;
3045	        XGE_DRV_STATS(tx_defrag);
3046	    }
3047
3048	    /* Reserve descriptors */
3049	    status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3050	    if(status != XGE_HAL_OK) {
3051	        XGE_DRV_STATS(tx_no_txd);
3052	        xge_flush_txds(channelh);
3053		break;
3054	    }
3055
3056	    vlan_tag =
3057	        (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3058	    xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3059
3060	    /* Update Tx private structure for this descriptor */
3061	    ll_tx_priv         = xge_hal_fifo_dtr_private(dtr);
3062	    ll_tx_priv->buffer = m_head;
3063
3064	    /*
3065	     * Do mapping -- Required DMA tag has been created in xge_init
3066	     * function and DMA maps have already been created in the
3067	     * xgell_tx_replenish function.
3068	     * Returns number of segments through nsegs
3069	     */
3070	    if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3071	        ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
3072	        xge_trace(XGE_TRACE, "DMA map load failed");
3073	        XGE_DRV_STATS(tx_map_fail);
3074		break;
3075	    }
3076
3077	    if(lldev->driver_stats.tx_max_frags < nsegs)
3078	        lldev->driver_stats.tx_max_frags = nsegs;
3079
3080	    /* Set descriptor buffer for header and each fragment/segment */
3081	    count = 0;
3082	    do {
3083	        xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3084	            (dma_addr_t)htole64(segs[count].ds_addr),
3085	            segs[count].ds_len);
3086	        count++;
3087	    } while(count < nsegs);
3088
3089	    /* Pre-write Sync of mapping */
3090	    bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3091	        BUS_DMASYNC_PREWRITE);
3092
3093	    if((lldev->enabled_tso) &&
3094	        (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3095	        XGE_DRV_STATS(tx_tso);
3096	        xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
3097	    }
3098
3099	    /* Checksum */
3100	    if(ifnetp->if_hwassist > 0) {
3101	        xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3102	            | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3103	    }
3104
3105	    /* Post descriptor to FIFO channel */
3106	    xge_hal_fifo_dtr_post(channelh, dtr);
3107	    XGE_DRV_STATS(tx_posted);
3108
3109	    /* Send the same copy of mbuf packet to BPF (Berkely Packet Filter)
3110	     * listener so that we can use tools like tcpdump */
3111	    ETHER_BPF_MTAP(ifnetp, m_head);
3112	}
3113
3114	/* Prepend the packet back to queue */
3115	IF_PREPEND(&ifnetp->if_snd, m_head);
3116	ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3117
3118	xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3119	    XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3120	XGE_DRV_STATS(tx_again);
3121}
3122
3123/**
3124 * xge_get_buf
3125 * Allocates new mbufs to be placed into descriptors
3126 *
3127 * @dtrh Descriptor Handle
3128 * @rxd_priv Rx Descriptor Private Data
3129 * @lldev Per-adapter Data
3130 * @index Buffer Index (if multi-buffer mode)
3131 *
3132 * Returns XGE_HAL_OK or HAL error enums
3133 */
3134int
3135xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3136	xge_lldev_t *lldev, int index)
3137{
3138	register mbuf_t mp            = NULL;
3139	struct          ifnet *ifnetp = lldev->ifnetp;
3140	int             status        = XGE_HAL_OK;
3141	int             buffer_size = 0, cluster_size = 0, count;
3142	bus_dmamap_t    map = rxd_priv->dmainfo[index].dma_map;
3143	bus_dma_segment_t segs[3];
3144
3145	buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3146	    ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3147	    lldev->rxd_mbuf_len[index];
3148
3149	if(buffer_size <= MCLBYTES) {
3150	    cluster_size = MCLBYTES;
3151	    mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3152	}
3153	else {
3154	    cluster_size = MJUMPAGESIZE;
3155	    if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3156	        (buffer_size > MJUMPAGESIZE)) {
3157	        cluster_size = MJUM9BYTES;
3158	    }
3159	    mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
3160	}
3161	if(!mp) {
3162	    xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3163	    status = XGE_HAL_FAIL;
3164	    goto getbuf_out;
3165	}
3166
3167	/* Update mbuf's length, packet length and receive interface */
3168	mp->m_len = mp->m_pkthdr.len = buffer_size;
3169	mp->m_pkthdr.rcvif = ifnetp;
3170
3171	/* Load DMA map */
3172	if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3173	    mp, segs, &count, BUS_DMA_NOWAIT)) {
3174	    XGE_DRV_STATS(rx_map_fail);
3175	    m_freem(mp);
3176	    XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3177	}
3178
3179	/* Update descriptor private data */
3180	rxd_priv->bufferArray[index]         = mp;
3181	rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3182	rxd_priv->dmainfo[index].dma_map     = lldev->extra_dma_map;
3183	lldev->extra_dma_map = map;
3184
3185	/* Pre-Read/Write sync */
3186	bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3187
3188	/* Unload DMA map of mbuf in current descriptor */
3189	bus_dmamap_unload(lldev->dma_tag_rx, map);
3190
3191	/* Set descriptor buffer */
3192	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3193	    xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3194	        cluster_size);
3195	}
3196
3197getbuf_out:
3198	return status;
3199}
3200
3201/**
3202 * xge_get_buf_3b_5b
3203 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3204 *
3205 * @dtrh Descriptor Handle
3206 * @rxd_priv Rx Descriptor Private Data
3207 * @lldev Per-adapter Data
3208 *
3209 * Returns XGE_HAL_OK or HAL error enums
3210 */
3211int
3212xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3213	xge_lldev_t *lldev)
3214{
3215	bus_addr_t  dma_pointers[5];
3216	int         dma_sizes[5];
3217	int         status = XGE_HAL_OK, index;
3218	int         newindex = 0;
3219
3220	for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3221	    status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3222	    if(status != XGE_HAL_OK) {
3223	        for(newindex = 0; newindex < index; newindex++) {
3224	            m_freem(rxd_priv->bufferArray[newindex]);
3225	        }
3226	        XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
3227	    }
3228	}
3229
3230	for(index = 0; index < lldev->buffer_mode; index++) {
3231	    if(lldev->rxd_mbuf_len[index] != 0) {
3232	        dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3233	        dma_sizes[index]    = lldev->rxd_mbuf_len[index];
3234	    }
3235	    else {
3236	        dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3237	        dma_sizes[index]    = 1;
3238	    }
3239	}
3240
3241	/* Assigning second buffer to third pointer in 2 buffer mode */
3242	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3243	    dma_pointers[2] = dma_pointers[1];
3244	    dma_sizes[2]    = dma_sizes[1];
3245	    dma_sizes[1]    = 1;
3246	}
3247
3248	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3249	    xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3250	}
3251	else {
3252	    xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3253	}
3254
3255_exit:
3256	return status;
3257}
3258
3259/**
3260 * xge_tx_compl
3261 * If the interrupt is due to Tx completion, free the sent buffer
3262 *
3263 * @channelh Channel Handle
3264 * @dtr Descriptor
3265 * @t_code Transfer Code indicating success or error
3266 * @userdata Per-adapter Data
3267 *
3268 * Returns XGE_HAL_OK or HAL error enum
3269 */
3270xge_hal_status_e
3271xge_tx_compl(xge_hal_channel_h channelh,
3272	xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3273{
3274	xge_tx_priv_t *ll_tx_priv = NULL;
3275	xge_lldev_t   *lldev  = (xge_lldev_t *)userdata;
3276	struct ifnet  *ifnetp = lldev->ifnetp;
3277	mbuf_t         m_buffer = NULL;
3278	int            qindex   = xge_hal_channel_id(channelh);
3279
3280	mtx_lock(&lldev->mtx_tx[qindex]);
3281
3282	XGE_DRV_STATS(tx_completions);
3283
3284	/*
3285	 * For each completed descriptor: Get private structure, free buffer,
3286	 * do unmapping, and free descriptor
3287	 */
3288	do {
3289	    XGE_DRV_STATS(tx_desc_compl);
3290
3291	    if(t_code) {
3292	        XGE_DRV_STATS(tx_tcode);
3293	        xge_trace(XGE_TRACE, "t_code %d", t_code);
3294	        xge_hal_device_handle_tcode(channelh, dtr, t_code);
3295	    }
3296
3297	    ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3298	    m_buffer   = ll_tx_priv->buffer;
3299	    bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3300	    m_freem(m_buffer);
3301	    ll_tx_priv->buffer = NULL;
3302	    xge_hal_fifo_dtr_free(channelh, dtr);
3303	} while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
3304	    == XGE_HAL_OK);
3305	xge_send_locked(ifnetp, qindex);
3306	ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3307
3308	mtx_unlock(&lldev->mtx_tx[qindex]);
3309
3310	return XGE_HAL_OK;
3311}
3312
3313/**
3314 * xge_tx_initial_replenish
3315 * Initially allocate buffers and set them into descriptors for later use
3316 *
3317 * @channelh Tx Channel Handle
3318 * @dtrh Descriptor Handle
3319 * @index
3320 * @userdata Per-adapter Data
3321 * @reopen Channel open/reopen option
3322 *
3323 * Returns XGE_HAL_OK or HAL error enums
3324 */
3325xge_hal_status_e
3326xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3327	int index, void *userdata, xge_hal_channel_reopen_e reopen)
3328{
3329	xge_tx_priv_t *txd_priv = NULL;
3330	int            status   = XGE_HAL_OK;
3331
3332	/* Get the user data portion from channel handle */
3333	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3334	if(lldev == NULL) {
3335	    XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3336	        XGE_HAL_FAIL);
3337	}
3338
3339	/* Get the private data */
3340	txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3341	if(txd_priv == NULL) {
3342	    XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3343	        XGE_HAL_FAIL);
3344	}
3345
3346	/* Create DMA map for this descriptor */
3347	if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3348	    &txd_priv->dma_map)) {
3349	    XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3350	        txinit_out, XGE_HAL_FAIL);
3351	}
3352
3353txinit_out:
3354	return status;
3355}
3356
3357/**
3358 * xge_rx_initial_replenish
3359 * Initially allocate buffers and set them into descriptors for later use
3360 *
3361 * @channelh Tx Channel Handle
3362 * @dtrh Descriptor Handle
3363 * @index Ring Index
3364 * @userdata Per-adapter Data
3365 * @reopen Channel open/reopen option
3366 *
3367 * Returns XGE_HAL_OK or HAL error enums
3368 */
3369xge_hal_status_e
3370xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3371	int index, void *userdata, xge_hal_channel_reopen_e reopen)
3372{
3373	xge_rx_priv_t  *rxd_priv = NULL;
3374	int             status   = XGE_HAL_OK;
3375	int             index1 = 0, index2 = 0;
3376
3377	/* Get the user data portion from channel handle */
3378	xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3379	if(lldev == NULL) {
3380	    XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3381	        XGE_HAL_FAIL);
3382	}
3383
3384	/* Get the private data */
3385	rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3386	if(rxd_priv == NULL) {
3387	    XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
3388	        XGE_HAL_FAIL);
3389	}
3390
3391	rxd_priv->bufferArray = xge_os_malloc(NULL,
3392	        (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3393
3394	if(rxd_priv->bufferArray == NULL) {
3395	    XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3396	        XGE_HAL_FAIL);
3397	}
3398
3399	if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3400	    /* Create DMA map for these descriptors*/
3401	    if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3402	        &rxd_priv->dmainfo[0].dma_map)) {
3403	        XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3404	            rxinit_err_out, XGE_HAL_FAIL);
3405	    }
3406	    /* Get a buffer, attach it to this descriptor */
3407	    status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
3408	}
3409	else {
3410	    for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3411	        /* Create DMA map for this descriptor */
3412	        if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3413	            &rxd_priv->dmainfo[index1].dma_map)) {
3414	            for(index2 = index1 - 1; index2 >= 0; index2--) {
3415	                bus_dmamap_destroy(lldev->dma_tag_rx,
3416	                    rxd_priv->dmainfo[index2].dma_map);
3417	            }
3418	            XGE_EXIT_ON_ERR(
3419	                "Jumbo DMA map creation for Rx descriptor failed",
3420	                rxinit_err_out, XGE_HAL_FAIL);
3421	        }
3422	    }
3423	    status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3424	}
3425
3426	if(status != XGE_HAL_OK) {
3427	    for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3428	        bus_dmamap_destroy(lldev->dma_tag_rx,
3429	            rxd_priv->dmainfo[index1].dma_map);
3430	    }
3431	    goto rxinit_err_out;
3432	}
3433	else {
3434	    goto rxinit_out;
3435	}
3436
3437rxinit_err_out:
3438	xge_os_free(NULL, rxd_priv->bufferArray,
3439	    (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3440rxinit_out:
3441	return status;
3442}
3443
3444/**
3445 * xge_rx_term
3446 * During unload terminate and free all descriptors
3447 *
3448 * @channelh Rx Channel Handle
3449 * @dtrh Rx Descriptor Handle
3450 * @state Descriptor State
3451 * @userdata Per-adapter Data
3452 * @reopen Channel open/reopen option
3453 */
3454void
3455xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3456	xge_hal_dtr_state_e state, void *userdata,
3457	xge_hal_channel_reopen_e reopen)
3458{
3459	xge_rx_priv_t *rxd_priv = NULL;
3460	xge_lldev_t   *lldev    = NULL;
3461	int            index = 0;
3462
3463	/* Descriptor state is not "Posted" */
3464	if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3465
3466	/* Get the user data portion */
3467	lldev = xge_hal_channel_userdata(channelh);
3468
3469	/* Get the private data */
3470	rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3471
3472	for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3473	    if(rxd_priv->dmainfo[index].dma_map != NULL) {
3474	        bus_dmamap_sync(lldev->dma_tag_rx,
3475	            rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3476	        bus_dmamap_unload(lldev->dma_tag_rx,
3477	            rxd_priv->dmainfo[index].dma_map);
3478	        if(rxd_priv->bufferArray[index] != NULL)
3479	            m_free(rxd_priv->bufferArray[index]);
3480	        bus_dmamap_destroy(lldev->dma_tag_rx,
3481	            rxd_priv->dmainfo[index].dma_map);
3482	    }
3483	}
3484	xge_os_free(NULL, rxd_priv->bufferArray,
3485	    (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3486
3487	/* Free the descriptor */
3488	xge_hal_ring_dtr_free(channelh, dtrh);
3489
3490rxterm_out:
3491	return;
3492}
3493
3494/**
3495 * xge_tx_term
3496 * During unload terminate and free all descriptors
3497 *
3498 * @channelh Rx Channel Handle
3499 * @dtrh Rx Descriptor Handle
3500 * @state Descriptor State
3501 * @userdata Per-adapter Data
3502 * @reopen Channel open/reopen option
3503 */
3504void
3505xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3506	xge_hal_dtr_state_e state, void *userdata,
3507	xge_hal_channel_reopen_e reopen)
3508{
3509	xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3510	xge_lldev_t   *lldev      = (xge_lldev_t *)userdata;
3511
3512	/* Destroy DMA map */
3513	bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3514}
3515
3516/**
3517 * xge_methods
3518 *
3519 * FreeBSD device interface entry points
3520 */
3521static device_method_t xge_methods[] = {
3522	DEVMETHOD(device_probe,     xge_probe),
3523	DEVMETHOD(device_attach,    xge_attach),
3524	DEVMETHOD(device_detach,    xge_detach),
3525	DEVMETHOD(device_shutdown,  xge_shutdown),
3526
3527	DEVMETHOD_END
3528};
3529
3530static driver_t xge_driver = {
3531	"nxge",
3532	xge_methods,
3533	sizeof(xge_lldev_t),
3534};
3535static devclass_t xge_devclass;
3536DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
3537
3538