if_nxge.c (r199554 -> r207554)
/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/nxge/if_nxge.c 199554 2009-11-19 21:47:54Z jhb $
 * $FreeBSD: head/sys/dev/nxge/if_nxge.c 207554 2010-05-03 07:32:50Z sobomax $
 */

#include <dev/nxge/if_nxge.h>
#include <dev/nxge/xge-osdep.h>
#include <net/if_arp.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

int copyright_print = 0;
int hal_driver_init_count = 0;
size_t size = sizeof(int);

static inline void xge_flush_txds(xge_hal_channel_h);

/**
 * xge_probe
 * Probes for Xframe devices
 *
 * @dev Device Handle
 *
 * Returns
 * BUS_PROBE_DEFAULT if device is supported
 * ENXIO if device is not supported
 */
int
xge_probe(device_t dev)
{
    int devid = pci_get_device(dev);
    int vendorid = pci_get_vendor(dev);
    int retValue = ENXIO;

    if(vendorid == XGE_PCI_VENDOR_ID) {
        if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
            (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
            if(!copyright_print) {
                xge_os_printf(XGE_COPYRIGHT);
                copyright_print = 1;
            }
            device_set_desc_copy(dev,
                "Neterion Xframe 10 Gigabit Ethernet Adapter");
            retValue = BUS_PROBE_DEFAULT;
        }
    }

    return retValue;
}

/**
 * xge_init_params
 * Sets HAL parameter values (from kenv).
 *
 * @dconfig Device Configuration
 * @dev     Device Handle
 */
void
xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
{
    int qindex, tindex, revision;
    device_t checkdev;
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

    dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
    dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
    dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
    dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
    dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
    dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;

    XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
        XGE_DEFAULT_ENABLED_TSO);
    XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
        XGE_DEFAULT_ENABLED_LRO);
    XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
        XGE_DEFAULT_ENABLED_MSI);

    XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
        XGE_DEFAULT_LATENCY_TIMER);
    XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
        XGE_DEFAULT_MAX_SPLITS_TRANS);
    XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
        XGE_DEFAULT_MMRB_COUNT);
    XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
        XGE_DEFAULT_SHARED_SPLITS);
    XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
        XGE_DEFAULT_ISR_POLLING_CNT);
    XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
        stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);

    XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
        XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
        XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
        XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
        XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
    XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
        XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
    XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
        mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
    XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
        mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);

    XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
        XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
    XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
        XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
    XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
        XGE_DEFAULT_FIFO_MAX_FRAGS);

    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
        XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
            XGE_DEFAULT_FIFO_QUEUE_INTR);
        XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
            XGE_DEFAULT_FIFO_QUEUE_MAX);
        XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
            qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);

        for(tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
            dconfig->fifo.queue[qindex].tti[tindex].enabled = 1;
            dconfig->fifo.queue[qindex].configured = 1;

            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
                urange_a, qindex, tindex,
                XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
                urange_b, qindex, tindex,
                XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
                urange_c, qindex, tindex,
                XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
                ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
                ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
                ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
            XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
                ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
            XGE_GET_PARAM_FIFO_QUEUE_TTI(
                "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
                tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
            XGE_GET_PARAM_FIFO_QUEUE_TTI(
                "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
                tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
            XGE_GET_PARAM_FIFO_QUEUE_TTI(
                "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
                tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
        }
    }

    XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
        XGE_DEFAULT_RING_MEMBLOCK_SIZE);

    XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
        XGE_DEFAULT_RING_STRIP_VLAN_TAG);

    XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
        XGE_DEFAULT_BUFFER_MODE);
    if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
        (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
        xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
        lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
    }

    for(qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
        dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU;
        dconfig->ring.queue[qindex].priority = 0;
        dconfig->ring.queue[qindex].configured = 1;
        dconfig->ring.queue[qindex].buffer_mode =
            (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
            XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;

        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
            XGE_DEFAULT_RING_QUEUE_MAX);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
            qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
            dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
            indicate_max_pkts, qindex,
            XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
        XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
            backoff_interval_us, qindex,
            XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);

        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
            qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
            timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
            timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
            urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
            urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
        XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
            urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
    }

    if(dconfig->fifo.max_frags > (PAGE_SIZE / 32)) {
        xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
        xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
            (int)(PAGE_SIZE / 32))
        xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
        dconfig->fifo.max_frags = (PAGE_SIZE / 32);
    }

    checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
    if(checkdev != NULL) {
        /* Check whether the AMD 8131 bridge revision is <= 0x12 */
        revision = pci_read_config(checkdev,
            xge_offsetof(xge_hal_pci_config_t, revision), 1);
        if(revision <= 0x12) {
            /* Use a 1k mmrb_count and limit split transactions
             * (XGE_HAL_THREE_SPLIT_TRANSACTION) */
            dconfig->mmrb_count = 1;
            dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
        }
    }
}
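
/*
 * The hw.xge.* names above are read from the kernel environment, so they can
 * be set as loader tunables before the driver attaches. A minimal example in
 * /boot/loader.conf (the values shown are illustrative, not recommendations):
 *
 *   hw.xge.enable_tso="1"
 *   hw.xge.enable_msi="1"
 *   hw.xge.buffer_mode="2"
 */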

/**
 * xge_rx_buffer_sizes_set
 * Set buffer sizes based on Rx buffer mode
 *
 * @lldev       Per-adapter Data
 * @buffer_mode Rx Buffer Mode
 * @mtu         Interface MTU
 */
void
xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
{
    int index = 0;
    int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
    int buffer_size = mtu + frame_header;

    xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));

    if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
        lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;

    lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size : frame_header;

    if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
        lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;

    if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
        index = 2;
        buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
        while(buffer_size > MJUMPAGESIZE) {
            lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
            buffer_size -= MJUMPAGESIZE;
        }
        XGE_ALIGN_TO(buffer_size, 128);
        lldev->rxd_mbuf_len[index] = buffer_size;
        lldev->rxd_mbuf_cnt = index + 1;
    }

    for(index = 0; index < buffer_mode; index++)
        xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
            lldev->rxd_mbuf_len[index]);
}
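
/*
 * Worked example for the mode-5 path above (exact sizes depend on the HAL
 * constants and platform page size): with a jumbo MTU such as 9000 bytes,
 * rxd_mbuf_len[0] holds the MAC header, rxd_mbuf_len[1] the maximum TCP/IP
 * header, the middle buffers are MJUMPAGESIZE (one page) payload chunks, and
 * the final buffer takes the remainder, rounded to a 128-byte boundary.
 */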

/**
 * xge_buffer_mode_init
 * Init Rx buffer mode
 *
 * @lldev Per-adapter Data
 * @mtu   Interface MTU
 */
void
xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
{
    int index = 0, buffer_size = 0;
    xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);

    buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;

    if(lldev->enabled_lro)
        (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
    else
        (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;

    lldev->rxd_mbuf_cnt = lldev->buffer_mode;
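
    /*
     * Buffer mode 2 is realized on top of the HAL's 3-buffer ring layout
     * with scatter mode B; the other supported modes map directly and use
     * scatter mode A.
     */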
    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
        XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
        ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
    }
    else {
        XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
        ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
    }
    xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);

    xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
        ((lldev->enabled_tso) ? "Enabled" : "Disabled"));
    xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
        ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled" : "Disabled");
    xge_os_printf("%s: Rx %d Buffer Mode Enabled",
        device_get_nameunit(lldev->device), lldev->buffer_mode);
}

/**
 * xge_driver_initialize
 * Initializes HAL driver (common for all devices)
 *
 * Returns
 * XGE_HAL_OK if success
 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
 */
int
xge_driver_initialize(void)
{
    xge_hal_uld_cbs_t uld_callbacks;
    xge_hal_driver_config_t driver_config;
    xge_hal_status_e status = XGE_HAL_OK;

    /* Initialize HAL driver */
    if(!hal_driver_init_count) {
        xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
        xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));

        /*
         * Initial and maximum size of the queue used to store the events
         * like Link up/down (xge_hal_event_e)
         */
        driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
        driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX;

        uld_callbacks.link_up = xge_callback_link_up;
        uld_callbacks.link_down = xge_callback_link_down;
        uld_callbacks.crit_err = xge_callback_crit_err;
        uld_callbacks.event = xge_callback_event;

        status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
        if(status != XGE_HAL_OK) {
            XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
                xdi_out, status);
        }
    }
    hal_driver_init_count = hal_driver_init_count + 1;

    xge_hal_driver_debug_module_mask_set(0xffffffff);
    xge_hal_driver_debug_level_set(XGE_TRACE);

xdi_out:
    return status;
}
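
/*
 * hal_driver_init_count reference-counts HAL initialization across adapters:
 * only the first attach initializes the HAL, and xge_resources_free()
 * terminates it again once the last adapter is gone.
 */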

/**
 * xge_media_init
 * Initializes, adds and sets media
 *
 * @devc Device Handle
 */
void
xge_media_init(device_t devc)
{
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);

    /* Initialize Media */
    ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
        xge_ifmedia_status);

    /* Add supported media */
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
    ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL);

    /* Set media */
    ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
}

/**
 * xge_pci_space_save
 * Save PCI configuration space
 *
 * @dev Device Handle
 */
void
xge_pci_space_save(device_t dev)
{
    struct pci_devinfo *dinfo = NULL;

    dinfo = device_get_ivars(dev);
    xge_trace(XGE_TRACE, "Saving PCI configuration space");
    pci_cfg_save(dev, dinfo, 0);
}

/**
 * xge_pci_space_restore
 * Restore saved PCI configuration space
 *
 * @dev Device Handle
 */
void
xge_pci_space_restore(device_t dev)
{
    struct pci_devinfo *dinfo = NULL;

    dinfo = device_get_ivars(dev);
    xge_trace(XGE_TRACE, "Restoring PCI configuration space");
    pci_cfg_restore(dev, dinfo);
}

/**
 * xge_msi_info_save
 * Save MSI info
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_save(xge_lldev_t *lldev)
{
    xge_os_pci_read16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_control),
        &lldev->msi_info.msi_control);
    xge_os_pci_read32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
        &lldev->msi_info.msi_lower_address);
    xge_os_pci_read32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
        &lldev->msi_info.msi_higher_address);
    xge_os_pci_read16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_data),
        &lldev->msi_info.msi_data);
}

/**
 * xge_msi_info_restore
 * Restore saved MSI info
 *
 * @lldev Per-adapter Data
 */
void
xge_msi_info_restore(xge_lldev_t *lldev)
{
    /*
     * If the interface is brought down and back up, traffic fails: the MSI
     * information was observed to get reset on down, so restore it here.
     */
    xge_os_pci_write16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_control),
        lldev->msi_info.msi_control);

    xge_os_pci_write32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
        lldev->msi_info.msi_lower_address);

    xge_os_pci_write32(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
        lldev->msi_info.msi_higher_address);

    xge_os_pci_write16(lldev->pdev, NULL,
        xge_offsetof(xge_hal_pci_config_le_t, msi_data),
        lldev->msi_info.msi_data);
}

/**
 * xge_mutex_init
 * Initializes mutexes used in driver
 *
 * @lldev Per-adapter Data
 */
void
xge_mutex_init(xge_lldev_t *lldev)
{
    int qindex;

    sprintf(lldev->mtx_name_drv, "%s_drv",
        device_get_nameunit(lldev->device));
    mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
        MTX_DEF);

    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
        sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
            device_get_nameunit(lldev->device), qindex);
        mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
            MTX_DEF);
    }
}

/**
 * xge_mutex_destroy
 * Destroys mutexes used in driver
 *
 * @lldev Per-adapter Data
 */
void
xge_mutex_destroy(xge_lldev_t *lldev)
{
    int qindex;

    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
        mtx_destroy(&lldev->mtx_tx[qindex]);
    mtx_destroy(&lldev->mtx_drv);
}

/**
 * xge_print_info
 * Print device and driver information
 *
 * @lldev Per-adapter Data
 */
void
xge_print_info(xge_lldev_t *lldev)
{
    device_t dev = lldev->device;
    xge_hal_device_t *hldev = lldev->devh;
    xge_hal_status_e status = XGE_HAL_OK;
    u64 val64 = 0;
    const char *xge_pci_bus_speeds[17] = {
        "PCI 33MHz Bus",
        "PCI 66MHz Bus",
        "PCIX(M1) 66MHz Bus",
        "PCIX(M1) 100MHz Bus",
        "PCIX(M1) 133MHz Bus",
        "PCIX(M2) 133MHz Bus",
        "PCIX(M2) 200MHz Bus",
        "PCIX(M2) 266MHz Bus",
        "PCIX(M1) Reserved",
        "PCIX(M1) 66MHz Bus (Not Supported)",
        "PCIX(M1) 100MHz Bus (Not Supported)",
        "PCIX(M1) 133MHz Bus (Not Supported)",
        "PCIX(M2) Reserved",
        "PCIX 533 Reserved",
        "PCI Basic Mode",
        "PCIX Basic Mode",
        "PCI Invalid Mode"
    };

    xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
        device_get_nameunit(dev),
        ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
        hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
    xge_os_printf("%s: Serial Number %s",
        device_get_nameunit(dev), hldev->vpd_data.serial_num);

    if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
        status = xge_hal_mgmt_reg_read(hldev, 0,
            xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
        if(status != XGE_HAL_OK)
            xge_trace(XGE_ERR, "Error getting bus speed");

        xge_os_printf("%s: Adapter is on %s bit %s",
            device_get_nameunit(dev), ((val64 & BIT(8)) ? "32" : "64"),
            (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
    }

    xge_os_printf("%s: Using %s Interrupts",
        device_get_nameunit(dev),
        (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI" : "Line");
}

/**
 * xge_create_dma_tags
 * Creates DMA tags for both Tx and Rx
 *
 * @dev Device Handle
 *
 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
 */
xge_hal_status_e
xge_create_dma_tags(device_t dev)
{
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
    xge_hal_status_e status = XGE_HAL_FAIL;
    int mtu = (lldev->ifnetp)->if_mtu, maxsize;

    /* DMA tag for Tx */
    status = bus_dma_tag_create(
        bus_get_dma_tag(dev),    /* Parent                    */
        PAGE_SIZE,               /* Alignment                 */
        0,                       /* Bounds                    */
        BUS_SPACE_MAXADDR,       /* Low Address               */
        BUS_SPACE_MAXADDR,       /* High Address              */
        NULL,                    /* Filter Function           */
        NULL,                    /* Filter Function Arguments */
        MCLBYTES * XGE_MAX_SEGS, /* Maximum Size              */
        XGE_MAX_SEGS,            /* Number of Segments        */
        MCLBYTES,                /* Maximum Segment Size      */
        BUS_DMA_ALLOCNOW,        /* Flags                     */
        NULL,                    /* Lock Function             */
        NULL,                    /* Lock Function Arguments   */
        (&lldev->dma_tag_tx));   /* DMA Tag                   */
    if(status != 0)
        goto _exit;

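    /*
     * Pick the smallest standard mbuf cluster that holds a full frame:
     * MCLBYTES (2 KB), MJUMPAGESIZE (one page), or MJUM9BYTES (9 KB). In
     * 5-buffer mode the frame is scattered across buffers, so one page per
     * buffer suffices.
     */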
    maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
    if(maxsize <= MCLBYTES) {
        maxsize = MCLBYTES;
    }
    else {
        if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
            maxsize = MJUMPAGESIZE;
        else
            maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
    }

    /* DMA tag for Rx */
    status = bus_dma_tag_create(
        bus_get_dma_tag(dev),    /* Parent                    */
        PAGE_SIZE,               /* Alignment                 */
        0,                       /* Bounds                    */
        BUS_SPACE_MAXADDR,       /* Low Address               */
        BUS_SPACE_MAXADDR,       /* High Address              */
        NULL,                    /* Filter Function           */
        NULL,                    /* Filter Function Arguments */
        maxsize,                 /* Maximum Size              */
        1,                       /* Number of Segments        */
        maxsize,                 /* Maximum Segment Size      */
        BUS_DMA_ALLOCNOW,        /* Flags                     */
        NULL,                    /* Lock Function             */
        NULL,                    /* Lock Function Arguments   */
        (&lldev->dma_tag_rx));   /* DMA Tag                   */
    if(status != 0)
        goto _exit1;

    status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
        &lldev->extra_dma_map);
    if(status != 0)
        goto _exit2;

    status = XGE_HAL_OK;
    goto _exit;

_exit2:
    status = bus_dma_tag_destroy(lldev->dma_tag_rx);
    if(status != 0)
        xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
_exit1:
    status = bus_dma_tag_destroy(lldev->dma_tag_tx);
    if(status != 0)
        xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
    status = XGE_HAL_FAIL;
_exit:
    return status;
}

/**
 * xge_confirm_changes
 * Disables and enables the interface to apply a requested change
 *
 * @lldev  Per-adapter Data
 * @option Which setting changed (XGE_SET_MTU for an MTU change)
 */
void
xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
{
    if(lldev->initialized == 0) goto _exit1;

    mtx_lock(&lldev->mtx_drv);
    if_down(lldev->ifnetp);
    xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);

    if(option == XGE_SET_MTU)
        (lldev->ifnetp)->if_mtu = lldev->mtu;
    else
        xge_buffer_mode_init(lldev, lldev->mtu);

    xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
    if_up(lldev->ifnetp);
    mtx_unlock(&lldev->mtx_drv);
    goto _exit;

_exit1:
    /* Request was to change MTU and device not initialized */
    if(option == XGE_SET_MTU) {
        (lldev->ifnetp)->if_mtu = lldev->mtu;
        xge_buffer_mode_init(lldev, lldev->mtu);
    }
_exit:
    return;
}

/**
 * xge_change_lro_status
 * Enable/Disable LRO feature
 *
 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
 *
 * Returns 0 or error number.
 */
static int
xge_change_lro_status(SYSCTL_HANDLER_ARGS)
{
    xge_lldev_t *lldev = (xge_lldev_t *)arg1;
    int request = lldev->enabled_lro, status = XGE_HAL_OK;

    status = sysctl_handle_int(oidp, &request, arg2, req);
    if((status != XGE_HAL_OK) || (!req->newptr))
        goto _exit;

    if((request < 0) || (request > 1)) {
        status = EINVAL;
        goto _exit;
    }

    /* Return if current and requested states are same */
    if(request == lldev->enabled_lro) {
        xge_trace(XGE_ERR, "LRO is already %s",
            ((request) ? "enabled" : "disabled"));
        goto _exit;
    }

    lldev->enabled_lro = request;
    xge_confirm_changes(lldev, XGE_CHANGE_LRO);
    arg2 = lldev->enabled_lro;

_exit:
    return status;
}

/**
 * xge_add_sysctl_handlers
 * Registers sysctl parameter value update handlers
 *
 * @lldev Per-adapter data
 */
void
xge_add_sysctl_handlers(xge_lldev_t *lldev)
{
    struct sysctl_ctx_list *context_list =
        device_get_sysctl_ctx(lldev->device);
    struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);

    SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
        "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
        xge_change_lro_status, "I", "Enable or disable LRO feature");
}
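
/*
 * Because the OID is attached to the device's own sysctl tree, the handler
 * above can be exercised at runtime with something like the following
 * (assuming the adapter attached as nxge0):
 *
 *   sysctl dev.nxge.0.enable_lro=1
 */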

/**
 * xge_attach
 * Connects driver to the system if probe succeeded
 *
 * @dev Device Handle
 */
int
xge_attach(device_t dev)
{
    xge_hal_device_config_t *device_config;
    xge_hal_device_attr_t attr;
    xge_lldev_t *lldev;
    xge_hal_device_t *hldev;
    xge_pci_info_t *pci_info;
    struct ifnet *ifnetp;
    int rid, rid0, rid1, error;
    int msi_count = 0, status = XGE_HAL_OK;
    int enable_msi = XGE_HAL_INTR_MODE_IRQLINE;

    device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
    if(!device_config) {
        XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
            attach_out_config, ENOMEM);
    }

    lldev = (xge_lldev_t *) device_get_softc(dev);
    if(!lldev) {
        XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
    }
    lldev->device = dev;

    xge_mutex_init(lldev);

    error = xge_driver_initialize();
    if(error != XGE_HAL_OK) {
        xge_resources_free(dev, xge_free_mutex);
        XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
    }

    /* HAL device */
    hldev =
        (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
    if(!hldev) {
        xge_resources_free(dev, xge_free_terminate_hal_driver);
        XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
            attach_out, ENOMEM);
    }
    lldev->devh = hldev;

    /* Our private structure */
    pci_info =
        (xge_pci_info_t *)xge_os_malloc(NULL, sizeof(xge_pci_info_t));
    if(!pci_info) {
        xge_resources_free(dev, xge_free_hal_device);
        XGE_EXIT_ON_ERR("Memory allocation for PCI info failed",
            attach_out, ENOMEM);
    }
    lldev->pdev = pci_info;
    pci_info->device = dev;

    /* Set bus master */
    pci_enable_busmaster(dev);

    /* Get virtual address for BAR0 */
    rid0 = PCIR_BAR(0);
    pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
        RF_ACTIVE);
    if(pci_info->regmap0 == NULL) {
        xge_resources_free(dev, xge_free_pci_info);
        XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
            attach_out, ENOMEM);
    }
    attr.bar0 = (char *)pci_info->regmap0;

    pci_info->bar0resource = (xge_bus_resource_t *)
        xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
    if(pci_info->bar0resource == NULL) {
        xge_resources_free(dev, xge_free_bar0);
        XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
            attach_out, ENOMEM);
    }
    ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
        rman_get_bustag(pci_info->regmap0);
    ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
        rman_get_bushandle(pci_info->regmap0);
    ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
        pci_info->regmap0;

    /* Get virtual address for BAR1 */
    rid1 = PCIR_BAR(2);
    pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
        RF_ACTIVE);
    if(pci_info->regmap1 == NULL) {
        xge_resources_free(dev, xge_free_bar0_resource);
        XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
            attach_out, ENOMEM);
    }
    attr.bar1 = (char *)pci_info->regmap1;

    pci_info->bar1resource = (xge_bus_resource_t *)
        xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
    if(pci_info->bar1resource == NULL) {
        xge_resources_free(dev, xge_free_bar1);
        XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
            attach_out, ENOMEM);
    }
    ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
        rman_get_bustag(pci_info->regmap1);
    ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
        rman_get_bushandle(pci_info->regmap1);
    ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
        pci_info->regmap1;

    /* Save PCI config space */
    xge_pci_space_save(dev);

    attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
    attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
    attr.irqh = lldev->irqhandle;
    attr.cfgh = pci_info;
    attr.pdev = pci_info;

    /* Initialize device configuration parameters */
    xge_init_params(device_config, dev);

    rid = 0;
    if(lldev->enabled_msi) {
        /* Number of MSI messages supported by device */
        msi_count = pci_msi_count(dev);
        if(msi_count > 1) {
            /* Device supports MSI */
            if(bootverbose) {
                xge_trace(XGE_ERR, "MSI count: %d", msi_count);
                xge_trace(XGE_ERR, "Driver currently supports only 1 message");
            }
            msi_count = 1;
            error = pci_alloc_msi(dev, &msi_count);
            if(error == 0) {
                if(bootverbose)
                    xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
                enable_msi = XGE_HAL_INTR_MODE_MSI;
                rid = 1;
            }
            else {
                if(bootverbose)
                    xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
            }
        }
    }
    lldev->enabled_msi = enable_msi;
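
    /*
     * rid selects the interrupt resource: rid 0 is the legacy INTx line,
     * rid 1 is the first (and only) allocated MSI message.
     */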
    /* Allocate resource for irq */
    lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        (RF_SHAREABLE | RF_ACTIVE));
    if(lldev->irq == NULL) {
        xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
            ((rid == 0) ? "line interrupt" : "MSI"));
        if(rid == 1) {
            error = pci_release_msi(dev);
            if(error != 0) {
                xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
                    error);
                xge_trace(XGE_ERR, "Requires reboot to use MSI again");
            }
            xge_trace(XGE_ERR, "Trying line interrupts");
            rid = 0;
            lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
            lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                (RF_SHAREABLE | RF_ACTIVE));
        }
        if(lldev->irq == NULL) {
            xge_trace(XGE_ERR, "Allocating irq resource failed");
            xge_resources_free(dev, xge_free_bar1_resource);
            status = ENOMEM;
            goto attach_out;
        }
    }

    device_config->intr_mode = lldev->enabled_msi;
    if(bootverbose) {
        xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
            lldev->enabled_msi, msi_count);
    }

    /* Initialize HAL device */
    error = xge_hal_device_initialize(hldev, &attr, device_config);
    if(error != XGE_HAL_OK) {
        xge_resources_free(dev, xge_free_irq_resource);
        XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
            ENXIO);
    }

    xge_hal_device_private_set(hldev, lldev);

    error = xge_interface_setup(dev);
    if(error != 0) {
        status = error;
        goto attach_out;
    }

    ifnetp = lldev->ifnetp;
    ifnetp->if_mtu = device_config->mtu;

    xge_media_init(dev);

    /* Associate interrupt handler with the device */
    if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
        error = bus_setup_intr(dev, lldev->irq,
            (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
            NULL,
#endif
            xge_isr_msi, lldev, &lldev->irqhandle);
        xge_msi_info_save(lldev);
    }
    else {
        error = bus_setup_intr(dev, lldev->irq,
            (INTR_TYPE_NET | INTR_MPSAFE),
#if __FreeBSD_version > 700030
            xge_isr_filter,
#endif
            xge_isr_line, lldev, &lldev->irqhandle);
    }
    if(error != 0) {
        xge_resources_free(dev, xge_free_media_interface);
        XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
            attach_out, ENXIO);
    }

    xge_print_info(lldev);

    xge_add_sysctl_handlers(lldev);

    xge_buffer_mode_init(lldev, device_config->mtu);

attach_out:
    xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
attach_out_config:
    return status;
}

/**
 * xge_resources_free
 * Undo everything done during load/attach, up to the point of failure
 *
 * @dev   Device Handle
 * @error Identifies how far to unwind
 */
void
xge_resources_free(device_t dev, xge_lables_e error)
{
    xge_lldev_t *lldev;
    xge_pci_info_t *pci_info;
    xge_hal_device_t *hldev;
    int rid, status;

    /* LL Device */
    lldev = (xge_lldev_t *) device_get_softc(dev);
    pci_info = lldev->pdev;

    /* HAL Device */
    hldev = lldev->devh;

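    /*
     * The cases below intentionally fall through: entering at the label
     * matching the failure point releases that resource and every one
     * acquired before it, in reverse order of acquisition.
     */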
    switch(error) {
        case xge_free_all:
            /* Teardown interrupt handler - device association */
            bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);

        case xge_free_media_interface:
            /* Media */
            ifmedia_removeall(&lldev->media);

            /* Detach Ether */
            ether_ifdetach(lldev->ifnetp);
            if_free(lldev->ifnetp);

            xge_hal_device_private_set(hldev, NULL);
            xge_hal_device_disable(hldev);

        case xge_free_terminate_hal_device:
            /* HAL Device */
            xge_hal_device_terminate(hldev);

        case xge_free_irq_resource:
            /* Release IRQ resource */
            bus_release_resource(dev, SYS_RES_IRQ,
                ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0 : 1),
                lldev->irq);

            if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
                status = pci_release_msi(dev);
                if(status != 0) {
                    if(bootverbose) {
                        xge_trace(XGE_ERR,
                            "pci_release_msi returned %d", status);
                    }
                }
            }

        case xge_free_bar1_resource:
            /* Restore PCI configuration space */
            xge_pci_space_restore(dev);

            /* Free bar1resource */
            xge_os_free(NULL, pci_info->bar1resource,
                sizeof(xge_bus_resource_t));

        case xge_free_bar1:
            /* Release BAR1 */
            rid = PCIR_BAR(2);
            bus_release_resource(dev, SYS_RES_MEMORY, rid,
                pci_info->regmap1);

        case xge_free_bar0_resource:
            /* Free bar0resource */
            xge_os_free(NULL, pci_info->bar0resource,
                sizeof(xge_bus_resource_t));

        case xge_free_bar0:
            /* Release BAR0 */
            rid = PCIR_BAR(0);
            bus_release_resource(dev, SYS_RES_MEMORY, rid,
                pci_info->regmap0);

        case xge_free_pci_info:
            /* Disable Bus Master */
            pci_disable_busmaster(dev);

            /* Free pci_info_t */
            lldev->pdev = NULL;
            xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));

        case xge_free_hal_device:
            /* Free device configuration struct and HAL device */
            xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));

        case xge_free_terminate_hal_driver:
            /* Terminate HAL driver */
            hal_driver_init_count = hal_driver_init_count - 1;
            if(!hal_driver_init_count) {
                xge_hal_driver_terminate();
            }

        case xge_free_mutex:
            xge_mutex_destroy(lldev);
    }
}

/**
 * xge_detach
 * Detaches driver from the kernel subsystem
 *
 * @dev Device Handle
 */
int
xge_detach(device_t dev)
{
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);

    if(lldev->in_detach == 0) {
        lldev->in_detach = 1;
        xge_stop(lldev);
        xge_resources_free(dev, xge_free_all);
    }

    return 0;
}

/**
 * xge_shutdown
 * Shuts down the device before system shutdown
 *
 * @dev Device Handle
 */
int
xge_shutdown(device_t dev)
{
    xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
    xge_stop(lldev);

    return 0;
}

/**
 * xge_interface_setup
 * Setup interface
 *
 * @dev Device Handle
 *
 * Returns 0 on success, ENXIO/ENOMEM on failure
 */
int
xge_interface_setup(device_t dev)
{
    u8 mcaddr[ETHER_ADDR_LEN];
    xge_hal_status_e status;
    xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
    struct ifnet *ifnetp;
    xge_hal_device_t *hldev = lldev->devh;

    /* Get the MAC address of the device */
    status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
    if(status != XGE_HAL_OK) {
        xge_resources_free(dev, xge_free_terminate_hal_device);
        XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
    }

    /* Get interface ifnet structure for this Ether device */
    ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
    if(ifnetp == NULL) {
        xge_resources_free(dev, xge_free_terminate_hal_device);
        XGE_EXIT_ON_ERR("Allocating ifnet failed", ifsetup_out, ENOMEM);
    }

    /* Initialize interface ifnet structure */
    if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
    ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
    ifnetp->if_baudrate = XGE_BAUDRATE;
    ifnetp->if_init = xge_init;
    ifnetp->if_softc = lldev;
    ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifnetp->if_ioctl = xge_ioctl;
    ifnetp->if_start = xge_send;

    /* TODO: Check and assign optimal value */
27 */
28
29#include <dev/nxge/if_nxge.h>
30#include <dev/nxge/xge-osdep.h>
31#include <net/if_arp.h>
32#include <sys/types.h>
33#include <net/if.h>
34#include <net/if_vlan_var.h>
35
36int copyright_print = 0;
37int hal_driver_init_count = 0;
38size_t size = sizeof(int);
39
40static void inline xge_flush_txds(xge_hal_channel_h);
41
42/**
43 * xge_probe
44 * Probes for Xframe devices
45 *
46 * @dev Device handle
47 *
48 * Returns
49 * BUS_PROBE_DEFAULT if device is supported
50 * ENXIO if device is not supported
51 */
52int
53xge_probe(device_t dev)
54{
55 int devid = pci_get_device(dev);
56 int vendorid = pci_get_vendor(dev);
57 int retValue = ENXIO;
58
59 if(vendorid == XGE_PCI_VENDOR_ID) {
60 if((devid == XGE_PCI_DEVICE_ID_XENA_2) ||
61 (devid == XGE_PCI_DEVICE_ID_HERC_2)) {
62 if(!copyright_print) {
63 xge_os_printf(XGE_COPYRIGHT);
64 copyright_print = 1;
65 }
66 device_set_desc_copy(dev,
67 "Neterion Xframe 10 Gigabit Ethernet Adapter");
68 retValue = BUS_PROBE_DEFAULT;
69 }
70 }
71
72 return retValue;
73}
74
75/**
76 * xge_init_params
77 * Sets HAL parameter values (from kenv).
78 *
79 * @dconfig Device Configuration
80 * @dev Device Handle
81 */
82void
83xge_init_params(xge_hal_device_config_t *dconfig, device_t dev)
84{
85 int qindex, tindex, revision;
86 device_t checkdev;
87 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
88
89 dconfig->mtu = XGE_DEFAULT_INITIAL_MTU;
90 dconfig->pci_freq_mherz = XGE_DEFAULT_USER_HARDCODED;
91 dconfig->device_poll_millis = XGE_HAL_DEFAULT_DEVICE_POLL_MILLIS;
92 dconfig->link_stability_period = XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD;
93 dconfig->mac.rmac_bcast_en = XGE_DEFAULT_MAC_RMAC_BCAST_EN;
94 dconfig->fifo.alignment_size = XGE_DEFAULT_FIFO_ALIGNMENT_SIZE;
95
96 XGE_GET_PARAM("hw.xge.enable_tso", (*lldev), enabled_tso,
97 XGE_DEFAULT_ENABLED_TSO);
98 XGE_GET_PARAM("hw.xge.enable_lro", (*lldev), enabled_lro,
99 XGE_DEFAULT_ENABLED_LRO);
100 XGE_GET_PARAM("hw.xge.enable_msi", (*lldev), enabled_msi,
101 XGE_DEFAULT_ENABLED_MSI);
102
103 XGE_GET_PARAM("hw.xge.latency_timer", (*dconfig), latency_timer,
104 XGE_DEFAULT_LATENCY_TIMER);
105 XGE_GET_PARAM("hw.xge.max_splits_trans", (*dconfig), max_splits_trans,
106 XGE_DEFAULT_MAX_SPLITS_TRANS);
107 XGE_GET_PARAM("hw.xge.mmrb_count", (*dconfig), mmrb_count,
108 XGE_DEFAULT_MMRB_COUNT);
109 XGE_GET_PARAM("hw.xge.shared_splits", (*dconfig), shared_splits,
110 XGE_DEFAULT_SHARED_SPLITS);
111 XGE_GET_PARAM("hw.xge.isr_polling_cnt", (*dconfig), isr_polling_cnt,
112 XGE_DEFAULT_ISR_POLLING_CNT);
113 XGE_GET_PARAM("hw.xge.stats_refresh_time_sec", (*dconfig),
114 stats_refresh_time_sec, XGE_DEFAULT_STATS_REFRESH_TIME_SEC);
115
116 XGE_GET_PARAM_MAC("hw.xge.mac_tmac_util_period", tmac_util_period,
117 XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD);
118 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_util_period", rmac_util_period,
119 XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD);
120 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_gen_en", rmac_pause_gen_en,
121 XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN);
122 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_rcv_en", rmac_pause_rcv_en,
123 XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN);
124 XGE_GET_PARAM_MAC("hw.xge.mac_rmac_pause_time", rmac_pause_time,
125 XGE_DEFAULT_MAC_RMAC_PAUSE_TIME);
126 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q0q3",
127 mc_pause_threshold_q0q3, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3);
128 XGE_GET_PARAM_MAC("hw.xge.mac_mc_pause_threshold_q4q7",
129 mc_pause_threshold_q4q7, XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7);
130
131 XGE_GET_PARAM_FIFO("hw.xge.fifo_memblock_size", memblock_size,
132 XGE_DEFAULT_FIFO_MEMBLOCK_SIZE);
133 XGE_GET_PARAM_FIFO("hw.xge.fifo_reserve_threshold", reserve_threshold,
134 XGE_DEFAULT_FIFO_RESERVE_THRESHOLD);
135 XGE_GET_PARAM_FIFO("hw.xge.fifo_max_frags", max_frags,
136 XGE_DEFAULT_FIFO_MAX_FRAGS);
137
138 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
139 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_intr", intr, qindex,
140 XGE_DEFAULT_FIFO_QUEUE_INTR);
141 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_max", max, qindex,
142 XGE_DEFAULT_FIFO_QUEUE_MAX);
143 XGE_GET_PARAM_FIFO_QUEUE("hw.xge.fifo_queue_initial", initial,
144 qindex, XGE_DEFAULT_FIFO_QUEUE_INITIAL);
145
146 for (tindex = 0; tindex < XGE_HAL_MAX_FIFO_TTI_NUM; tindex++) {
147 dconfig->fifo.queue[qindex].tti[tindex].enabled = 1;
148 dconfig->fifo.queue[qindex].configured = 1;
149
150 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_a",
151 urange_a, qindex, tindex,
152 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A);
153 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_b",
154 urange_b, qindex, tindex,
155 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B);
156 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_urange_c",
157 urange_c, qindex, tindex,
158 XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C);
159 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_a",
160 ufc_a, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A);
161 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_b",
162 ufc_b, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B);
163 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_c",
164 ufc_c, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C);
165 XGE_GET_PARAM_FIFO_QUEUE_TTI("hw.xge.fifo_queue_tti_ufc_d",
166 ufc_d, qindex, tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D);
167 XGE_GET_PARAM_FIFO_QUEUE_TTI(
168 "hw.xge.fifo_queue_tti_timer_ci_en", timer_ci_en, qindex,
169 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN);
170 XGE_GET_PARAM_FIFO_QUEUE_TTI(
171 "hw.xge.fifo_queue_tti_timer_ac_en", timer_ac_en, qindex,
172 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN);
173 XGE_GET_PARAM_FIFO_QUEUE_TTI(
174 "hw.xge.fifo_queue_tti_timer_val_us", timer_val_us, qindex,
175 tindex, XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US);
176 }
177 }
178
179 XGE_GET_PARAM_RING("hw.xge.ring_memblock_size", memblock_size,
180 XGE_DEFAULT_RING_MEMBLOCK_SIZE);
181
182 XGE_GET_PARAM_RING("hw.xge.ring_strip_vlan_tag", strip_vlan_tag,
183 XGE_DEFAULT_RING_STRIP_VLAN_TAG);
184
185 XGE_GET_PARAM("hw.xge.buffer_mode", (*lldev), buffer_mode,
186 XGE_DEFAULT_BUFFER_MODE);
187 if((lldev->buffer_mode < XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ||
188 (lldev->buffer_mode > XGE_HAL_RING_QUEUE_BUFFER_MODE_2)) {
189 xge_trace(XGE_ERR, "Supported buffer modes are 1 and 2");
190 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_1;
191 }
192
193 for (qindex = 0; qindex < XGE_RING_COUNT; qindex++) {
194 dconfig->ring.queue[qindex].max_frm_len = XGE_HAL_RING_USE_MTU;
195 dconfig->ring.queue[qindex].priority = 0;
196 dconfig->ring.queue[qindex].configured = 1;
197 dconfig->ring.queue[qindex].buffer_mode =
198 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) ?
199 XGE_HAL_RING_QUEUE_BUFFER_MODE_3 : lldev->buffer_mode;
200
201 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_max", max, qindex,
202 XGE_DEFAULT_RING_QUEUE_MAX);
203 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_initial", initial,
204 qindex, XGE_DEFAULT_RING_QUEUE_INITIAL);
205 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_dram_size_mb",
206 dram_size_mb, qindex, XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB);
207 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_indicate_max_pkts",
208 indicate_max_pkts, qindex,
209 XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS);
210 XGE_GET_PARAM_RING_QUEUE("hw.xge.ring_queue_backoff_interval_us",
211 backoff_interval_us, qindex,
212 XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US);
213
214 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_a", ufc_a,
215 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_A);
216 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_b", ufc_b,
217 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_B);
218 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_c", ufc_c,
219 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_C);
220 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_ufc_d", ufc_d,
221 qindex, XGE_DEFAULT_RING_QUEUE_RTI_UFC_D);
222 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_ac_en",
223 timer_ac_en, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN);
224 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_timer_val_us",
225 timer_val_us, qindex, XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US);
226 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_a",
227 urange_a, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A);
228 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_b",
229 urange_b, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B);
230 XGE_GET_PARAM_RING_QUEUE_RTI("hw.xge.ring_queue_rti_urange_c",
231 urange_c, qindex, XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C);
232 }
233
234 if(dconfig->fifo.max_frags > (PAGE_SIZE/32)) {
235 xge_os_printf("fifo_max_frags = %d", dconfig->fifo.max_frags)
236 xge_os_printf("fifo_max_frags should be <= (PAGE_SIZE / 32) = %d",
237 (int)(PAGE_SIZE / 32))
238 xge_os_printf("Using fifo_max_frags = %d", (int)(PAGE_SIZE / 32))
239 dconfig->fifo.max_frags = (PAGE_SIZE / 32);
240 }
241
242 checkdev = pci_find_device(VENDOR_ID_AMD, DEVICE_ID_8131_PCI_BRIDGE);
243 if(checkdev != NULL) {
244 /* Check Revision for 0x12 */
245 revision = pci_read_config(checkdev,
246 xge_offsetof(xge_hal_pci_config_t, revision), 1);
247 if(revision <= 0x12) {
248 /* Set mmrb_count to 1k and max splits = 2 */
249 dconfig->mmrb_count = 1;
250 dconfig->max_splits_trans = XGE_HAL_THREE_SPLIT_TRANSACTION;
251 }
252 }
253}
254
255/**
256 * xge_buffer_sizes_set
257 * Set buffer sizes based on Rx buffer mode
258 *
259 * @lldev Per-adapter Data
260 * @buffer_mode Rx Buffer Mode
261 */
262void
263xge_rx_buffer_sizes_set(xge_lldev_t *lldev, int buffer_mode, int mtu)
264{
265 int index = 0;
266 int frame_header = XGE_HAL_MAC_HEADER_MAX_SIZE;
267 int buffer_size = mtu + frame_header;
268
269 xge_os_memzero(lldev->rxd_mbuf_len, sizeof(lldev->rxd_mbuf_len));
270
271 if(buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
272 lldev->rxd_mbuf_len[buffer_mode - 1] = mtu;
273
274 lldev->rxd_mbuf_len[0] = (buffer_mode == 1) ? buffer_size:frame_header;
275
276 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
277 lldev->rxd_mbuf_len[1] = XGE_HAL_TCPIP_HEADER_MAX_SIZE;
278
279 if(buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
280 index = 2;
281 buffer_size -= XGE_HAL_TCPIP_HEADER_MAX_SIZE;
282 while(buffer_size > MJUMPAGESIZE) {
283 lldev->rxd_mbuf_len[index++] = MJUMPAGESIZE;
284 buffer_size -= MJUMPAGESIZE;
285 }
286 XGE_ALIGN_TO(buffer_size, 128);
287 lldev->rxd_mbuf_len[index] = buffer_size;
288 lldev->rxd_mbuf_cnt = index + 1;
289 }
290
291 for(index = 0; index < buffer_mode; index++)
292 xge_trace(XGE_TRACE, "Buffer[%d] %d\n", index,
293 lldev->rxd_mbuf_len[index]);
294}
295
296/**
297 * xge_buffer_mode_init
298 * Init Rx buffer mode
299 *
300 * @lldev Per-adapter Data
301 * @mtu Interface MTU
302 */
303void
304xge_buffer_mode_init(xge_lldev_t *lldev, int mtu)
305{
306 int index = 0, buffer_size = 0;
307 xge_hal_ring_config_t *ring_config = &((lldev->devh)->config.ring);
308
309 buffer_size = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
310
311 if(lldev->enabled_lro)
312 (lldev->ifnetp)->if_capenable |= IFCAP_LRO;
313 else
314 (lldev->ifnetp)->if_capenable &= ~IFCAP_LRO;
315
316 lldev->rxd_mbuf_cnt = lldev->buffer_mode;
317 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
318 XGE_SET_BUFFER_MODE_IN_RINGS(XGE_HAL_RING_QUEUE_BUFFER_MODE_3);
319 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_B;
320 }
321 else {
322 XGE_SET_BUFFER_MODE_IN_RINGS(lldev->buffer_mode);
323 ring_config->scatter_mode = XGE_HAL_RING_QUEUE_SCATTER_MODE_A;
324 }
325 xge_rx_buffer_sizes_set(lldev, lldev->buffer_mode, mtu);
326
327 xge_os_printf("%s: TSO %s", device_get_nameunit(lldev->device),
328 ((lldev->enabled_tso) ? "Enabled":"Disabled"));
329 xge_os_printf("%s: LRO %s", device_get_nameunit(lldev->device),
330 ((lldev->ifnetp)->if_capenable & IFCAP_LRO) ? "Enabled":"Disabled");
331 xge_os_printf("%s: Rx %d Buffer Mode Enabled",
332 device_get_nameunit(lldev->device), lldev->buffer_mode);
333}
334
335/**
336 * xge_driver_initialize
337 * Initializes HAL driver (common for all devices)
338 *
339 * Returns
340 * XGE_HAL_OK if success
341 * XGE_HAL_ERR_BAD_DRIVER_CONFIG if driver configuration parameters are invalid
342 */
343int
344xge_driver_initialize(void)
345{
346 xge_hal_uld_cbs_t uld_callbacks;
347 xge_hal_driver_config_t driver_config;
348 xge_hal_status_e status = XGE_HAL_OK;
349
350 /* Initialize HAL driver */
351 if(!hal_driver_init_count) {
352 xge_os_memzero(&uld_callbacks, sizeof(xge_hal_uld_cbs_t));
353 xge_os_memzero(&driver_config, sizeof(xge_hal_driver_config_t));
354
355 /*
356 * Initial and maximum size of the queue used to store the events
357 * like Link up/down (xge_hal_event_e)
358 */
359 driver_config.queue_size_initial = XGE_HAL_MIN_QUEUE_SIZE_INITIAL;
360 driver_config.queue_size_max = XGE_HAL_MAX_QUEUE_SIZE_MAX;
361
362 uld_callbacks.link_up = xge_callback_link_up;
363 uld_callbacks.link_down = xge_callback_link_down;
364 uld_callbacks.crit_err = xge_callback_crit_err;
365 uld_callbacks.event = xge_callback_event;
366
367 status = xge_hal_driver_initialize(&driver_config, &uld_callbacks);
368 if(status != XGE_HAL_OK) {
369 XGE_EXIT_ON_ERR("xgeX: Initialization of HAL driver failed",
370 xdi_out, status);
371 }
372 }
373 hal_driver_init_count = hal_driver_init_count + 1;
374
375 xge_hal_driver_debug_module_mask_set(0xffffffff);
376 xge_hal_driver_debug_level_set(XGE_TRACE);
377
378xdi_out:
379 return status;
380}
381
382/**
383 * xge_media_init
384 * Initializes, adds and sets media
385 *
386 * @devc Device Handle
387 */
388void
389xge_media_init(device_t devc)
390{
391 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(devc);
392
393 /* Initialize Media */
394 ifmedia_init(&lldev->media, IFM_IMASK, xge_ifmedia_change,
395 xge_ifmedia_status);
396
397 /* Add supported media */
398 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
399 ifmedia_add(&lldev->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
400 ifmedia_add(&lldev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
401 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
402 ifmedia_add(&lldev->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
403
404 /* Set media */
405 ifmedia_set(&lldev->media, IFM_ETHER | IFM_AUTO);
406}
407
408/**
409 * xge_pci_space_save
410 * Save PCI configuration space
411 *
412 * @dev Device Handle
413 */
414void
415xge_pci_space_save(device_t dev)
416{
417 struct pci_devinfo *dinfo = NULL;
418
419 dinfo = device_get_ivars(dev);
420 xge_trace(XGE_TRACE, "Saving PCI configuration space");
421 pci_cfg_save(dev, dinfo, 0);
422}
423
424/**
425 * xge_pci_space_restore
426 * Restore saved PCI configuration space
427 *
428 * @dev Device Handle
429 */
430void
431xge_pci_space_restore(device_t dev)
432{
433 struct pci_devinfo *dinfo = NULL;
434
435 dinfo = device_get_ivars(dev);
436 xge_trace(XGE_TRACE, "Restoring PCI configuration space");
437 pci_cfg_restore(dev, dinfo);
438}
439
440/**
441 * xge_msi_info_save
442 * Save MSI info
443 *
444 * @lldev Per-adapter Data
445 */
446void
447xge_msi_info_save(xge_lldev_t * lldev)
448{
449 xge_os_pci_read16(lldev->pdev, NULL,
450 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
451 &lldev->msi_info.msi_control);
452 xge_os_pci_read32(lldev->pdev, NULL,
453 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
454 &lldev->msi_info.msi_lower_address);
455 xge_os_pci_read32(lldev->pdev, NULL,
456 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
457 &lldev->msi_info.msi_higher_address);
458 xge_os_pci_read16(lldev->pdev, NULL,
459 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
460 &lldev->msi_info.msi_data);
461}
462
463/**
464 * xge_msi_info_restore
465 * Restore saved MSI info
466 *
467 * @dev Device Handle
468 */
469void
470xge_msi_info_restore(xge_lldev_t *lldev)
471{
472 /*
473 * If interface is made down and up, traffic fails. It was observed that
474 * MSI information were getting reset on down. Restoring them.
475 */
476 xge_os_pci_write16(lldev->pdev, NULL,
477 xge_offsetof(xge_hal_pci_config_le_t, msi_control),
478 lldev->msi_info.msi_control);
479
480 xge_os_pci_write32(lldev->pdev, NULL,
481 xge_offsetof(xge_hal_pci_config_le_t, msi_lower_address),
482 lldev->msi_info.msi_lower_address);
483
484 xge_os_pci_write32(lldev->pdev, NULL,
485 xge_offsetof(xge_hal_pci_config_le_t, msi_higher_address),
486 lldev->msi_info.msi_higher_address);
487
488 xge_os_pci_write16(lldev->pdev, NULL,
489 xge_offsetof(xge_hal_pci_config_le_t, msi_data),
490 lldev->msi_info.msi_data);
491}
492
493/**
494 * xge_init_mutex
495 * Initializes mutexes used in driver
496 *
497 * @lldev Per-adapter Data
498 */
499void
500xge_mutex_init(xge_lldev_t *lldev)
501{
502 int qindex;
503
504 sprintf(lldev->mtx_name_drv, "%s_drv",
505 device_get_nameunit(lldev->device));
506 mtx_init(&lldev->mtx_drv, lldev->mtx_name_drv, MTX_NETWORK_LOCK,
507 MTX_DEF);
508
509 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
510 sprintf(lldev->mtx_name_tx[qindex], "%s_tx_%d",
511 device_get_nameunit(lldev->device), qindex);
512 mtx_init(&lldev->mtx_tx[qindex], lldev->mtx_name_tx[qindex], NULL,
513 MTX_DEF);
514 }
515}
516
517/**
518 * xge_mutex_destroy
519 * Destroys mutexes used in driver
520 *
521 * @lldev Per-adapter Data
522 */
523void
524xge_mutex_destroy(xge_lldev_t *lldev)
525{
526 int qindex;
527
528 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
529 mtx_destroy(&lldev->mtx_tx[qindex]);
530 mtx_destroy(&lldev->mtx_drv);
531}
532
533/**
534 * xge_print_info
535 * Print device and driver information
536 *
537 * @lldev Per-adapter Data
538 */
539void
540xge_print_info(xge_lldev_t *lldev)
541{
542 device_t dev = lldev->device;
543 xge_hal_device_t *hldev = lldev->devh;
544 xge_hal_status_e status = XGE_HAL_OK;
545 u64 val64 = 0;
546 const char *xge_pci_bus_speeds[17] = {
547 "PCI 33MHz Bus",
548 "PCI 66MHz Bus",
549 "PCIX(M1) 66MHz Bus",
550 "PCIX(M1) 100MHz Bus",
551 "PCIX(M1) 133MHz Bus",
552 "PCIX(M2) 133MHz Bus",
553 "PCIX(M2) 200MHz Bus",
554 "PCIX(M2) 266MHz Bus",
555 "PCIX(M1) Reserved",
556 "PCIX(M1) 66MHz Bus (Not Supported)",
557 "PCIX(M1) 100MHz Bus (Not Supported)",
558 "PCIX(M1) 133MHz Bus (Not Supported)",
559 "PCIX(M2) Reserved",
560 "PCIX 533 Reserved",
561 "PCI Basic Mode",
562 "PCIX Basic Mode",
563 "PCI Invalid Mode"
564 };
565
566 xge_os_printf("%s: Xframe%s %s Revision %d Driver v%s",
567 device_get_nameunit(dev),
568 ((hldev->device_id == XGE_PCI_DEVICE_ID_XENA_2) ? "I" : "II"),
569 hldev->vpd_data.product_name, hldev->revision, XGE_DRIVER_VERSION);
570 xge_os_printf("%s: Serial Number %s",
571 device_get_nameunit(dev), hldev->vpd_data.serial_num);
572
573 if(pci_get_device(dev) == XGE_PCI_DEVICE_ID_HERC_2) {
574 status = xge_hal_mgmt_reg_read(hldev, 0,
575 xge_offsetof(xge_hal_pci_bar0_t, pci_info), &val64);
576 if(status != XGE_HAL_OK)
577 xge_trace(XGE_ERR, "Error for getting bus speed");
578
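        /*
         * pci_info encodes the bus width and the bus mode/frequency;
         * the mode field indexes the name table above.
         */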
579 xge_os_printf("%s: Adapter is on %s bit %s",
580 device_get_nameunit(dev), ((val64 & BIT(8)) ? "32":"64"),
581 (xge_pci_bus_speeds[((val64 & XGE_HAL_PCI_INFO) >> 60)]));
582 }
583
584 xge_os_printf("%s: Using %s Interrupts",
585 device_get_nameunit(dev),
586 (lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) ? "MSI":"Line");
587}
588
589/**
590 * xge_create_dma_tags
591 * Creates DMA tags for both Tx and Rx
592 *
593 * @dev Device Handle
594 *
595 * Returns XGE_HAL_OK or XGE_HAL_FAIL (if errors)
596 */
597xge_hal_status_e
598xge_create_dma_tags(device_t dev)
599{
600 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
601 xge_hal_status_e status = XGE_HAL_FAIL;
602 int mtu = (lldev->ifnetp)->if_mtu, maxsize;
603
604 /* DMA tag for Tx */
605 status = bus_dma_tag_create(
606 bus_get_dma_tag(dev), /* Parent */
607 PAGE_SIZE, /* Alignment */
608 0, /* Bounds */
609 BUS_SPACE_MAXADDR, /* Low Address */
610 BUS_SPACE_MAXADDR, /* High Address */
611 NULL, /* Filter Function */
612 NULL, /* Filter Function Arguments */
613 MCLBYTES * XGE_MAX_SEGS, /* Maximum Size */
614 XGE_MAX_SEGS, /* Number of Segments */
615 MCLBYTES, /* Maximum Segment Size */
616 BUS_DMA_ALLOCNOW, /* Flags */
617 NULL, /* Lock Function */
618 NULL, /* Lock Function Arguments */
619 (&lldev->dma_tag_tx)); /* DMA Tag */
620 if(status != 0)
621 goto _exit;
622
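    /*
     * Pick the smallest mbuf cluster that can hold a full frame (MTU plus
     * the largest MAC header). In 5-buffer mode the frame is split across
     * buffers, so page-sized clusters suffice even for jumbo MTUs.
     */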
623 maxsize = mtu + XGE_HAL_MAC_HEADER_MAX_SIZE;
624 if(maxsize <= MCLBYTES) {
625 maxsize = MCLBYTES;
626 }
627 else {
628 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5)
629 maxsize = MJUMPAGESIZE;
630 else
631 maxsize = (maxsize <= MJUMPAGESIZE) ? MJUMPAGESIZE : MJUM9BYTES;
632 }
633
634 /* DMA tag for Rx */
635 status = bus_dma_tag_create(
636 bus_get_dma_tag(dev), /* Parent */
637 PAGE_SIZE, /* Alignment */
638 0, /* Bounds */
639 BUS_SPACE_MAXADDR, /* Low Address */
640 BUS_SPACE_MAXADDR, /* High Address */
641 NULL, /* Filter Function */
642 NULL, /* Filter Function Arguments */
643 maxsize, /* Maximum Size */
644 1, /* Number of Segments */
645 maxsize, /* Maximum Segment Size */
646 BUS_DMA_ALLOCNOW, /* Flags */
647 NULL, /* Lock Function */
648 NULL, /* Lock Function Arguments */
649 (&lldev->dma_tag_rx)); /* DMA Tag */
650 if(status != 0)
651 goto _exit1;
652
653 status = bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
654 &lldev->extra_dma_map);
655 if(status != 0)
656 goto _exit2;
657
658 status = XGE_HAL_OK;
659 goto _exit;
660
661_exit2:
662 status = bus_dma_tag_destroy(lldev->dma_tag_rx);
663 if(status != 0)
664 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
665_exit1:
666 status = bus_dma_tag_destroy(lldev->dma_tag_tx);
667 if(status != 0)
668 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
669 status = XGE_HAL_FAIL;
670_exit:
671 return status;
672}
673
674/**
675 * xge_confirm_changes
 676 * Disables and re-enables the interface to apply a requested change
 677 *
 678 * @lldev    Per-adapter Data
 679 * @option   Identifies the requested change (MTU, LRO or buffer mode)
682 */
683void
684xge_confirm_changes(xge_lldev_t *lldev, xge_option_e option)
685{
686 if(lldev->initialized == 0) goto _exit1;
687
688 mtx_lock(&lldev->mtx_drv);
689 if_down(lldev->ifnetp);
690 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
691
692 if(option == XGE_SET_MTU)
693 (lldev->ifnetp)->if_mtu = lldev->mtu;
694 else
695 xge_buffer_mode_init(lldev, lldev->mtu);
696
697 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
698 if_up(lldev->ifnetp);
699 mtx_unlock(&lldev->mtx_drv);
700 goto _exit;
701
702_exit1:
703 /* Request was to change MTU and device not initialized */
704 if(option == XGE_SET_MTU) {
705 (lldev->ifnetp)->if_mtu = lldev->mtu;
706 xge_buffer_mode_init(lldev, lldev->mtu);
707 }
708_exit:
709 return;
710}
711
712/**
713 * xge_change_lro_status
714 * Enable/Disable LRO feature
715 *
716 * @SYSCTL_HANDLER_ARGS sysctl_oid structure with arguments
717 *
718 * Returns 0 or error number.
719 */
720static int
721xge_change_lro_status(SYSCTL_HANDLER_ARGS)
722{
723 xge_lldev_t *lldev = (xge_lldev_t *)arg1;
724 int request = lldev->enabled_lro, status = XGE_HAL_OK;
725
726 status = sysctl_handle_int(oidp, &request, arg2, req);
727 if((status != XGE_HAL_OK) || (!req->newptr))
728 goto _exit;
729
730 if((request < 0) || (request > 1)) {
731 status = EINVAL;
732 goto _exit;
733 }
734
 735     /* Return if current and requested states are the same */
736 if(request == lldev->enabled_lro){
737 xge_trace(XGE_ERR, "LRO is already %s",
738 ((request) ? "enabled" : "disabled"));
739 goto _exit;
740 }
741
742 lldev->enabled_lro = request;
743 xge_confirm_changes(lldev, XGE_CHANGE_LRO);
744 arg2 = lldev->enabled_lro;
745
746_exit:
747 return status;
748}
749
750/**
751 * xge_add_sysctl_handlers
752 * Registers sysctl parameter value update handlers
753 *
754 * @lldev Per-adapter data
755 */
756void
757xge_add_sysctl_handlers(xge_lldev_t *lldev)
758{
759 struct sysctl_ctx_list *context_list =
760 device_get_sysctl_ctx(lldev->device);
761 struct sysctl_oid *oid = device_get_sysctl_tree(lldev->device);
762
763 SYSCTL_ADD_PROC(context_list, SYSCTL_CHILDREN(oid), OID_AUTO,
764 "enable_lro", CTLTYPE_INT | CTLFLAG_RW, lldev, 0,
765 xge_change_lro_status, "I", "Enable or disable LRO feature");
766}
767
768/**
769 * xge_attach
 770 * Connects the driver to the system if probe was successful
771 *
772 * @dev Device Handle
773 */
774int
775xge_attach(device_t dev)
776{
777 xge_hal_device_config_t *device_config;
778 xge_hal_device_attr_t attr;
779 xge_lldev_t *lldev;
780 xge_hal_device_t *hldev;
781 xge_pci_info_t *pci_info;
782 struct ifnet *ifnetp;
783 int rid, rid0, rid1, error;
784 int msi_count = 0, status = XGE_HAL_OK;
785 int enable_msi = XGE_HAL_INTR_MODE_IRQLINE;
786
787 device_config = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
788 if(!device_config) {
789 XGE_EXIT_ON_ERR("Memory allocation for device configuration failed",
790 attach_out_config, ENOMEM);
791 }
792
793 lldev = (xge_lldev_t *) device_get_softc(dev);
794 if(!lldev) {
795 XGE_EXIT_ON_ERR("Adapter softc is NULL", attach_out, ENOMEM);
796 }
797 lldev->device = dev;
798
799 xge_mutex_init(lldev);
800
801 error = xge_driver_initialize();
802 if(error != XGE_HAL_OK) {
803 xge_resources_free(dev, xge_free_mutex);
804 XGE_EXIT_ON_ERR("Initializing driver failed", attach_out, ENXIO);
805 }
806
807 /* HAL device */
808 hldev =
809 (xge_hal_device_t *)xge_os_malloc(NULL, sizeof(xge_hal_device_t));
810 if(!hldev) {
811 xge_resources_free(dev, xge_free_terminate_hal_driver);
812 XGE_EXIT_ON_ERR("Memory allocation for HAL device failed",
813 attach_out, ENOMEM);
814 }
815 lldev->devh = hldev;
816
817 /* Our private structure */
818 pci_info =
819 (xge_pci_info_t*) xge_os_malloc(NULL, sizeof(xge_pci_info_t));
820 if(!pci_info) {
821 xge_resources_free(dev, xge_free_hal_device);
822 XGE_EXIT_ON_ERR("Memory allocation for PCI info. failed",
823 attach_out, ENOMEM);
824 }
825 lldev->pdev = pci_info;
826 pci_info->device = dev;
827
828 /* Set bus master */
829 pci_enable_busmaster(dev);
830
831 /* Get virtual address for BAR0 */
832 rid0 = PCIR_BAR(0);
833 pci_info->regmap0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0,
834 RF_ACTIVE);
835 if(pci_info->regmap0 == NULL) {
836 xge_resources_free(dev, xge_free_pci_info);
837 XGE_EXIT_ON_ERR("Bus resource allocation for BAR0 failed",
838 attach_out, ENOMEM);
839 }
840 attr.bar0 = (char *)pci_info->regmap0;
841
842 pci_info->bar0resource = (xge_bus_resource_t*)
843 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
844 if(pci_info->bar0resource == NULL) {
845 xge_resources_free(dev, xge_free_bar0);
846 XGE_EXIT_ON_ERR("Memory allocation for BAR0 Resources failed",
847 attach_out, ENOMEM);
848 }
849 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_tag =
850 rman_get_bustag(pci_info->regmap0);
851 ((xge_bus_resource_t *)(pci_info->bar0resource))->bus_handle =
852 rman_get_bushandle(pci_info->regmap0);
853 ((xge_bus_resource_t *)(pci_info->bar0resource))->bar_start_addr =
854 pci_info->regmap0;
855
856 /* Get virtual address for BAR1 */
857 rid1 = PCIR_BAR(2);
858 pci_info->regmap1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid1,
859 RF_ACTIVE);
860 if(pci_info->regmap1 == NULL) {
861 xge_resources_free(dev, xge_free_bar0_resource);
862 XGE_EXIT_ON_ERR("Bus resource allocation for BAR1 failed",
863 attach_out, ENOMEM);
864 }
865 attr.bar1 = (char *)pci_info->regmap1;
866
867 pci_info->bar1resource = (xge_bus_resource_t*)
868 xge_os_malloc(NULL, sizeof(xge_bus_resource_t));
869 if(pci_info->bar1resource == NULL) {
870 xge_resources_free(dev, xge_free_bar1);
871 XGE_EXIT_ON_ERR("Memory allocation for BAR1 Resources failed",
872 attach_out, ENOMEM);
873 }
874 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_tag =
875 rman_get_bustag(pci_info->regmap1);
876 ((xge_bus_resource_t *)(pci_info->bar1resource))->bus_handle =
877 rman_get_bushandle(pci_info->regmap1);
878 ((xge_bus_resource_t *)(pci_info->bar1resource))->bar_start_addr =
879 pci_info->regmap1;
880
881 /* Save PCI config space */
882 xge_pci_space_save(dev);
883
884 attr.regh0 = (xge_bus_resource_t *) pci_info->bar0resource;
885 attr.regh1 = (xge_bus_resource_t *) pci_info->bar1resource;
886 attr.irqh = lldev->irqhandle;
887 attr.cfgh = pci_info;
888 attr.pdev = pci_info;
889
890 /* Initialize device configuration parameters */
891 xge_init_params(device_config, dev);
892
893 rid = 0;
894 if(lldev->enabled_msi) {
895 /* Number of MSI messages supported by device */
896 msi_count = pci_msi_count(dev);
897 if(msi_count > 1) {
898 /* Device supports MSI */
899 if(bootverbose) {
900 xge_trace(XGE_ERR, "MSI count: %d", msi_count);
901 xge_trace(XGE_ERR, "Now, driver supporting 1 message");
902 }
903 msi_count = 1;
904 error = pci_alloc_msi(dev, &msi_count);
905 if(error == 0) {
906 if(bootverbose)
907 xge_trace(XGE_ERR, "Allocated messages: %d", msi_count);
908 enable_msi = XGE_HAL_INTR_MODE_MSI;
909 rid = 1;
910 }
911 else {
912 if(bootverbose)
913 xge_trace(XGE_ERR, "pci_alloc_msi failed, %d", error);
914 }
915 }
916 }
917 lldev->enabled_msi = enable_msi;
918
919 /* Allocate resource for irq */
920 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
921 (RF_SHAREABLE | RF_ACTIVE));
922 if(lldev->irq == NULL) {
923 xge_trace(XGE_ERR, "Allocating irq resource for %s failed",
924 ((rid == 0) ? "line interrupt" : "MSI"));
925 if(rid == 1) {
926 error = pci_release_msi(dev);
927 if(error != 0) {
928 xge_trace(XGE_ERR, "Releasing MSI resources failed %d",
929 error);
930 xge_trace(XGE_ERR, "Requires reboot to use MSI again");
931 }
932 xge_trace(XGE_ERR, "Trying line interrupts");
933 rid = 0;
934 lldev->enabled_msi = XGE_HAL_INTR_MODE_IRQLINE;
935 lldev->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
936 (RF_SHAREABLE | RF_ACTIVE));
937 }
938 if(lldev->irq == NULL) {
939 xge_trace(XGE_ERR, "Allocating irq resource failed");
940 xge_resources_free(dev, xge_free_bar1_resource);
941 status = ENOMEM;
942 goto attach_out;
943 }
944 }
945
946 device_config->intr_mode = lldev->enabled_msi;
947 if(bootverbose) {
948 xge_trace(XGE_TRACE, "rid: %d, Mode: %d, MSI count: %d", rid,
949 lldev->enabled_msi, msi_count);
950 }
951
952 /* Initialize HAL device */
953 error = xge_hal_device_initialize(hldev, &attr, device_config);
954 if(error != XGE_HAL_OK) {
955 xge_resources_free(dev, xge_free_irq_resource);
956 XGE_EXIT_ON_ERR("Initializing HAL device failed", attach_out,
957 ENXIO);
958 }
959
960 xge_hal_device_private_set(hldev, lldev);
961
962 error = xge_interface_setup(dev);
963 if(error != 0) {
964 status = error;
965 goto attach_out;
966 }
967
968 ifnetp = lldev->ifnetp;
969 ifnetp->if_mtu = device_config->mtu;
970
971 xge_media_init(dev);
972
973 /* Associate interrupt handler with the device */
974 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
975 error = bus_setup_intr(dev, lldev->irq,
976 (INTR_TYPE_NET | INTR_MPSAFE),
977#if __FreeBSD_version > 700030
978 NULL,
979#endif
980 xge_isr_msi, lldev, &lldev->irqhandle);
981 xge_msi_info_save(lldev);
982 }
983 else {
984 error = bus_setup_intr(dev, lldev->irq,
985 (INTR_TYPE_NET | INTR_MPSAFE),
986#if __FreeBSD_version > 700030
987 xge_isr_filter,
988#endif
989 xge_isr_line, lldev, &lldev->irqhandle);
990 }
991 if(error != 0) {
992 xge_resources_free(dev, xge_free_media_interface);
993 XGE_EXIT_ON_ERR("Associating interrupt handler with device failed",
994 attach_out, ENXIO);
995 }
996
997 xge_print_info(lldev);
998
999 xge_add_sysctl_handlers(lldev);
1000
1001 xge_buffer_mode_init(lldev, device_config->mtu);
1002
1003attach_out:
1004 xge_os_free(NULL, device_config, sizeof(xge_hal_device_config_t));
1005attach_out_config:
1006 return status;
1007}
1008
1009/**
1010 * xge_resources_free
 1011 * Undo everything done during load/attach
1012 *
1013 * @dev Device Handle
 1014 * @error    Identifies how much to undo
1015 */
1016void
1017xge_resources_free(device_t dev, xge_lables_e error)
1018{
1019 xge_lldev_t *lldev;
1020 xge_pci_info_t *pci_info;
1021 xge_hal_device_t *hldev;
1022 int rid, status;
1023
1024 /* LL Device */
1025 lldev = (xge_lldev_t *) device_get_softc(dev);
1026 pci_info = lldev->pdev;
1027
1028 /* HAL Device */
1029 hldev = lldev->devh;
1030
1031 switch(error) {
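        /*
         * No breaks: the cases intentionally fall through, so that given
         * the label of the last successful allocation, everything acquired
         * before it is released in reverse order.
         */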
1032 case xge_free_all:
1033 /* Teardown interrupt handler - device association */
1034 bus_teardown_intr(dev, lldev->irq, lldev->irqhandle);
1035
1036 case xge_free_media_interface:
1037 /* Media */
1038 ifmedia_removeall(&lldev->media);
1039
1040 /* Detach Ether */
1041 ether_ifdetach(lldev->ifnetp);
1042 if_free(lldev->ifnetp);
1043
1044 xge_hal_device_private_set(hldev, NULL);
1045 xge_hal_device_disable(hldev);
1046
1047 case xge_free_terminate_hal_device:
1048 /* HAL Device */
1049 xge_hal_device_terminate(hldev);
1050
1051 case xge_free_irq_resource:
1052 /* Release IRQ resource */
1053 bus_release_resource(dev, SYS_RES_IRQ,
1054 ((lldev->enabled_msi == XGE_HAL_INTR_MODE_IRQLINE) ? 0:1),
1055 lldev->irq);
1056
1057 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
1058 status = pci_release_msi(dev);
1059 if(status != 0) {
1060 if(bootverbose) {
1061 xge_trace(XGE_ERR,
1062 "pci_release_msi returned %d", status);
1063 }
1064 }
1065 }
1066
1067 case xge_free_bar1_resource:
1068 /* Restore PCI configuration space */
1069 xge_pci_space_restore(dev);
1070
1071 /* Free bar1resource */
1072 xge_os_free(NULL, pci_info->bar1resource,
1073 sizeof(xge_bus_resource_t));
1074
1075 case xge_free_bar1:
1076 /* Release BAR1 */
1077 rid = PCIR_BAR(2);
1078 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1079 pci_info->regmap1);
1080
1081 case xge_free_bar0_resource:
1082 /* Free bar0resource */
1083 xge_os_free(NULL, pci_info->bar0resource,
1084 sizeof(xge_bus_resource_t));
1085
1086 case xge_free_bar0:
1087 /* Release BAR0 */
1088 rid = PCIR_BAR(0);
1089 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1090 pci_info->regmap0);
1091
1092 case xge_free_pci_info:
1093 /* Disable Bus Master */
1094 pci_disable_busmaster(dev);
1095
1096 /* Free pci_info_t */
1097 lldev->pdev = NULL;
1098 xge_os_free(NULL, pci_info, sizeof(xge_pci_info_t));
1099
1100 case xge_free_hal_device:
1101 /* Free device configuration struct and HAL device */
1102 xge_os_free(NULL, hldev, sizeof(xge_hal_device_t));
1103
1104 case xge_free_terminate_hal_driver:
1105 /* Terminate HAL driver */
1106 hal_driver_init_count = hal_driver_init_count - 1;
1107 if(!hal_driver_init_count) {
1108 xge_hal_driver_terminate();
1109 }
1110
1111 case xge_free_mutex:
1112 xge_mutex_destroy(lldev);
1113 }
1114}
1115
1116/**
1117 * xge_detach
 1118 * Detaches the driver from the kernel subsystem
1119 *
1120 * @dev Device Handle
1121 */
1122int
1123xge_detach(device_t dev)
1124{
1125 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1126
1127 if(lldev->in_detach == 0) {
1128 lldev->in_detach = 1;
1129 xge_stop(lldev);
1130 xge_resources_free(dev, xge_free_all);
1131 }
1132
1133 return 0;
1134}
1135
1136/**
1137 * xge_shutdown
 1138 * Shuts down the device before system shutdown
1139 *
1140 * @dev Device Handle
1141 */
1142int
1143xge_shutdown(device_t dev)
1144{
1145 xge_lldev_t *lldev = (xge_lldev_t *) device_get_softc(dev);
1146 xge_stop(lldev);
1147
1148 return 0;
1149}
1150
1151/**
1152 * xge_interface_setup
1153 * Setup interface
1154 *
1155 * @dev Device Handle
1156 *
1157 * Returns 0 on success, ENXIO/ENOMEM on failure
1158 */
1159int
1160xge_interface_setup(device_t dev)
1161{
1162 u8 mcaddr[ETHER_ADDR_LEN];
1163 xge_hal_status_e status;
1164 xge_lldev_t *lldev = (xge_lldev_t *)device_get_softc(dev);
1165 struct ifnet *ifnetp;
1166 xge_hal_device_t *hldev = lldev->devh;
1167
1168 /* Get the MAC address of the device */
1169 status = xge_hal_device_macaddr_get(hldev, 0, &mcaddr);
1170 if(status != XGE_HAL_OK) {
1171 xge_resources_free(dev, xge_free_terminate_hal_device);
1172 XGE_EXIT_ON_ERR("Getting MAC address failed", ifsetup_out, ENXIO);
1173 }
1174
1175 /* Get interface ifnet structure for this Ether device */
1176 ifnetp = lldev->ifnetp = if_alloc(IFT_ETHER);
1177 if(ifnetp == NULL) {
1178 xge_resources_free(dev, xge_free_terminate_hal_device);
1179 XGE_EXIT_ON_ERR("Allocation ifnet failed", ifsetup_out, ENOMEM);
1180 }
1181
1182 /* Initialize interface ifnet structure */
1183 if_initname(ifnetp, device_get_name(dev), device_get_unit(dev));
1184 ifnetp->if_mtu = XGE_HAL_DEFAULT_MTU;
1185 ifnetp->if_baudrate = XGE_BAUDRATE;
1186 ifnetp->if_init = xge_init;
1187 ifnetp->if_softc = lldev;
1188 ifnetp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1189 ifnetp->if_ioctl = xge_ioctl;
1190 ifnetp->if_start = xge_send;
1191
1192 /* TODO: Check and assign optimal value */
 1193    ifnetp->if_snd.ifq_maxlen = ifqmaxlen;
1194
1195 ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
1196 IFCAP_HWCSUM;
1197 if(lldev->enabled_tso)
1198 ifnetp->if_capabilities |= IFCAP_TSO4;
1199 if(lldev->enabled_lro)
1200 ifnetp->if_capabilities |= IFCAP_LRO;
1201
1202 ifnetp->if_capenable = ifnetp->if_capabilities;
1203
1204 /* Attach the interface */
1205 ether_ifattach(ifnetp, mcaddr);
1206
1207ifsetup_out:
1208 return status;
1209}
1210
1211/**
1212 * xge_callback_link_up
1213 * Callback for Link-up indication from HAL
1214 *
1215 * @userdata Per-adapter data
1216 */
1217void
1218xge_callback_link_up(void *userdata)
1219{
1220 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1221 struct ifnet *ifnetp = lldev->ifnetp;
1222
 1223    ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1224 if_link_state_change(ifnetp, LINK_STATE_UP);
1225}
1226
1227/**
1228 * xge_callback_link_down
1229 * Callback for Link-down indication from HAL
1230 *
1231 * @userdata Per-adapter data
1232 */
1233void
1234xge_callback_link_down(void *userdata)
1235{
1236 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1237 struct ifnet *ifnetp = lldev->ifnetp;
1238
 1239    ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
1240 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1241}
1242
1243/**
1244 * xge_callback_crit_err
1245 * Callback for Critical error indication from HAL
1246 *
1247 * @userdata Per-adapter data
1248 * @type Event type (Enumerated hardware error)
1249 * @serr_data Hardware status
1250 */
1251void
1252xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1253{
1254 xge_trace(XGE_ERR, "Critical Error");
1255 xge_reset(userdata);
1256}
1257
1258/**
1259 * xge_callback_event
1260 * Callback from HAL indicating that some event has been queued
1261 *
1262 * @item Queued event item
1263 */
1264void
1265xge_callback_event(xge_queue_item_t *item)
1266{
1267 xge_lldev_t *lldev = NULL;
1268 xge_hal_device_t *hldev = NULL;
1269 struct ifnet *ifnetp = NULL;
1270
1271 hldev = item->context;
1272 lldev = xge_hal_device_private(hldev);
1273 ifnetp = lldev->ifnetp;
1274
1275 switch(item->event_type) {
1276 case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1277 if(lldev->initialized) {
1278 if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
 1279                ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1280 }
1281 else {
1282 xge_queue_produce_context(
1283 xge_hal_device_queue(lldev->devh),
1284 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1285 }
1286 }
1287 break;
1288
1289 case XGE_LL_EVENT_DEVICE_RESETTING:
1290 xge_reset(item->context);
1291 break;
1292
1293 default:
1294 break;
1295 }
1296}
1297
1298/**
1299 * xge_ifmedia_change
1300 * Media change driver callback
1301 *
1302 * @ifnetp Interface Handle
1303 *
1304 * Returns 0 if media is Ether else EINVAL
1305 */
1306int
1307xge_ifmedia_change(struct ifnet *ifnetp)
1308{
1309 xge_lldev_t *lldev = ifnetp->if_softc;
1310 struct ifmedia *ifmediap = &lldev->media;
1311
1312 return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1313}
1314
1315/**
1316 * xge_ifmedia_status
1317 * Media status driver callback
1318 *
1319 * @ifnetp Interface Handle
1320 * @ifmr Interface Media Settings
1321 */
1322void
1323xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1324{
1325 xge_hal_status_e status;
1326 u64 regvalue;
1327 xge_lldev_t *lldev = ifnetp->if_softc;
1328 xge_hal_device_t *hldev = lldev->devh;
1329
1330 ifmr->ifm_status = IFM_AVALID;
1331 ifmr->ifm_active = IFM_ETHER;
1332
1333 status = xge_hal_mgmt_reg_read(hldev, 0,
1334 xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1335 if(status != XGE_HAL_OK) {
1336 xge_trace(XGE_TRACE, "Getting adapter status failed");
1337 goto _exit;
1338 }
1339
1340 if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1341 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1342 ifmr->ifm_status |= IFM_ACTIVE;
1343 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1344 if_link_state_change(ifnetp, LINK_STATE_UP);
1345 }
1346 else {
1347 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1348 }
1349_exit:
1350 return;
1351}
1352
1353/**
1354 * xge_ioctl_stats
1355 * IOCTL to get statistics
1356 *
1357 * @lldev Per-adapter data
1358 * @ifreqp Interface request
1359 */
1360int
1361xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1362{
1363 xge_hal_status_e status = XGE_HAL_OK;
1364 char *data = (char *)ifreqp->ifr_data;
1365 void *info = NULL;
1366 int retValue = EINVAL;
1367
1368 switch(*data) {
1369 case XGE_QUERY_STATS:
1370 mtx_lock(&lldev->mtx_drv);
1371 status = xge_hal_stats_hw(lldev->devh,
1372 (xge_hal_stats_hw_info_t **)&info);
1373 mtx_unlock(&lldev->mtx_drv);
1374 if(status == XGE_HAL_OK) {
1375 if(copyout(info, ifreqp->ifr_data,
1376 sizeof(xge_hal_stats_hw_info_t)) == 0)
1377 retValue = 0;
1378 }
1379 else {
1380 xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1381 status);
1382 }
1383 break;
1384
1385 case XGE_QUERY_PCICONF:
1386 info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1387 if(info != NULL) {
1388 mtx_lock(&lldev->mtx_drv);
1389 status = xge_hal_mgmt_pci_config(lldev->devh, info,
1390 sizeof(xge_hal_pci_config_t));
1391 mtx_unlock(&lldev->mtx_drv);
1392 if(status == XGE_HAL_OK) {
1393 if(copyout(info, ifreqp->ifr_data,
1394 sizeof(xge_hal_pci_config_t)) == 0)
1395 retValue = 0;
1396 }
1397 else {
1398 xge_trace(XGE_ERR,
1399 "Getting PCI configuration failed (%d)", status);
1400 }
1401 xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1402 }
1403 break;
1404
1405 case XGE_QUERY_DEVSTATS:
1406 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1407 if(info != NULL) {
1408 mtx_lock(&lldev->mtx_drv);
 1409             status = xge_hal_mgmt_device_stats(lldev->devh, info,
1410 sizeof(xge_hal_stats_device_info_t));
1411 mtx_unlock(&lldev->mtx_drv);
1412 if(status == XGE_HAL_OK) {
1413 if(copyout(info, ifreqp->ifr_data,
1414 sizeof(xge_hal_stats_device_info_t)) == 0)
1415 retValue = 0;
1416 }
1417 else {
1418 xge_trace(XGE_ERR, "Getting device info failed (%d)",
1419 status);
1420 }
1421 xge_os_free(NULL, info,
1422 sizeof(xge_hal_stats_device_info_t));
1423 }
1424 break;
1425
1426 case XGE_QUERY_SWSTATS:
1427 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1428 if(info != NULL) {
1429 mtx_lock(&lldev->mtx_drv);
 1430             status = xge_hal_mgmt_sw_stats(lldev->devh, info,
1431 sizeof(xge_hal_stats_sw_err_t));
1432 mtx_unlock(&lldev->mtx_drv);
1433 if(status == XGE_HAL_OK) {
1434 if(copyout(info, ifreqp->ifr_data,
1435 sizeof(xge_hal_stats_sw_err_t)) == 0)
1436 retValue = 0;
1437 }
1438 else {
1439 xge_trace(XGE_ERR,
1440 "Getting tcode statistics failed (%d)", status);
1441 }
1442 xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
1443 }
1444 break;
1445
1446 case XGE_QUERY_DRIVERSTATS:
1447 if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
1448 sizeof(xge_driver_stats_t)) == 0) {
1449 retValue = 0;
1450 }
1451 else {
1452 xge_trace(XGE_ERR,
1453 "Copyout of driver statistics failed (%d)", status);
1454 }
1455 break;
1456
1457 case XGE_READ_VERSION:
1458 info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
 1459             if(info != NULL) {
1460 strcpy(info, XGE_DRIVER_VERSION);
1461 if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
1462 retValue = 0;
1463 xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1464 }
1465 break;
1466
1467 case XGE_QUERY_DEVCONF:
1468 info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1469 if(info != NULL) {
1470 mtx_lock(&lldev->mtx_drv);
1471 status = xge_hal_mgmt_device_config(lldev->devh, info,
1472 sizeof(xge_hal_device_config_t));
1473 mtx_unlock(&lldev->mtx_drv);
1474 if(status == XGE_HAL_OK) {
1475 if(copyout(info, ifreqp->ifr_data,
1476 sizeof(xge_hal_device_config_t)) == 0)
1477 retValue = 0;
1478 }
1479 else {
1480 xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1481 status);
1482 }
1483 xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1484 }
1485 break;
1486
1487 case XGE_QUERY_BUFFER_MODE:
1488 if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
1489 sizeof(int)) == 0)
1490 retValue = 0;
1491 break;
1492
1493 case XGE_SET_BUFFER_MODE_1:
1494 case XGE_SET_BUFFER_MODE_2:
1495 case XGE_SET_BUFFER_MODE_5:
1496 *data = (*data == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
 1497             if(copyout(data, ifreqp->ifr_data, sizeof(*data)) == 0)
1498 retValue = 0;
1499 break;
1500 default:
1501 xge_trace(XGE_TRACE, "Nothing is matching");
1502 retValue = ENOTTY;
1503 break;
1504 }
1505 return retValue;
1506}
1507
1508/**
1509 * xge_ioctl_registers
1510 * IOCTL to get registers
1511 *
1512 * @lldev Per-adapter data
1513 * @ifreqp Interface request
1514 */
1515int
1516xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1517{
1518 xge_register_t *data = (xge_register_t *)ifreqp->ifr_data;
1519 xge_hal_status_e status = XGE_HAL_OK;
1520 int retValue = EINVAL, offset = 0, index = 0;
1521 u64 val64 = 0;
1522
1523 /* Reading a register */
1524 if(strcmp(data->option, "-r") == 0) {
1525 data->value = 0x0000;
1526 mtx_lock(&lldev->mtx_drv);
1527 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1528 &data->value);
1529 mtx_unlock(&lldev->mtx_drv);
1530 if(status == XGE_HAL_OK) {
1531 if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
1532 retValue = 0;
1533 }
1534 }
1535 /* Writing to a register */
1536 else if(strcmp(data->option, "-w") == 0) {
1537 mtx_lock(&lldev->mtx_drv);
1538 status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
1539 data->value);
1540 if(status == XGE_HAL_OK) {
1541 val64 = 0x0000;
1542 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1543 &val64);
1544 if(status != XGE_HAL_OK) {
1545 xge_trace(XGE_ERR, "Reading back updated register failed");
1546 }
1547 else {
1548 if(val64 != data->value) {
1549 xge_trace(XGE_ERR,
1550 "Read and written register values mismatched");
1551 }
1552 else retValue = 0;
1553 }
1554 }
1555 else {
1556 xge_trace(XGE_ERR, "Getting register value failed");
1557 }
1558 mtx_unlock(&lldev->mtx_drv);
1559 }
1560 else {
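        /*
         * No option given: read every 64-bit register up to the last
         * BAR0 offset and copy the whole block out to the caller.
         */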
1561 mtx_lock(&lldev->mtx_drv);
1562 for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1563 index++, offset += 0x0008) {
1564 val64 = 0;
1565 status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1566 if(status != XGE_HAL_OK) {
1567 xge_trace(XGE_ERR, "Getting register value failed");
1568 break;
1569 }
 1570             *((u64 *)data + index) = val64;
1571 retValue = 0;
1572 }
1573 mtx_unlock(&lldev->mtx_drv);
1574
1575 if(retValue == 0) {
1576 if(copyout(data, ifreqp->ifr_data,
1577 sizeof(xge_hal_pci_bar0_t)) != 0) {
1578 xge_trace(XGE_ERR, "Copyout of register values failed");
1579 retValue = EINVAL;
1580 }
1581 }
1582 else {
1583 xge_trace(XGE_ERR, "Getting register values failed");
1584 }
1585 }
1586 return retValue;
1587}
1588
1589/**
1590 * xge_ioctl
1591 * Callback to control the device - Interface configuration
1592 *
1593 * @ifnetp Interface Handle
1594 * @command Device control command
1595 * @data Parameters associated with command (if any)
1596 */
1597int
1598xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1599{
1600 struct ifreq *ifreqp = (struct ifreq *)data;
1601 xge_lldev_t *lldev = ifnetp->if_softc;
1602 struct ifmedia *ifmediap = &lldev->media;
1603 int retValue = 0, mask = 0;
1604
1605 if(lldev->in_detach) {
1606 return retValue;
1607 }
1608
1609 switch(command) {
1610 /* Set/Get ifnet address */
1611 case SIOCSIFADDR:
1612 case SIOCGIFADDR:
1613 ether_ioctl(ifnetp, command, data);
1614 break;
1615
1616 /* Set ifnet MTU */
1617 case SIOCSIFMTU:
1618 retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1619 break;
1620
1621 /* Set ifnet flags */
1622 case SIOCSIFFLAGS:
1623 if(ifnetp->if_flags & IFF_UP) {
 1624             /* Interface is marked up */
1625 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
1626 xge_init(lldev);
1627 }
1628 xge_disable_promisc(lldev);
1629 xge_enable_promisc(lldev);
1630 }
1631 else {
 1632             /* Interface is marked down */
 1633             /* If the device is running, stop it */
1634 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1635 xge_stop(lldev);
1636 }
1637 }
1638 break;
1639
1640 /* Add/delete multicast address */
1641 case SIOCADDMULTI:
1642 case SIOCDELMULTI:
1643 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1644 xge_setmulti(lldev);
1645 }
1646 break;
1647
1648 /* Set/Get net media */
1649 case SIOCSIFMEDIA:
1650 case SIOCGIFMEDIA:
1651 retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1652 break;
1653
1654 /* Set capabilities */
1655 case SIOCSIFCAP:
1656 mtx_lock(&lldev->mtx_drv);
1657 mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1658 if(mask & IFCAP_TXCSUM) {
1659 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1660 ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1661 ifnetp->if_hwassist &=
1662 ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1663 }
1664 else {
1665 ifnetp->if_capenable |= IFCAP_TXCSUM;
1666 ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1667 }
1668 }
1669 if(mask & IFCAP_TSO4) {
1670 if(ifnetp->if_capenable & IFCAP_TSO4) {
1671 ifnetp->if_capenable &= ~IFCAP_TSO4;
1672 ifnetp->if_hwassist &= ~CSUM_TSO;
1673
1674 xge_os_printf("%s: TSO Disabled",
1675 device_get_nameunit(lldev->device));
1676 }
1677 else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1678 ifnetp->if_capenable |= IFCAP_TSO4;
1679 ifnetp->if_hwassist |= CSUM_TSO;
1680
1681 xge_os_printf("%s: TSO Enabled",
1682 device_get_nameunit(lldev->device));
1683 }
1684 }
1685
1686 mtx_unlock(&lldev->mtx_drv);
1687 break;
1688
1689 /* Custom IOCTL 0 */
1690 case SIOCGPRIVATE_0:
1691 retValue = xge_ioctl_stats(lldev, ifreqp);
1692 break;
1693
1694 /* Custom IOCTL 1 */
1695 case SIOCGPRIVATE_1:
1696 retValue = xge_ioctl_registers(lldev, ifreqp);
1697 break;
1698
1699 default:
1700 retValue = EINVAL;
1701 break;
1702 }
1703 return retValue;
1704}
1705
1706/**
1707 * xge_init
1708 * Initialize the interface
1709 *
1710 * @plldev Per-adapter Data
1711 */
1712void
1713xge_init(void *plldev)
1714{
1715 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1716
1717 mtx_lock(&lldev->mtx_drv);
1718 xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1719 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1720 mtx_unlock(&lldev->mtx_drv);
1721}
1722
1723/**
1724 * xge_device_init
1725 * Initialize the interface (called by holding lock)
1726 *
 1727 * @lldev    Per-adapter Data
1728 */
1729void
1730xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1731{
1732 struct ifnet *ifnetp = lldev->ifnetp;
1733 xge_hal_device_t *hldev = lldev->devh;
1734 struct ifaddr *ifaddrp;
1735 unsigned char *macaddr;
1736 struct sockaddr_dl *sockaddrp;
1737 int status = XGE_HAL_OK;
1738
1739 mtx_assert((&lldev->mtx_drv), MA_OWNED);
1740
1741 /* If device is in running state, initializing is not required */
1742 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1743 return;
1744
1745 /* Initializing timer */
1746 callout_init(&lldev->timer, CALLOUT_MPSAFE);
1747
1748 xge_trace(XGE_TRACE, "Set MTU size");
1749 status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1750 if(status != XGE_HAL_OK) {
1751 xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1752 goto _exit;
1753 }
1754
1755 /* Enable HAL device */
1756 xge_hal_device_enable(hldev);
1757
1758 /* Get MAC address and update in HAL */
1759 ifaddrp = ifnetp->if_addr;
1760 sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1761 sockaddrp->sdl_type = IFT_ETHER;
1762 sockaddrp->sdl_alen = ifnetp->if_addrlen;
1763 macaddr = LLADDR(sockaddrp);
1764 xge_trace(XGE_TRACE,
1765 "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1766 *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1767 *(macaddr + 4), *(macaddr + 5));
1768 status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1769 if(status != XGE_HAL_OK)
1770 xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1771
1772 /* Opening channels */
1773 mtx_unlock(&lldev->mtx_drv);
1774 status = xge_channel_open(lldev, option);
1775 mtx_lock(&lldev->mtx_drv);
1776 if(status != XGE_HAL_OK)
1777 goto _exit;
1778
1779 /* Set appropriate flags */
1780 ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
1781 ifnetp->if_flags &= ~IFF_DRV_OACTIVE;
1782
1783 /* Checksum capability */
1784 ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1785 (CSUM_TCP | CSUM_UDP) : 0;
1786
1787 if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1788 ifnetp->if_hwassist |= CSUM_TSO;
1789
1790 /* Enable interrupts */
1791 xge_hal_device_intr_enable(hldev);
1792
1793 callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1794
1795 /* Disable promiscuous mode */
1796 xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1797 xge_enable_promisc(lldev);
1798
1799 /* Device is initialized */
1800 lldev->initialized = 1;
1801 xge_os_mdelay(1000);
1802
1803_exit:
1804 return;
1805}
1806
1807/**
1808 * xge_timer
1809 * Timer timeout function to handle link status
1810 *
1811 * @devp Per-adapter Data
1812 */
1813void
1814xge_timer(void *devp)
1815{
1816 xge_lldev_t *lldev = (xge_lldev_t *)devp;
1817 xge_hal_device_t *hldev = lldev->devh;
1818
1819 /* Poll for changes */
1820 xge_hal_device_poll(hldev);
1821
1822 /* Reset timer */
1823 callout_reset(&lldev->timer, hz, xge_timer, lldev);
1824
1825 return;
1826}
1827
1828/**
1829 * xge_stop
1830 * De-activate the interface
1831 *
 1832 * @lldev    Per-adapter Data
1833 */
1834void
1835xge_stop(xge_lldev_t *lldev)
1836{
1837 mtx_lock(&lldev->mtx_drv);
1838 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1839 mtx_unlock(&lldev->mtx_drv);
1840}
1841
1842/**
1843 * xge_isr_filter
1844 * ISR filter function - to filter interrupts from other devices (shared)
1845 *
1846 * @handle Per-adapter Data
1847 *
1848 * Returns
1849 * FILTER_STRAY if interrupt is from other device
1850 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1851 */
1852int
1853xge_isr_filter(void *handle)
1854{
1855 xge_lldev_t *lldev = (xge_lldev_t *)handle;
1856 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1857 u16 retValue = FILTER_STRAY;
1858 u64 val64 = 0;
1859
1860 XGE_DRV_STATS(isr_filter);
1861
1862 val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1863 &bar0->general_int_status);
1864 retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1865
1866 return retValue;
1867}
1868
1869/**
1870 * xge_isr_line
1871 * Interrupt service routine for Line interrupts
1872 *
1873 * @plldev Per-adapter Data
1874 */
1875void
1876xge_isr_line(void *plldev)
1877{
1878 xge_hal_status_e status;
1879 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1880 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1881 struct ifnet *ifnetp = lldev->ifnetp;
1882
1883 XGE_DRV_STATS(isr_line);
1884
1885 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1886 status = xge_hal_device_handle_irq(hldev);
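        /*
         * The completion handler may have freed Tx descriptors; drain
         * any packets still waiting in the send queue.
         */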
1887 if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1888 xge_send(ifnetp);
1889 }
1890}
1891
1892/*
1893 * xge_isr_msi
1894 * ISR for Message signaled interrupts
1895 */
1896void
1897xge_isr_msi(void *plldev)
1898{
1899 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1900 XGE_DRV_STATS(isr_msi);
1901 xge_hal_device_continue_irq(lldev->devh);
1902}
1903
1904/**
1905 * xge_rx_open
1906 * Initiate and open all Rx channels
1907 *
1908 * @qid Ring Index
1909 * @lldev Per-adapter Data
1910 * @rflag Channel open/close/reopen flag
1911 *
1912 * Returns 0 or Error Number
1913 */
1914int
1915xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1916{
1917 u64 adapter_status = 0x0;
1918 xge_hal_status_e status = XGE_HAL_FAIL;
1919
1920 xge_hal_channel_attr_t attr = {
1921 .post_qid = qid,
1922 .compl_qid = 0,
1923 .callback = xge_rx_compl,
1924 .per_dtr_space = sizeof(xge_rx_priv_t),
1925 .flags = 0,
1926 .type = XGE_HAL_CHANNEL_TYPE_RING,
1927 .userdata = lldev,
1928 .dtr_init = xge_rx_initial_replenish,
1929 .dtr_term = xge_rx_term
1930 };
1931
1932 /* If device is not ready, return */
1933 status = xge_hal_device_status(lldev->devh, &adapter_status);
1934 if(status != XGE_HAL_OK) {
1935 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1936 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1937 }
1938 else {
1939 status = xge_hal_channel_open(lldev->devh, &attr,
1940 &lldev->ring_channel[qid], rflag);
1941 }
1942
1943_exit:
1944 return status;
1945}
1946
1947/**
1948 * xge_tx_open
1949 * Initialize and open all Tx channels
1950 *
1951 * @lldev Per-adapter Data
1952 * @tflag Channel open/close/reopen flag
1953 *
1954 * Returns 0 or Error Number
1955 */
1956int
1957xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1958{
1959 xge_hal_status_e status = XGE_HAL_FAIL;
1960 u64 adapter_status = 0x0;
1961 int qindex, index;
1962
1963 xge_hal_channel_attr_t attr = {
1964 .compl_qid = 0,
1965 .callback = xge_tx_compl,
1966 .per_dtr_space = sizeof(xge_tx_priv_t),
1967 .flags = 0,
1968 .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1969 .userdata = lldev,
1970 .dtr_init = xge_tx_initial_replenish,
1971 .dtr_term = xge_tx_term
1972 };
1973
1974 /* If device is not ready, return */
1975 status = xge_hal_device_status(lldev->devh, &adapter_status);
1976 if(status != XGE_HAL_OK) {
1977 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1978 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1979 }
1980
1981 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
 1982         attr.post_qid = qindex;
1983 status = xge_hal_channel_open(lldev->devh, &attr,
1984 &lldev->fifo_channel[qindex], tflag);
1985 if(status != XGE_HAL_OK) {
1986 for(index = 0; index < qindex; index++)
1987 xge_hal_channel_close(lldev->fifo_channel[index], tflag);
             break;
 1988         }
1989 }
1990
1991_exit:
1992 return status;
1993}
1994
1995/**
1996 * xge_enable_msi
1997 * Enables MSI
1998 *
1999 * @lldev Per-adapter Data
2000 */
2001void
2002xge_enable_msi(xge_lldev_t *lldev)
2003{
2004 xge_list_t *item = NULL;
2005 xge_hal_device_t *hldev = lldev->devh;
2006 xge_hal_channel_t *channel = NULL;
2007 u16 offset = 0, val16 = 0;
2008
2009 xge_os_pci_read16(lldev->pdev, NULL,
2010 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2011
2012 /* Update msi_data */
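    /*
     * msi_data follows one or two address dwords depending on the 64-bit
     * address capable bit (0x80) in msi_control, hence the 0x4c vs. 0x48
     * config-space offset for this device's capability layout.
     */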
2013 offset = (val16 & 0x80) ? 0x4c : 0x48;
2014 xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2015 if(val16 & 0x1)
2016 val16 &= 0xfffe;
2017 else
2018 val16 |= 0x1;
2019 xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2020
2021 /* Update msi_control */
2022 xge_os_pci_read16(lldev->pdev, NULL,
2023 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2024 val16 |= 0x10;
2025 xge_os_pci_write16(lldev->pdev, NULL,
2026 xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2027
2028 /* Set TxMAT and RxMAT registers with MSI */
2029 xge_list_for_each(item, &hldev->free_channels) {
2030 channel = xge_container_of(item, xge_hal_channel_t, item);
2031 xge_hal_channel_msi_set(channel, 1, (u32)val16);
2032 }
2033}
2034
2035/**
2036 * xge_channel_open
2037 * Open both Tx and Rx channels
2038 *
2039 * @lldev Per-adapter Data
2040 * @option Channel reopen option
2041 */
2042int
2043xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2044{
2045 xge_lro_entry_t *lro_session = NULL;
2046 xge_hal_status_e status = XGE_HAL_OK;
2047 int index = 0, index2 = 0;
2048
2049 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2050 xge_msi_info_restore(lldev);
2051 xge_enable_msi(lldev);
2052 }
2053
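    /*
     * Re-entered from below if 1-buffer mode cannot map jumbo frames;
     * the DMA tags are then recreated for 5-buffer mode.
     */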
2054_exit2:
2055 status = xge_create_dma_tags(lldev->device);
2056 if(status != XGE_HAL_OK)
2057 XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2058
2059 /* Open ring (Rx) channel */
2060 for(index = 0; index < XGE_RING_COUNT; index++) {
2061 status = xge_rx_open(index, lldev, option);
2062 if(status != XGE_HAL_OK) {
2063 /*
 2064             * DMA mapping may fail on an unpatched kernel that cannot
 2065             * allocate contiguous memory for jumbo frames.
 2066             * Fall back to 5-buffer mode.
2067 */
2068 if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2069 (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2070 MJUMPAGESIZE)) {
2071 /* Close so far opened channels */
2072 for(index2 = 0; index2 < index; index2++) {
2073 xge_hal_channel_close(lldev->ring_channel[index2],
2074 option);
2075 }
2076
2077 /* Destroy DMA tags intended to use for 1 buffer mode */
2078 if(bus_dmamap_destroy(lldev->dma_tag_rx,
2079 lldev->extra_dma_map)) {
2080 xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2081 }
2082 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2083 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2084 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2085 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2086
2087 /* Switch to 5 buffer mode */
2088 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2089 xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2090
2091 /* Restart init */
2092 goto _exit2;
2093 }
2094 else {
2095 XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
2096 status);
2097 }
2098 }
2099 }
2100
2101 if(lldev->enabled_lro) {
2102 SLIST_INIT(&lldev->lro_free);
2103 SLIST_INIT(&lldev->lro_active);
2104 lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2105
2106 for(index = 0; index < lldev->lro_num; index++) {
2107 lro_session = (xge_lro_entry_t *)
2108 xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2109 if(lro_session == NULL) {
2110 lldev->lro_num = index;
2111 break;
2112 }
2113 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2114 }
2115 }
2116
2117 /* Open FIFO (Tx) channel */
2118 status = xge_tx_open(lldev, option);
2119 if(status != XGE_HAL_OK)
2120 XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2121
2122 goto _exit;
2123
2124_exit1:
2125 /*
2126 * Opening Rx channel(s) failed (index is <last ring index - 1>) or
2127 * Initialization of LRO failed (index is XGE_RING_COUNT)
2128 * Opening Tx channel failed (index is XGE_RING_COUNT)
2129 */
2130 for(index2 = 0; index2 < index; index2++)
2131 xge_hal_channel_close(lldev->ring_channel[index2], option);
2132
2133_exit:
2134 return status;
2135}
2136
2137/**
2138 * xge_channel_close
2139 * Close both Tx and Rx channels
2140 *
2141 * @lldev Per-adapter Data
2142 * @option Channel reopen option
2143 *
2144 */
2145void
2146xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2147{
2148 int qindex = 0;
2149
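    /* Pause briefly, presumably to let in-flight DMA and completions drain. */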
2150 DELAY(1000 * 1000);
2151
2152 /* Close FIFO (Tx) channel */
2153 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2154 xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2155
2156 /* Close Ring (Rx) channels */
2157 for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2158 xge_hal_channel_close(lldev->ring_channel[qindex], option);
2159
2160 if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2161 xge_trace(XGE_ERR, "Rx extra map destroy failed");
2162 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2163 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2164 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2165 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2166}
2167
2168/**
2169 * dmamap_cb
2170 * DMA map callback
2171 *
2172 * @arg Parameter passed from dmamap
2173 * @segs Segments
2174 * @nseg Number of segments
2175 * @error Error
2176 */
2177void
2178dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2179{
2180 if(!error) {
2181 *(bus_addr_t *) arg = segs->ds_addr;
2182 }
2183}
2184
2185/**
2186 * xge_reset
2187 * Device Reset
2188 *
2189 * @lldev Per-adapter Data
2190 */
2191void
2192xge_reset(xge_lldev_t *lldev)
2193{
2194 xge_trace(XGE_TRACE, "Reseting the chip");
2195
 2196     /* Proceed only if the device is initialized */
2197 if(lldev->initialized) {
2198 mtx_lock(&lldev->mtx_drv);
2199 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2200 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2201 mtx_unlock(&lldev->mtx_drv);
2202 }
2203
2204 return;
2205}
2206
2207/**
2208 * xge_setmulti
 2209 * Update the adapter's multicast address list
2210 *
2211 * @lldev Per-adapter Data
2212 */
2213void
2214xge_setmulti(xge_lldev_t *lldev)
2215{
2216 struct ifmultiaddr *ifma;
2217 u8 *lladdr;
2218 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2219 struct ifnet *ifnetp = lldev->ifnetp;
2220 int index = 0;
2221 int offset = 1;
2222 int table_size = 47;
2223 xge_hal_status_e status = XGE_HAL_OK;
2224 u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2225
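    /*
     * Multicast entries start at MAC address slot 1 (offset); slot 0
     * holds the station's unicast address.
     */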
2226 if((ifnetp->if_flags & IFF_MULTICAST) && (!lldev->all_multicast)) {
2227 status = xge_hal_device_mcast_enable(hldev);
2228 lldev->all_multicast = 1;
2229 }
2230 else if((ifnetp->if_flags & IFF_MULTICAST) && (lldev->all_multicast)) {
2231 status = xge_hal_device_mcast_disable(hldev);
2232 lldev->all_multicast = 0;
2233 }
2234
2235 if(status != XGE_HAL_OK) {
2236 xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2237 goto _exit;
2238 }
2239
2240 /* Updating address list */
2241 if_maddr_rlock(ifnetp);
2242 index = 0;
2243 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2244 if(ifma->ifma_addr->sa_family != AF_LINK) {
2245 continue;
2246 }
2247 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2248 index += 1;
2249 }
2250 if_maddr_runlock(ifnetp);
2251
2252 if((!lldev->all_multicast) && (index)) {
2253 lldev->macaddr_count = (index + 1);
2254 if(lldev->macaddr_count > table_size) {
2255 goto _exit;
2256 }
2257
2258 /* Clear old addresses */
2259 for(index = 0; index < 48; index++) {
2260 xge_hal_device_macaddr_set(hldev, (offset + index),
2261 initial_addr);
2262 }
2263 }
2264
2265 /* Add new addresses */
2266 if_maddr_rlock(ifnetp);
2267 index = 0;
2268 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2269 if(ifma->ifma_addr->sa_family != AF_LINK) {
2270 continue;
2271 }
2272 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2273 xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2274 index += 1;
2275 }
2276 if_maddr_runlock(ifnetp);
2277
2278_exit:
2279 return;
2280}
2281
2282/**
2283 * xge_enable_promisc
2284 * Enable Promiscuous Mode
2285 *
2286 * @lldev Per-adapter Data
2287 */
2288void
2289xge_enable_promisc(xge_lldev_t *lldev)
2290{
2291 struct ifnet *ifnetp = lldev->ifnetp;
2292 xge_hal_device_t *hldev = lldev->devh;
2293 xge_hal_pci_bar0_t *bar0 = NULL;
2294 u64 val64 = 0;
2295
2296 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2297
2298 if(ifnetp->if_flags & IFF_PROMISC) {
2299 xge_hal_device_promisc_enable(lldev->devh);
2300
2301 /*
2302 * When operating in promiscuous mode, don't strip the VLAN tag
2303 */
2304 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2305 &bar0->rx_pa_cfg);
2306 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2307 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2308 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2309 &bar0->rx_pa_cfg);
2310
2311 xge_trace(XGE_TRACE, "Promiscuous mode ON");
2312 }
2313}
2314
2315/**
2316 * xge_disable_promisc
2317 * Disable Promiscuous Mode
2318 *
2319 * @lldev Per-adapter Data
2320 */
2321void
2322xge_disable_promisc(xge_lldev_t *lldev)
2323{
2324 xge_hal_device_t *hldev = lldev->devh;
2325 xge_hal_pci_bar0_t *bar0 = NULL;
2326 u64 val64 = 0;
2327
2328 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2329
2330 xge_hal_device_promisc_disable(lldev->devh);
2331
2332 /*
2333 * Strip VLAN tag when operating in non-promiscuous mode
2334 */
2335 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2336 &bar0->rx_pa_cfg);
2337 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2338 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2339 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2340 &bar0->rx_pa_cfg);
2341
2342 xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2343}
2344
2345/**
2346 * xge_change_mtu
2347 * Change interface MTU to a requested valid size
2348 *
2349 * @lldev Per-adapter Data
 2350 * @new_mtu    Requested MTU
2351 *
2352 * Returns 0 or Error Number
2353 */
2354int
2355xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2356{
2357 int status = XGE_HAL_OK;
2358
2359 /* Check requested MTU size for boundary */
2360 if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2361 XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2362 }
2363
2364 lldev->mtu = new_mtu;
2365 xge_confirm_changes(lldev, XGE_SET_MTU);
2366
2367_exit:
2368 return status;
2369}
2370
2371/**
2372 * xge_device_stop
2373 *
2374 * Common code for both stop and part of reset. Disables device, interrupts and
2375 * closes channels
2376 *
 2377 * @lldev    Per-adapter Data
2378 * @option Channel normal/reset option
2379 */
2380void
2381xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2382{
2383 xge_hal_device_t *hldev = lldev->devh;
2384 struct ifnet *ifnetp = lldev->ifnetp;
2385 u64 val64 = 0;
2386
2387 mtx_assert((&lldev->mtx_drv), MA_OWNED);
2388
2389 /* If device is not in "Running" state, return */
2390 if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2391 goto _exit;
2392
2393 /* Set appropriate flags */
2394 ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2395
2396 /* Stop timer */
2397 callout_stop(&lldev->timer);
2398
2399 /* Disable interrupts */
2400 xge_hal_device_intr_disable(hldev);
2401
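    /*
     * Drop the driver lock across the queue flush; queued event
     * handlers may need to take it.
     */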
2402 mtx_unlock(&lldev->mtx_drv);
2403 xge_queue_flush(xge_hal_device_queue(lldev->devh));
2404 mtx_lock(&lldev->mtx_drv);
2405
2406 /* Disable HAL device */
2407 if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
2408 xge_trace(XGE_ERR, "Disabling HAL device failed");
2409 xge_hal_device_status(hldev, &val64);
2410 xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
2411 }
2412
2413 /* Close Tx and Rx channels */
2414 xge_channel_close(lldev, option);
2415
2416 /* Reset HAL device */
2417 xge_hal_device_reset(hldev);
2418
2419 xge_os_mdelay(1000);
2420 lldev->initialized = 0;
2421
2422 if_link_state_change(ifnetp, LINK_STATE_DOWN);
2423
2424_exit:
2425 return;
2426}
2427
2428/**
2429 * xge_set_mbuf_cflags
2430 * set checksum flag for the mbuf
2431 *
2432 * @pkt Packet
2433 */
2434void
2435xge_set_mbuf_cflags(mbuf_t pkt)
2436{
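    /*
     * The hardware has already verified the IP and TCP/UDP checksums;
     * flag the mbuf so the stack skips re-validating them.
     */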
2437 pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2438 pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2439 pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2440 pkt->m_pkthdr.csum_data = htons(0xffff);
2441}
2442
2443/**
2444 * xge_lro_flush_sessions
 2445 * Flush all active LRO sessions and send the accumulated packets to the upper layer
2446 *
2447 * @lldev Per-adapter Data
2448 */
2449void
2450xge_lro_flush_sessions(xge_lldev_t *lldev)
2451{
2452 xge_lro_entry_t *lro_session = NULL;
2453
2454 while(!SLIST_EMPTY(&lldev->lro_active)) {
2455 lro_session = SLIST_FIRST(&lldev->lro_active);
2456 SLIST_REMOVE_HEAD(&lldev->lro_active, next);
2457 xge_lro_flush(lldev, lro_session);
2458 }
2459}
2460
2461/**
2462 * xge_lro_flush
2463 * Flush LRO session. Send accumulated LRO packet to upper layer
2464 *
2465 * @lldev Per-adapter Data
2466 * @lro LRO session to be flushed
2467 */
2468static void
2469xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
2470{
2471 struct ip *header_ip;
2472 struct tcphdr *header_tcp;
2473 u32 *ptr;
2474
2475 if(lro_session->append_cnt) {
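        /*
         * Frames were coalesced: patch the IP total length, TCP
         * ack/window and, when present, the echoed timestamp so the
         * headers describe the merged packet.
         */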
2476 header_ip = lro_session->lro_header_ip;
2477 header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
2478 lro_session->m_head->m_pkthdr.len = lro_session->len;
2479 header_tcp = (struct tcphdr *)(header_ip + 1);
2480 header_tcp->th_ack = lro_session->ack_seq;
2481 header_tcp->th_win = lro_session->window;
2482 if(lro_session->timestamp) {
2483 ptr = (u32 *)(header_tcp + 1);
2484 ptr[1] = htonl(lro_session->tsval);
2485 ptr[2] = lro_session->tsecr;
2486 }
2487 }
2488
2489 (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
2490 lro_session->m_head = NULL;
2491 lro_session->timestamp = 0;
2492 lro_session->append_cnt = 0;
2493 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2494}
2495
2496/**
2497 * xge_lro_accumulate
2498 * Accumulate packets to form a large LRO packet based on various conditions
2499 *
2500 * @lldev Per-adapter Data
2501 * @m_head Current Packet
2502 *
2503 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
2504 */
2505static int
2506xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
2507{
2508 struct ether_header *header_ethernet;
2509 struct ip *header_ip;
2510 struct tcphdr *header_tcp;
2511 u32 seq, *ptr;
2512 struct mbuf *buffer_next, *buffer_tail;
2513 xge_lro_entry_t *lro_session;
2514 xge_hal_status_e status = XGE_HAL_FAIL;
2515 int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
2516 int trim;
2517
2518 /* Get Ethernet header */
2519 header_ethernet = mtod(m_head, struct ether_header *);
2520
2521 /* Return if it is not IP packet */
2522 if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
2523 goto _exit;
2524
2525 /* Get IP header */
2526 header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
2527 (struct ip *)(header_ethernet + 1) :
2528 mtod(m_head->m_next, struct ip *);
2529
2530 /* Return if it is not TCP packet */
2531 if(header_ip->ip_p != IPPROTO_TCP)
2532 goto _exit;
2533
2534 /* Return if packet has options */
2535 if((header_ip->ip_hl << 2) != sizeof(*header_ip))
2536 goto _exit;
2537
2538 /* Return if packet is fragmented */
2539 if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
2540 goto _exit;
2541
2542 /* Get TCP header */
2543 header_tcp = (struct tcphdr *)(header_ip + 1);
2544
2545 /* Return if not ACK or PUSH */
2546 if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
2547 goto _exit;
2548
2549 /* Only timestamp option is handled */
2550 tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
2551 tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
2552 ptr = (u32 *)(header_tcp + 1);
2553 if(tcp_options != 0) {
2554 if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
2555 (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
2556 TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
2557 goto _exit;
2558 }
2559 }
2560
2561 /* Total length of packet (IP) */
2562 ip_len = ntohs(header_ip->ip_len);
2563
2564 /* TCP data size */
2565 tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);
2566
2567 /* If the frame is padded, trim it */
2568 tot_len = m_head->m_pkthdr.len;
2569 trim = tot_len - (ip_len + ETHER_HDR_LEN);
2570 if(trim != 0) {
2571 if(trim < 0)
2572 goto _exit;
2573 m_adj(m_head, -trim);
2574 tot_len = m_head->m_pkthdr.len;
2575 }
2576
2577 buffer_next = m_head;
2578 buffer_tail = NULL;
2579 while(buffer_next != NULL) {
2580 buffer_tail = buffer_next;
2581 buffer_next = buffer_tail->m_next;
2582 }
2583
    /* Combined size of the Ethernet, IP and TCP headers (including options) */
2585 hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
2586
2587 /* Get sequence number */
2588 seq = ntohl(header_tcp->th_seq);
2589
2590 SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
2591 if(lro_session->source_port == header_tcp->th_sport &&
2592 lro_session->dest_port == header_tcp->th_dport &&
2593 lro_session->source_ip == header_ip->ip_src.s_addr &&
2594 lro_session->dest_ip == header_ip->ip_dst.s_addr) {
2595
2596 /* Unmatched sequence number, flush LRO session */
2597 if(__predict_false(seq != lro_session->next_seq)) {
2598 SLIST_REMOVE(&lldev->lro_active, lro_session,
2599 xge_lro_entry_t, next);
2600 xge_lro_flush(lldev, lro_session);
2601 goto _exit;
2602 }
2603
2604 /* Handle timestamp option */
2605 if(tcp_options) {
2606 u32 tsval = ntohl(*(ptr + 1));
2607 if(__predict_false(lro_session->tsval > tsval ||
2608 *(ptr + 2) == 0)) {
2609 goto _exit;
2610 }
2611 lro_session->tsval = tsval;
2612 lro_session->tsecr = *(ptr + 2);
2613 }
2614
2615 lro_session->next_seq += tcp_data_len;
2616 lro_session->ack_seq = header_tcp->th_ack;
2617 lro_session->window = header_tcp->th_win;
2618
2619 /* If TCP data/payload is of 0 size, free mbuf */
2620 if(tcp_data_len == 0) {
2621 m_freem(m_head);
2622 status = XGE_HAL_OK;
2623 goto _exit;
2624 }
2625
2626 lro_session->append_cnt++;
2627 lro_session->len += tcp_data_len;
2628
            /* Adjust the mbuf so m_data points at the payload rather than the headers */
2630 m_adj(m_head, hlen);
2631
2632 /* Append this packet to LRO accumulated packet */
2633 lro_session->m_tail->m_next = m_head;
2634 lro_session->m_tail = buffer_tail;
2635
2636 /* Flush if LRO packet is exceeding maximum size */
2637 if(lro_session->len >
2638 (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
2639 SLIST_REMOVE(&lldev->lro_active, lro_session,
2640 xge_lro_entry_t, next);
2641 xge_lro_flush(lldev, lro_session);
2642 }
2643 status = XGE_HAL_OK;
2644 goto _exit;
2645 }
2646 }
2647
2648 if(SLIST_EMPTY(&lldev->lro_free))
2649 goto _exit;
2650
2651 /* Start a new LRO session */
2652 lro_session = SLIST_FIRST(&lldev->lro_free);
2653 SLIST_REMOVE_HEAD(&lldev->lro_free, next);
2654 SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
2655 lro_session->source_port = header_tcp->th_sport;
2656 lro_session->dest_port = header_tcp->th_dport;
2657 lro_session->source_ip = header_ip->ip_src.s_addr;
2658 lro_session->dest_ip = header_ip->ip_dst.s_addr;
2659 lro_session->next_seq = seq + tcp_data_len;
2660 lro_session->mss = tcp_data_len;
2661 lro_session->ack_seq = header_tcp->th_ack;
2662 lro_session->window = header_tcp->th_win;
2663
2664 lro_session->lro_header_ip = header_ip;
2665
2666 /* Handle timestamp option */
2667 if(tcp_options) {
2668 lro_session->timestamp = 1;
2669 lro_session->tsval = ntohl(*(ptr + 1));
2670 lro_session->tsecr = *(ptr + 2);
2671 }
2672
2673 lro_session->len = tot_len;
2674 lro_session->m_head = m_head;
2675 lro_session->m_tail = buffer_tail;
2676 status = XGE_HAL_OK;
2677
2678_exit:
2679 return status;
2680}
2681
2682/**
2683 * xge_accumulate_large_rx
 * Try to accumulate the packet into an LRO session; on failure, pass it up the stack immediately
2685 *
2686 * @lldev Per-adapter Data
2687 * @pkt Current packet
2688 * @pkt_length Packet Length
2689 * @rxd_priv Rx Descriptor Private Data
2690 */
2691void
2692xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
2693 xge_rx_priv_t *rxd_priv)
2694{
2695 if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
2696 bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
2697 BUS_DMASYNC_POSTREAD);
2698 (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
2699 }
2700}
2701
2702/**
2703 * xge_rx_compl
2704 * If the interrupt is due to received frame (Rx completion), send it up
2705 *
2706 * @channelh Ring Channel Handle
2707 * @dtr Current Descriptor
2708 * @t_code Transfer Code indicating success or error
2709 * @userdata Per-adapter Data
2710 *
2711 * Returns XGE_HAL_OK or HAL error enums
2712 */
2713xge_hal_status_e
2714xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
2715 void *userdata)
2716{
2717 struct ifnet *ifnetp;
2718 xge_rx_priv_t *rxd_priv = NULL;
2719 mbuf_t mbuf_up = NULL;
2720 xge_hal_status_e status = XGE_HAL_OK;
2721 xge_hal_dtr_info_t ext_info;
2722 int index;
2723 u16 vlan_tag;
2724
2725 /*get the user data portion*/
2726 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2727 if(!lldev) {
2728 XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
2729 }
2730
2731 XGE_DRV_STATS(rx_completions);
2732
2733 /* get the interface pointer */
2734 ifnetp = lldev->ifnetp;
2735
2736 do {
2737 XGE_DRV_STATS(rx_desc_compl);
2738
2739 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
2740 status = XGE_HAL_FAIL;
2741 goto _exit;
2742 }
2743
2744 if(t_code) {
2745 xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
2746 XGE_DRV_STATS(rx_tcode);
2747 xge_hal_device_handle_tcode(channelh, dtr, t_code);
2748 xge_hal_ring_dtr_post(channelh,dtr);
2749 continue;
2750 }
2751
2752 /* Get the private data for this descriptor*/
2753 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
2754 dtr);
2755 if(!rxd_priv) {
2756 XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
2757 XGE_HAL_FAIL);
2758 }
2759
2760 /*
2761 * Prepare one buffer to send it to upper layer -- since the upper
2762 * layer frees the buffer do not use rxd_priv->buffer. Meanwhile
2763 * prepare a new buffer, do mapping, use it in the current
2764 * descriptor and post descriptor back to ring channel
2765 */
2766 mbuf_up = rxd_priv->bufferArray[0];
2767
2768 /* Gets details of mbuf i.e., packet length */
2769 xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);
2770
2771 status =
2772 (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
2773 xge_get_buf(dtr, rxd_priv, lldev, 0) :
2774 xge_get_buf_3b_5b(dtr, rxd_priv, lldev);
2775
2776 if(status != XGE_HAL_OK) {
2777 xge_trace(XGE_ERR, "No memory");
2778 XGE_DRV_STATS(rx_no_buf);
2779
2780 /*
2781 * Unable to allocate buffer. Instead of discarding, post
2782 * descriptor back to channel for future processing of same
2783 * packet.
2784 */
2785 xge_hal_ring_dtr_post(channelh, dtr);
2786 continue;
2787 }
2788
2789 /* Get the extended information */
2790 xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
2791
2792 /*
2793 * As we have allocated a new mbuf for this descriptor, post this
2794 * descriptor with new mbuf back to ring channel
2795 */
2796 vlan_tag = ext_info.vlan;
2797 xge_hal_ring_dtr_post(channelh, dtr);
2798 if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
2799 (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
2800 (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
2801 (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {
2802
2803 /* set Checksum Flag */
2804 xge_set_mbuf_cflags(mbuf_up);
2805
2806 if(lldev->enabled_lro) {
2807 xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
2808 rxd_priv);
2809 }
2810 else {
2811 /* Post-Read sync for buffers*/
2812 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
                    bus_dmamap_sync(lldev->dma_tag_rx,
                        rxd_priv->dmainfo[index].dma_map,
                        BUS_DMASYNC_POSTREAD);
2815 }
2816 (*ifnetp->if_input)(ifnetp, mbuf_up);
2817 }
2818 }
2819 else {
2820 /*
2821 * Packet with erroneous checksum , let the upper layer deal
2822 * with it
2823 */
2824
2825 /* Post-Read sync for buffers*/
2826 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
                bus_dmamap_sync(lldev->dma_tag_rx,
                    rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
2829 }
2830
2831 if(vlan_tag) {
2832 mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
2833 mbuf_up->m_flags |= M_VLANTAG;
2834 }
2835
2836 if(lldev->enabled_lro)
2837 xge_lro_flush_sessions(lldev);
2838
2839 (*ifnetp->if_input)(ifnetp, mbuf_up);
2840 }
2841 } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
2842 == XGE_HAL_OK);
2843
2844 if(lldev->enabled_lro)
2845 xge_lro_flush_sessions(lldev);
2846
2847_exit:
2848 return status;
2849}
2850
2851/**
2852 * xge_ring_dtr_get
2853 * Get descriptors
2854 *
2855 * @mbuf_up Packet to send up
2856 * @channelh Ring Channel Handle
2857 * @dtr Descriptor
2858 * @lldev Per-adapter Data
2859 * @rxd_priv Rx Descriptor Private Data
2860 *
2861 * Returns XGE_HAL_OK or HAL error enums
2862 */
2863int
2864xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
2865 xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
2866{
2867 mbuf_t m;
    int pkt_length[5] = {0}, pkt_len = 0;
2869 dma_addr_t dma_data[5];
2870 int index;
2871
2872 m = mbuf_up;
2873 pkt_len = 0;
2874
2875 if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
2876 xge_os_memzero(pkt_length, sizeof(pkt_length));
2877
2878 /*
2879 * Retrieve data of interest from the completed descriptor -- This
2880 * returns the packet length
2881 */
2882 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
2883 xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
2884 }
2885 else {
2886 xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
2887 }
2888
2889 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
2890 m->m_len = pkt_length[index];
2891
2892 if(index < (lldev->rxd_mbuf_cnt-1)) {
2893 m->m_next = rxd_priv->bufferArray[index + 1];
2894 m = m->m_next;
2895 }
2896 else {
2897 m->m_next = NULL;
2898 }
2899 pkt_len+=pkt_length[index];
2900 }
2901
        /*
         * 2-buffer mode is the exceptional case: the payload lands in the
         * third buffer while the second buffer carries no data, so account
         * for the third buffer's length here.
         */
2906 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
2907 m->m_len = pkt_length[2];
2908 pkt_len+=pkt_length[2];
2909 }
2910
2911 /*
2912 * Update length of newly created buffer to be sent up with packet
2913 * length
2914 */
2915 mbuf_up->m_pkthdr.len = pkt_len;
2916 }
2917 else {
2918 /*
2919 * Retrieve data of interest from the completed descriptor -- This
2920 * returns the packet length
2921 */
2922 xge_hal_ring_dtr_1b_get(channelh, dtr,&dma_data[0], &pkt_length[0]);
2923
2924 /*
2925 * Update length of newly created buffer to be sent up with packet
2926 * length
2927 */
2928 mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
2929 }
2930
2931 return XGE_HAL_OK;
2932}
2933
2934/**
2935 * xge_flush_txds
2936 * Flush Tx descriptors
2937 *
2938 * @channelh Channel handle
2939 */
2940static void inline
2941xge_flush_txds(xge_hal_channel_h channelh)
2942{
2943 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
2944 xge_hal_dtr_h tx_dtr;
2945 xge_tx_priv_t *tx_priv;
2946 u8 t_code;
2947
2948 while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
2949 == XGE_HAL_OK) {
2950 XGE_DRV_STATS(tx_desc_compl);
2951 if(t_code) {
2952 xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
2953 XGE_DRV_STATS(tx_tcode);
2954 xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
2955 }
2956
2957 tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
2958 bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
2959 m_freem(tx_priv->buffer);
2960 tx_priv->buffer = NULL;
2961 xge_hal_fifo_dtr_free(channelh, tx_dtr);
2962 }
2963}
2964
2965/**
2966 * xge_send
2967 * Transmit function
2968 *
2969 * @ifnetp Interface Handle
2970 */
2971void
2972xge_send(struct ifnet *ifnetp)
2973{
2974 int qindex = 0;
2975 xge_lldev_t *lldev = ifnetp->if_softc;
2976
2977 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
2978 if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
2979 XGE_DRV_STATS(tx_lock_fail);
2980 break;
2981 }
2982 xge_send_locked(ifnetp, qindex);
2983 mtx_unlock(&lldev->mtx_tx[qindex]);
2984 }
2985}
2986
2987static void inline
2988xge_send_locked(struct ifnet *ifnetp, int qindex)
2989{
2990 xge_hal_dtr_h dtr;
2991 static bus_dma_segment_t segs[XGE_MAX_SEGS];
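    /*
     * Note: `segs` is static and therefore shared by all Tx queues.
     * Callers serialize per queue via mtx_tx[qindex], which presumably
     * keeps this safe only while one queue transmits at a time;
     * concurrent transmits on different queues would race on this array.
     */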
2992 xge_hal_status_e status;
2993 unsigned int max_fragments;
2994 xge_lldev_t *lldev = ifnetp->if_softc;
2995 xge_hal_channel_h channelh = lldev->fifo_channel[qindex];
2996 mbuf_t m_head = NULL;
2997 mbuf_t m_buf = NULL;
2998 xge_tx_priv_t *ll_tx_priv = NULL;
2999 register unsigned int count = 0;
3000 unsigned int nsegs = 0;
3001 u16 vlan_tag;
3002
3003 max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;
3004
3005 /* If device is not initialized, return */
3006 if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
3007 return;
3008
3009 XGE_DRV_STATS(tx_calls);
3010
3011 /*
3012 * This loop will be executed for each packet in the kernel maintained
3013 * queue -- each packet can be with fragments as an mbuf chain
3014 */
3015 for(;;) {
3016 IF_DEQUEUE(&ifnetp->if_snd, m_head);
3017 if (m_head == NULL) {
3018 ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
3019 return;
3020 }
3021
        /* Count the non-empty fragments of this packet afresh */
        count = 0;
        for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
            if(m_buf->m_len) count += 1;
        }
3025
3026 if(count >= max_fragments) {
3027 m_buf = m_defrag(m_head, M_DONTWAIT);
3028 if(m_buf != NULL) m_head = m_buf;
3029 XGE_DRV_STATS(tx_defrag);
3030 }
3031
3032 /* Reserve descriptors */
3033 status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
3034 if(status != XGE_HAL_OK) {
3035 XGE_DRV_STATS(tx_no_txd);
3036 xge_flush_txds(channelh);
3037 break;
3038 }
3039
3040 vlan_tag =
3041 (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
3042 xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);
3043
3044 /* Update Tx private structure for this descriptor */
3045 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3046 ll_tx_priv->buffer = m_head;
3047
3048 /*
3049 * Do mapping -- Required DMA tag has been created in xge_init
3050 * function and DMA maps have already been created in the
3051 * xgell_tx_replenish function.
3052 * Returns number of segments through nsegs
3053 */
3054 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
3055 ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
            xge_trace(XGE_TRACE, "DMA map load failed");
            XGE_DRV_STATS(tx_map_fail);
            /* Return the reserved descriptor to the free pool before bailing */
            xge_hal_fifo_dtr_free(channelh, dtr);
            break;
3059 }
3060
3061 if(lldev->driver_stats.tx_max_frags < nsegs)
3062 lldev->driver_stats.tx_max_frags = nsegs;
3063
3064 /* Set descriptor buffer for header and each fragment/segment */
3065 count = 0;
3066 do {
3067 xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
3068 (dma_addr_t)htole64(segs[count].ds_addr),
3069 segs[count].ds_len);
3070 count++;
3071 } while(count < nsegs);
3072
3073 /* Pre-write Sync of mapping */
3074 bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
3075 BUS_DMASYNC_PREWRITE);
3076
3077 if((lldev->enabled_tso) &&
3078 (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3079 XGE_DRV_STATS(tx_tso);
3080 xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
3081 }
3082
3083 /* Checksum */
3084 if(ifnetp->if_hwassist > 0) {
3085 xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
3086 | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
3087 }
3088
3089 /* Post descriptor to FIFO channel */
3090 xge_hal_fifo_dtr_post(channelh, dtr);
3091 XGE_DRV_STATS(tx_posted);
3092
        /* Send a copy of the packet to any BPF (Berkeley Packet Filter)
         * listener so that tools like tcpdump can observe it */
3095 ETHER_BPF_MTAP(ifnetp, m_head);
3096 }
3097
3098 /* Prepend the packet back to queue */
3099 IF_PREPEND(&ifnetp->if_snd, m_head);
3100 ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
3101
3102 xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
3103 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
3104 XGE_DRV_STATS(tx_again);
3105}
3106
3107/**
3108 * xge_get_buf
3109 * Allocates new mbufs to be placed into descriptors
3110 *
3111 * @dtrh Descriptor Handle
3112 * @rxd_priv Rx Descriptor Private Data
3113 * @lldev Per-adapter Data
3114 * @index Buffer Index (if multi-buffer mode)
3115 *
3116 * Returns XGE_HAL_OK or HAL error enums
3117 */
3118int
3119xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3120 xge_lldev_t *lldev, int index)
3121{
3122 register mbuf_t mp = NULL;
3123 struct ifnet *ifnetp = lldev->ifnetp;
3124 int status = XGE_HAL_OK;
3125 int buffer_size = 0, cluster_size = 0, count;
3126 bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map;
3127 bus_dma_segment_t segs[3];
3128
3129 buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
3130 ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
3131 lldev->rxd_mbuf_len[index];
3132
3133 if(buffer_size <= MCLBYTES) {
3134 cluster_size = MCLBYTES;
3135 mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3136 }
3137 else {
3138 cluster_size = MJUMPAGESIZE;
3139 if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
3140 (buffer_size > MJUMPAGESIZE)) {
3141 cluster_size = MJUM9BYTES;
3142 }
3143 mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, cluster_size);
3144 }
3145 if(!mp) {
3146 xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
3147 status = XGE_HAL_FAIL;
3148 goto getbuf_out;
3149 }
3150
3151 /* Update mbuf's length, packet length and receive interface */
3152 mp->m_len = mp->m_pkthdr.len = buffer_size;
3153 mp->m_pkthdr.rcvif = ifnetp;
3154
3155 /* Load DMA map */
3156 if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
3157 mp, segs, &count, BUS_DMA_NOWAIT)) {
3158 XGE_DRV_STATS(rx_map_fail);
3159 m_freem(mp);
3160 XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
3161 }
3162
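    /*
     * Spare-map trick: the replacement mbuf was loaded on extra_dma_map
     * above. That map now becomes this descriptor's map, while the
     * descriptor's old map (still covering the mbuf being handed up the
     * stack) becomes the new spare once it is synced and unloaded below.
     */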
3163 /* Update descriptor private data */
3164 rxd_priv->bufferArray[index] = mp;
3165 rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
3166 rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map;
3167 lldev->extra_dma_map = map;
3168
    /* Post-read sync of the old map before it is unloaded */
3170 bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);
3171
3172 /* Unload DMA map of mbuf in current descriptor */
3173 bus_dmamap_unload(lldev->dma_tag_rx, map);
3174
3175 /* Set descriptor buffer */
3176 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3177 xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
3178 cluster_size);
3179 }
3180
3181getbuf_out:
3182 return status;
3183}
3184
3185/**
3186 * xge_get_buf_3b_5b
3187 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
3188 *
3189 * @dtrh Descriptor Handle
3190 * @rxd_priv Rx Descriptor Private Data
3191 * @lldev Per-adapter Data
3192 *
3193 * Returns XGE_HAL_OK or HAL error enums
3194 */
3195int
3196xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
3197 xge_lldev_t *lldev)
3198{
3199 bus_addr_t dma_pointers[5];
3200 int dma_sizes[5];
3201 int status = XGE_HAL_OK, index;
3202 int newindex = 0;
3203
3204 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3205 status = xge_get_buf(dtrh, rxd_priv, lldev, index);
3206 if(status != XGE_HAL_OK) {
3207 for(newindex = 0; newindex < index; newindex++) {
3208 m_freem(rxd_priv->bufferArray[newindex]);
3209 }
3210 XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
3211 }
3212 }
3213
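    /*
     * Fill one pointer/size pair per buffer of the current mode. Slots
     * with no data are (by assumption about the HAL's requirements)
     * pointed at the previous buffer with a token 1-byte length, since
     * every pointer handed to the hardware must be valid.
     */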
3214 for(index = 0; index < lldev->buffer_mode; index++) {
3215 if(lldev->rxd_mbuf_len[index] != 0) {
3216 dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
3217 dma_sizes[index] = lldev->rxd_mbuf_len[index];
3218 }
3219 else {
3220 dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
3221 dma_sizes[index] = 1;
3222 }
3223 }
3224
3225 /* Assigning second buffer to third pointer in 2 buffer mode */
3226 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
3227 dma_pointers[2] = dma_pointers[1];
3228 dma_sizes[2] = dma_sizes[1];
3229 dma_sizes[1] = 1;
3230 }
3231
3232 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
3233 xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
3234 }
3235 else {
3236 xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
3237 }
3238
3239_exit:
3240 return status;
3241}
3242
3243/**
3244 * xge_tx_compl
3245 * If the interrupt is due to Tx completion, free the sent buffer
3246 *
3247 * @channelh Channel Handle
3248 * @dtr Descriptor
3249 * @t_code Transfer Code indicating success or error
3250 * @userdata Per-adapter Data
3251 *
3252 * Returns XGE_HAL_OK or HAL error enum
3253 */
3254xge_hal_status_e
3255xge_tx_compl(xge_hal_channel_h channelh,
3256 xge_hal_dtr_h dtr, u8 t_code, void *userdata)
3257{
3258 xge_tx_priv_t *ll_tx_priv = NULL;
3259 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3260 struct ifnet *ifnetp = lldev->ifnetp;
3261 mbuf_t m_buffer = NULL;
3262 int qindex = xge_hal_channel_id(channelh);
3263
3264 mtx_lock(&lldev->mtx_tx[qindex]);
3265
3266 XGE_DRV_STATS(tx_completions);
3267
3268 /*
3269 * For each completed descriptor: Get private structure, free buffer,
3270 * do unmapping, and free descriptor
3271 */
3272 do {
3273 XGE_DRV_STATS(tx_desc_compl);
3274
3275 if(t_code) {
3276 XGE_DRV_STATS(tx_tcode);
3277 xge_trace(XGE_TRACE, "t_code %d", t_code);
3278 xge_hal_device_handle_tcode(channelh, dtr, t_code);
3279 }
3280
3281 ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3282 m_buffer = ll_tx_priv->buffer;
3283 bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3284 m_freem(m_buffer);
3285 ll_tx_priv->buffer = NULL;
3286 xge_hal_fifo_dtr_free(channelh, dtr);
3287 } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
3288 == XGE_HAL_OK);
3289 xge_send_locked(ifnetp, qindex);
3290 ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3291
3292 mtx_unlock(&lldev->mtx_tx[qindex]);
3293
3294 return XGE_HAL_OK;
3295}
3296
3297/**
3298 * xge_tx_initial_replenish
3299 * Initially allocate buffers and set them into descriptors for later use
3300 *
3301 * @channelh Tx Channel Handle
3302 * @dtrh Descriptor Handle
 * @index Descriptor Index
3304 * @userdata Per-adapter Data
3305 * @reopen Channel open/reopen option
3306 *
3307 * Returns XGE_HAL_OK or HAL error enums
3308 */
3309xge_hal_status_e
3310xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3311 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3312{
3313 xge_tx_priv_t *txd_priv = NULL;
3314 int status = XGE_HAL_OK;
3315
3316 /* Get the user data portion from channel handle */
3317 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3318 if(lldev == NULL) {
3319 XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
3320 XGE_HAL_FAIL);
3321 }
3322
3323 /* Get the private data */
3324 txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
3325 if(txd_priv == NULL) {
3326 XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
3327 XGE_HAL_FAIL);
3328 }
3329
3330 /* Create DMA map for this descriptor */
3331 if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
3332 &txd_priv->dma_map)) {
3333 XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
3334 txinit_out, XGE_HAL_FAIL);
3335 }
3336
3337txinit_out:
3338 return status;
3339}
3340
3341/**
3342 * xge_rx_initial_replenish
3343 * Initially allocate buffers and set them into descriptors for later use
3344 *
 * @channelh Rx Channel Handle
3346 * @dtrh Descriptor Handle
3347 * @index Ring Index
3348 * @userdata Per-adapter Data
3349 * @reopen Channel open/reopen option
3350 *
3351 * Returns XGE_HAL_OK or HAL error enums
3352 */
3353xge_hal_status_e
3354xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3355 int index, void *userdata, xge_hal_channel_reopen_e reopen)
3356{
3357 xge_rx_priv_t *rxd_priv = NULL;
3358 int status = XGE_HAL_OK;
3359 int index1 = 0, index2 = 0;
3360
3361 /* Get the user data portion from channel handle */
3362 xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
3363 if(lldev == NULL) {
3364 XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
3365 XGE_HAL_FAIL);
3366 }
3367
3368 /* Get the private data */
3369 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3370 if(rxd_priv == NULL) {
3371 XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
3372 XGE_HAL_FAIL);
3373 }
3374
3375 rxd_priv->bufferArray = xge_os_malloc(NULL,
3376 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3377
3378 if(rxd_priv->bufferArray == NULL) {
3379 XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
3380 XGE_HAL_FAIL);
3381 }
3382
3383 if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
3384 /* Create DMA map for these descriptors*/
3385 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT,
3386 &rxd_priv->dmainfo[0].dma_map)) {
3387 XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
3388 rxinit_err_out, XGE_HAL_FAIL);
3389 }
3390 /* Get a buffer, attach it to this descriptor */
3391 status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
3392 }
3393 else {
3394 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3395 /* Create DMA map for this descriptor */
3396 if(bus_dmamap_create(lldev->dma_tag_rx , BUS_DMA_NOWAIT ,
3397 &rxd_priv->dmainfo[index1].dma_map)) {
3398 for(index2 = index1 - 1; index2 >= 0; index2--) {
3399 bus_dmamap_destroy(lldev->dma_tag_rx,
3400 rxd_priv->dmainfo[index2].dma_map);
3401 }
3402 XGE_EXIT_ON_ERR(
3403 "Jumbo DMA map creation for Rx descriptor failed",
3404 rxinit_err_out, XGE_HAL_FAIL);
3405 }
3406 }
3407 status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
3408 }
3409
3410 if(status != XGE_HAL_OK) {
3411 for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
3412 bus_dmamap_destroy(lldev->dma_tag_rx,
3413 rxd_priv->dmainfo[index1].dma_map);
3414 }
3415 goto rxinit_err_out;
3416 }
3417 else {
3418 goto rxinit_out;
3419 }
3420
3421rxinit_err_out:
3422 xge_os_free(NULL, rxd_priv->bufferArray,
3423 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3424rxinit_out:
3425 return status;
3426}
3427
3428/**
3429 * xge_rx_term
3430 * During unload terminate and free all descriptors
3431 *
3432 * @channelh Rx Channel Handle
3433 * @dtrh Rx Descriptor Handle
3434 * @state Descriptor State
3435 * @userdata Per-adapter Data
3436 * @reopen Channel open/reopen option
3437 */
3438void
3439xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
3440 xge_hal_dtr_state_e state, void *userdata,
3441 xge_hal_channel_reopen_e reopen)
3442{
3443 xge_rx_priv_t *rxd_priv = NULL;
3444 xge_lldev_t *lldev = NULL;
3445 int index = 0;
3446
3447 /* Descriptor state is not "Posted" */
3448 if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;
3449
3450 /* Get the user data portion */
3451 lldev = xge_hal_channel_userdata(channelh);
3452
3453 /* Get the private data */
3454 rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
3455
3456 for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
3457 if(rxd_priv->dmainfo[index].dma_map != NULL) {
3458 bus_dmamap_sync(lldev->dma_tag_rx,
3459 rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
3460 bus_dmamap_unload(lldev->dma_tag_rx,
3461 rxd_priv->dmainfo[index].dma_map);
3462 if(rxd_priv->bufferArray[index] != NULL)
3463 m_free(rxd_priv->bufferArray[index]);
3464 bus_dmamap_destroy(lldev->dma_tag_rx,
3465 rxd_priv->dmainfo[index].dma_map);
3466 }
3467 }
3468 xge_os_free(NULL, rxd_priv->bufferArray,
3469 (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
3470
3471 /* Free the descriptor */
3472 xge_hal_ring_dtr_free(channelh, dtrh);
3473
3474rxterm_out:
3475 return;
3476}
3477
3478/**
3479 * xge_tx_term
3480 * During unload terminate and free all descriptors
3481 *
 * @channelh Tx Channel Handle
 * @dtr Tx Descriptor Handle
3484 * @state Descriptor State
3485 * @userdata Per-adapter Data
3486 * @reopen Channel open/reopen option
3487 */
3488void
3489xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
3490 xge_hal_dtr_state_e state, void *userdata,
3491 xge_hal_channel_reopen_e reopen)
3492{
3493 xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
3494 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
3495
3496 /* Destroy DMA map */
3497 bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
3498}
3499
3500/**
3501 * xge_methods
3502 *
3503 * FreeBSD device interface entry points
3504 */
3505static device_method_t xge_methods[] = {
3506 DEVMETHOD(device_probe, xge_probe),
3507 DEVMETHOD(device_attach, xge_attach),
3508 DEVMETHOD(device_detach, xge_detach),
3509 DEVMETHOD(device_shutdown, xge_shutdown),
3510 {0, 0}
3511};
3512
3513static driver_t xge_driver = {
3514 "nxge",
3515 xge_methods,
3516 sizeof(xge_lldev_t),
3517};
3518static devclass_t xge_devclass;
3519DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);
3520
1194
1195 ifnetp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
1196 IFCAP_HWCSUM;
1197 if(lldev->enabled_tso)
1198 ifnetp->if_capabilities |= IFCAP_TSO4;
1199 if(lldev->enabled_lro)
1200 ifnetp->if_capabilities |= IFCAP_LRO;
1201
1202 ifnetp->if_capenable = ifnetp->if_capabilities;
1203
1204 /* Attach the interface */
1205 ether_ifattach(ifnetp, mcaddr);
1206
1207ifsetup_out:
1208 return status;
1209}
1210
1211/**
1212 * xge_callback_link_up
1213 * Callback for Link-up indication from HAL
1214 *
1215 * @userdata Per-adapter data
1216 */
1217void
1218xge_callback_link_up(void *userdata)
1219{
1220 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1221 struct ifnet *ifnetp = lldev->ifnetp;
1222
    ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1224 if_link_state_change(ifnetp, LINK_STATE_UP);
1225}
1226
1227/**
1228 * xge_callback_link_down
1229 * Callback for Link-down indication from HAL
1230 *
1231 * @userdata Per-adapter data
1232 */
1233void
1234xge_callback_link_down(void *userdata)
1235{
1236 xge_lldev_t *lldev = (xge_lldev_t *)userdata;
1237 struct ifnet *ifnetp = lldev->ifnetp;
1238
    ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;
1240 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1241}
1242
1243/**
1244 * xge_callback_crit_err
1245 * Callback for Critical error indication from HAL
1246 *
1247 * @userdata Per-adapter data
1248 * @type Event type (Enumerated hardware error)
1249 * @serr_data Hardware status
1250 */
1251void
1252xge_callback_crit_err(void *userdata, xge_hal_event_e type, u64 serr_data)
1253{
1254 xge_trace(XGE_ERR, "Critical Error");
1255 xge_reset(userdata);
1256}
1257
1258/**
1259 * xge_callback_event
1260 * Callback from HAL indicating that some event has been queued
1261 *
1262 * @item Queued event item
1263 */
1264void
1265xge_callback_event(xge_queue_item_t *item)
1266{
1267 xge_lldev_t *lldev = NULL;
1268 xge_hal_device_t *hldev = NULL;
1269 struct ifnet *ifnetp = NULL;
1270
1271 hldev = item->context;
1272 lldev = xge_hal_device_private(hldev);
1273 ifnetp = lldev->ifnetp;
1274
1275 switch(item->event_type) {
1276 case XGE_LL_EVENT_TRY_XMIT_AGAIN:
1277 if(lldev->initialized) {
1278 if(xge_hal_channel_dtr_count(lldev->fifo_channel[0]) > 0) {
                ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1280 }
1281 else {
1282 xge_queue_produce_context(
1283 xge_hal_device_queue(lldev->devh),
1284 XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
1285 }
1286 }
1287 break;
1288
1289 case XGE_LL_EVENT_DEVICE_RESETTING:
1290 xge_reset(item->context);
1291 break;
1292
1293 default:
1294 break;
1295 }
1296}
1297
1298/**
1299 * xge_ifmedia_change
1300 * Media change driver callback
1301 *
1302 * @ifnetp Interface Handle
1303 *
 * Returns 0 if the media type is Ethernet, EINVAL otherwise
1305 */
1306int
1307xge_ifmedia_change(struct ifnet *ifnetp)
1308{
1309 xge_lldev_t *lldev = ifnetp->if_softc;
1310 struct ifmedia *ifmediap = &lldev->media;
1311
1312 return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER) ? EINVAL:0;
1313}
1314
1315/**
1316 * xge_ifmedia_status
1317 * Media status driver callback
1318 *
1319 * @ifnetp Interface Handle
1320 * @ifmr Interface Media Settings
1321 */
1322void
1323xge_ifmedia_status(struct ifnet *ifnetp, struct ifmediareq *ifmr)
1324{
1325 xge_hal_status_e status;
1326 u64 regvalue;
1327 xge_lldev_t *lldev = ifnetp->if_softc;
1328 xge_hal_device_t *hldev = lldev->devh;
1329
1330 ifmr->ifm_status = IFM_AVALID;
1331 ifmr->ifm_active = IFM_ETHER;
1332
1333 status = xge_hal_mgmt_reg_read(hldev, 0,
1334 xge_offsetof(xge_hal_pci_bar0_t, adapter_status), &regvalue);
1335 if(status != XGE_HAL_OK) {
1336 xge_trace(XGE_TRACE, "Getting adapter status failed");
1337 goto _exit;
1338 }
1339
1340 if((regvalue & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
1341 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) == 0) {
1342 ifmr->ifm_status |= IFM_ACTIVE;
1343 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1344 if_link_state_change(ifnetp, LINK_STATE_UP);
1345 }
1346 else {
1347 if_link_state_change(ifnetp, LINK_STATE_DOWN);
1348 }
1349_exit:
1350 return;
1351}
1352
1353/**
1354 * xge_ioctl_stats
1355 * IOCTL to get statistics
1356 *
1357 * @lldev Per-adapter data
1358 * @ifreqp Interface request
1359 */
1360int
1361xge_ioctl_stats(xge_lldev_t *lldev, struct ifreq *ifreqp)
1362{
1363 xge_hal_status_e status = XGE_HAL_OK;
1364 char *data = (char *)ifreqp->ifr_data;
1365 void *info = NULL;
1366 int retValue = EINVAL;
1367
1368 switch(*data) {
1369 case XGE_QUERY_STATS:
1370 mtx_lock(&lldev->mtx_drv);
1371 status = xge_hal_stats_hw(lldev->devh,
1372 (xge_hal_stats_hw_info_t **)&info);
1373 mtx_unlock(&lldev->mtx_drv);
1374 if(status == XGE_HAL_OK) {
1375 if(copyout(info, ifreqp->ifr_data,
1376 sizeof(xge_hal_stats_hw_info_t)) == 0)
1377 retValue = 0;
1378 }
1379 else {
1380 xge_trace(XGE_ERR, "Getting statistics failed (Status: %d)",
1381 status);
1382 }
1383 break;
1384
1385 case XGE_QUERY_PCICONF:
1386 info = xge_os_malloc(NULL, sizeof(xge_hal_pci_config_t));
1387 if(info != NULL) {
1388 mtx_lock(&lldev->mtx_drv);
1389 status = xge_hal_mgmt_pci_config(lldev->devh, info,
1390 sizeof(xge_hal_pci_config_t));
1391 mtx_unlock(&lldev->mtx_drv);
1392 if(status == XGE_HAL_OK) {
1393 if(copyout(info, ifreqp->ifr_data,
1394 sizeof(xge_hal_pci_config_t)) == 0)
1395 retValue = 0;
1396 }
1397 else {
1398 xge_trace(XGE_ERR,
1399 "Getting PCI configuration failed (%d)", status);
1400 }
1401 xge_os_free(NULL, info, sizeof(xge_hal_pci_config_t));
1402 }
1403 break;
1404
1405 case XGE_QUERY_DEVSTATS:
1406 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_device_info_t));
1407 if(info != NULL) {
1408 mtx_lock(&lldev->mtx_drv);
1409 status =xge_hal_mgmt_device_stats(lldev->devh, info,
1410 sizeof(xge_hal_stats_device_info_t));
1411 mtx_unlock(&lldev->mtx_drv);
1412 if(status == XGE_HAL_OK) {
1413 if(copyout(info, ifreqp->ifr_data,
1414 sizeof(xge_hal_stats_device_info_t)) == 0)
1415 retValue = 0;
1416 }
1417 else {
1418 xge_trace(XGE_ERR, "Getting device info failed (%d)",
1419 status);
1420 }
1421 xge_os_free(NULL, info,
1422 sizeof(xge_hal_stats_device_info_t));
1423 }
1424 break;
1425
1426 case XGE_QUERY_SWSTATS:
1427 info = xge_os_malloc(NULL, sizeof(xge_hal_stats_sw_err_t));
1428 if(info != NULL) {
1429 mtx_lock(&lldev->mtx_drv);
1430 status =xge_hal_mgmt_sw_stats(lldev->devh, info,
1431 sizeof(xge_hal_stats_sw_err_t));
1432 mtx_unlock(&lldev->mtx_drv);
1433 if(status == XGE_HAL_OK) {
1434 if(copyout(info, ifreqp->ifr_data,
1435 sizeof(xge_hal_stats_sw_err_t)) == 0)
1436 retValue = 0;
1437 }
1438 else {
1439 xge_trace(XGE_ERR,
1440 "Getting tcode statistics failed (%d)", status);
1441 }
1442 xge_os_free(NULL, info, sizeof(xge_hal_stats_sw_err_t));
1443 }
1444 break;
1445
1446 case XGE_QUERY_DRIVERSTATS:
1447 if(copyout(&lldev->driver_stats, ifreqp->ifr_data,
1448 sizeof(xge_driver_stats_t)) == 0) {
1449 retValue = 0;
1450 }
1451 else {
                xge_trace(XGE_ERR, "Copyout of driver statistics failed");
1454 }
1455 break;
1456
1457 case XGE_READ_VERSION:
1458 info = xge_os_malloc(NULL, XGE_BUFFER_SIZE);
            if(info != NULL) {
1460 strcpy(info, XGE_DRIVER_VERSION);
1461 if(copyout(info, ifreqp->ifr_data, XGE_BUFFER_SIZE) == 0)
1462 retValue = 0;
1463 xge_os_free(NULL, info, XGE_BUFFER_SIZE);
1464 }
1465 break;
1466
1467 case XGE_QUERY_DEVCONF:
1468 info = xge_os_malloc(NULL, sizeof(xge_hal_device_config_t));
1469 if(info != NULL) {
1470 mtx_lock(&lldev->mtx_drv);
1471 status = xge_hal_mgmt_device_config(lldev->devh, info,
1472 sizeof(xge_hal_device_config_t));
1473 mtx_unlock(&lldev->mtx_drv);
1474 if(status == XGE_HAL_OK) {
1475 if(copyout(info, ifreqp->ifr_data,
1476 sizeof(xge_hal_device_config_t)) == 0)
1477 retValue = 0;
1478 }
1479 else {
1480 xge_trace(XGE_ERR, "Getting devconfig failed (%d)",
1481 status);
1482 }
1483 xge_os_free(NULL, info, sizeof(xge_hal_device_config_t));
1484 }
1485 break;
1486
1487 case XGE_QUERY_BUFFER_MODE:
1488 if(copyout(&lldev->buffer_mode, ifreqp->ifr_data,
1489 sizeof(int)) == 0)
1490 retValue = 0;
1491 break;
1492
1493 case XGE_SET_BUFFER_MODE_1:
1494 case XGE_SET_BUFFER_MODE_2:
1495 case XGE_SET_BUFFER_MODE_5:
1496 *data = (*data == XGE_SET_BUFFER_MODE_1) ? 'Y':'N';
            if(copyout(data, ifreqp->ifr_data, sizeof(*data)) == 0)
1498 retValue = 0;
1499 break;
1500 default:
1501 xge_trace(XGE_TRACE, "Nothing is matching");
1502 retValue = ENOTTY;
1503 break;
1504 }
1505 return retValue;
1506}
1507
1508/**
1509 * xge_ioctl_registers
1510 * IOCTL to get registers
1511 *
1512 * @lldev Per-adapter data
1513 * @ifreqp Interface request
1514 */
1515int
1516xge_ioctl_registers(xge_lldev_t *lldev, struct ifreq *ifreqp)
1517{
1518 xge_register_t *data = (xge_register_t *)ifreqp->ifr_data;
1519 xge_hal_status_e status = XGE_HAL_OK;
1520 int retValue = EINVAL, offset = 0, index = 0;
1521 u64 val64 = 0;
1522
1523 /* Reading a register */
1524 if(strcmp(data->option, "-r") == 0) {
1525 data->value = 0x0000;
1526 mtx_lock(&lldev->mtx_drv);
1527 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1528 &data->value);
1529 mtx_unlock(&lldev->mtx_drv);
1530 if(status == XGE_HAL_OK) {
1531 if(copyout(data, ifreqp->ifr_data, sizeof(xge_register_t)) == 0)
1532 retValue = 0;
1533 }
1534 }
1535 /* Writing to a register */
1536 else if(strcmp(data->option, "-w") == 0) {
1537 mtx_lock(&lldev->mtx_drv);
1538 status = xge_hal_mgmt_reg_write(lldev->devh, 0, data->offset,
1539 data->value);
1540 if(status == XGE_HAL_OK) {
1541 val64 = 0x0000;
1542 status = xge_hal_mgmt_reg_read(lldev->devh, 0, data->offset,
1543 &val64);
1544 if(status != XGE_HAL_OK) {
1545 xge_trace(XGE_ERR, "Reading back updated register failed");
1546 }
1547 else {
1548 if(val64 != data->value) {
1549 xge_trace(XGE_ERR,
1550 "Read and written register values mismatched");
1551 }
1552 else retValue = 0;
1553 }
1554 }
1555 else {
1556 xge_trace(XGE_ERR, "Getting register value failed");
1557 }
1558 mtx_unlock(&lldev->mtx_drv);
1559 }
1560 else {
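        /*
         * Neither "-r" nor "-w": dump the entire register space, eight
         * bytes at a time, into the caller's buffer, which is assumed to
         * be at least sizeof(xge_hal_pci_bar0_t) bytes long.
         */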
1561 mtx_lock(&lldev->mtx_drv);
1562 for(index = 0, offset = 0; offset <= XGE_OFFSET_OF_LAST_REG;
1563 index++, offset += 0x0008) {
1564 val64 = 0;
1565 status = xge_hal_mgmt_reg_read(lldev->devh, 0, offset, &val64);
1566 if(status != XGE_HAL_OK) {
1567 xge_trace(XGE_ERR, "Getting register value failed");
1568 break;
1569 }
            *((u64 *)data + index) = val64;
1571 retValue = 0;
1572 }
1573 mtx_unlock(&lldev->mtx_drv);
1574
1575 if(retValue == 0) {
1576 if(copyout(data, ifreqp->ifr_data,
1577 sizeof(xge_hal_pci_bar0_t)) != 0) {
1578 xge_trace(XGE_ERR, "Copyout of register values failed");
1579 retValue = EINVAL;
1580 }
1581 }
1582 else {
1583 xge_trace(XGE_ERR, "Getting register values failed");
1584 }
1585 }
1586 return retValue;
1587}
1588
1589/**
1590 * xge_ioctl
1591 * Callback to control the device - Interface configuration
1592 *
1593 * @ifnetp Interface Handle
1594 * @command Device control command
1595 * @data Parameters associated with command (if any)
1596 */
1597int
1598xge_ioctl(struct ifnet *ifnetp, unsigned long command, caddr_t data)
1599{
1600 struct ifreq *ifreqp = (struct ifreq *)data;
1601 xge_lldev_t *lldev = ifnetp->if_softc;
1602 struct ifmedia *ifmediap = &lldev->media;
1603 int retValue = 0, mask = 0;
1604
1605 if(lldev->in_detach) {
1606 return retValue;
1607 }
1608
1609 switch(command) {
1610 /* Set/Get ifnet address */
1611 case SIOCSIFADDR:
1612 case SIOCGIFADDR:
1613 ether_ioctl(ifnetp, command, data);
1614 break;
1615
1616 /* Set ifnet MTU */
1617 case SIOCSIFMTU:
1618 retValue = xge_change_mtu(lldev, ifreqp->ifr_mtu);
1619 break;
1620
1621 /* Set ifnet flags */
1622 case SIOCSIFFLAGS:
1623 if(ifnetp->if_flags & IFF_UP) {
            /* Interface is marked up */
1625 if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
1626 xge_init(lldev);
1627 }
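            /* Re-sync promiscuous mode with the current IFF_PROMISC flag */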
1628 xge_disable_promisc(lldev);
1629 xge_enable_promisc(lldev);
1630 }
1631 else {
            /* Interface is marked down */
            /* If the device is running, stop it */
1634 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1635 xge_stop(lldev);
1636 }
1637 }
1638 break;
1639
1640 /* Add/delete multicast address */
1641 case SIOCADDMULTI:
1642 case SIOCDELMULTI:
1643 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1644 xge_setmulti(lldev);
1645 }
1646 break;
1647
1648 /* Set/Get net media */
1649 case SIOCSIFMEDIA:
1650 case SIOCGIFMEDIA:
1651 retValue = ifmedia_ioctl(ifnetp, ifreqp, ifmediap, command);
1652 break;
1653
1654 /* Set capabilities */
1655 case SIOCSIFCAP:
1656 mtx_lock(&lldev->mtx_drv);
1657 mask = ifreqp->ifr_reqcap ^ ifnetp->if_capenable;
1658 if(mask & IFCAP_TXCSUM) {
1659 if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1660 ifnetp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
1661 ifnetp->if_hwassist &=
1662 ~(CSUM_TCP | CSUM_UDP | CSUM_TSO);
1663 }
1664 else {
1665 ifnetp->if_capenable |= IFCAP_TXCSUM;
1666 ifnetp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1667 }
1668 }
1669 if(mask & IFCAP_TSO4) {
1670 if(ifnetp->if_capenable & IFCAP_TSO4) {
1671 ifnetp->if_capenable &= ~IFCAP_TSO4;
1672 ifnetp->if_hwassist &= ~CSUM_TSO;
1673
1674 xge_os_printf("%s: TSO Disabled",
1675 device_get_nameunit(lldev->device));
1676 }
1677 else if(ifnetp->if_capenable & IFCAP_TXCSUM) {
1678 ifnetp->if_capenable |= IFCAP_TSO4;
1679 ifnetp->if_hwassist |= CSUM_TSO;
1680
1681 xge_os_printf("%s: TSO Enabled",
1682 device_get_nameunit(lldev->device));
1683 }
1684 }
1685
1686 mtx_unlock(&lldev->mtx_drv);
1687 break;
1688
1689 /* Custom IOCTL 0 */
1690 case SIOCGPRIVATE_0:
1691 retValue = xge_ioctl_stats(lldev, ifreqp);
1692 break;
1693
1694 /* Custom IOCTL 1 */
1695 case SIOCGPRIVATE_1:
1696 retValue = xge_ioctl_registers(lldev, ifreqp);
1697 break;
1698
1699 default:
1700 retValue = EINVAL;
1701 break;
1702 }
1703 return retValue;
1704}
1705
1706/**
1707 * xge_init
1708 * Initialize the interface
1709 *
1710 * @plldev Per-adapter Data
1711 */
1712void
1713xge_init(void *plldev)
1714{
1715 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1716
1717 mtx_lock(&lldev->mtx_drv);
1718 xge_os_memzero(&lldev->driver_stats, sizeof(xge_driver_stats_t));
1719 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1720 mtx_unlock(&lldev->mtx_drv);
1721}
1722
1723/**
1724 * xge_device_init
1725 * Initialize the interface (called by holding lock)
1726 *
 * @lldev Per-adapter Data
 * @option Channel open/reopen option
1728 */
1729void
1730xge_device_init(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
1731{
1732 struct ifnet *ifnetp = lldev->ifnetp;
1733 xge_hal_device_t *hldev = lldev->devh;
1734 struct ifaddr *ifaddrp;
1735 unsigned char *macaddr;
1736 struct sockaddr_dl *sockaddrp;
1737 int status = XGE_HAL_OK;
1738
1739 mtx_assert((&lldev->mtx_drv), MA_OWNED);
1740
1741 /* If device is in running state, initializing is not required */
1742 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING)
1743 return;
1744
1745 /* Initializing timer */
1746 callout_init(&lldev->timer, CALLOUT_MPSAFE);
1747
1748 xge_trace(XGE_TRACE, "Set MTU size");
1749 status = xge_hal_device_mtu_set(hldev, ifnetp->if_mtu);
1750 if(status != XGE_HAL_OK) {
1751 xge_trace(XGE_ERR, "Setting MTU in HAL device failed");
1752 goto _exit;
1753 }
1754
1755 /* Enable HAL device */
1756 xge_hal_device_enable(hldev);
1757
1758 /* Get MAC address and update in HAL */
1759 ifaddrp = ifnetp->if_addr;
1760 sockaddrp = (struct sockaddr_dl *)ifaddrp->ifa_addr;
1761 sockaddrp->sdl_type = IFT_ETHER;
1762 sockaddrp->sdl_alen = ifnetp->if_addrlen;
1763 macaddr = LLADDR(sockaddrp);
1764 xge_trace(XGE_TRACE,
1765 "Setting MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
1766 *macaddr, *(macaddr + 1), *(macaddr + 2), *(macaddr + 3),
1767 *(macaddr + 4), *(macaddr + 5));
1768 status = xge_hal_device_macaddr_set(hldev, 0, macaddr);
1769 if(status != XGE_HAL_OK)
1770 xge_trace(XGE_ERR, "Setting MAC address failed (%d)", status);
1771
1772 /* Opening channels */
1773 mtx_unlock(&lldev->mtx_drv);
1774 status = xge_channel_open(lldev, option);
1775 mtx_lock(&lldev->mtx_drv);
1776 if(status != XGE_HAL_OK)
1777 goto _exit;
1778
1779 /* Set appropriate flags */
1780 ifnetp->if_drv_flags |= IFF_DRV_RUNNING;
    ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1782
1783 /* Checksum capability */
1784 ifnetp->if_hwassist = (ifnetp->if_capenable & IFCAP_TXCSUM) ?
1785 (CSUM_TCP | CSUM_UDP) : 0;
1786
1787 if((lldev->enabled_tso) && (ifnetp->if_capenable & IFCAP_TSO4))
1788 ifnetp->if_hwassist |= CSUM_TSO;
1789
1790 /* Enable interrupts */
1791 xge_hal_device_intr_enable(hldev);
1792
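    /* First poll fires after 10 s; xge_timer re-arms itself every second */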
1793 callout_reset(&lldev->timer, 10*hz, xge_timer, lldev);
1794
    /* Enable promiscuous mode if the interface has requested it */
1796 xge_trace(XGE_TRACE, "If opted, enable promiscuous mode");
1797 xge_enable_promisc(lldev);
1798
1799 /* Device is initialized */
1800 lldev->initialized = 1;
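    /* 1 s delay, presumably to let the link settle before first use */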
1801 xge_os_mdelay(1000);
1802
1803_exit:
1804 return;
1805}
1806
1807/**
1808 * xge_timer
1809 * Timer timeout function to handle link status
1810 *
1811 * @devp Per-adapter Data
1812 */
1813void
1814xge_timer(void *devp)
1815{
1816 xge_lldev_t *lldev = (xge_lldev_t *)devp;
1817 xge_hal_device_t *hldev = lldev->devh;
1818
1819 /* Poll for changes */
1820 xge_hal_device_poll(hldev);
1821
1822 /* Reset timer */
1823 callout_reset(&lldev->timer, hz, xge_timer, lldev);
1824
1825 return;
1826}
1827
1828/**
1829 * xge_stop
1830 * De-activate the interface
1831 *
 * @lldev Per-adapter Data
1833 */
1834void
1835xge_stop(xge_lldev_t *lldev)
1836{
1837 mtx_lock(&lldev->mtx_drv);
1838 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
1839 mtx_unlock(&lldev->mtx_drv);
1840}
1841
1842/**
1843 * xge_isr_filter
1844 * ISR filter function - to filter interrupts from other devices (shared)
1845 *
1846 * @handle Per-adapter Data
1847 *
1848 * Returns
1849 * FILTER_STRAY if interrupt is from other device
1850 * FILTER_SCHEDULE_THREAD if interrupt is from Xframe device
1851 */
1852int
1853xge_isr_filter(void *handle)
1854{
1855 xge_lldev_t *lldev = (xge_lldev_t *)handle;
1856 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)((lldev->devh)->bar0);
1857 u16 retValue = FILTER_STRAY;
1858 u64 val64 = 0;
1859
1860 XGE_DRV_STATS(isr_filter);
1861
1862 val64 = xge_os_pio_mem_read64(lldev->pdev, (lldev->devh)->regh0,
1863 &bar0->general_int_status);
1864 retValue = (!val64) ? FILTER_STRAY : FILTER_SCHEDULE_THREAD;
1865
1866 return retValue;
1867}
1868
1869/**
1870 * xge_isr_line
1871 * Interrupt service routine for Line interrupts
1872 *
1873 * @plldev Per-adapter Data
1874 */
1875void
1876xge_isr_line(void *plldev)
1877{
1878 xge_hal_status_e status;
1879 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1880 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1881 struct ifnet *ifnetp = lldev->ifnetp;
1882
1883 XGE_DRV_STATS(isr_line);
1884
1885 if(ifnetp->if_drv_flags & IFF_DRV_RUNNING) {
1886 status = xge_hal_device_handle_irq(hldev);
1887 if(!(IFQ_DRV_IS_EMPTY(&ifnetp->if_snd)))
1888 xge_send(ifnetp);
1889 }
1890}
1891
/**
 * xge_isr_msi
 * ISR for Message Signaled Interrupts
 *
 * @plldev Per-adapter Data
 */
1896void
1897xge_isr_msi(void *plldev)
1898{
1899 xge_lldev_t *lldev = (xge_lldev_t *)plldev;
1900 XGE_DRV_STATS(isr_msi);
1901 xge_hal_device_continue_irq(lldev->devh);
1902}
1903
1904/**
1905 * xge_rx_open
1906 * Initiate and open all Rx channels
1907 *
1908 * @qid Ring Index
1909 * @lldev Per-adapter Data
1910 * @rflag Channel open/close/reopen flag
1911 *
 * Returns XGE_HAL_OK or HAL error enums
1913 */
1914int
1915xge_rx_open(int qid, xge_lldev_t *lldev, xge_hal_channel_reopen_e rflag)
1916{
1917 u64 adapter_status = 0x0;
1918 xge_hal_status_e status = XGE_HAL_FAIL;
1919
1920 xge_hal_channel_attr_t attr = {
1921 .post_qid = qid,
1922 .compl_qid = 0,
1923 .callback = xge_rx_compl,
1924 .per_dtr_space = sizeof(xge_rx_priv_t),
1925 .flags = 0,
1926 .type = XGE_HAL_CHANNEL_TYPE_RING,
1927 .userdata = lldev,
1928 .dtr_init = xge_rx_initial_replenish,
1929 .dtr_term = xge_rx_term
1930 };
1931
1932 /* If device is not ready, return */
1933 status = xge_hal_device_status(lldev->devh, &adapter_status);
1934 if(status != XGE_HAL_OK) {
1935 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1936 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1937 }
1938 else {
1939 status = xge_hal_channel_open(lldev->devh, &attr,
1940 &lldev->ring_channel[qid], rflag);
1941 }
1942
1943_exit:
1944 return status;
1945}
1946
1947/**
1948 * xge_tx_open
1949 * Initialize and open all Tx channels
1950 *
1951 * @lldev Per-adapter Data
1952 * @tflag Channel open/close/reopen flag
1953 *
 * Returns XGE_HAL_OK or HAL error enums
1955 */
1956int
1957xge_tx_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e tflag)
1958{
1959 xge_hal_status_e status = XGE_HAL_FAIL;
1960 u64 adapter_status = 0x0;
1961 int qindex, index;
1962
1963 xge_hal_channel_attr_t attr = {
1964 .compl_qid = 0,
1965 .callback = xge_tx_compl,
1966 .per_dtr_space = sizeof(xge_tx_priv_t),
1967 .flags = 0,
1968 .type = XGE_HAL_CHANNEL_TYPE_FIFO,
1969 .userdata = lldev,
1970 .dtr_init = xge_tx_initial_replenish,
1971 .dtr_term = xge_tx_term
1972 };
1973
1974 /* If device is not ready, return */
1975 status = xge_hal_device_status(lldev->devh, &adapter_status);
1976 if(status != XGE_HAL_OK) {
1977 xge_os_printf("Adapter Status: 0x%llx", (long long) adapter_status);
1978 XGE_EXIT_ON_ERR("Device is not ready", _exit, XGE_HAL_FAIL);
1979 }
1980
    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
        attr.post_qid = qindex;
        status = xge_hal_channel_open(lldev->devh, &attr,
            &lldev->fifo_channel[qindex], tflag);
        if(status != XGE_HAL_OK) {
            /* Close the FIFO channels opened so far and stop */
            for(index = 0; index < qindex; index++)
                xge_hal_channel_close(lldev->fifo_channel[index], tflag);
            break;
        }
    }
1990
1991_exit:
1992 return status;
1993}
1994
1995/**
1996 * xge_enable_msi
1997 * Enables MSI
1998 *
1999 * @lldev Per-adapter Data
2000 */
2001void
2002xge_enable_msi(xge_lldev_t *lldev)
2003{
2004 xge_list_t *item = NULL;
2005 xge_hal_device_t *hldev = lldev->devh;
2006 xge_hal_channel_t *channel = NULL;
2007 u16 offset = 0, val16 = 0;
2008
2009 xge_os_pci_read16(lldev->pdev, NULL,
2010 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2011
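    /*
     * The MSI message-data register is assumed to sit at config offset
     * 0x4c when the capability advertises 64-bit addressing (bit 0x80 of
     * msi_control) and at 0x48 otherwise. Toggling the low bit of the
     * message data changes the vector value the device will signal with.
     */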
2012 /* Update msi_data */
2013 offset = (val16 & 0x80) ? 0x4c : 0x48;
2014 xge_os_pci_read16(lldev->pdev, NULL, offset, &val16);
2015 if(val16 & 0x1)
2016 val16 &= 0xfffe;
2017 else
2018 val16 |= 0x1;
2019 xge_os_pci_write16(lldev->pdev, NULL, offset, val16);
2020
2021 /* Update msi_control */
2022 xge_os_pci_read16(lldev->pdev, NULL,
2023 xge_offsetof(xge_hal_pci_config_le_t, msi_control), &val16);
2024 val16 |= 0x10;
2025 xge_os_pci_write16(lldev->pdev, NULL,
2026 xge_offsetof(xge_hal_pci_config_le_t, msi_control), val16);
2027
2028 /* Set TxMAT and RxMAT registers with MSI */
2029 xge_list_for_each(item, &hldev->free_channels) {
2030 channel = xge_container_of(item, xge_hal_channel_t, item);
2031 xge_hal_channel_msi_set(channel, 1, (u32)val16);
2032 }
2033}
2034
2035/**
2036 * xge_channel_open
2037 * Open both Tx and Rx channels
2038 *
2039 * @lldev Per-adapter Data
2040 * @option Channel reopen option
2041 */
2042int
2043xge_channel_open(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2044{
2045 xge_lro_entry_t *lro_session = NULL;
2046 xge_hal_status_e status = XGE_HAL_OK;
2047 int index = 0, index2 = 0;
2048
2049 if(lldev->enabled_msi == XGE_HAL_INTR_MODE_MSI) {
2050 xge_msi_info_restore(lldev);
2051 xge_enable_msi(lldev);
2052 }
2053
2054_exit2:
2055 status = xge_create_dma_tags(lldev->device);
2056 if(status != XGE_HAL_OK)
2057 XGE_EXIT_ON_ERR("DMA tag creation failed", _exit, status);
2058
2059 /* Open ring (Rx) channel */
2060 for(index = 0; index < XGE_RING_COUNT; index++) {
2061 status = xge_rx_open(index, lldev, option);
2062 if(status != XGE_HAL_OK) {
            /*
             * DMA mapping can fail on kernels that cannot allocate
             * physically contiguous memory for jumbo frames; fall back
             * to 5-buffer mode in that case.
             */
2068 if((lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
2069 (((lldev->ifnetp)->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE) >
2070 MJUMPAGESIZE)) {
2071 /* Close so far opened channels */
2072 for(index2 = 0; index2 < index; index2++) {
2073 xge_hal_channel_close(lldev->ring_channel[index2],
2074 option);
2075 }
2076
2077 /* Destroy DMA tags intended to use for 1 buffer mode */
2078 if(bus_dmamap_destroy(lldev->dma_tag_rx,
2079 lldev->extra_dma_map)) {
2080 xge_trace(XGE_ERR, "Rx extra DMA map destroy failed");
2081 }
2082 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2083 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2084 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2085 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2086
2087 /* Switch to 5 buffer mode */
2088 lldev->buffer_mode = XGE_HAL_RING_QUEUE_BUFFER_MODE_5;
2089 xge_buffer_mode_init(lldev, (lldev->ifnetp)->if_mtu);
2090
2091 /* Restart init */
2092 goto _exit2;
2093 }
2094 else {
2095 XGE_EXIT_ON_ERR("Opening Rx channel failed", _exit1,
2096 status);
2097 }
2098 }
2099 }
2100
2101 if(lldev->enabled_lro) {
2102 SLIST_INIT(&lldev->lro_free);
2103 SLIST_INIT(&lldev->lro_active);
2104 lldev->lro_num = XGE_LRO_DEFAULT_ENTRIES;
2105
2106 for(index = 0; index < lldev->lro_num; index++) {
2107 lro_session = (xge_lro_entry_t *)
2108 xge_os_malloc(NULL, sizeof(xge_lro_entry_t));
2109 if(lro_session == NULL) {
2110 lldev->lro_num = index;
2111 break;
2112 }
2113 SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
2114 }
2115 }
2116
2117 /* Open FIFO (Tx) channel */
2118 status = xge_tx_open(lldev, option);
2119 if(status != XGE_HAL_OK)
2120 XGE_EXIT_ON_ERR("Opening Tx channel failed", _exit1, status);
2121
2122 goto _exit;
2123
2124_exit1:
    /*
     * Either opening an Rx channel failed (index is the failing ring's
     * index) or opening the Tx channel failed (index is XGE_RING_COUNT).
     * Close every ring channel opened so far.
     */
2130 for(index2 = 0; index2 < index; index2++)
2131 xge_hal_channel_close(lldev->ring_channel[index2], option);
2132
2133_exit:
2134 return status;
2135}
2136
2137/**
2138 * xge_channel_close
2139 * Close both Tx and Rx channels
2140 *
2141 * @lldev Per-adapter Data
2142 * @option Channel reopen option
2143 *
2144 */
2145void
2146xge_channel_close(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2147{
2148 int qindex = 0;
2149
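    /* 1 s delay, presumably to let in-flight traffic drain before closing */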
2150 DELAY(1000 * 1000);
2151
2152 /* Close FIFO (Tx) channel */
2153 for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++)
2154 xge_hal_channel_close(lldev->fifo_channel[qindex], option);
2155
2156 /* Close Ring (Rx) channels */
2157 for(qindex = 0; qindex < XGE_RING_COUNT; qindex++)
2158 xge_hal_channel_close(lldev->ring_channel[qindex], option);
2159
2160 if(bus_dmamap_destroy(lldev->dma_tag_rx, lldev->extra_dma_map))
2161 xge_trace(XGE_ERR, "Rx extra map destroy failed");
2162 if(bus_dma_tag_destroy(lldev->dma_tag_rx))
2163 xge_trace(XGE_ERR, "Rx DMA tag destroy failed");
2164 if(bus_dma_tag_destroy(lldev->dma_tag_tx))
2165 xge_trace(XGE_ERR, "Tx DMA tag destroy failed");
2166}
2167
2168/**
2169 * dmamap_cb
2170 * DMA map callback
2171 *
2172 * @arg Parameter passed from dmamap
2173 * @segs Segments
2174 * @nseg Number of segments
2175 * @error Error
2176 */
2177void
2178dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2179{
2180 if(!error) {
2181 *(bus_addr_t *) arg = segs->ds_addr;
2182 }
2183}
2184
2185/**
2186 * xge_reset
2187 * Device Reset
2188 *
2189 * @lldev Per-adapter Data
2190 */
2191void
2192xge_reset(xge_lldev_t *lldev)
2193{
    xge_trace(XGE_TRACE, "Resetting the chip");
2195
2196 /* If the device is not initialized, return */
2197 if(lldev->initialized) {
2198 mtx_lock(&lldev->mtx_drv);
2199 xge_device_stop(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2200 xge_device_init(lldev, XGE_HAL_CHANNEL_OC_NORMAL);
2201 mtx_unlock(&lldev->mtx_drv);
2202 }
2203
2204 return;
2205}
2206
2207/**
2208 * xge_setmulti
2209 * Set an address as a multicast address
2210 *
2211 * @lldev Per-adapter Data
2212 */
2213void
2214xge_setmulti(xge_lldev_t *lldev)
2215{
2216 struct ifmultiaddr *ifma;
2217 u8 *lladdr;
2218 xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2219 struct ifnet *ifnetp = lldev->ifnetp;
2220 int index = 0;
2221 int offset = 1;
2222 int table_size = 47;
2223 xge_hal_status_e status = XGE_HAL_OK;
2224 u8 initial_addr[]= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2225
    /* Track IFF_ALLMULTI transitions rather than the always-set
     * IFF_MULTICAST flag, so the state does not toggle on every call */
    if((ifnetp->if_flags & IFF_ALLMULTI) && (!lldev->all_multicast)) {
        status = xge_hal_device_mcast_enable(hldev);
        lldev->all_multicast = 1;
    }
    else if(!(ifnetp->if_flags & IFF_ALLMULTI) && (lldev->all_multicast)) {
        status = xge_hal_device_mcast_disable(hldev);
        lldev->all_multicast = 0;
    }
2234
2235 if(status != XGE_HAL_OK) {
2236 xge_trace(XGE_ERR, "Enabling/disabling multicast failed");
2237 goto _exit;
2238 }
2239
2240 /* Updating address list */
2241 if_maddr_rlock(ifnetp);
2242 index = 0;
2243 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2244 if(ifma->ifma_addr->sa_family != AF_LINK) {
2245 continue;
2246 }
2247 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2248 index += 1;
2249 }
2250 if_maddr_runlock(ifnetp);
2251
2252 if((!lldev->all_multicast) && (index)) {
2253 lldev->macaddr_count = (index + 1);
2254 if(lldev->macaddr_count > table_size) {
2255 goto _exit;
2256 }
2257
2258 /* Clear old addresses */
2259 for(index = 0; index < 48; index++) {
2260 xge_hal_device_macaddr_set(hldev, (offset + index),
2261 initial_addr);
2262 }
2263 }
2264
2265 /* Add new addresses */
2266 if_maddr_rlock(ifnetp);
2267 index = 0;
2268 TAILQ_FOREACH(ifma, &ifnetp->if_multiaddrs, ifma_link) {
2269 if(ifma->ifma_addr->sa_family != AF_LINK) {
2270 continue;
2271 }
2272 lladdr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2273 xge_hal_device_macaddr_set(hldev, (offset + index), lladdr);
2274 index += 1;
2275 }
2276 if_maddr_runlock(ifnetp);
2277
2278_exit:
2279 return;
2280}
2281
2282/**
2283 * xge_enable_promisc
2284 * Enable Promiscuous Mode
2285 *
2286 * @lldev Per-adapter Data
2287 */
2288void
2289xge_enable_promisc(xge_lldev_t *lldev)
2290{
2291 struct ifnet *ifnetp = lldev->ifnetp;
2292 xge_hal_device_t *hldev = lldev->devh;
2293 xge_hal_pci_bar0_t *bar0 = NULL;
2294 u64 val64 = 0;
2295
2296 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2297
2298 if(ifnetp->if_flags & IFF_PROMISC) {
2299 xge_hal_device_promisc_enable(lldev->devh);
2300
2301 /*
2302 * When operating in promiscuous mode, don't strip the VLAN tag
2303 */
2304 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2305 &bar0->rx_pa_cfg);
2306 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2307 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(0);
2308 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2309 &bar0->rx_pa_cfg);
2310
2311 xge_trace(XGE_TRACE, "Promiscuous mode ON");
2312 }
2313}
2314
2315/**
2316 * xge_disable_promisc
2317 * Disable Promiscuous Mode
2318 *
2319 * @lldev Per-adapter Data
2320 */
2321void
2322xge_disable_promisc(xge_lldev_t *lldev)
2323{
2324 xge_hal_device_t *hldev = lldev->devh;
2325 xge_hal_pci_bar0_t *bar0 = NULL;
2326 u64 val64 = 0;
2327
2328 bar0 = (xge_hal_pci_bar0_t *) hldev->bar0;
2329
2330 xge_hal_device_promisc_disable(lldev->devh);
2331
2332 /*
2333 * Strip VLAN tag when operating in non-promiscuous mode
2334 */
2335 val64 = xge_os_pio_mem_read64(lldev->pdev, hldev->regh0,
2336 &bar0->rx_pa_cfg);
2337 val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2338 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
2339 xge_os_pio_mem_write64(lldev->pdev, hldev->regh0, val64,
2340 &bar0->rx_pa_cfg);
2341
2342 xge_trace(XGE_TRACE, "Promiscuous mode OFF");
2343}
2344
2345/**
2346 * xge_change_mtu
2347 * Change interface MTU to a requested valid size
2348 *
2349 * @lldev Per-adapter Data
2350 * @NewMtu Requested MTU
2351 *
2352 * Returns 0 or Error Number
2353 */
2354int
2355xge_change_mtu(xge_lldev_t *lldev, int new_mtu)
2356{
2357 int status = XGE_HAL_OK;
2358
2359 /* Check requested MTU size for boundary */
2360 if(xge_hal_device_mtu_check(lldev->devh, new_mtu) != XGE_HAL_OK) {
2361 XGE_EXIT_ON_ERR("Invalid MTU", _exit, EINVAL);
2362 }
2363
2364 lldev->mtu = new_mtu;
2365 xge_confirm_changes(lldev, XGE_SET_MTU);
2366
2367_exit:
2368 return status;
2369}
2370
2371/**
2372 * xge_device_stop
2373 *
2374 * Common code for both stop and part of reset. Disables device, interrupts and
2375 * closes channels
2376 *
2377 * @dev Device Handle
2378 * @option Channel normal/reset option
2379 */
2380void
2381xge_device_stop(xge_lldev_t *lldev, xge_hal_channel_reopen_e option)
2382{
2383 xge_hal_device_t *hldev = lldev->devh;
2384 struct ifnet *ifnetp = lldev->ifnetp;
2385 u64 val64 = 0;
2386
2387 mtx_assert((&lldev->mtx_drv), MA_OWNED);
2388
2389 /* If device is not in "Running" state, return */
2390 if (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING))
2391 goto _exit;
2392
2393 /* Set appropriate flags */
2394 ifnetp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2395
2396 /* Stop timer */
2397 callout_stop(&lldev->timer);
2398
2399 /* Disable interrupts */
2400 xge_hal_device_intr_disable(hldev);
2401
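    /*
     * Drop the driver lock around the event-queue flush, presumably so
     * that queued event handlers which themselves take mtx_drv can run
     * to completion.
     */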
    mtx_unlock(&lldev->mtx_drv);
    xge_queue_flush(xge_hal_device_queue(lldev->devh));
    mtx_lock(&lldev->mtx_drv);

    /* Disable HAL device */
    if(xge_hal_device_disable(hldev) != XGE_HAL_OK) {
        xge_trace(XGE_ERR, "Disabling HAL device failed");
        xge_hal_device_status(hldev, &val64);
        xge_trace(XGE_ERR, "Adapter Status: 0x%llx", (long long)val64);
    }

    /* Close Tx and Rx channels */
    xge_channel_close(lldev, option);

    /* Reset HAL device */
    xge_hal_device_reset(hldev);

    xge_os_mdelay(1000);
    lldev->initialized = 0;

    if_link_state_change(ifnetp, LINK_STATE_DOWN);

_exit:
    return;
}

/**
 * xge_set_mbuf_cflags
 * Set checksum flags for the mbuf
 *
 * @pkt Packet
 */
void
xge_set_mbuf_cflags(mbuf_t pkt)
{
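    /*
     * Mark the packet so the stack skips software checksum checks:
     * CSUM_IP_CHECKED | CSUM_IP_VALID report a good IP header checksum,
     * while CSUM_DATA_VALID | CSUM_PSEUDO_HDR with a csum_data of 0xffff
     * report a fully verified TCP/UDP checksum.
     */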
    pkt->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
    pkt->m_pkthdr.csum_flags |= CSUM_IP_VALID;
    pkt->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
    pkt->m_pkthdr.csum_data = htons(0xffff);
}

/**
 * xge_lro_flush_sessions
 * Flush all LRO sessions and send the accumulated packets to the upper layer
 *
 * @lldev Per-adapter Data
 */
void
xge_lro_flush_sessions(xge_lldev_t *lldev)
{
    xge_lro_entry_t *lro_session = NULL;

    while(!SLIST_EMPTY(&lldev->lro_active)) {
        lro_session = SLIST_FIRST(&lldev->lro_active);
        SLIST_REMOVE_HEAD(&lldev->lro_active, next);
        xge_lro_flush(lldev, lro_session);
    }
}

/**
 * xge_lro_flush
 * Flush an LRO session. Send the accumulated LRO packet to the upper layer
 *
 * @lldev Per-adapter Data
 * @lro_session LRO session to be flushed
 */
static void
xge_lro_flush(xge_lldev_t *lldev, xge_lro_entry_t *lro_session)
{
    struct ip *header_ip;
    struct tcphdr *header_tcp;
    u32 *ptr;

    if(lro_session->append_cnt) {
        header_ip = lro_session->lro_header_ip;
        header_ip->ip_len = htons(lro_session->len - ETHER_HDR_LEN);
        lro_session->m_head->m_pkthdr.len = lro_session->len;
        header_tcp = (struct tcphdr *)(header_ip + 1);
        header_tcp->th_ack = lro_session->ack_seq;
        header_tcp->th_win = lro_session->window;
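        /*
         * With the standard NOP/NOP/TIMESTAMP option layout, the 32-bit
         * words following the TCP header are: word 0 the option kind and
         * length bytes, word 1 TSval, word 2 TSecr; hence the
         * ptr[1]/ptr[2] updates below.
         */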
        if(lro_session->timestamp) {
            ptr = (u32 *)(header_tcp + 1);
            ptr[1] = htonl(lro_session->tsval);
            ptr[2] = lro_session->tsecr;
        }
    }

    (*lldev->ifnetp->if_input)(lldev->ifnetp, lro_session->m_head);
    lro_session->m_head = NULL;
    lro_session->timestamp = 0;
    lro_session->append_cnt = 0;
    SLIST_INSERT_HEAD(&lldev->lro_free, lro_session, next);
}

/**
 * xge_lro_accumulate
 * Accumulate packets to form a large LRO packet based on various conditions
 *
 * @lldev Per-adapter Data
 * @m_head Current Packet
 *
 * Returns XGE_HAL_OK or XGE_HAL_FAIL (failure)
 */
static int
xge_lro_accumulate(xge_lldev_t *lldev, struct mbuf *m_head)
{
    struct ether_header *header_ethernet;
    struct ip *header_ip;
    struct tcphdr *header_tcp;
    u32 seq, *ptr;
    struct mbuf *buffer_next, *buffer_tail;
    xge_lro_entry_t *lro_session;
    xge_hal_status_e status = XGE_HAL_FAIL;
    int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len, tcp_options;
    int trim;

    /* Get the Ethernet header */
    header_ethernet = mtod(m_head, struct ether_header *);

    /* Return if it is not an IP packet */
    if(header_ethernet->ether_type != htons(ETHERTYPE_IP))
        goto _exit;

    /* Get the IP header */
    header_ip = lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1 ?
        (struct ip *)(header_ethernet + 1) :
        mtod(m_head->m_next, struct ip *);

    /* Return if it is not a TCP packet */
    if(header_ip->ip_p != IPPROTO_TCP)
        goto _exit;

    /* Return if the packet has IP options */
    if((header_ip->ip_hl << 2) != sizeof(*header_ip))
        goto _exit;

    /* Return if the packet is fragmented */
    if(header_ip->ip_off & htons(IP_MF | IP_OFFMASK))
        goto _exit;

    /* Get the TCP header */
    header_tcp = (struct tcphdr *)(header_ip + 1);

    /* Return if any flag other than ACK or PUSH is set */
    if((header_tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
        goto _exit;

    /* Only the timestamp option is handled */
    tcp_options = (header_tcp->th_off << 2) - sizeof(*header_tcp);
    tcp_hdr_len = sizeof(*header_tcp) + tcp_options;
    ptr = (u32 *)(header_tcp + 1);
    if(tcp_options != 0) {
        if(__predict_false(tcp_options != TCPOLEN_TSTAMP_APPA) ||
            (*ptr != ntohl(TCPOPT_NOP << 24 | TCPOPT_NOP << 16 |
                TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP))) {
            goto _exit;
        }
    }

    /* Total length of the packet (IP) */
    ip_len = ntohs(header_ip->ip_len);

    /* TCP data size */
    tcp_data_len = ip_len - (header_tcp->th_off << 2) - sizeof(*header_ip);

    /* If the frame is padded, trim it */
    tot_len = m_head->m_pkthdr.len;
    trim = tot_len - (ip_len + ETHER_HDR_LEN);
    if(trim != 0) {
        if(trim < 0)
            goto _exit;
        m_adj(m_head, -trim);
        tot_len = m_head->m_pkthdr.len;
    }

    buffer_next = m_head;
    buffer_tail = NULL;
    while(buffer_next != NULL) {
        buffer_tail = buffer_next;
        buffer_next = buffer_tail->m_next;
    }

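    /*
     * ip_len covers the IP header, TCP header and payload, so adding the
     * Ethernet header and subtracting the payload leaves the combined
     * length of all headers; that many bytes get stripped off packets
     * appended to a session below.
     */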
    /* Total size of the headers alone */
    hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;

    /* Get the sequence number */
    seq = ntohl(header_tcp->th_seq);

    SLIST_FOREACH(lro_session, &lldev->lro_active, next) {
        if(lro_session->source_port == header_tcp->th_sport &&
            lro_session->dest_port == header_tcp->th_dport &&
            lro_session->source_ip == header_ip->ip_src.s_addr &&
            lro_session->dest_ip == header_ip->ip_dst.s_addr) {

            /* Unexpected sequence number, flush the LRO session */
            if(__predict_false(seq != lro_session->next_seq)) {
                SLIST_REMOVE(&lldev->lro_active, lro_session,
                    xge_lro_entry_t, next);
                xge_lro_flush(lldev, lro_session);
                goto _exit;
            }

            /* Handle the timestamp option */
            if(tcp_options) {
                u32 tsval = ntohl(*(ptr + 1));
                if(__predict_false(lro_session->tsval > tsval ||
                    *(ptr + 2) == 0)) {
                    goto _exit;
                }
                lro_session->tsval = tsval;
                lro_session->tsecr = *(ptr + 2);
            }

            lro_session->next_seq += tcp_data_len;
            lro_session->ack_seq = header_tcp->th_ack;
            lro_session->window = header_tcp->th_win;

            /* If the TCP data/payload is of 0 size, free the mbuf */
            if(tcp_data_len == 0) {
                m_freem(m_head);
                status = XGE_HAL_OK;
                goto _exit;
            }

            lro_session->append_cnt++;
            lro_session->len += tcp_data_len;

            /* Adjust the mbuf so that m_data points to the payload
             * rather than the headers */
            m_adj(m_head, hlen);

            /* Append this packet to the accumulated LRO packet */
            lro_session->m_tail->m_next = m_head;
            lro_session->m_tail = buffer_tail;

            /* Flush if the LRO packet is exceeding the maximum size */
            if(lro_session->len >
                (XGE_HAL_LRO_DEFAULT_FRM_LEN - lldev->ifnetp->if_mtu)) {
                SLIST_REMOVE(&lldev->lro_active, lro_session,
                    xge_lro_entry_t, next);
                xge_lro_flush(lldev, lro_session);
            }
            status = XGE_HAL_OK;
            goto _exit;
        }
    }

    if(SLIST_EMPTY(&lldev->lro_free))
        goto _exit;

    /* Start a new LRO session */
    lro_session = SLIST_FIRST(&lldev->lro_free);
    SLIST_REMOVE_HEAD(&lldev->lro_free, next);
    SLIST_INSERT_HEAD(&lldev->lro_active, lro_session, next);
    lro_session->source_port = header_tcp->th_sport;
    lro_session->dest_port = header_tcp->th_dport;
    lro_session->source_ip = header_ip->ip_src.s_addr;
    lro_session->dest_ip = header_ip->ip_dst.s_addr;
    lro_session->next_seq = seq + tcp_data_len;
    lro_session->mss = tcp_data_len;
    lro_session->ack_seq = header_tcp->th_ack;
    lro_session->window = header_tcp->th_win;

    lro_session->lro_header_ip = header_ip;

    /* Handle the timestamp option */
    if(tcp_options) {
        lro_session->timestamp = 1;
        lro_session->tsval = ntohl(*(ptr + 1));
        lro_session->tsecr = *(ptr + 2);
    }

    lro_session->len = tot_len;
    lro_session->m_head = m_head;
    lro_session->m_tail = buffer_tail;
    status = XGE_HAL_OK;

_exit:
    return status;
}

/**
 * xge_accumulate_large_rx
 * Try to merge the packet into an LRO session; hand it to the stack
 * directly if accumulation fails
 *
 * @lldev Per-adapter Data
 * @pkt Current packet
 * @pkt_length Packet Length
 * @rxd_priv Rx Descriptor Private Data
 */
void
xge_accumulate_large_rx(xge_lldev_t *lldev, struct mbuf *pkt, int pkt_length,
    xge_rx_priv_t *rxd_priv)
{
    if(xge_lro_accumulate(lldev, pkt) != XGE_HAL_OK) {
        bus_dmamap_sync(lldev->dma_tag_rx, rxd_priv->dmainfo[0].dma_map,
            BUS_DMASYNC_POSTREAD);
        (*lldev->ifnetp->if_input)(lldev->ifnetp, pkt);
    }
}

/**
 * xge_rx_compl
 * If the interrupt is due to a received frame (Rx completion), send it up
 *
 * @channelh Ring Channel Handle
 * @dtr Current Descriptor
 * @t_code Transfer Code indicating success or error
 * @userdata Per-adapter Data
 *
 * Returns XGE_HAL_OK or HAL error enums
 */
xge_hal_status_e
xge_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
    void *userdata)
{
    struct ifnet *ifnetp;
    xge_rx_priv_t *rxd_priv = NULL;
    mbuf_t mbuf_up = NULL;
    xge_hal_status_e status = XGE_HAL_OK;
    xge_hal_dtr_info_t ext_info;
    int index;
    u16 vlan_tag;

    /* Get the user data portion */
    xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
    if(!lldev) {
        XGE_EXIT_ON_ERR("Failed to get user data", _exit, XGE_HAL_FAIL);
    }

    XGE_DRV_STATS(rx_completions);

    /* Get the interface pointer */
    ifnetp = lldev->ifnetp;

    do {
        XGE_DRV_STATS(rx_desc_compl);

        if(!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)) {
            status = XGE_HAL_FAIL;
            goto _exit;
        }

        if(t_code) {
            xge_trace(XGE_TRACE, "Packet dropped because of %d", t_code);
            XGE_DRV_STATS(rx_tcode);
            xge_hal_device_handle_tcode(channelh, dtr, t_code);
            xge_hal_ring_dtr_post(channelh, dtr);
            continue;
        }

        /* Get the private data for this descriptor */
        rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh,
            dtr);
        if(!rxd_priv) {
            XGE_EXIT_ON_ERR("Failed to get descriptor private data", _exit,
                XGE_HAL_FAIL);
        }

        /*
         * Prepare one buffer to send up to the upper layer -- since the
         * upper layer frees the buffer, do not reuse rxd_priv->buffer.
         * Instead, prepare a new buffer, map it, place it into the
         * current descriptor and post the descriptor back to the ring
         * channel
         */
        mbuf_up = rxd_priv->bufferArray[0];

        /* Get the details of the mbuf, i.e., the packet length */
        xge_ring_dtr_get(mbuf_up, channelh, dtr, lldev, rxd_priv);

        status =
            (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
            xge_get_buf(dtr, rxd_priv, lldev, 0) :
            xge_get_buf_3b_5b(dtr, rxd_priv, lldev);

        if(status != XGE_HAL_OK) {
            xge_trace(XGE_ERR, "No memory");
            XGE_DRV_STATS(rx_no_buf);

            /*
             * Unable to allocate a buffer. Instead of discarding, post
             * the descriptor back to the channel for future processing
             * of the same packet.
             */
            xge_hal_ring_dtr_post(channelh, dtr);
            continue;
        }

        /* Get the extended information */
        xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);

        /*
         * As we have allocated a new mbuf for this descriptor, post the
         * descriptor with the new mbuf back to the ring channel
         */
        vlan_tag = ext_info.vlan;
        xge_hal_ring_dtr_post(channelh, dtr);
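        /*
         * Trust the hardware checksum results only for unfragmented
         * TCP/UDP-over-IPv4 frames where both the L3 and L4 checks
         * passed; everything else goes up unmarked so the stack verifies
         * the checksums itself.
         */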
        if ((!(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) &&
            (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
            (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK) &&
            (ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK))) {

            /* Set the checksum flags */
            xge_set_mbuf_cflags(mbuf_up);

            if(lldev->enabled_lro) {
                xge_accumulate_large_rx(lldev, mbuf_up, mbuf_up->m_len,
                    rxd_priv);
            }
            else {
                /* Post-Read sync for buffers */
                for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
                    bus_dmamap_sync(lldev->dma_tag_rx,
                        rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
                }
                (*ifnetp->if_input)(ifnetp, mbuf_up);
            }
        }
        else {
            /*
             * Packet with an erroneous checksum -- let the upper layer
             * deal with it
             */

            /* Post-Read sync for buffers */
            for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
                bus_dmamap_sync(lldev->dma_tag_rx,
                    rxd_priv->dmainfo[0].dma_map, BUS_DMASYNC_POSTREAD);
            }

            if(vlan_tag) {
                mbuf_up->m_pkthdr.ether_vtag = vlan_tag;
                mbuf_up->m_flags |= M_VLANTAG;
            }

            if(lldev->enabled_lro)
                xge_lro_flush_sessions(lldev);

            (*ifnetp->if_input)(ifnetp, mbuf_up);
        }
    } while(xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code)
        == XGE_HAL_OK);

    if(lldev->enabled_lro)
        xge_lro_flush_sessions(lldev);

_exit:
    return status;
}

/**
 * xge_ring_dtr_get
 * Get the packet data from a completed descriptor
 *
 * @mbuf_up Packet to send up
 * @channelh Ring Channel Handle
 * @dtr Descriptor
 * @lldev Per-adapter Data
 * @rxd_priv Rx Descriptor Private Data
 *
 * Returns XGE_HAL_OK or HAL error enums
 */
int
xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
    xge_lldev_t *lldev, xge_rx_priv_t *rxd_priv)
{
    mbuf_t m;
    int pkt_length[5] = {0, 0}, pkt_len = 0;
    dma_addr_t dma_data[5];
    int index;

    m = mbuf_up;
    pkt_len = 0;

    if(lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
        xge_os_memzero(pkt_length, sizeof(pkt_length));

        /*
         * Retrieve the data of interest from the completed descriptor --
         * this returns the packet length
         */
        if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
            xge_hal_ring_dtr_5b_get(channelh, dtr, dma_data, pkt_length);
        }
        else {
            xge_hal_ring_dtr_3b_get(channelh, dtr, dma_data, pkt_length);
        }

        for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
            m->m_len = pkt_length[index];

            if(index < (lldev->rxd_mbuf_cnt - 1)) {
                m->m_next = rxd_priv->bufferArray[index + 1];
                m = m->m_next;
            }
            else {
                m->m_next = NULL;
            }
            pkt_len += pkt_length[index];
        }

        /*
         * 2-buffer mode is an exceptional case where the data lands in
         * the 3rd buffer rather than the 2nd, so account for it
         * separately
         */
        if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
            m->m_len = pkt_length[2];
            pkt_len += pkt_length[2];
        }

        /*
         * Update the length of the newly created buffer to be sent up
         * with the packet length
         */
        mbuf_up->m_pkthdr.len = pkt_len;
    }
    else {
        /*
         * Retrieve the data of interest from the completed descriptor --
         * this returns the packet length
         */
        xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data[0], &pkt_length[0]);

        /*
         * Update the length of the newly created buffer to be sent up
         * with the packet length
         */
        mbuf_up->m_len = mbuf_up->m_pkthdr.len = pkt_length[0];
    }

    return XGE_HAL_OK;
}

/**
 * xge_flush_txds
 * Flush Tx descriptors
 *
 * @channelh Channel handle
 */
static void inline
xge_flush_txds(xge_hal_channel_h channelh)
{
    xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
    xge_hal_dtr_h tx_dtr;
    xge_tx_priv_t *tx_priv;
    u8 t_code;

    while(xge_hal_fifo_dtr_next_completed(channelh, &tx_dtr, &t_code)
        == XGE_HAL_OK) {
        XGE_DRV_STATS(tx_desc_compl);
        if(t_code) {
            xge_trace(XGE_TRACE, "Tx descriptor with t_code %d", t_code);
            XGE_DRV_STATS(tx_tcode);
            xge_hal_device_handle_tcode(channelh, tx_dtr, t_code);
        }

        tx_priv = xge_hal_fifo_dtr_private(tx_dtr);
        bus_dmamap_unload(lldev->dma_tag_tx, tx_priv->dma_map);
        m_freem(tx_priv->buffer);
        tx_priv->buffer = NULL;
        xge_hal_fifo_dtr_free(channelh, tx_dtr);
    }
}

/**
 * xge_send
 * Transmit function
 *
 * @ifnetp Interface Handle
 */
void
xge_send(struct ifnet *ifnetp)
{
    int qindex = 0;
    xge_lldev_t *lldev = ifnetp->if_softc;

    for(qindex = 0; qindex < XGE_FIFO_COUNT; qindex++) {
        if(mtx_trylock(&lldev->mtx_tx[qindex]) == 0) {
            XGE_DRV_STATS(tx_lock_fail);
            break;
        }
        xge_send_locked(ifnetp, qindex);
        mtx_unlock(&lldev->mtx_tx[qindex]);
    }
}

static void inline
xge_send_locked(struct ifnet *ifnetp, int qindex)
{
    xge_hal_dtr_h dtr;
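    /* Static, presumably to keep this large array off the kernel stack */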
    static bus_dma_segment_t segs[XGE_MAX_SEGS];
    xge_hal_status_e status;
    unsigned int max_fragments;
    xge_lldev_t *lldev = ifnetp->if_softc;
    xge_hal_channel_h channelh = lldev->fifo_channel[qindex];
    mbuf_t m_head = NULL;
    mbuf_t m_buf = NULL;
    xge_tx_priv_t *ll_tx_priv = NULL;
    register unsigned int count = 0;
    unsigned int nsegs = 0;
    u16 vlan_tag;

    max_fragments = ((xge_hal_fifo_t *)channelh)->config->max_frags;

    /* If the device is not initialized, return */
    if((!lldev->initialized) || (!(ifnetp->if_drv_flags & IFF_DRV_RUNNING)))
        return;

    XGE_DRV_STATS(tx_calls);

    /*
     * This loop is executed for each packet in the kernel-maintained
     * queue -- each packet can be an mbuf chain of several fragments
     */
    for(;;) {
        IF_DEQUEUE(&ifnetp->if_snd, m_head);
        if (m_head == NULL) {
            ifnetp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
            return;
        }

        /* Count the fragments; defragment if the chain is too long */
        count = 0;
        for(m_buf = m_head; m_buf != NULL; m_buf = m_buf->m_next) {
            if(m_buf->m_len) count += 1;
        }

        if(count >= max_fragments) {
            m_buf = m_defrag(m_head, M_DONTWAIT);
            if(m_buf != NULL) m_head = m_buf;
            XGE_DRV_STATS(tx_defrag);
        }

        /* Reserve descriptors */
        status = xge_hal_fifo_dtr_reserve(channelh, &dtr);
        if(status != XGE_HAL_OK) {
            XGE_DRV_STATS(tx_no_txd);
            xge_flush_txds(channelh);
            break;
        }

        vlan_tag =
            (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0;
        xge_hal_fifo_dtr_vlan_set(dtr, vlan_tag);

        /* Update the Tx private structure for this descriptor */
        ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
        ll_tx_priv->buffer = m_head;

        /*
         * Do the mapping -- the required DMA tag has been created in the
         * xge_init function and the DMA maps have already been created
         * in the xgell_tx_replenish function.
         * Returns the number of segments through nsegs
         */
        if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_tx,
            ll_tx_priv->dma_map, m_head, segs, &nsegs, BUS_DMA_NOWAIT)) {
            xge_trace(XGE_TRACE, "DMA map load failed");
            XGE_DRV_STATS(tx_map_fail);
            break;
        }

        if(lldev->driver_stats.tx_max_frags < nsegs)
            lldev->driver_stats.tx_max_frags = nsegs;

        /* Set the descriptor buffer for the header and each segment */
        count = 0;
        do {
            xge_hal_fifo_dtr_buffer_set(channelh, dtr, count,
                (dma_addr_t)htole64(segs[count].ds_addr),
                segs[count].ds_len);
            count++;
        } while(count < nsegs);

        /* Pre-write sync of the mapping */
        bus_dmamap_sync(lldev->dma_tag_tx, ll_tx_priv->dma_map,
            BUS_DMASYNC_PREWRITE);

        if((lldev->enabled_tso) &&
            (m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
            XGE_DRV_STATS(tx_tso);
            xge_hal_fifo_dtr_mss_set(dtr, m_head->m_pkthdr.tso_segsz);
        }

        /* Checksum */
        if(ifnetp->if_hwassist > 0) {
            xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_IPV4_EN
                | XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
        }

        /* Post the descriptor to the FIFO channel */
        xge_hal_fifo_dtr_post(channelh, dtr);
        XGE_DRV_STATS(tx_posted);

        /* Send a copy of the mbuf packet to any BPF (Berkeley Packet
         * Filter) listener so that tools like tcpdump can be used */
        ETHER_BPF_MTAP(ifnetp, m_head);
    }

    /* Prepend the packet back to the queue */
    IF_PREPEND(&ifnetp->if_snd, m_head);
    ifnetp->if_drv_flags |= IFF_DRV_OACTIVE;

    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
        XGE_LL_EVENT_TRY_XMIT_AGAIN, lldev->devh);
    XGE_DRV_STATS(tx_again);
}

/**
 * xge_get_buf
 * Allocates a new mbuf to be placed into a descriptor
 *
 * @dtrh Descriptor Handle
 * @rxd_priv Rx Descriptor Private Data
 * @lldev Per-adapter Data
 * @index Buffer Index (if multi-buffer mode)
 *
 * Returns XGE_HAL_OK or HAL error enums
 */
int
xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
    xge_lldev_t *lldev, int index)
{
    register mbuf_t mp = NULL;
    struct ifnet *ifnetp = lldev->ifnetp;
    int status = XGE_HAL_OK;
    int buffer_size = 0, cluster_size = 0, count;
    bus_dmamap_t map = rxd_priv->dmainfo[index].dma_map;
    bus_dma_segment_t segs[3];

    buffer_size = (lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) ?
        ifnetp->if_mtu + XGE_HAL_MAC_HEADER_MAX_SIZE :
        lldev->rxd_mbuf_len[index];

    if(buffer_size <= MCLBYTES) {
        cluster_size = MCLBYTES;
        mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    }
    else {
        cluster_size = MJUMPAGESIZE;
        if((lldev->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5) &&
            (buffer_size > MJUMPAGESIZE)) {
            cluster_size = MJUM9BYTES;
        }
        mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, cluster_size);
    }
    if(!mp) {
        xge_trace(XGE_ERR, "Out of memory to allocate mbuf");
        status = XGE_HAL_FAIL;
        goto getbuf_out;
    }

    /* Update the mbuf's length, packet length and receive interface */
    mp->m_len = mp->m_pkthdr.len = buffer_size;
    mp->m_pkthdr.rcvif = ifnetp;

    /* Load the DMA map */
    if(bus_dmamap_load_mbuf_sg(lldev->dma_tag_rx, lldev->extra_dma_map,
        mp, segs, &count, BUS_DMA_NOWAIT)) {
        XGE_DRV_STATS(rx_map_fail);
        m_freem(mp);
        XGE_EXIT_ON_ERR("DMA map load failed", getbuf_out, XGE_HAL_FAIL);
    }

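    /*
     * Spare-map swap: the new mbuf was just loaded into extra_dma_map, so
     * hand that map to this descriptor and keep the descriptor's old map
     * as the new spare once it is synced and unloaded below. This avoids
     * creating and destroying a DMA map per received packet.
     */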
    /* Update the descriptor private data */
    rxd_priv->bufferArray[index] = mp;
    rxd_priv->dmainfo[index].dma_phyaddr = htole64(segs->ds_addr);
    rxd_priv->dmainfo[index].dma_map = lldev->extra_dma_map;
    lldev->extra_dma_map = map;

    /* Post-read sync before unloading the previous mapping */
    bus_dmamap_sync(lldev->dma_tag_rx, map, BUS_DMASYNC_POSTREAD);

    /* Unload the DMA map of the mbuf that was in this descriptor */
    bus_dmamap_unload(lldev->dma_tag_rx, map);

    /* Set the descriptor buffer */
    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
        xge_hal_ring_dtr_1b_set(dtrh, rxd_priv->dmainfo[0].dma_phyaddr,
            cluster_size);
    }

getbuf_out:
    return status;
}

/**
 * xge_get_buf_3b_5b
 * Allocates new mbufs to be placed into descriptors (in multi-buffer modes)
 *
 * @dtrh Descriptor Handle
 * @rxd_priv Rx Descriptor Private Data
 * @lldev Per-adapter Data
 *
 * Returns XGE_HAL_OK or HAL error enums
 */
int
xge_get_buf_3b_5b(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
    xge_lldev_t *lldev)
{
    bus_addr_t dma_pointers[5];
    int dma_sizes[5];
    int status = XGE_HAL_OK, index;
    int newindex = 0;

    for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
        status = xge_get_buf(dtrh, rxd_priv, lldev, index);
        if(status != XGE_HAL_OK) {
            for(newindex = 0; newindex < index; newindex++) {
                m_freem(rxd_priv->bufferArray[newindex]);
            }
            XGE_EXIT_ON_ERR("mbuf allocation failed", _exit, status);
        }
    }

    for(index = 0; index < lldev->buffer_mode; index++) {
        if(lldev->rxd_mbuf_len[index] != 0) {
            dma_pointers[index] = rxd_priv->dmainfo[index].dma_phyaddr;
            dma_sizes[index] = lldev->rxd_mbuf_len[index];
        }
        else {
            dma_pointers[index] = rxd_priv->dmainfo[index-1].dma_phyaddr;
            dma_sizes[index] = 1;
        }
    }
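    /*
     * Unused entries alias the previous buffer's address with a dummy
     * one-byte length, presumably because the hardware expects every
     * buffer pointer in the descriptor to be valid.
     */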

    /* In 2-buffer mode the second buffer is assigned to the third pointer */
    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_2) {
        dma_pointers[2] = dma_pointers[1];
        dma_sizes[2] = dma_sizes[1];
        dma_sizes[1] = 1;
    }

    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
        xge_hal_ring_dtr_5b_set(dtrh, dma_pointers, dma_sizes);
    }
    else {
        xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, dma_sizes);
    }

_exit:
    return status;
}

/**
 * xge_tx_compl
 * If the interrupt is due to a Tx completion, free the sent buffer
 *
 * @channelh Channel Handle
 * @dtr Descriptor
 * @t_code Transfer Code indicating success or error
 * @userdata Per-adapter Data
 *
 * Returns XGE_HAL_OK or HAL error enum
 */
xge_hal_status_e
xge_tx_compl(xge_hal_channel_h channelh,
    xge_hal_dtr_h dtr, u8 t_code, void *userdata)
{
    xge_tx_priv_t *ll_tx_priv = NULL;
    xge_lldev_t *lldev = (xge_lldev_t *)userdata;
    struct ifnet *ifnetp = lldev->ifnetp;
    mbuf_t m_buffer = NULL;
    int qindex = xge_hal_channel_id(channelh);

    mtx_lock(&lldev->mtx_tx[qindex]);

    XGE_DRV_STATS(tx_completions);

    /*
     * For each completed descriptor: get the private structure, free the
     * buffer, do the unmapping, and free the descriptor
     */
    do {
        XGE_DRV_STATS(tx_desc_compl);

        if(t_code) {
            XGE_DRV_STATS(tx_tcode);
            xge_trace(XGE_TRACE, "t_code %d", t_code);
            xge_hal_device_handle_tcode(channelh, dtr, t_code);
        }

        ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
        m_buffer = ll_tx_priv->buffer;
        bus_dmamap_unload(lldev->dma_tag_tx, ll_tx_priv->dma_map);
        m_freem(m_buffer);
        ll_tx_priv->buffer = NULL;
        xge_hal_fifo_dtr_free(channelh, dtr);
    } while(xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code)
        == XGE_HAL_OK);
    xge_send_locked(ifnetp, qindex);
    ifnetp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    mtx_unlock(&lldev->mtx_tx[qindex]);

    return XGE_HAL_OK;
}

/**
 * xge_tx_initial_replenish
 * Initially allocate buffers and set them into descriptors for later use
 *
 * @channelh Tx Channel Handle
 * @dtrh Descriptor Handle
 * @index Descriptor Index
 * @userdata Per-adapter Data
 * @reopen Channel open/reopen option
 *
 * Returns XGE_HAL_OK or HAL error enums
 */
xge_hal_status_e
xge_tx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    int index, void *userdata, xge_hal_channel_reopen_e reopen)
{
    xge_tx_priv_t *txd_priv = NULL;
    int status = XGE_HAL_OK;

    /* Get the user data portion from the channel handle */
    xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
    if(lldev == NULL) {
        XGE_EXIT_ON_ERR("Failed to get user data from channel", txinit_out,
            XGE_HAL_FAIL);
    }

    /* Get the private data */
    txd_priv = (xge_tx_priv_t *) xge_hal_fifo_dtr_private(dtrh);
    if(txd_priv == NULL) {
        XGE_EXIT_ON_ERR("Failed to get descriptor private data", txinit_out,
            XGE_HAL_FAIL);
    }

    /* Create a DMA map for this descriptor */
    if(bus_dmamap_create(lldev->dma_tag_tx, BUS_DMA_NOWAIT,
        &txd_priv->dma_map)) {
        XGE_EXIT_ON_ERR("DMA map creation for Tx descriptor failed",
            txinit_out, XGE_HAL_FAIL);
    }

txinit_out:
    return status;
}

/**
 * xge_rx_initial_replenish
 * Initially allocate buffers and set them into descriptors for later use
 *
 * @channelh Rx Channel Handle
 * @dtrh Descriptor Handle
 * @index Ring Index
 * @userdata Per-adapter Data
 * @reopen Channel open/reopen option
 *
 * Returns XGE_HAL_OK or HAL error enums
 */
xge_hal_status_e
xge_rx_initial_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    int index, void *userdata, xge_hal_channel_reopen_e reopen)
{
    xge_rx_priv_t *rxd_priv = NULL;
    int status = XGE_HAL_OK;
    int index1 = 0, index2 = 0;

    /* Get the user data portion from the channel handle */
    xge_lldev_t *lldev = xge_hal_channel_userdata(channelh);
    if(lldev == NULL) {
        XGE_EXIT_ON_ERR("Failed to get user data from channel", rxinit_out,
            XGE_HAL_FAIL);
    }

    /* Get the private data */
    rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);
    if(rxd_priv == NULL) {
        XGE_EXIT_ON_ERR("Failed to get descriptor private data", rxinit_out,
            XGE_HAL_FAIL);
    }

    rxd_priv->bufferArray = xge_os_malloc(NULL,
        (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));

    if(rxd_priv->bufferArray == NULL) {
        XGE_EXIT_ON_ERR("Failed to allocate Rxd private", rxinit_out,
            XGE_HAL_FAIL);
    }

    if(lldev->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_1) {
        /* Create a DMA map for this descriptor */
        if(bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
            &rxd_priv->dmainfo[0].dma_map)) {
            XGE_EXIT_ON_ERR("DMA map creation for Rx descriptor failed",
                rxinit_err_out, XGE_HAL_FAIL);
        }
        /* Get a buffer and attach it to this descriptor */
        status = xge_get_buf(dtrh, rxd_priv, lldev, 0);
    }
    else {
        for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
            /* Create a DMA map for this descriptor */
            if(bus_dmamap_create(lldev->dma_tag_rx, BUS_DMA_NOWAIT,
                &rxd_priv->dmainfo[index1].dma_map)) {
                for(index2 = index1 - 1; index2 >= 0; index2--) {
                    bus_dmamap_destroy(lldev->dma_tag_rx,
                        rxd_priv->dmainfo[index2].dma_map);
                }
                XGE_EXIT_ON_ERR(
                    "Jumbo DMA map creation for Rx descriptor failed",
                    rxinit_err_out, XGE_HAL_FAIL);
            }
        }
        status = xge_get_buf_3b_5b(dtrh, rxd_priv, lldev);
    }

    if(status != XGE_HAL_OK) {
        for(index1 = 0; index1 < lldev->rxd_mbuf_cnt; index1++) {
            bus_dmamap_destroy(lldev->dma_tag_rx,
                rxd_priv->dmainfo[index1].dma_map);
        }
        goto rxinit_err_out;
    }
    else {
        goto rxinit_out;
    }

rxinit_err_out:
    xge_os_free(NULL, rxd_priv->bufferArray,
        (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));
rxinit_out:
    return status;
}

/**
 * xge_rx_term
 * During unload, terminate and free all descriptors
 *
 * @channelh Rx Channel Handle
 * @dtrh Rx Descriptor Handle
 * @state Descriptor State
 * @userdata Per-adapter Data
 * @reopen Channel open/reopen option
 */
void
xge_rx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    xge_hal_dtr_state_e state, void *userdata,
    xge_hal_channel_reopen_e reopen)
{
    xge_rx_priv_t *rxd_priv = NULL;
    xge_lldev_t *lldev = NULL;
    int index = 0;

    /* Do nothing unless the descriptor state is "Posted" */
    if(state != XGE_HAL_DTR_STATE_POSTED) goto rxterm_out;

    /* Get the user data portion */
    lldev = xge_hal_channel_userdata(channelh);

    /* Get the private data */
    rxd_priv = (xge_rx_priv_t *) xge_hal_ring_dtr_private(channelh, dtrh);

    for(index = 0; index < lldev->rxd_mbuf_cnt; index++) {
        if(rxd_priv->dmainfo[index].dma_map != NULL) {
            bus_dmamap_sync(lldev->dma_tag_rx,
                rxd_priv->dmainfo[index].dma_map, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(lldev->dma_tag_rx,
                rxd_priv->dmainfo[index].dma_map);
            if(rxd_priv->bufferArray[index] != NULL)
                m_free(rxd_priv->bufferArray[index]);
            bus_dmamap_destroy(lldev->dma_tag_rx,
                rxd_priv->dmainfo[index].dma_map);
        }
    }
    xge_os_free(NULL, rxd_priv->bufferArray,
        (sizeof(rxd_priv->bufferArray) * lldev->rxd_mbuf_cnt));

    /* Free the descriptor */
    xge_hal_ring_dtr_free(channelh, dtrh);

rxterm_out:
    return;
}

/**
 * xge_tx_term
 * During unload, terminate and free all descriptors
 *
 * @channelh Tx Channel Handle
 * @dtr Tx Descriptor Handle
 * @state Descriptor State
 * @userdata Per-adapter Data
 * @reopen Channel open/reopen option
 */
void
xge_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
    xge_hal_dtr_state_e state, void *userdata,
    xge_hal_channel_reopen_e reopen)
{
    xge_tx_priv_t *ll_tx_priv = xge_hal_fifo_dtr_private(dtr);
    xge_lldev_t *lldev = (xge_lldev_t *)userdata;

    /* Destroy the DMA map */
    bus_dmamap_destroy(lldev->dma_tag_tx, ll_tx_priv->dma_map);
}

/**
 * xge_methods
 *
 * FreeBSD device interface entry points
 */
static device_method_t xge_methods[] = {
    DEVMETHOD(device_probe, xge_probe),
    DEVMETHOD(device_attach, xge_attach),
    DEVMETHOD(device_detach, xge_detach),
    DEVMETHOD(device_shutdown, xge_shutdown),
    {0, 0}
};

static driver_t xge_driver = {
    "nxge",
    xge_methods,
    sizeof(xge_lldev_t),
};
static devclass_t xge_devclass;
DRIVER_MODULE(nxge, pci, xge_driver, xge_devclass, 0, 0);