/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/nxge/xgehal/xgehal-ring.c 330897 2018-03-14 03:19:51Z eadler $
 */

#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>

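/*
 * __hal_ring_item_dma_offset - Return the byte offset of an RxD block
 * within its owning memory block.
 *
 * Compiled in only when the platform requires explicit syncing of
 * streaming DMA mappings; __hal_ring_rxdblock_link() uses the offset to
 * sync just the next-block pointer of an RxD block to the device.
 */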
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
	           void *item)
{
	int memblock_idx;
	void *memblock;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

	return (char*)item - (char*)memblock;
}
#endif

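/*
 * __hal_ring_item_dma_addr - Return the DMA (bus) address of an RxD block
 * and, through dma_handle, the DMA handle of its owning memory block.
 *
 * The owning memblock is found via the memblock index stored in the block
 * itself; the result is the memblock's DMA base address plus the block's
 * byte offset within that memblock. Illustrative example (addresses are
 * made up): an item located 0x1000 bytes into a memblock whose DMA base
 * is 0x7f000000 yields 0x7f001000.
 */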
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
	    pci_dma_h *dma_handle)
{
	int memblock_idx;
	void *memblock;
	xge_hal_mempool_dma_t *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
	                                    memblock_idx);

	/* get memblock DMA object by memblock index */
	memblock_dma_object =
	    __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
	                                memblock_idx);

	/* calculate offset in the memblock of this item */
	dma_item_offset = (char*)item - (char*)memblock;

	*dma_handle = memblock_dma_object->handle;

	return memblock_dma_object->addr + dma_item_offset;
}

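/*
 * __hal_ring_rxdblock_link - Chain two RxD blocks of the ring's mempool.
 *
 * Writes the DMA start address of block "to" into the next-block pointer
 * of block "from", so the hardware can walk from one block to the next.
 * Where required, the updated pointer is synced to the device.
 *
 * __hal_ring_mempool_item_alloc() below links each block N to block N-1
 * and, once the last block is formatted, points block 0 back at it, which
 * closes the circular chain.
 */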
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
	         xge_hal_ring_t *ring, int from, int to)
{
	xge_hal_ring_block_t *to_item, *from_item;
	dma_addr_t to_dma, from_dma;
	pci_dma_h to_dma_handle, from_dma_handle;

	/* get "from" RxD block */
	from_item = (xge_hal_ring_block_t *)
	            __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
	xge_assert(from_item);

	/* get "to" RxD block */
	to_item = (xge_hal_ring_block_t *)
	          __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
	xge_assert(to_item);

	/* get the DMA start address of the "to" (previous) RxD block */
	to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

	/* set this RxD block's next pointer to point to the previous
	 * item's DMA start address */
	__hal_ring_block_next_pointer_set(from_item, to_dma);

	/* get the "from" RxD block's DMA start address */
	from_dma =
	    __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* we must sync the "from" RxD block, so the hardware will see it */
	xge_os_dma_sync(ring->channel.pdev,
	              from_dma_handle,
	          from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	          __hal_ring_item_dma_offset(mempoolh, from_item) +
	                XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	          sizeof(u64),
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
	    from, (unsigned long long)from_dma, to,
	    (unsigned long long)to_dma);
}

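/*
 * __hal_ring_mempool_item_alloc - Mempool callback invoked once for each
 * allocated RxD block.
 *
 * Walks the descriptors of the block in reverse order, records each
 * descriptor's address in reserved_rxds_arr, pre-computes its private
 * area (DMA offset, DMA address and handle) and stores a back-reference
 * to that private area in the descriptor's Host_Control field: in
 * 5-buffer mode on 64-bit platforms it packs the memblock index and the
 * item index, otherwise it stores the pointer directly. Finally the block
 * is linked to its neighbours to form the circular chain of RxD blocks.
 */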
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
	              void *memblock,
	              int memblock_index,
	              xge_hal_mempool_dma_t *dma_object,
	              void *item,
	              int index,
	              int is_last,
	              void *userdata)
{
	int i;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

	xge_assert(item);
	xge_assert(ring);


	/* format rxds array */
	for (i=ring->rxds_per_block-1; i>=0; i--) {
	    void *rxdblock_priv;
	    xge_hal_ring_rxd_priv_t *rxd_priv;
	    xge_hal_ring_rxd_1_t *rxdp;
	    int reserve_index = index * ring->rxds_per_block + i;
	    int memblock_item_idx;

	    ring->reserved_rxds_arr[reserve_index] = (char *)item +
	            (ring->rxds_per_block - 1 - i) * ring->rxd_size;

	    /* Note: memblock_item_idx is the index of the item within
	     *       the memblock. For instance, with three RxD blocks
	     *       per memblock this value can be 0, 1 or 2. */
	    rxdblock_priv =
	        __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
	                                memblock_index, item,
	                                &memblock_item_idx);
	    rxdp = (xge_hal_ring_rxd_1_t *)
	        ring->reserved_rxds_arr[reserve_index];
	    rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
	        ((char*)rxdblock_priv + ring->rxd_priv_size * i);

	    /* pre-format the per-RxD Ring private area */
	    rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
	    rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
	    rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
	    rxd_priv->dma_object = dma_object;
#endif

	    /* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
	    if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	        xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
	        xge_assert(memblock_index <= 0xFFFF);
	        xge_assert(i <= 0xFFFF);
	        /* store memblock's index */
	        rxdp_5->host_control = (u32)memblock_index << 16;
	        /* store index of memblock's private */
	        rxdp_5->host_control |= (u32)(memblock_item_idx *
	                        ring->rxds_per_block + i);
#else
	        /* 32-bit case */
	        rxdp_5->host_control = (u32)rxd_priv;
#endif
	    } else {
	        /* 1b and 3b modes */
	        rxdp->host_control = (u64)(ulong_t)rxd_priv;
	    }
#else
	    /* 1b and 3b modes */
	    rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
	}

	__hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);

	if (is_last) {
	    /* close the circle: point the first block's next pointer
	     * back at this, the last, block */
	    __hal_ring_rxdblock_link(mempoolh, ring, 0, index);
	}

	if (index > 0) {
	    /* link this RxD block with the previous one */
	    __hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
	}

	return XGE_HAL_OK;
}

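/*
 * __hal_ring_initial_replenish - Reserve, initialize and post all
 * currently available descriptors of a ring channel.
 *
 * Called at channel-open time when the ULD registered a dtr_init
 * callback. If dtr_init fails for a descriptor, that descriptor is
 * freed, the channel is aborted and the error is propagated.
 */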
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
	             xge_hal_channel_reopen_e reopen)
{
	xge_hal_dtr_h dtr = NULL;

	while (xge_hal_channel_dtr_count(channel) > 0) {
	    xge_hal_status_e status;

	    status = xge_hal_ring_dtr_reserve(channel, &dtr);
	    xge_assert(status == XGE_HAL_OK);

	    if (channel->dtr_init) {
	        status = channel->dtr_init(channel,
	                                    dtr, channel->reserve_length,
	                                    channel->userdata,
	                                    reopen);
	        if (status != XGE_HAL_OK) {
	            xge_hal_ring_dtr_free(channel, dtr);
	            xge_hal_channel_abort(channel,
	                XGE_HAL_CHANNEL_OC_NORMAL);
	            return status;
	        }
	    }

	    xge_hal_ring_dtr_post(channel, dtr);
	}

	return XGE_HAL_OK;
}

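/*
 * __hal_ring_open - Open a ring channel.
 *
 * Reads the per-queue configuration, initializes the reserve/post locks,
 * computes the RxD and per-RxD private sizes for the configured buffer
 * mode, allocates reserved_rxds_arr and the backing mempool (whose item
 * callback formats and links the RxD blocks), initializes the channel
 * and, if the ULD supplied a dtr_init callback, performs the initial
 * replenish. Every failure path closes the half-opened ring via
 * __hal_ring_close() and returns the error.
 */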
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;


	/* Note: at this point we have channel.devh and channel.pdev
	 *       pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
	    sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block; depends on the configured
	 * buffer_mode */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
	          sizeof(void*) * queue->max * ring->rxds_per_block);

	if (ring->reserved_rxds_arr == NULL) {
	    __hal_ring_close(channelh);
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	ring->mempool = __hal_mempool_create(
	                 hldev->pdev,
	                 ring->config->memblock_size,
	                 XGE_HAL_RING_RXDBLOCK_SIZE,
	                 ring->rxdblock_priv_size,
	                 queue->initial, queue->max,
	                 __hal_ring_mempool_item_alloc,
	                 NULL, /* nothing to free */
	                 ring);
	if (ring->mempool == NULL) {
	    __hal_ring_close(channelh);
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
	                  attr,
	                  ring->reserved_rxds_arr,
	                  queue->initial * ring->rxds_per_block,
	                  queue->max * ring->rxds_per_block,
	                  0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
	    __hal_ring_close(channelh);
	    return status;
	}

	/* sanity check that everything formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
	        (char *)ring->mempool->items_arr[0] +
	          (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

	/* Note:
	 * Specifying a dtr_init callback means two things:
	 * 1) dtrs need to be initialized by the ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does).
	 * Currently we don't have a case when 1) is done without 2).
	 */
	if (ring->channel.dtr_init) {
	    if ((status = __hal_ring_initial_replenish (
	                    (xge_hal_channel_t *) channelh,
	                    XGE_HAL_CHANNEL_OC_NORMAL) )
	                    != XGE_HAL_OK) {
	        __hal_ring_close(channelh);
	        return status;
	    }
	}

	/* the initial replenish increments the usage counter in its post()
	 * routine, so we have to reset it here */
	ring->channel.usage_cnt = 0;

	return XGE_HAL_OK;
}

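/*
 * __hal_ring_close - Close a ring channel: destroy the mempool, free
 * reserved_rxds_arr, terminate the channel and destroy the reserve/post
 * locks. Also used by __hal_ring_open() to unwind its error paths, so it
 * tolerates a partially initialized ring.
 */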
void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
	defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
	    __hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
	    xge_os_free(ring->channel.pdev,
	              ring->reserved_rxds_arr,
	          sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}

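/*
 * __hal_ring_prc_enable - Program and enable the PRC for this ring.
 *
 * Writes the DMA address of the first RxD block (the last formatted
 * block, see the note below) into prc_rxd0_n, then sets up prc_ctrl_n:
 * buffer mode, RxD backoff interval derived from the PCI frequency,
 * no-snoop bits, and the Herc-only group-reads and bimodal-interrupt
 * bits. Finally the receive protocol-assist register is updated with the
 * scatter mode and VLAN-tag stripping configuration.
 */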
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
	        ((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
	        queue->buffer_mode == 3 ||
	        queue->buffer_mode == 5);

	/* the last block in fact becomes the first one. This is just the
	 * way it is filled up and linked by item_alloc() */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
	                 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
	        ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	    ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
	    val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2); /* 1, 3 or 5 => 0, 1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
	    (hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	        val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	if (hldev->config.bimodal_interrupts)
	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	        val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	        ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clear the STRIP_VLAN_TAG bit and set it from the upper-layer config */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
	        ring->channel.post_qid, queue->buffer_mode);
}

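/*
 * __hal_ring_prc_disable - Clear the RC_ENABLED bit in this ring's
 * prc_ctrl_n register, which stops receive processing on the ring.
 */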
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
	        ((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	              ring->channel.regh0,
	              &bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}

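/*
 * __hal_ring_hw_initialize - One-time Rx initialization at device-open
 * time: program per-queue priorities and per-queue DRAM sizes, activate
 * the default QoS-based Rx steering when no RTS steering (QoS/port/MAC)
 * is enabled, zero rts_frm_len_n for unconfigured queues, enable
 * MC-RLDRAM (with Herc-specific fix-ups) and bind the configured MSI-X
 * vectors to their ring channels.
 */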
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    val64 |= vBIT(hldev->config.ring.queue[i].priority,
	                        (5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

	if (!hldev->config.rts_qos_en &&
	    !hldev->config.rts_port_en &&
	    !hldev->config.rts_mac_en) {

	    /*
	     * Activate default (QoS-based) Rx steering
	     */

	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                      &bar0->rts_qos_steering);
	    for (j = 0; j < 8 /* QoS max */; j++)
	    {
	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
	        {
	            if (!hldev->config.ring.queue[i].configured)
	                continue;
	            if (!hldev->config.ring.queue[i].rth_en)
	                val64 |= (BIT(i) >> (j*8));
	        }
	    }
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                   &bar0->rts_qos_steering);
	    xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
	               (unsigned long long)val64);

	}

	/* Note: If a queue does not exist, it should be assigned a maximum
	 *   length of zero. Otherwise, packet loss could occur.
	 *   P. 4-4 User guide.
	 *
	 * All configured rings will be properly set at device open time
	 * by utilizing the device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (hldev->config.ring.queue[i].configured)
	        continue;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
	                         &bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    ((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
	     XGE_HAL_MC_RLDRAM_MRS_ENABLE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                     &bar0->mc_rldram_mrs);

	/* the RLDRAM initialization procedure requires 500us to complete */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                         &bar0->mc_rldram_ref_per_herc);

	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs_herc);
	    xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
	               (unsigned long long)val64);

	    val64 = 0x0003570003010300ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                           &bar0->mc_rldram_mrs_herc);

	    xge_os_mdelay(1);
	}

	/*
	 * Assign MSI-X vectors
	 */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    xge_list_t *item;
	    xge_hal_channel_t *channel = NULL;

	    if (!hldev->config.ring.queue[i].configured ||
	        !hldev->config.ring.queue[i].intr_vector ||
	        hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
	        continue;

	    /* find channel */
	    xge_list_for_each(item, &hldev->free_channels) {
	        xge_hal_channel_t *tmp;
	        tmp = xge_container_of(item, xge_hal_channel_t,
	                       item);
	        if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
	            tmp->post_qid == i) {
	            channel = tmp;
	            break;
	        }
	    }

	    if (channel) {
	        xge_hal_channel_msix_set(channel,
	            hldev->config.ring.queue[i].intr_vector);
	    }
	}

	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}

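/*
 * __hal_ring_mtu_set - Program the per-ring maximum frame length.
 *
 * Queues configured with an explicit max_frm_len keep it; queues set to
 * XGE_HAL_RING_USE_MTU inherit new_frmlen. rmac_max_pyld_len is updated
 * with new_frmlen as well.
 */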
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
	int i;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    if (hldev->config.ring.queue[i].max_frm_len !=
	                    XGE_HAL_RING_USE_MTU) {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                XGE_HAL_MAC_RTS_FRM_LEN_SET(
	            hldev->config.ring.queue[i].max_frm_len),
	            &bar0->rts_frm_len_n[i]);
	    } else {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                   XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
	                   &bar0->rts_frm_len_n[i]);
	    }
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	               XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
	                   &bar0->rmac_max_pyld_len);
}