/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
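/*
 * __hal_ring_item_dma_offset - Return the byte offset of an RxD block
 * within the memblock that owns it.
 */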
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
	           void *item)
{
	int memblock_idx;
	void *memblock;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

	return (char*)item - (char*)memblock;
}
#endif

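/*
 * __hal_ring_item_dma_addr - Return the DMA address of an RxD block and
 * fill in the DMA handle of the memblock that owns it.
 */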
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
	    pci_dma_h *dma_handle)
{
	int memblock_idx;
	void *memblock;
	xge_hal_mempool_dma_t *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
	                                    memblock_idx);

	/* get memblock DMA object by memblock index */
	memblock_dma_object =
	    __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
	                                memblock_idx);

	/* calculate offset in the memblock of this item */
	dma_item_offset = (char*)item - (char*)memblock;

	*dma_handle = memblock_dma_object->handle;

	return memblock_dma_object->addr + dma_item_offset;
}

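/*
 * __hal_ring_rxdblock_link - Chain the "from" RxD block to the "to" RxD
 * block by writing the DMA address of "to" into the next-block pointer
 * of "from", syncing the update to the device when required.
 */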
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
	         xge_hal_ring_t *ring, int from, int to)
{
	xge_hal_ring_block_t *to_item, *from_item;
	dma_addr_t to_dma, from_dma;
	pci_dma_h to_dma_handle, from_dma_handle;

	/* get "from" RxD block */
	from_item = (xge_hal_ring_block_t *)
	            __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
	xge_assert(from_item);

	/* get "to" RxD block */
	to_item = (xge_hal_ring_block_t *)
	          __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
	xge_assert(to_item);

	/* get DMA start address of the "to" (previous) RxD block */
	to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

	/* set the next pointer of the "from" RxD block to point to the
	 * "to" block's DMA start address */
	__hal_ring_block_next_pointer_set(from_item, to_dma);

	/* get "from" RxD block's DMA start address */
	from_dma =
	    __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* we must sync "from" RxD block, so hardware will see it */
	xge_os_dma_sync(ring->channel.pdev,
	          from_dma_handle,
	          from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	          __hal_ring_item_dma_offset(mempoolh, from_item) +
	                XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	          sizeof(u64),
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
	    from, (unsigned long long)from_dma, to,
	    (unsigned long long)to_dma);
}

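/*
 * __hal_ring_mempool_item_alloc - Mempool item-alloc callback. Formats a
 * freshly allocated RxD block: fills the reserve array, pre-formats each
 * RxD's private area and host_control, and links the block into the ring.
 */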
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
	              void *memblock,
	              int memblock_index,
	              xge_hal_mempool_dma_t *dma_object,
	              void *item,
	              int index,
	              int is_last,
	              void *userdata)
{
	int i;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

	xge_assert(item);
	xge_assert(ring);

	/* format rxds array */
	for (i=ring->rxds_per_block-1; i>=0; i--) {
	    void *rxdblock_priv;
	    xge_hal_ring_rxd_priv_t *rxd_priv;
	    xge_hal_ring_rxd_1_t *rxdp;
	    int reserve_index = index * ring->rxds_per_block + i;
	    int memblock_item_idx;

	    ring->reserved_rxds_arr[reserve_index] = (char *)item +
	            (ring->rxds_per_block - 1 - i) * ring->rxd_size;

	    /* Note: memblock_item_idx is the index of the item within
	     *       the memblock. For instance, in case of three RxD-blocks
	     *       per memblock this value can be 0, 1 or 2. */
	    rxdblock_priv =
	        __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
	                                memblock_index, item,
	                                &memblock_item_idx);
	    rxdp = (xge_hal_ring_rxd_1_t *)
	        ring->reserved_rxds_arr[reserve_index];
	    rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
	        ((char*)rxdblock_priv + ring->rxd_priv_size * i);

	    /* pre-format this RxD's private area */
	    rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
	    rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
	    rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
	    rxd_priv->dma_object = dma_object;
#endif

	    /* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
	    if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	        xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
	        xge_assert(memblock_index <= 0xFFFF);
	        xge_assert(i <= 0xFFFF);
	        /* store memblock's index */
	        rxdp_5->host_control = (u32)memblock_index << 16;
	        /* store index of this RxD's private within the memblock */
	        rxdp_5->host_control |= (u32)(memblock_item_idx *
	                        ring->rxds_per_block + i);
#else
	        /* 32-bit case */
	        rxdp_5->host_control = (u32)rxd_priv;
#endif
	    } else {
	        /* 1b and 3b modes */
	        rxdp->host_control = (u64)(ulong_t)rxd_priv;
	    }
#else
	    /* 1b and 3b modes */
	    rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
	}

	__hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);

	if (is_last) {
	    /* link last one with first one */
	    __hal_ring_rxdblock_link(mempoolh, ring, 0, index);
	}

	if (index > 0) {
	    /* link this RxD block with previous one */
	    __hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
	}

	return XGE_HAL_OK;
}

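/*
 * __hal_ring_initial_replenish - Reserve, initialize (via the ULD-provided
 * dtr_init callback) and post all available descriptors at channel-open
 * time.
 */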
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
	             xge_hal_channel_reopen_e reopen)
{
	xge_hal_dtr_h dtr = NULL;

	while (xge_hal_channel_dtr_count(channel) > 0) {
	    xge_hal_status_e status;

	    status = xge_hal_ring_dtr_reserve(channel, &dtr);
	    xge_assert(status == XGE_HAL_OK);

	    if (channel->dtr_init) {
	        status = channel->dtr_init(channel,
	                                    dtr, channel->reserve_length,
	                                    channel->userdata,
	                                    reopen);
	        if (status != XGE_HAL_OK) {
	            xge_hal_ring_dtr_free(channel, dtr);
	            xge_hal_channel_abort(channel,
	                XGE_HAL_CHANNEL_OC_NORMAL);
	            return status;
	        }
	    }

	    xge_hal_ring_dtr_post(channel, dtr);
	}

	return XGE_HAL_OK;
}

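/*
 * __hal_ring_open - Open the ring channel: allocate the reserve array and
 * the RxD-block mempool, initialize the channel, and optionally perform
 * the initial replenish.
 */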
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;

	/* Note: at this point we have channel.devh and channel.pdev
	 *       pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
	    sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
	          sizeof(void*) * queue->max * ring->rxds_per_block);

	if (ring->reserved_rxds_arr == NULL) {
	    __hal_ring_close(channelh);
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	ring->mempool = __hal_mempool_create(
	                 hldev->pdev,
	                 ring->config->memblock_size,
	                 XGE_HAL_RING_RXDBLOCK_SIZE,
	                 ring->rxdblock_priv_size,
	                 queue->initial, queue->max,
	                 __hal_ring_mempool_item_alloc,
	                 NULL, /* nothing to free */
	                 ring);
	if (ring->mempool == NULL) {
	    __hal_ring_close(channelh);
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
	                  attr,
	                  ring->reserved_rxds_arr,
	                  queue->initial * ring->rxds_per_block,
	                  queue->max * ring->rxds_per_block,
	                  0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
	    __hal_ring_close(channelh);
	    return status;
	}

	/* sanity check that everything formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
	        (char *)ring->mempool->items_arr[0] +
	          (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

	/* Note:
	 * Specifying the dtr_init callback means two things:
	 * 1) dtrs need to be initialized by the ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does).
	 * Currently we don't have a case where 1) is done without 2).
	 */
	if (ring->channel.dtr_init) {
	    if ((status = __hal_ring_initial_replenish(
	                    (xge_hal_channel_t *) channelh,
	                    XGE_HAL_CHANNEL_OC_NORMAL))
	                    != XGE_HAL_OK) {
	        __hal_ring_close(channelh);
	        return status;
	    }
	}

	/* the initial replenish increments the usage counter in its post()
	 * routine; reset it here */
	ring->channel.usage_cnt = 0;

	return XGE_HAL_OK;
}

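/*
 * __hal_ring_close - Close the ring channel and free the resources
 * allocated by __hal_ring_open().
 */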
void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
	defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
	    __hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
	    xge_os_free(ring->channel.pdev,
	          ring->reserved_rxds_arr,
	          sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}

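/*
 * __hal_ring_prc_enable - Program the PRC for this ring: point it at the
 * ring's starting RxD block, enable receive, and configure the receive
 * protocol assist options.
 */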
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
	        ((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
	        queue->buffer_mode == 3 ||
	        queue->buffer_mode == 5);

	/* last block in fact becomes first. This is just the way it
	 * is filled up and linked by item_alloc() */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
	                 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
	        ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	    ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
	    val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2); /* 1,3 or 5 => 0,1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
	    (hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	if (hldev->config.bimodal_interrupts)
	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	        val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	        ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clear the STRIP_VLAN_TAG bit and set it from the upper-layer config */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
	        ring->channel.post_qid, queue->buffer_mode);
}

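/*
 * __hal_ring_prc_disable - Clear the RC_ENABLED bit in the PRC control
 * register for this ring.
 */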
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
	        ((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	        ring->channel.regh0,
	        &bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}

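/*
 * __hal_ring_hw_initialize - Device-level Rx initialization: queue
 * priorities, DRAM partitioning, default QoS steering, RLDRAM enable and
 * per-ring MSI-X vector assignment.
 */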
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    val64 |= vBIT(hldev->config.ring.queue[i].priority,
	                        (5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

	if (!hldev->config.rts_qos_en &&
	    !hldev->config.rts_port_en &&
	    !hldev->config.rts_mac_en) {

	    /*
	     * Activate default (QoS-based) Rx steering
	     */

	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                      &bar0->rts_qos_steering);
	    for (j = 0; j < 8 /* QoS max */; j++)
	    {
	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
	        {
	            if (!hldev->config.ring.queue[i].configured)
	                continue;
	            if (!hldev->config.ring.queue[i].rth_en)
	                val64 |= (BIT(i) >> (j*8));
	        }
	    }
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                   &bar0->rts_qos_steering);
	    xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
	               (unsigned long long)val64);

	}

	/* Note: if a queue does not exist, it must be assigned a maximum
	 *   frame length of zero; otherwise packet loss could occur
	 *   (User Guide, p. 4-4).
	 *
	 * All configured rings will be properly set at device-open time
	 * via the device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (hldev->config.ring.queue[i].configured)
	        continue;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
	                         &bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    ((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
	     XGE_HAL_MC_RLDRAM_MRS_ENABLE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                     &bar0->mc_rldram_mrs);

	/* the RLDRAM initialization procedure requires 500us to complete */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                         &bar0->mc_rldram_ref_per_herc);

	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs_herc);
	    xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
	               (unsigned long long)val64);

	    val64 = 0x0003570003010300ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                           &bar0->mc_rldram_mrs_herc);

	    xge_os_mdelay(1);
	}

	/*
	 * Assign MSI-X vectors
	 */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    xge_list_t *item;
	    xge_hal_channel_t *channel = NULL;

	    if (!hldev->config.ring.queue[i].configured ||
	        !hldev->config.ring.queue[i].intr_vector ||
	        hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
	        continue;

	    /* find channel */
	    xge_list_for_each(item, &hldev->free_channels) {
	        xge_hal_channel_t *tmp;
	        tmp = xge_container_of(item, xge_hal_channel_t,
	                       item);
	        if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
	            tmp->post_qid == i) {
	            channel = tmp;
	            break;
	        }
	    }

	    if (channel) {
	        xge_hal_channel_msix_set(channel,
	            hldev->config.ring.queue[i].intr_vector);
	    }
	}

	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}

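/*
 * __hal_ring_mtu_set - Program per-ring maximum frame length registers
 * and the RMAC maximum payload length for the new MTU.
 */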
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
	int i;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    if (hldev->config.ring.queue[i].max_frm_len !=
	                    XGE_HAL_RING_USE_MTU) {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                XGE_HAL_MAC_RTS_FRM_LEN_SET(
	                    hldev->config.ring.queue[i].max_frm_len),
	                &bar0->rts_frm_len_n[i]);
	    } else {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
	                &bar0->rts_frm_len_n[i]);
	    }
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	               XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
	               &bar0->rmac_max_pyld_len);
}