/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>

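/*
 * __hal_ring_item_dma_offset - Return the byte offset of a ring item
 * within its owning memory block.
 */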
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
	           void *item)
{
	int memblock_idx;
	void *memblock;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx(item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

	return (char*)item - (char*)memblock;
}
#endif

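/*
 * __hal_ring_item_dma_addr - Return the DMA (bus) address of a ring item
 * and, via @dma_handle, the DMA handle of its owning memory block.
 */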
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
	    pci_dma_h *dma_handle)
{
	int memblock_idx;
	void *memblock;
	xge_hal_mempool_dma_t *memblock_dma_object;
	ptrdiff_t dma_item_offset;

	/* get owner memblock index */
	memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);

	/* get owner memblock by memblock index */
	memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
	                                    memblock_idx);

	/* get memblock DMA object by memblock index */
	memblock_dma_object =
	    __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
	                                memblock_idx);

	/* calculate offset in the memblock of this item */
	dma_item_offset = (char*)item - (char*)memblock;

	*dma_handle = memblock_dma_object->handle;

	return memblock_dma_object->addr + dma_item_offset;
}

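/*
 * __hal_ring_rxdblock_link - Chain the "from" RxD block to the "to" RxD
 * block by writing the "to" block's DMA start address into the "from"
 * block's next-block pointer, syncing that pointer to the device when
 * streaming DMA is in use.
 */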
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
	         xge_hal_ring_t *ring, int from, int to)
{
	xge_hal_ring_block_t *to_item, *from_item;
	dma_addr_t to_dma, from_dma;
	pci_dma_h to_dma_handle, from_dma_handle;

	/* get "from" RxD block */
	from_item = (xge_hal_ring_block_t *)
	            __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
	xge_assert(from_item);

	/* get "to" RxD block */
	to_item = (xge_hal_ring_block_t *)
	          __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
	xge_assert(to_item);

	/* get DMA start address of the "to" RxD block */
	to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

	/* set the next pointer of this ("from") RxD block to the "to"
	 * block's DMA start address */
	__hal_ring_block_next_pointer_set(from_item, to_dma);

	/* get the "from" RxD block's DMA start address */
	from_dma =
	    __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* sync the "from" RxD block so the hardware sees the updated
	 * next pointer */
	xge_os_dma_sync(ring->channel.pdev,
	              from_dma_handle,
	          from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	          __hal_ring_item_dma_offset(mempoolh, from_item) +
	                XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	          sizeof(u64),
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
	    from, (unsigned long long)from_dma, to,
	    (unsigned long long)to_dma);
}

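/*
 * __hal_ring_mempool_item_alloc - Mempool callback invoked once per RxD
 * block. Formats the RxDs within the block (back to front), pre-computes
 * the per-RxD private data (DMA offset, address and handle), stores the
 * owning memblock index in the block, and chains the block into the ring.
 */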
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
	              void *memblock,
	              int memblock_index,
	              xge_hal_mempool_dma_t *dma_object,
	              void *item,
	              int index,
	              int is_last,
	              void *userdata)
{
	int i;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

	xge_assert(item);
	xge_assert(ring);

	/* format rxds array */
	for (i=ring->rxds_per_block-1; i>=0; i--) {
	    void *rxdblock_priv;
	    xge_hal_ring_rxd_priv_t *rxd_priv;
	    xge_hal_ring_rxd_1_t *rxdp;
	    int reserve_index = index * ring->rxds_per_block + i;
	    int memblock_item_idx;

	    ring->reserved_rxds_arr[reserve_index] = (char *)item +
	            (ring->rxds_per_block - 1 - i) * ring->rxd_size;

	    /* Note: memblock_item_idx is index of the item within
	     *       the memblock. For instance, in case of three RxD-blocks
	     *       per memblock this value can be 0, 1 or 2. */
	    rxdblock_priv =
	        __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
	                                memblock_index, item,
	                                &memblock_item_idx);
	    rxdp = (xge_hal_ring_rxd_1_t *)
	        ring->reserved_rxds_arr[reserve_index];
	    rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
	        ((char*)rxdblock_priv + ring->rxd_priv_size * i);

	    /* pre-format per-RxD Ring's private */
	    rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
	    rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
	    rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
	    rxd_priv->dma_object = dma_object;
#endif

	    /* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
	    if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	        xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
	        xge_assert(memblock_index <= 0xFFFF);
	        xge_assert(i <= 0xFFFF);
	        /* store memblock's index */
	        rxdp_5->host_control = (u32)memblock_index << 16;
	        /* store index of memblock's private */
	        rxdp_5->host_control |= (u32)(memblock_item_idx *
	                        ring->rxds_per_block + i);
#else
	        /* 32-bit case */
	        rxdp_5->host_control = (u32)rxd_priv;
#endif
	    } else {
	        /* 1b and 3b modes */
	        rxdp->host_control = (u64)(ulong_t)rxd_priv;
	    }
#else
	    /* 1b and 3b modes */
	    rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
	}

	__hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);

	if (is_last) {
	    /* link the last RxD block with the first one */
	    __hal_ring_rxdblock_link(mempoolh, ring, 0, index);
	}

	if (index > 0) {
	    /* link this RxD block with the previous one */
	    __hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
	}

	return XGE_HAL_OK;
}

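/*
 * __hal_ring_initial_replenish - Reserve, (optionally) ULD-initialize and
 * post all available RxDs at channel-open time. Aborts the channel and
 * returns the failure status if the ULD's dtr_init callback fails.
 */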
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
	             xge_hal_channel_reopen_e reopen)
{
	xge_hal_dtr_h dtr = NULL;

	while (xge_hal_channel_dtr_count(channel) > 0) {
	    xge_hal_status_e status;

	    status = xge_hal_ring_dtr_reserve(channel, &dtr);
	    xge_assert(status == XGE_HAL_OK);

	    if (channel->dtr_init) {
	        status = channel->dtr_init(channel,
	                                    dtr, channel->reserve_length,
	                                    channel->userdata,
	                                    reopen);
	        if (status != XGE_HAL_OK) {
	            xge_hal_ring_dtr_free(channel, dtr);
	            xge_hal_channel_abort(channel,
	                XGE_HAL_CHANNEL_OC_NORMAL);
	            return status;
	        }
	    }

	    xge_hal_ring_dtr_post(channel, dtr);
	}

	return XGE_HAL_OK;
}

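/*
 * __hal_ring_open - Open the ring channel: take the per-queue
 * configuration, initialize the reserve/post locks, allocate the
 * reserved-RxDs array and the RxD-block mempool, initialize the channel
 * and, if the ULD supplied a dtr_init callback, perform the initial
 * replenish.
 */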
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;

	/* Note: at this point we have channel.devh and channel.pdev
	 *       pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
	    sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
	          sizeof(void*) * queue->max * ring->rxds_per_block);

	if (ring->reserved_rxds_arr == NULL) {
	    __hal_ring_close(channelh);
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	ring->mempool = __hal_mempool_create(
	                 hldev->pdev,
	                 ring->config->memblock_size,
	                 XGE_HAL_RING_RXDBLOCK_SIZE,
	                 ring->rxdblock_priv_size,
	                 queue->initial, queue->max,
	                 __hal_ring_mempool_item_alloc,
	                 NULL, /* nothing to free */
	                 ring);
	if (ring->mempool == NULL) {
	    __hal_ring_close(channelh);
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
	                  attr,
	                  ring->reserved_rxds_arr,
	                  queue->initial * ring->rxds_per_block,
	                  queue->max * ring->rxds_per_block,
	                  0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
	    __hal_ring_close(channelh);
	    return status;
	}

	/* sanity check that everything formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
	        (char *)ring->mempool->items_arr[0] +
	          (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

	/* Note:
	 * Specifying a dtr_init callback means two things:
	 * 1) dtrs need to be initialized by the ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what __hal_ring_initial_replenish() below does).
	 * Currently we don't have a case when 1) is done without 2).
	 */
	if (ring->channel.dtr_init) {
	    if ((status = __hal_ring_initial_replenish(
	                    (xge_hal_channel_t *) channelh,
	                    XGE_HAL_CHANNEL_OC_NORMAL))
	                    != XGE_HAL_OK) {
	        __hal_ring_close(channelh);
	        return status;
	    }
	}

	/* the initial replenish increments the usage counter in its post()
	 * routine, so we have to reset it */
	ring->channel.usage_cnt = 0;

	return XGE_HAL_OK;
}

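/*
 * __hal_ring_close - Undo __hal_ring_open: destroy the RxD-block mempool,
 * free the reserved-RxDs array, terminate the channel and destroy the
 * reserve/post locks.
 */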
void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
	defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
	    __hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
	    xge_os_free(ring->channel.pdev,
	              ring->reserved_rxds_arr,
	          sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}

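/*
 * __hal_ring_prc_enable - Program and enable the PRC for this ring: write
 * the DMA address of the first RxD block, set buffer mode, RxD backoff
 * interval, no-snoop and RTH/group-reads options in prc_ctrl_n, and
 * configure receive protocol assist.
 */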
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
	        ((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
	        queue->buffer_mode == 3 ||
	        queue->buffer_mode == 5);

	/* the last block in fact becomes the first one. This is just the
	 * way it is filled up and linked by item_alloc() */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
	                 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
	        ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	    ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
	    val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2); /* 1, 3 or 5 => 0, 1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
	    (hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	        val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	if (hldev->config.bimodal_interrupts)
	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	        val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	        ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* clear the STRIP_VLAN_TAG bit and set it as configured by the
	 * upper layer */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
	        ring->channel.post_qid, queue->buffer_mode);
}

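/*
 * __hal_ring_prc_disable - Clear the RC_ENABLED bit in prc_ctrl_n to stop
 * the PRC for this ring.
 */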
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
	        ((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
	              ring->channel.regh0,
	              &bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
	        val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}

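/*
 * __hal_ring_hw_initialize - Rx-side hardware setup at device init time:
 * ring queue priorities and DRAM partitioning, default QoS-based Rx
 * steering, zero frame length for unconfigured queues, MC-RLDRAM enable
 * (with Herc fixups), and MSI-X vector assignment for configured rings.
 */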
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    val64 |= vBIT(hldev->config.ring.queue[i].priority,
	                        (5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

	if (!hldev->config.rts_qos_en &&
	    !hldev->config.rts_port_en &&
	    !hldev->config.rts_mac_en) {

	    /*
	     * Activate default (QoS-based) Rx steering
	     */

	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                      &bar0->rts_qos_steering);
	    for (j = 0; j < 8 /* QoS max */; j++)
	    {
	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
	        {
	            if (!hldev->config.ring.queue[i].configured)
	                continue;
	            if (!hldev->config.ring.queue[i].rth_en)
	                val64 |= (BIT(i) >> (j*8));
	        }
	    }
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                   &bar0->rts_qos_steering);
	    xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
	               (unsigned long long)val64);

	}

	/* Note: If a queue does not exist, it should be assigned a maximum
	 *   length of zero. Otherwise, packet loss could occur.
	 *   P. 4-4 User guide.
	 *
	 * All configured rings will be properly set at device open time
	 * by utilizing the device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (hldev->config.ring.queue[i].configured)
	        continue;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
	                         &bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    ((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
	     XGE_HAL_MC_RLDRAM_MRS_ENABLE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                     &bar0->mc_rldram_mrs);

	/* the RLDRAM initialization procedure requires 500us to complete */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                         &bar0->mc_rldram_ref_per_herc);

	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs_herc);
	    xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
	               (unsigned long long)val64);

	    val64 = 0x0003570003010300ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                           &bar0->mc_rldram_mrs_herc);

	    xge_os_mdelay(1);
	}

	/*
	 * Assign MSI-X vectors
	 */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    xge_list_t *item;
	    xge_hal_channel_t *channel = NULL;

	    if (!hldev->config.ring.queue[i].configured ||
	        !hldev->config.ring.queue[i].intr_vector ||
	        hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
	        continue;

	    /* find the ring channel that posts to queue i */
	    xge_list_for_each(item, &hldev->free_channels) {
	        xge_hal_channel_t *tmp;
	        tmp = xge_container_of(item, xge_hal_channel_t,
	                       item);
	        if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
	            tmp->post_qid == i) {
	            channel = tmp;
	            break;
	        }
	    }

	    if (channel) {
	        xge_hal_channel_msix_set(channel,
	            hldev->config.ring.queue[i].intr_vector);
	    }
	}

	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}

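/*
 * __hal_ring_mtu_set - Program rts_frm_len_n for every configured ring
 * queue, using the queue's max_frm_len when set and the new device frame
 * length otherwise, and update rmac_max_pyld_len accordingly.
 */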
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
	int i;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	    if (!hldev->config.ring.queue[i].configured)
	        continue;
	    if (hldev->config.ring.queue[i].max_frm_len !=
	                    XGE_HAL_RING_USE_MTU) {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                XGE_HAL_MAC_RTS_FRM_LEN_SET(
	            hldev->config.ring.queue[i].max_frm_len),
	            &bar0->rts_frm_len_n[i]);
	    } else {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                   XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
	                   &bar0->rts_frm_len_n[i]);
	    }
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	               XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
	                   &bar0->rmac_max_pyld_len);
}