/*-
 * Copyright(c) 2002-2011 Exar Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification are permitted provided the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 *    3. Neither the name of the Exar Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/
32221167Sgnn
33221167Sgnn#include <dev/vxge/vxgehal/vxgehal.h>
34221167Sgnn
35221167Sgnn/*
36221167Sgnn * __hal_ring_block_memblock_idx - Return the memblock index
37221167Sgnn * @block: Virtual address of memory block
38221167Sgnn *
39221167Sgnn * This function returns the index of memory block
40221167Sgnn */
41221167Sgnnstatic inline u32
42221167Sgnn__hal_ring_block_memblock_idx(
43221167Sgnn    vxge_hal_ring_block_t block)
44221167Sgnn{
45221167Sgnn	return (u32)*((u64 *) ((void *)((u8 *) block +
46221167Sgnn	    VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET)));
47221167Sgnn}
48221167Sgnn
49221167Sgnn/*
50221167Sgnn * __hal_ring_block_memblock_idx_set - Sets the memblock index
51221167Sgnn * @block: Virtual address of memory block
52221167Sgnn * @memblock_idx: Index of memory block
53221167Sgnn *
54221167Sgnn * This function sets index to a memory block
55221167Sgnn */
56221167Sgnnstatic inline void
57221167Sgnn__hal_ring_block_memblock_idx_set(
58221167Sgnn    vxge_hal_ring_block_t block,
59221167Sgnn    u32 memblock_idx)
60221167Sgnn{
61221167Sgnn	*((u64 *) ((void *)((u8 *) block +
62221167Sgnn	    VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET))) = memblock_idx;
63221167Sgnn}
64221167Sgnn
65221167Sgnn/*
66221167Sgnn * __hal_ring_block_next_pointer - Returns the dma address of next block
67221167Sgnn * @block: RxD block
68221167Sgnn *
69221167Sgnn * Returns the dma address of next block stored in the RxD block
70221167Sgnn */
71221167Sgnnstatic inline dma_addr_t
72221167Sgnn/* LINTED */
73221167Sgnn__hal_ring_block_next_pointer(
74221167Sgnn    vxge_hal_ring_block_t *block)
75221167Sgnn{
76221167Sgnn	return (dma_addr_t)*((u64 *) ((void *)((u8 *) block +
77221167Sgnn	    VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)));
78221167Sgnn}
79221167Sgnn
80221167Sgnn/*
81221167Sgnn * __hal_ring_block_next_pointer_set - Sets the next block pointer in RxD block
82221167Sgnn * @block: RxD block
83221167Sgnn * @dma_next: dma address of next block
84221167Sgnn *
85221167Sgnn * Sets the next block pointer in RxD block
86221167Sgnn */
87221167Sgnnstatic inline void
88221167Sgnn__hal_ring_block_next_pointer_set(
89221167Sgnn    vxge_hal_ring_block_t *block,
90221167Sgnn    dma_addr_t dma_next)
91221167Sgnn{
92221167Sgnn	*((u64 *) ((void *)((u8 *) block +
93221167Sgnn	    VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET))) = dma_next;
94221167Sgnn}
95221167Sgnn
96221167Sgnn/*
97221167Sgnn * __hal_ring_first_block_address_get - Returns the dma address of the
98221167Sgnn *		first block
99221167Sgnn * @ringh: Handle to the ring
100221167Sgnn *
101221167Sgnn * Returns the dma address of the first RxD block
102221167Sgnn */
103221167Sgnnu64
104221167Sgnn__hal_ring_first_block_address_get(
105221167Sgnn    vxge_hal_ring_h ringh)
106221167Sgnn{
107221167Sgnn	__hal_ring_t *ring = (__hal_ring_t *) ringh;
108221167Sgnn	vxge_hal_mempool_dma_t *dma_object;
109221167Sgnn
110221167Sgnn	dma_object = __hal_mempool_memblock_dma(ring->mempool, 0);
111221167Sgnn
112221167Sgnn	vxge_assert(dma_object != NULL);
113221167Sgnn
114221167Sgnn	return (dma_object->addr);
115221167Sgnn}
116221167Sgnn
117221167Sgnn
118221167Sgnn#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
119221167Sgnn/*
120221167Sgnn * __hal_ring_item_dma_offset - Return the dma offset of an item
121221167Sgnn * @mempoolh: Handle to the memory pool of the ring
122221167Sgnn * @item: Item for which to get the dma offset
123221167Sgnn *
124221167Sgnn * This function returns the dma offset of a given item
125221167Sgnn */
126221167Sgnnstatic ptrdiff_t
127221167Sgnn__hal_ring_item_dma_offset(
128221167Sgnn    vxge_hal_mempool_h mempoolh,
129221167Sgnn    void *item)
130221167Sgnn{
131221167Sgnn	u32 memblock_idx;
132221167Sgnn	void *memblock;
133221167Sgnn	vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
134221167Sgnn	__hal_device_t *hldev;
135221167Sgnn
136221167Sgnn	vxge_assert((mempoolh != NULL) && (item != NULL) &&
137221167Sgnn	    (dma_handle != NULL));
138221167Sgnn
139221167Sgnn	hldev = (__hal_device_t *) mempool->devh;
140221167Sgnn
141221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
142221167Sgnn	    __FILE__, __func__, __LINE__);
143221167Sgnn
144221167Sgnn	vxge_hal_trace_log_ring(
145221167Sgnn	    "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT,
146221167Sgnn	    (ptr_t) mempoolh, (ptr_t) item);
147221167Sgnn
148221167Sgnn	/* get owner memblock index */
149221167Sgnn	memblock_idx = __hal_ring_block_memblock_idx(item);
150221167Sgnn
151221167Sgnn	/* get owner memblock by memblock index */
152221167Sgnn	memblock = __hal_mempool_memblock(mempoolh, memblock_idx);
153221167Sgnn
154221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
155221167Sgnn	    __FILE__, __func__, __LINE__);
156221167Sgnn
157221167Sgnn	return ((u8 *) item - (u8 *) memblock);
158221167Sgnn}
159221167Sgnn#endif
160221167Sgnn
161221167Sgnn/*
162221167Sgnn * __hal_ring_item_dma_addr - Return the dma address of an item
163221167Sgnn * @mempoolh: Handle to the memory pool of the ring
164221167Sgnn * @item: Item for which to get the dma offset
165221167Sgnn * @dma_handle: dma handle
166221167Sgnn *
167221167Sgnn * This function returns the dma address of a given item
168221167Sgnn */
169221167Sgnnstatic dma_addr_t
170221167Sgnn__hal_ring_item_dma_addr(
171221167Sgnn    vxge_hal_mempool_h mempoolh,
172221167Sgnn    void *item,
173221167Sgnn    pci_dma_h *dma_handle)
174221167Sgnn{
175221167Sgnn	u32 memblock_idx;
176221167Sgnn	void *memblock;
177221167Sgnn	vxge_hal_mempool_dma_t *memblock_dma_object;
178221167Sgnn	vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh;
179221167Sgnn	__hal_device_t *hldev;
180221167Sgnn	ptrdiff_t dma_item_offset;
181221167Sgnn
182221167Sgnn	vxge_assert((mempoolh != NULL) && (item != NULL) &&
183221167Sgnn	    (dma_handle != NULL));
184221167Sgnn
185221167Sgnn	hldev = (__hal_device_t *) mempool->devh;
186221167Sgnn
187221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
188221167Sgnn	    __FILE__, __func__, __LINE__);
189221167Sgnn
190221167Sgnn	vxge_hal_trace_log_ring(
191221167Sgnn	    "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT", "
192221167Sgnn	    "dma_handle = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh,
193221167Sgnn	    (ptr_t) item, (ptr_t) dma_handle);
194221167Sgnn
195221167Sgnn	/* get owner memblock index */
196221167Sgnn	memblock_idx = __hal_ring_block_memblock_idx((u8 *) item);
197221167Sgnn
198221167Sgnn	/* get owner memblock by memblock index */
199221167Sgnn	memblock = __hal_mempool_memblock(
200221167Sgnn	    (vxge_hal_mempool_t *) mempoolh, memblock_idx);
201221167Sgnn
202221167Sgnn	/* get memblock DMA object by memblock index */
203221167Sgnn	memblock_dma_object = __hal_mempool_memblock_dma(
204221167Sgnn	    (vxge_hal_mempool_t *) mempoolh, memblock_idx);
205221167Sgnn
206221167Sgnn	/* calculate offset in the memblock of this item */
207221167Sgnn	/* LINTED */
208221167Sgnn	dma_item_offset = (u8 *) item - (u8 *) memblock;
209221167Sgnn
210221167Sgnn	*dma_handle = memblock_dma_object->handle;
211221167Sgnn
212221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
213221167Sgnn	    __FILE__, __func__, __LINE__);
214221167Sgnn
215221167Sgnn	return (memblock_dma_object->addr + dma_item_offset);
216221167Sgnn}
217221167Sgnn
/*
 * __hal_ring_rxdblock_link - Link the RxD blocks
 * @mempoolh: Handle to the memory pool of the ring
 * @ring: ring
 * @from: Index of the RxD block from which to link
 * @to: Index of the RxD block to link to
 *
 * Writes the DMA start address of block @to into the next-block pointer
 * field of block @from, chaining the blocks for the hardware. When
 * streaming DMA is configured, the updated pointer is flushed to the
 * device before returning.
 */
static void
__hal_ring_rxdblock_link(
    vxge_hal_mempool_h mempoolh,
    __hal_ring_t *ring,
    u32 from,
    u32 to)
{
	vxge_hal_ring_block_t *to_item, *from_item;
	dma_addr_t to_dma, from_dma;
	pci_dma_h to_dma_handle, from_dma_handle;
	__hal_device_t *hldev;

	vxge_assert((mempoolh != NULL) && (ring != NULL));

	hldev = (__hal_device_t *) ring->channel.devh;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "mempoolh = 0x"VXGE_OS_STXFMT", ring = 0x"VXGE_OS_STXFMT", "
	    "from = %d, to = %d", (ptr_t) mempoolh, (ptr_t) ring, from, to);

	/* get "from" RxD block */
	from_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
	    (vxge_hal_mempool_t *) mempoolh, from);
	vxge_assert(from_item);

	/* get "to" RxD block */
	to_item = (vxge_hal_ring_block_t *) __hal_mempool_item(
	    (vxge_hal_mempool_t *) mempoolh, to);
	vxge_assert(to_item);

	/* return address of the beginning of previous RxD block */
	to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

	/*
	 * set next pointer for this RxD block to point on
	 * previous item's DMA start address
	 */
	__hal_ring_block_next_pointer_set(from_item, to_dma);

	/* return "from" RxD block's DMA start address */
	from_dma = __hal_ring_item_dma_addr(
	    mempoolh, from_item, &from_dma_handle);

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
	/* we must sync "from" RxD block, so hardware will see it */
	vxge_os_dma_sync(ring->channel.pdev,
	    from_dma_handle,
	    from_dma + VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	    __hal_ring_item_dma_offset(mempoolh, from_item) +
	    VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
	    sizeof(u64),
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif

	vxge_hal_info_log_ring(
	    "block%d:0x"VXGE_OS_STXFMT" => block%d:0x"VXGE_OS_STXFMT,
	    from, (ptr_t) from_dma, to, (ptr_t) to_dma);

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);

}
292221167Sgnn
/*
 * __hal_ring_mempool_item_alloc - Allocate List blocks for RxD block callback
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @item_index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user (the __hal_ring_t being created)
 *
 * This function is callback passed to __hal_mempool_create to create memory
 * pool for RxD block. For every RxD in the block it records the descriptor,
 * its ULD-private and HAL-private areas in the channel's dtr array and
 * pre-computes the DMA location; finally it stamps the owning memblock
 * index into the block and chains it to its neighbors.
 */
static vxge_hal_status_e
__hal_ring_mempool_item_alloc(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
	u32 i;
	__hal_ring_t *ring = (__hal_ring_t *) userdata;
	__hal_device_t *hldev;

	vxge_assert((item != NULL) && (ring != NULL));

	hldev = (__hal_device_t *) ring->channel.devh;

	vxge_hal_trace_log_pool("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_pool(
	    "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
	    "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
	    "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
	    "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
	    memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
	    (ptr_t) userdata);

	/* format rxds array */
	for (i = 0; i < ring->rxds_per_block; i++) {

		void *uld_priv;
		void *rxdblock_priv;
		__hal_ring_rxd_priv_t *rxd_priv;
		vxge_hal_ring_rxd_1_t *rxdp;
		u32 memblock_item_idx;
		/* ring-global descriptor index for RxD i of this block */
		u32 dtr_index = item_index * ring->rxds_per_block + i;

		/* descriptor i lives rxd_size bytes apart inside the block */
		ring->channel.dtr_arr[dtr_index].dtr =
		    ((u8 *) item) + i * ring->rxd_size;

		/*
		 * Note: memblock_item_idx is index of the item within
		 * the memblock. For instance, in case of three RxD-blocks
		 * per memblock this value can be 0, 1 or 2.
		 */
		rxdblock_priv = __hal_mempool_item_priv(
		    (vxge_hal_mempool_t *) mempoolh,
		    memblock_index,
		    item,
		    &memblock_item_idx);

		rxdp = (vxge_hal_ring_rxd_1_t *)
		    ring->channel.dtr_arr[dtr_index].dtr;

		/* ULD-private area comes first; HAL-private follows it */
		uld_priv = ((u8 *) rxdblock_priv + ring->rxd_priv_size * i);
		rxd_priv =
		    (__hal_ring_rxd_priv_t *) ((void *)(((char *) uld_priv) +
		    ring->per_rxd_space));

		/* stash the descriptor index where the hardware echoes it */
		((vxge_hal_ring_rxd_5_t *) rxdp)->host_control = dtr_index;

		ring->channel.dtr_arr[dtr_index].uld_priv = (void *)uld_priv;
		ring->channel.dtr_arr[dtr_index].hal_priv = (void *)rxd_priv;

		/* pre-format per-RxD Ring's private */
		/* LINTED */
		rxd_priv->dma_offset = (u8 *) rxdp - (u8 *) memblock;
		rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
		rxd_priv->dma_handle = dma_object->handle;
#if defined(VXGE_DEBUG_ASSERT)
		rxd_priv->dma_object = dma_object;
#endif
		rxd_priv->db_bytes = ring->rxd_size;

		/* last RxD also accounts for the block's unused tail bytes */
		if (i == (ring->rxds_per_block - 1)) {
			rxd_priv->db_bytes +=
			    (((vxge_hal_mempool_t *) mempoolh)->memblock_size -
			    (ring->rxds_per_block * ring->rxd_size));
		}
	}

	/* record the owning memblock so it can be found from the block */
	__hal_ring_block_memblock_idx_set((u8 *) item, memblock_index);
	if (is_last) {
		/* link last one with first one */
		__hal_ring_rxdblock_link(mempoolh, ring, item_index, 0);
	}

	if (item_index > 0) {
		/* link this RxD block with previous one */
		__hal_ring_rxdblock_link(mempoolh, ring, item_index - 1, item_index);
	}

	vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);

	return (VXGE_HAL_OK);
}
407221167Sgnn
408221167Sgnn/*
409221167Sgnn * __hal_ring_mempool_item_free - Free RxD blockt callback
410221167Sgnn * @mempoolh: Handle to memory pool
411221167Sgnn * @memblock: Address of this memory block
412221167Sgnn * @memblock_index: Index of this memory block
413221167Sgnn * @dma_object: dma object for this block
414221167Sgnn * @item: Pointer to this item
415221167Sgnn * @index: Index of this item in memory block
416221167Sgnn * @is_last: If this is last item in the block
417221167Sgnn * @userdata: Specific data of user
418221167Sgnn *
419221167Sgnn * This function is callback passed to __hal_mempool_free to destroy memory
420221167Sgnn * pool for RxD block
421221167Sgnn */
422221167Sgnnstatic vxge_hal_status_e
423221167Sgnn__hal_ring_mempool_item_free(
424221167Sgnn    vxge_hal_mempool_h mempoolh,
425221167Sgnn    void *memblock,
426221167Sgnn    u32 memblock_index,
427221167Sgnn    vxge_hal_mempool_dma_t *dma_object,
428221167Sgnn    void *item,
429221167Sgnn    u32 item_index,
430221167Sgnn    u32 is_last,
431221167Sgnn    void *userdata)
432221167Sgnn{
433221167Sgnn	__hal_ring_t *ring = (__hal_ring_t *) userdata;
434221167Sgnn	__hal_device_t *hldev;
435221167Sgnn
436221167Sgnn	vxge_assert((item != NULL) && (ring != NULL));
437221167Sgnn
438221167Sgnn	hldev = (__hal_device_t *) ring->channel.devh;
439221167Sgnn
440221167Sgnn	vxge_hal_trace_log_pool("==> %s:%s:%d",
441221167Sgnn	    __FILE__, __func__, __LINE__);
442221167Sgnn
443221167Sgnn	vxge_hal_trace_log_pool(
444221167Sgnn	    "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", "
445221167Sgnn	    "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", "
446221167Sgnn	    "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, "
447221167Sgnn	    "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock,
448221167Sgnn	    memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
449221167Sgnn	    (ptr_t) userdata);
450221167Sgnn
451221167Sgnn	vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
452221167Sgnn	    __FILE__, __func__, __LINE__);
453221167Sgnn
454221167Sgnn	return (VXGE_HAL_OK);
455221167Sgnn}
456221167Sgnn
457221167Sgnn/*
458221167Sgnn * __hal_ring_initial_replenish - Initial replenish of RxDs
459221167Sgnn * @ring: ring
460221167Sgnn * @reopen: Flag to denote if it is open or repopen
461221167Sgnn *
462221167Sgnn * This function replenishes the RxDs from reserve array to work array
463221167Sgnn */
464221167Sgnnstatic vxge_hal_status_e
465221167Sgnn__hal_ring_initial_replenish(
466221167Sgnn    __hal_ring_t *ring,
467221167Sgnn    vxge_hal_reopen_e reopen)
468221167Sgnn{
469221167Sgnn	vxge_hal_rxd_h rxd;
470221167Sgnn	void *uld_priv;
471221167Sgnn	__hal_device_t *hldev;
472221167Sgnn	vxge_hal_status_e status;
473221167Sgnn
474221167Sgnn	vxge_assert(ring != NULL);
475221167Sgnn
476221167Sgnn	hldev = (__hal_device_t *) ring->channel.devh;
477221167Sgnn
478221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
479221167Sgnn	    __FILE__, __func__, __LINE__);
480221167Sgnn
481221167Sgnn	vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
482221167Sgnn	    (ptr_t) ring, reopen);
483221167Sgnn
484221167Sgnn	while (vxge_hal_ring_rxd_reserve(ring->channel.vph, &rxd, &uld_priv) ==
485221167Sgnn	    VXGE_HAL_OK) {
486221167Sgnn
487221167Sgnn		if (ring->rxd_init) {
488221167Sgnn			status = ring->rxd_init(ring->channel.vph,
489221167Sgnn			    rxd,
490221167Sgnn			    uld_priv,
491221167Sgnn			    VXGE_HAL_RING_RXD_INDEX(rxd),
492221167Sgnn			    ring->channel.userdata,
493221167Sgnn			    reopen);
494221167Sgnn			if (status != VXGE_HAL_OK) {
495221167Sgnn				vxge_hal_ring_rxd_free(ring->channel.vph, rxd);
496221167Sgnn				vxge_hal_trace_log_ring("<== %s:%s:%d \
497221167Sgnn				    Result: %d",
498221167Sgnn				    __FILE__, __func__, __LINE__, status);
499221167Sgnn				return (status);
500221167Sgnn			}
501221167Sgnn		}
502221167Sgnn
503221167Sgnn		vxge_hal_ring_rxd_post(ring->channel.vph, rxd);
504221167Sgnn	}
505221167Sgnn
506221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
507221167Sgnn	    __FILE__, __func__, __LINE__);
508221167Sgnn	return (VXGE_HAL_OK);
509221167Sgnn}
510221167Sgnn
/*
 * __hal_ring_create - Create a Ring
 * @vpath_handle: Handle returned by virtual path open
 * @attr: Ring configuration parameters structure
 *
 * This function creates Ring and initializes it: it rounds the configured
 * ring length up to a whole number of RxD blocks, allocates the channel
 * and the backing memory pool (whose item-alloc callback formats every
 * RxD), and optionally performs the initial replenish when the ULD
 * supplied an rxd_init callback.
 */
vxge_hal_status_e
__hal_ring_create(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_ring_attr_t *attr)
{
	vxge_hal_status_e status;
	__hal_ring_t *ring;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	vxge_hal_ring_config_t *config;
	__hal_device_t *hldev;

	vxge_assert((vpath_handle != NULL) && (attr != NULL));

	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) attr);

	if ((vpath_handle == NULL) || (attr == NULL)) {
		vxge_hal_err_log_ring("null pointer passed == > %s : %d",
		    __func__, __LINE__);
		vxge_hal_trace_log_ring("<== %s:%s:%d  Result:1",
		    __FILE__, __func__, __LINE__);
		return (VXGE_HAL_FAIL);
	}

	config =
	    &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].ring;

	/* round ring length up to a whole number of RxD blocks */
	config->ring_length = ((config->ring_length +
	    vxge_hal_ring_rxds_per_block_get(config->buffer_mode) - 1) /
	    vxge_hal_ring_rxds_per_block_get(config->buffer_mode)) *
	    vxge_hal_ring_rxds_per_block_get(config->buffer_mode);

	ring = (__hal_ring_t *) vxge_hal_channel_allocate(
	    (vxge_hal_device_h) vp->vpath->hldev,
	    vpath_handle,
	    VXGE_HAL_CHANNEL_TYPE_RING,
	    config->ring_length,
	    attr->per_rxd_space,
	    attr->userdata);

	if (ring == NULL) {
		vxge_hal_err_log_ring("Memory allocation failed == > %s : %d",
		    __func__, __LINE__);
		vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
	}

	vp->vpath->ringh = (vxge_hal_ring_h) ring;

	ring->stats = &vp->vpath->sw_stats->ring_stats;

	/* wire up the ULD-supplied callbacks and configuration */
	ring->config = config;
	ring->callback = attr->callback;
	ring->rxd_init = attr->rxd_init;
	ring->rxd_term = attr->rxd_term;

	ring->indicate_max_pkts = config->indicate_max_pkts;
	ring->buffer_mode = config->buffer_mode;

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = vxge_hal_ring_rxd_size_get(config->buffer_mode);
	ring->rxd_priv_size =
	    sizeof(__hal_ring_rxd_priv_t) + attr->per_rxd_space;
	ring->per_rxd_space = attr->per_rxd_space;

	/* round per-RxD private size up to a cache-line multiple */
	ring->rxd_priv_size =
	    ((ring->rxd_priv_size + __vxge_os_cacheline_size - 1) /
	    __vxge_os_cacheline_size) * __vxge_os_cacheline_size;

	/*
	 * how many RxDs can fit into one block. Depends on configured
	 * buffer_mode.
	 */
	ring->rxds_per_block =
	    vxge_hal_ring_rxds_per_block_get(config->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->rxd_mem_avail =
	    ((__hal_vpath_handle_t *) ring->channel.vph)->vpath->rxd_mem_size;

	ring->db_byte_count = 0;

	/* the item-alloc callback formats every RxD block as it is created */
	ring->mempool = vxge_hal_mempool_create(
	    (vxge_hal_device_h) vp->vpath->hldev,
	    VXGE_OS_HOST_PAGE_SIZE,
	    VXGE_OS_HOST_PAGE_SIZE,
	    ring->rxdblock_priv_size,
	    ring->config->ring_length / ring->rxds_per_block,
	    ring->config->ring_length / ring->rxds_per_block,
	    __hal_ring_mempool_item_alloc,
	    __hal_ring_mempool_item_free,
	    ring);

	if (ring->mempool == NULL) {
		__hal_ring_delete(vpath_handle);
		vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
	}

	status = vxge_hal_channel_initialize(&ring->channel);
	if (status != VXGE_HAL_OK) {
		__hal_ring_delete(vpath_handle);
		vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}


	/*
	 * Note:
	 * Specifying rxd_init callback means two things:
	 * 1) rxds need to be initialized by ULD at channel-open time;
	 * 2) rxds need to be posted at channel-open time
	 *	(that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->rxd_init) {
		if ((status = __hal_ring_initial_replenish(
		    ring,
		    VXGE_HAL_OPEN_NORMAL))
		    != VXGE_HAL_OK) {
			__hal_ring_delete(vpath_handle);
			vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
			    __FILE__, __func__, __LINE__, status);
			return (status);
		}
	}

	/*
	 * initial replenish will increment the counter in its post() routine,
	 * we have to reset it
	 */
	ring->stats->common_stats.usage_cnt = 0;

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
	return (VXGE_HAL_OK);
}
673221167Sgnn
/*
 * __hal_ring_abort - Returns the RxD
 * @ringh: Ring to be reset
 * @reopen: See  vxge_hal_reopen_e {}.
 *
 * This function terminates the RxDs of ring: first it invokes the ULD's
 * rxd_term callback for every descriptor that is NOT currently posted
 * (state FREED), then it drains each posted descriptor, terminates it
 * (state POSTED) and returns it to the free list.
 */
void
__hal_ring_abort(
    vxge_hal_ring_h ringh,
    vxge_hal_reopen_e reopen)
{
	u32 i = 0;
	vxge_hal_rxd_h rxdh;

	__hal_device_t *hldev;
	__hal_ring_t *ring = (__hal_ring_t *) ringh;

	vxge_assert(ringh != NULL);

	hldev = (__hal_device_t *) ring->channel.devh;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d",
	    (ptr_t) ringh, reopen);

	/* phase 1: terminate all descriptors that are not posted */
	if (ring->rxd_term) {
		__hal_channel_for_each_dtr(&ring->channel, rxdh, i) {
			if (!__hal_channel_is_posted_dtr(&ring->channel, i)) {
				ring->rxd_term(ring->channel.vph, rxdh,
				    VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
				    VXGE_HAL_RXD_STATE_FREED,
				    ring->channel.userdata,
				    reopen);
			}
		}
	}

	/* phase 2: drain, terminate and free every posted descriptor */
	for (;;) {
		__hal_channel_dtr_try_complete(&ring->channel, &rxdh);
		if (rxdh == NULL)
			break;

		__hal_channel_dtr_complete(&ring->channel);
		if (ring->rxd_term) {
			ring->rxd_term(ring->channel.vph, rxdh,
			    VXGE_HAL_RING_ULD_PRIV(ring, rxdh),
			    VXGE_HAL_RXD_STATE_POSTED,
			    ring->channel.userdata,
			    reopen);
		}
		__hal_channel_dtr_free(&ring->channel,
		    VXGE_HAL_RING_RXD_INDEX(rxdh));
	}

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
734221167Sgnn
/*
 * __hal_ring_reset - Resets the ring
 * @ringh: Ring to be reset
 *
 * This function resets the ring during vpath reset operation: it aborts
 * all outstanding RxDs, resets the underlying channel, restores the RxD
 * memory accounting and, when the ULD provided an rxd_init callback,
 * replenishes the ring again.
 */
vxge_hal_status_e
__hal_ring_reset(
    vxge_hal_ring_h ringh)
{
	__hal_ring_t *ring = (__hal_ring_t *) ringh;
	__hal_device_t *hldev;
	vxge_hal_status_e status;
	__hal_vpath_handle_t *vph = (__hal_vpath_handle_t *) ring->channel.vph;

	vxge_assert(ringh != NULL);

	hldev = (__hal_device_t *) ring->channel.devh;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT,
	    (ptr_t) ringh);

	/* terminate and reclaim all outstanding RxDs */
	__hal_ring_abort(ringh, VXGE_HAL_RESET_ONLY);

	status = __hal_channel_reset(&ring->channel);

	if (status != VXGE_HAL_OK) {

		vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);

	}
	/* restore RxD memory accounting to its post-create state */
	ring->rxd_mem_avail = vph->vpath->rxd_mem_size;
	ring->db_byte_count = 0;


	if (ring->rxd_init) {
		if ((status = __hal_ring_initial_replenish(
		    ring,
		    VXGE_HAL_RESET_ONLY))
		    != VXGE_HAL_OK) {
			vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
			    __FILE__, __func__, __LINE__, status);
			return (status);
		}
	}

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);

	return (VXGE_HAL_OK);
}
791221167Sgnn
792221167Sgnn/*
793221167Sgnn * __hal_ring_delete - Removes the ring
794221167Sgnn * @vpath_handle: Virtual path handle to which this queue belongs
795221167Sgnn *
796221167Sgnn * This function freeup the memory pool and removes the ring
797221167Sgnn */
void
__hal_ring_delete(
    vxge_hal_vpath_h vpath_handle)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert(vpath_handle != NULL);

	/* hldev is used by the spin-lock-destroy calls (and presumably by the
	 * trace macros) below. */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	vxge_assert(ring->channel.pdev);

	/* Return all outstanding RxDs to the ULD before tearing down. */
	__hal_ring_abort(vp->vpath->ringh, VXGE_HAL_OPEN_NORMAL);


	/* Free the descriptor memory pool, if one was created. */
	if (ring->mempool) {
		vxge_hal_mempool_destroy(ring->mempool);
	}

	vxge_hal_channel_terminate(&ring->channel);

	/* Destroy the post lock with the flavor matching how it was created. */
#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif

	/* Release the channel object itself; 'ring' is invalid after this. */
	vxge_hal_channel_free(&ring->channel);

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);

}
843221167Sgnn
844221167Sgnn/*
845221167Sgnn * __hal_ring_frame_length_set	- Set the maximum frame length of recv frames.
846221167Sgnn * @vpath: virtual Path
847221167Sgnn * @new_frmlen: New frame length
848221167Sgnn *
849221167Sgnn *
850221167Sgnn * Returns: VXGE_HAL_OK - success.
851221167Sgnn * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
852221167Sgnn *
853221167Sgnn */
vxge_hal_status_e
__hal_ring_frame_length_set(
    __hal_virtualpath_t *vpath,
    u32 new_frmlen)
{
	u64 val64;
	__hal_device_t *hldev;

	vxge_assert(vpath != NULL);

	/* hldev is presumably referenced by the trace macros below; it has no
	 * other use in this function. */
	hldev = (__hal_device_t *) vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath = 0x"VXGE_OS_STXFMT", new_frmlen = %d",
	    (ptr_t) vpath, new_frmlen);

	/* The register cannot be programmed on a closed vpath. */
	if (vpath->vp_open == VXGE_HAL_VP_NOT_OPEN) {

		vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_VPATH_NOT_OPEN);
		return (VXGE_HAL_ERR_VPATH_NOT_OPEN);

	}

	/* Read-modify-write of rxmac_vcfg0: only the RTS_MAX_FRM_LEN field
	 * (14 bits, hence the 0x3fff mask) is changed. */
	val64 = vxge_os_pio_mem_read64(
	    vpath->hldev->header.pdev,
	    vpath->hldev->header.regh0,
	    &vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);

	/* An explicitly configured max_frm_len wins over the caller-supplied
	 * length; VXGE_HAL_MAX_RING_FRM_LEN_USE_MTU means "derive from MTU",
	 * i.e. use @new_frmlen. MAC header overhead is added in both cases. */
	if (vpath->vp_config->ring.max_frm_len !=
	    VXGE_HAL_MAX_RING_FRM_LEN_USE_MTU) {

		val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
		    vpath->vp_config->ring.max_frm_len +
		    VXGE_HAL_MAC_HEADER_MAX_SIZE);

	} else {

		val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_frmlen +
		    VXGE_HAL_MAC_HEADER_MAX_SIZE);
	}

	vxge_os_pio_mem_write64(
	    vpath->hldev->header.pdev,
	    vpath->hldev->header.regh0,
	    val64,
	    &vpath->vp_reg->rxmac_vcfg0);

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);

	return (VXGE_HAL_OK);
}
913221167Sgnn
914221167Sgnn/*
915221167Sgnn * vxge_hal_ring_rxd_reserve - Reserve ring descriptor.
916221167Sgnn * @vpath_handle: virtual Path handle.
917221167Sgnn * @rxdh: Reserved descriptor. On success HAL fills this "out" parameter
918221167Sgnn *		with a valid handle.
919221167Sgnn * @rxd_priv: Buffer to return pointer to per rxd private space
920221167Sgnn *
921221167Sgnn * Reserve Rx descriptor for the subsequent filling-in (by upper layer
922221167Sgnn * driver (ULD)) and posting on the corresponding channel (@channelh)
923221167Sgnn * via vxge_hal_ring_rxd_post().
924221167Sgnn *
925221167Sgnn * Returns: VXGE_HAL_OK - success.
926221167Sgnn * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
927221167Sgnn *
928221167Sgnn */
vxge_hal_status_e
vxge_hal_ring_rxd_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h * rxdh,
    void **rxd_priv)
{
	vxge_hal_status_e status;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
	    (rxd_priv != NULL));

	/* hldev is presumably referenced by the trace macros below; it has no
	 * other use in this function. */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
	    "rxd_priv = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle,
	    (ptr_t) rxdh, (ptr_t) rxd_priv);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	/* The channel descriptor arrays are shared with the post/complete
	 * paths, so the reservation is done under the post lock. */
#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

	status = __hal_channel_dtr_reserve(&ring->channel, rxdh);

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

	if (status == VXGE_HAL_OK) {
		vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *)*rxdh;

		/* instead of memset: reset	this RxD */
		rxdp->control_0 = rxdp->control_1 = 0;

		/* Hand the per-RxD ULD private area back to the caller. */
		*rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);

#if defined(VXGE_OS_MEMORY_CHECK)
		/* Debug-build bookkeeping: mark this descriptor as handed out. */
		VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->allocated = 1;
#endif
	}

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
	return (status);
}
991221167Sgnn
992221167Sgnn/*
993221167Sgnn * vxge_hal_ring_rxd_pre_post - Prepare rxd and post
994221167Sgnn * @vpath_handle: virtual Path handle.
995221167Sgnn * @rxdh: Descriptor handle.
996221167Sgnn *
997221167Sgnn * This routine prepares a rxd and posts
998221167Sgnn */
void
vxge_hal_ring_rxd_pre_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{

#if defined(VXGE_DEBUG_ASSERT)
	vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

#endif

#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;

#endif
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is presumably referenced by the trace macros below; it has no
	 * other use in this function. */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

#if defined(VXGE_DEBUG_ASSERT)
	/* make	sure device overwrites the (illegal) t_code on completion */
	rxdp->control_0 |=
	    VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
	/* Debug-only ordering check: descriptors within the same 4KB page
	 * (dma_offset not page-aligned) must be posted contiguously. */
	if (TRUE) {
		if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
			vxge_hal_rxd_h prev_rxdh;
			__hal_ring_rxd_priv_t *rxdp_priv;
			u32 index;

			rxdp_priv = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);

			/* NOTE(review): this inner "== 0" branch is
			 * unreachable — the enclosing "if" already requires
			 * the index to be non-zero. The else branch always
			 * runs. */
			if (VXGE_HAL_RING_RXD_INDEX(rxdp) == 0)
				index = ring->channel.length;
			else
				index = VXGE_HAL_RING_RXD_INDEX(rxdp) - 1;

			prev_rxdh = ring->channel.dtr_arr[index].dtr;

			if (prev_rxdh != NULL &&
			    (rxdp_priv->dma_offset & (~0xFFF)) !=
			    rxdp_priv->dma_offset) {
				vxge_assert((char *) prev_rxdh +
				    ring->rxd_size == rxdh);
			}
		}
	}
#endif

	/* Queue the descriptor in the channel and account for its doorbell
	 * bytes; the doorbell itself is rung later (rxd_post_post_db). */
	__hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

	ring->db_byte_count +=
	    VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1085221167Sgnn
1086221167Sgnn/*
1087221167Sgnn * vxge_hal_ring_rxd_post_post - Process rxd after post.
1088221167Sgnn * @vpath_handle: virtual Path handle.
1089221167Sgnn * @rxdh: Descriptor handle.
1090221167Sgnn *
1091221167Sgnn * Processes rxd after post
1092221167Sgnn */
void
vxge_hal_ring_rxd_post_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
	vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
	__hal_ring_rxd_priv_t *priv;

#endif
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is presumably referenced by the trace macros below; it has no
	 * other use in this function. */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	/* do POST */
	/* Setting the OWN bits hands the descriptor to the adapter; the host
	 * must not touch it again until the adapter completes it. */
	rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

	rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
	/* Flush the descriptor to device-visible memory on platforms with
	 * non-coherent streaming DMA. */
	priv = __hal_ring_rxd_priv(ring, rxdp);
	vxge_os_dma_sync(ring->channel.pdev,
	    priv->dma_handle,
	    priv->dma_addr,
	    priv->dma_offset,
	    ring->rxd_size,
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif
	/* One less descriptor held by the host. */
	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1143221167Sgnn
1144221167Sgnn/*
1145221167Sgnn * vxge_hal_ring_rxd_post - Post descriptor on the ring.
1146221167Sgnn * @vpath_handle: virtual Path handle.
1147221167Sgnn * @rxdh: Descriptor obtained via vxge_hal_ring_rxd_reserve().
1148221167Sgnn *
1149221167Sgnn * Post	descriptor on the ring.
1150221167Sgnn * Prior to posting the	descriptor should be filled in accordance with
1151221167Sgnn * Host/X3100 interface specification for a given service (LL, etc.).
1152221167Sgnn *
1153221167Sgnn */
void
vxge_hal_ring_rxd_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
	vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif

	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is presumably referenced by the trace macros below; it has no
	 * other use in this function. */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	/* Based on Titan HW bugzilla # 3039, we need to reset the tcode */
	rxdp->control_0 = 0;

#if defined(VXGE_DEBUG_ASSERT)
	/* make	sure device overwrites the (illegal) t_code on completion */
	rxdp->control_0 |=
	    VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

	/* Hand ownership of the descriptor to the adapter. */
	rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
	rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
	/* Flush the descriptor to device-visible memory on platforms with
	 * non-coherent streaming DMA. */
	{
		__hal_ring_rxd_priv_t *rxdp_temp1;
		rxdp_temp1 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
		vxge_os_dma_sync(ring->channel.pdev,
		    rxdp_temp1->dma_handle,
		    rxdp_temp1->dma_addr,
		    rxdp_temp1->dma_offset,
		    ring->rxd_size,
		    VXGE_OS_DMA_DIR_TODEVICE);
	}
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
	/* Debug-only ordering check: descriptors within the same 4KB page
	 * (dma_offset not page-aligned) must be posted contiguously. */
	if (TRUE) {
		if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {

			vxge_hal_rxd_h prev_rxdh;
			__hal_ring_rxd_priv_t *rxdp_temp2;

			rxdp_temp2 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
			prev_rxdh =
			    ring->channel.dtr_arr[VXGE_HAL_RING_RXD_INDEX(rxdp) - 1].dtr;

			if (prev_rxdh != NULL &&
			    (rxdp_temp2->dma_offset & (~0xFFF)) != rxdp_temp2->dma_offset)
				vxge_assert((char *) prev_rxdh + ring->rxd_size == rxdh);
		}
	}
#endif

	/* Queue the descriptor and account its doorbell bytes; the doorbell
	 * is rung separately via vxge_hal_ring_rxd_post_post_db(). */
	__hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

	ring->db_byte_count +=
	    VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

	/* One less descriptor held by the host. */
	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1250221167Sgnn
1251221167Sgnn/*
1252221167Sgnn * vxge_hal_ring_rxd_post_post_wmb - Process rxd after post with memory barrier
1253221167Sgnn * @vpath_handle: virtual Path handle.
1254221167Sgnn * @rxdh: Descriptor handle.
1255221167Sgnn *
1256221167Sgnn * Processes rxd after post with memory barrier.
1257221167Sgnn */
void
vxge_hal_ring_rxd_post_post_wmb(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is presumably referenced by the trace macros below; it has no
	 * other use in this function. */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	/* Do memory barrier before changing the ownership */
	/* Ensures the ULD's buffer/descriptor writes are globally visible
	 * before the OWN bits are set in vxge_hal_ring_rxd_post_post(). */
	vxge_os_wmb();

	vxge_hal_ring_rxd_post_post(vpath_handle, rxdh);

	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1285221167Sgnn
1286221167Sgnn/*
1287221167Sgnn * vxge_hal_ring_rxd_post_post_db - Post Doorbell after posting the rxd(s).
1288221167Sgnn * @vpath_handle: virtual Path handle.
1289221167Sgnn *
1290221167Sgnn * Post Doorbell after posting the rxd(s).
1291221167Sgnn */
1292221167Sgnnvoid
1293221167Sgnnvxge_hal_ring_rxd_post_post_db(
1294221167Sgnn    vxge_hal_vpath_h vpath_handle)
1295221167Sgnn{
1296221167Sgnn	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1297221167Sgnn	__hal_device_t *hldev;
1298221167Sgnn	__hal_ring_t *ring;
1299221167Sgnn
1300221167Sgnn	vxge_assert(vpath_handle != NULL);
1301221167Sgnn
1302221167Sgnn	hldev = (__hal_device_t *) vp->vpath->hldev;
1303221167Sgnn
1304221167Sgnn	ring = (__hal_ring_t *) vp->vpath->ringh;
1305221167Sgnn
1306221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
1307221167Sgnn	    __FILE__, __func__, __LINE__);
1308221167Sgnn
1309221167Sgnn	vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
1310221167Sgnn	    (ptr_t) vpath_handle);
1311221167Sgnn
1312221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1313221167Sgnn	vxge_os_spin_lock(&ring->channel.post_lock);
1314221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1315221167Sgnn	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
1316221167Sgnn#endif
1317221167Sgnn
1318221167Sgnn	if (ring->db_byte_count <= ring->rxd_mem_avail) {
1319221167Sgnn		__hal_rxd_db_post(vpath_handle, ring->db_byte_count);
1320221167Sgnn		ring->rxd_mem_avail -= ring->db_byte_count;
1321221167Sgnn		ring->db_byte_count = 0;
1322221167Sgnn	} else {
1323221167Sgnn		__hal_rxd_db_post(vpath_handle, ring->rxd_mem_avail);
1324221167Sgnn		ring->db_byte_count -= ring->rxd_mem_avail;
1325221167Sgnn		ring->rxd_mem_avail = 0;
1326221167Sgnn	}
1327221167Sgnn
1328221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1329221167Sgnn	vxge_os_spin_unlock(&ring->channel.post_lock);
1330221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1331221167Sgnn	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1332221167Sgnn#endif
1333221167Sgnn
1334221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
1335221167Sgnn	    __FILE__, __func__, __LINE__);
1336221167Sgnn}
1337221167Sgnn
1338221167Sgnn/*
1339221167Sgnn * vxge_hal_ring_is_next_rxd_completed - Check if the next rxd is completed
1340221167Sgnn * @vpath_handle: Virtual Path handle.
1341221167Sgnn *
1342226436Seadler * Checks if the _next_	completed descriptor is	in host	memory
1343221167Sgnn *
1344221167Sgnn * Returns: VXGE_HAL_OK - success.
1345221167Sgnn * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed	descriptors
1346221167Sgnn * are currently available for processing.
1347221167Sgnn */
1348221167Sgnnvxge_hal_status_e
1349221167Sgnnvxge_hal_ring_is_next_rxd_completed(
1350221167Sgnn    vxge_hal_vpath_h vpath_handle)
1351221167Sgnn{
1352221167Sgnn	__hal_ring_t *ring;
1353221167Sgnn	vxge_hal_rxd_h rxdh;
1354221167Sgnn	vxge_hal_ring_rxd_1_t *rxdp;	/* doesn't matter 1, 3 or 5... */
1355221167Sgnn	__hal_device_t *hldev;
1356221167Sgnn	vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1357221167Sgnn	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1358221167Sgnn
1359221167Sgnn	vxge_assert(vpath_handle != NULL);
1360221167Sgnn
1361221167Sgnn	hldev = (__hal_device_t *) vp->vpath->hldev;
1362221167Sgnn
1363221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
1364221167Sgnn	    __FILE__, __func__, __LINE__);
1365221167Sgnn
1366221167Sgnn	vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
1367221167Sgnn	    (ptr_t) vpath_handle);
1368221167Sgnn
1369221167Sgnn	ring = (__hal_ring_t *) vp->vpath->ringh;
1370221167Sgnn
1371221167Sgnn	vxge_assert(ring != NULL);
1372221167Sgnn
1373221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1374221167Sgnn	vxge_os_spin_lock(&ring->channel.post_lock);
1375221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1376221167Sgnn	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
1377221167Sgnn#endif
1378221167Sgnn
1379221167Sgnn	__hal_channel_dtr_try_complete(&ring->channel, &rxdh);
1380221167Sgnn
1381221167Sgnn	rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;
1382221167Sgnn
1383221167Sgnn	if (rxdp != NULL) {
1384221167Sgnn
1385221167Sgnn		/* check whether it is not the end */
1386221167Sgnn		if ((!(rxdp->control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER)) &&
1387221167Sgnn		    (!(rxdp->control_1 &
1388221167Sgnn		    VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER))) {
1389221167Sgnn
1390221167Sgnn			status = VXGE_HAL_OK;
1391221167Sgnn		}
1392221167Sgnn	}
1393221167Sgnn
1394221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1395221167Sgnn	vxge_os_spin_unlock(&ring->channel.post_lock);
1396221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1397221167Sgnn	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1398221167Sgnn#endif
1399221167Sgnn
1400221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
1401221167Sgnn	    __FILE__, __func__, __LINE__, status);
1402221167Sgnn	return (status);
1403221167Sgnn}
1404221167Sgnn
1405221167Sgnn/*
1406221167Sgnn * vxge_hal_ring_rxd_next_completed - Get the _next_ completed descriptor.
1407221167Sgnn * @channelh: Channel handle.
1408221167Sgnn * @rxdh: Descriptor handle. Returned by HAL.
1409221167Sgnn * @rxd_priv: Buffer to return a pointer to the per rxd space allocated
1410221167Sgnn * @t_code:	Transfer code, as per X3100 User Guide,
1411221167Sgnn *			Receive	Descriptor Format. Returned	by HAL.
1412221167Sgnn *
1413221167Sgnn * Retrieve the	_next_ completed descriptor.
1414221167Sgnn * HAL uses ring callback (*vxge_hal_ring_callback_f) to notifiy
1415221167Sgnn * upper-layer driver (ULD) of new completed descriptors. After that
1416221167Sgnn * the ULD can use vxge_hal_ring_rxd_next_completed to retrieve the rest
1417221167Sgnn * completions (the very first completion is passed by HAL via
1418221167Sgnn * vxge_hal_ring_callback_f).
1419221167Sgnn *
1420221167Sgnn * Implementation-wise,	the upper-layer	driver is free to call
1421221167Sgnn * vxge_hal_ring_rxd_next_completed either immediately from inside the
1422221167Sgnn * ring callback, or in a deferred fashion and separate (from HAL)
1423221167Sgnn * context.
1424221167Sgnn *
1425221167Sgnn * Non-zero @t_code means failure to fill-in receive buffer(s)
1426221167Sgnn * of the descriptor.
1427221167Sgnn * For instance, parity	error detected during the data transfer.
1428221167Sgnn * In this case	X3100 will	complete the descriptor	and	indicate
1429221167Sgnn * for the host	that the received data is not to be	used.
1430221167Sgnn * For details please refer	to X3100 User Guide.
1431221167Sgnn *
1432221167Sgnn * Returns: VXGE_HAL_OK - success.
1433221167Sgnn * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed	descriptors
1434221167Sgnn * are currently available for processing.
1435221167Sgnn *
1436221167Sgnn * See also: vxge_hal_ring_callback_f {},
1437221167Sgnn * vxge_hal_fifo_rxd_next_completed(), vxge_hal_status_e {}.
1438221167Sgnn */
1439221167Sgnnvxge_hal_status_e
1440221167Sgnnvxge_hal_ring_rxd_next_completed(
1441221167Sgnn    vxge_hal_vpath_h vpath_handle,
1442221167Sgnn    vxge_hal_rxd_h *rxdh,
1443221167Sgnn    void **rxd_priv,
1444221167Sgnn    u8 *t_code)
1445221167Sgnn{
1446221167Sgnn	__hal_ring_t *ring;
1447221167Sgnn	vxge_hal_ring_rxd_5_t *rxdp;	/* doesn't matter 1, 3 or 5... */
1448221167Sgnn#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
1449221167Sgnn	__hal_ring_rxd_priv_t *priv;
1450221167Sgnn#endif
1451221167Sgnn	__hal_device_t *hldev;
1452221167Sgnn	vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1453221167Sgnn	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1454221167Sgnn	u64 own, control_0, control_1;
1455221167Sgnn
1456221167Sgnn	vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
1457221167Sgnn	    (rxd_priv != NULL) && (t_code != NULL));
1458221167Sgnn
1459221167Sgnn	hldev = (__hal_device_t *) vp->vpath->hldev;
1460221167Sgnn
1461221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
1462221167Sgnn	    __FILE__, __func__, __LINE__);
1463221167Sgnn
1464221167Sgnn	vxge_hal_trace_log_ring(
1465221167Sgnn	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
1466221167Sgnn	    "rxd_priv = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
1467221167Sgnn	    (ptr_t) vpath_handle, (ptr_t) rxdh, (ptr_t) rxd_priv,
1468221167Sgnn	    (ptr_t) t_code);
1469221167Sgnn
1470221167Sgnn	ring = (__hal_ring_t *) vp->vpath->ringh;
1471221167Sgnn
1472221167Sgnn	vxge_assert(ring != NULL);
1473221167Sgnn
1474221167Sgnn	*rxdh = 0;
1475221167Sgnn	*rxd_priv = NULL;
1476221167Sgnn
1477221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1478221167Sgnn	vxge_os_spin_lock(&ring->channel.post_lock);
1479221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1480221167Sgnn	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
1481221167Sgnn#endif
1482221167Sgnn
1483221167Sgnn	__hal_channel_dtr_try_complete(&ring->channel, rxdh);
1484221167Sgnn
1485221167Sgnn	rxdp = (vxge_hal_ring_rxd_5_t *)*rxdh;
1486221167Sgnn	if (rxdp != NULL) {
1487221167Sgnn
1488221167Sgnn#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
1489221167Sgnn		/*
1490221167Sgnn		 * Note: 24 bytes at most means:
1491221167Sgnn		 *	- Control_3 in case of 5-buffer	mode
1492221167Sgnn		 *	- Control_1 and	Control_2
1493221167Sgnn		 *
1494221167Sgnn		 * This is the only length needs to be invalidated
1495221167Sgnn		 * type of channels.
1496221167Sgnn		 */
1497221167Sgnn		priv = __hal_ring_rxd_priv(ring, rxdp);
1498221167Sgnn		vxge_os_dma_sync(ring->channel.pdev,
1499221167Sgnn		    priv->dma_handle,
1500221167Sgnn		    priv->dma_addr,
1501221167Sgnn		    priv->dma_offset,
1502221167Sgnn		    24,
1503221167Sgnn		    VXGE_OS_DMA_DIR_FROMDEVICE);
1504221167Sgnn#endif
1505221167Sgnn		*t_code = (u8) VXGE_HAL_RING_RXD_T_CODE_GET(rxdp->control_0);
1506221167Sgnn
1507221167Sgnn		control_0 = rxdp->control_0;
1508221167Sgnn		control_1 = rxdp->control_1;
1509221167Sgnn		own = control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;
1510221167Sgnn
1511221167Sgnn		/* check whether it is not the end */
1512221167Sgnn		if ((!own && !(control_1 & VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER)) ||
1513221167Sgnn		    (*t_code == VXGE_HAL_RING_RXD_T_CODE_FRM_DROP)) {
1514221167Sgnn
1515221167Sgnn#ifndef	VXGE_HAL_IRQ_POLLING
1516221167Sgnn			if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
1517221167Sgnn				/*
1518221167Sgnn				 * reset it. since we don't want to return
1519221167Sgnn				 * garbage to the ULD
1520221167Sgnn				 */
1521221167Sgnn				*rxdh = 0;
1522221167Sgnn				status = VXGE_HAL_COMPLETIONS_REMAIN;
1523221167Sgnn			} else {
1524221167Sgnn#endif
1525221167Sgnn				__hal_channel_dtr_complete(&ring->channel);
1526221167Sgnn
1527221167Sgnn				*rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);
1528221167Sgnn
1529221167Sgnn				ring->rxd_mem_avail +=
1530221167Sgnn				    (VXGE_HAL_RING_HAL_PRIV(ring, rxdp))->db_bytes;
1531221167Sgnn
1532221167Sgnn				ring->stats->common_stats.usage_cnt++;
1533221167Sgnn				if (ring->stats->common_stats.usage_max <
1534221167Sgnn				    ring->stats->common_stats.usage_cnt)
1535221167Sgnn					ring->stats->common_stats.usage_max =
1536221167Sgnn					    ring->stats->common_stats.usage_cnt;
1537221167Sgnn
1538221167Sgnn				switch (ring->buffer_mode) {
1539221167Sgnn				case VXGE_HAL_RING_RXD_BUFFER_MODE_1:
1540221167Sgnn					ring->channel.poll_bytes +=
1541221167Sgnn					    (u32) VXGE_HAL_RING_RXD_1_BUFFER0_SIZE_GET(
1542221167Sgnn					    rxdp->control_1);
1543221167Sgnn					break;
1544221167Sgnn				case VXGE_HAL_RING_RXD_BUFFER_MODE_3:
1545221167Sgnn					ring->channel.poll_bytes +=
1546221167Sgnn					    (u32) VXGE_HAL_RING_RXD_3_BUFFER0_SIZE_GET(
1547221167Sgnn					    rxdp->control_1) +
1548221167Sgnn					    (u32) VXGE_HAL_RING_RXD_3_BUFFER1_SIZE_GET(
1549221167Sgnn					    rxdp->control_1) +
1550221167Sgnn					    (u32) VXGE_HAL_RING_RXD_3_BUFFER2_SIZE_GET(
1551221167Sgnn					    rxdp->control_1);
1552221167Sgnn					break;
1553221167Sgnn				case VXGE_HAL_RING_RXD_BUFFER_MODE_5:
1554221167Sgnn					ring->channel.poll_bytes +=
1555221167Sgnn					    (u32) VXGE_HAL_RING_RXD_5_BUFFER0_SIZE_GET(
1556221167Sgnn					    rxdp->control_1) +
1557221167Sgnn					    (u32) VXGE_HAL_RING_RXD_5_BUFFER1_SIZE_GET(
1558221167Sgnn					    rxdp->control_1) +
1559221167Sgnn					    (u32) VXGE_HAL_RING_RXD_5_BUFFER2_SIZE_GET(
1560221167Sgnn					    rxdp->control_1) +
1561221167Sgnn					    (u32) VXGE_HAL_RING_RXD_5_BUFFER3_SIZE_GET(
1562221167Sgnn					    rxdp->control_2) +
1563221167Sgnn					    (u32) VXGE_HAL_RING_RXD_5_BUFFER4_SIZE_GET(
1564221167Sgnn					    rxdp->control_2);
1565221167Sgnn					break;
1566221167Sgnn				}
1567221167Sgnn
1568221167Sgnn				status = VXGE_HAL_OK;
1569221167Sgnn#ifndef	VXGE_HAL_IRQ_POLLING
1570221167Sgnn			}
1571221167Sgnn#endif
1572221167Sgnn		}
1573221167Sgnn	}
1574221167Sgnn
1575221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1576221167Sgnn	vxge_os_spin_unlock(&ring->channel.post_lock);
1577221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1578221167Sgnn	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1579221167Sgnn#endif
1580221167Sgnn
1581221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
1582221167Sgnn	    __FILE__, __func__, __LINE__, status);
1583221167Sgnn	return (status);
1584221167Sgnn}
1585221167Sgnn
1586221167Sgnn
1587221167Sgnn/*
1588221167Sgnn * vxge_hal_ring_handle_tcode - Handle transfer code.
1589221167Sgnn * @vpath_handle: Virtual Path handle.
1590221167Sgnn * @rxdh: Descriptor handle.
1591221167Sgnn * @t_code: One of the enumerated (and documented in the X3100 user guide)
1592221167Sgnn *	 "transfer codes".
1593221167Sgnn *
1594221167Sgnn * Handle descriptor's transfer code. The latter comes with each completed
1595221167Sgnn * descriptor.
1596221167Sgnn *
1597221167Sgnn * Returns: one of the vxge_hal_status_e {} enumerated types.
1598221167Sgnn * VXGE_HAL_OK			- for success.
1599221167Sgnn * VXGE_HAL_ERR_CRITICAL	- when encounters critical error.
1600221167Sgnn */
1601221167Sgnnvxge_hal_status_e
1602221167Sgnnvxge_hal_ring_handle_tcode(
1603221167Sgnn    vxge_hal_vpath_h vpath_handle,
1604221167Sgnn    vxge_hal_rxd_h rxdh,
1605221167Sgnn    u8 t_code)
1606221167Sgnn{
1607221167Sgnn	__hal_device_t *hldev;
1608221167Sgnn	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1609221167Sgnn
1610221167Sgnn	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1611221167Sgnn
1612221167Sgnn	hldev = (__hal_device_t *) vp->vpath->hldev;
1613221167Sgnn
1614221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
1615221167Sgnn	    __FILE__, __func__, __LINE__);
1616221167Sgnn
1617221167Sgnn	vxge_hal_trace_log_ring(
1618221167Sgnn	    "vpath_handle = 0x"VXGE_OS_STXFMT", "
1619221167Sgnn	    "rxdh = 0x"VXGE_OS_STXFMT", t_code = 0x%d",
1620221167Sgnn	    (ptr_t) vpath_handle, (ptr_t) rxdh, t_code);
1621221167Sgnn
1622221167Sgnn	switch (t_code) {
1623221167Sgnn	case 0x0:
1624221167Sgnn		/* 0x0: Transfer ok. */
1625221167Sgnn		break;
1626221167Sgnn	case 0x1:
1627221167Sgnn		/*
1628221167Sgnn		 * 0x1: Layer 3 checksum presentation
1629221167Sgnn		 *	configuration mismatch.
1630221167Sgnn		 */
1631221167Sgnn		break;
1632221167Sgnn	case 0x2:
1633221167Sgnn		/*
1634221167Sgnn		 * 0x2: Layer 4 checksum presentation
1635221167Sgnn		 *	configuration mismatch.
1636221167Sgnn		 */
1637221167Sgnn		break;
1638221167Sgnn	case 0x3:
1639221167Sgnn		/*
1640221167Sgnn		 * 0x3: Layer 3 and Layer 4 checksum
1641221167Sgnn		 *	presentation configuration mismatch.
1642221167Sgnn		 */
1643221167Sgnn		break;
1644221167Sgnn	case 0x4:
1645221167Sgnn		/* 0x4: Reserved. */
1646221167Sgnn		break;
1647221167Sgnn	case 0x5:
1648221167Sgnn		/*
1649221167Sgnn		 * 0x5: Layer 3 error unparseable packet,
1650221167Sgnn		 *	such as unknown IPv6 header.
1651221167Sgnn		 */
1652221167Sgnn		break;
1653221167Sgnn	case 0x6:
1654221167Sgnn		/*
1655221167Sgnn		 * 0x6: Layer 2 error frame integrity
1656221167Sgnn		 *	error, such as FCS or ECC).
1657221167Sgnn		 */
1658221167Sgnn		break;
1659221167Sgnn	case 0x7:
1660221167Sgnn		/*
1661221167Sgnn		 * 0x7: Buffer size error the RxD buffer(s)
1662221167Sgnn		 *	were not appropriately sized and
1663221167Sgnn		 *	data loss occurred.
1664221167Sgnn		 */
1665221167Sgnn		break;
1666221167Sgnn	case 0x8:
1667221167Sgnn		/* 0x8: Internal ECC error RxD corrupted. */
1668221167Sgnn		__hal_device_handle_error(vp->vpath->hldev,
1669221167Sgnn		    vp->vpath->vp_id, VXGE_HAL_EVENT_ECCERR);
1670221167Sgnn		break;
1671221167Sgnn	case 0x9:
1672221167Sgnn		/*
1673221167Sgnn		 * 0x9: Benign overflow the contents of
1674221167Sgnn		 *	Segment1 exceeded the capacity of
1675221167Sgnn		 *	Buffer1 and the remainder was placed
1676221167Sgnn		 *	in Buffer2. Segment2 now starts in
1677221167Sgnn		 *	Buffer3. No data loss or errors occurred.
1678221167Sgnn		 */
1679221167Sgnn		break;
1680221167Sgnn	case 0xA:
1681221167Sgnn		/*
1682221167Sgnn		 * 0xA: Buffer size 0 one of the RxDs
1683221167Sgnn		 *	assigned buffers has a size of 0 bytes.
1684221167Sgnn		 */
1685221167Sgnn		break;
1686221167Sgnn	case 0xB:
1687221167Sgnn		/* 0xB: Reserved. */
1688221167Sgnn		break;
1689221167Sgnn	case 0xC:
1690221167Sgnn		/*
1691221167Sgnn		 * 0xC: Frame dropped either due to
1692221167Sgnn		 *	VPath Reset or because of a VPIN mismatch.
1693221167Sgnn		 */
1694221167Sgnn		break;
1695221167Sgnn	case 0xD:
1696221167Sgnn		/* 0xD: Reserved. */
1697221167Sgnn		break;
1698221167Sgnn	case 0xE:
1699221167Sgnn		/* 0xE: Reserved. */
1700221167Sgnn		break;
1701221167Sgnn	case 0xF:
1702221167Sgnn		/*
1703221167Sgnn		 * 0xF: Multiple errors more than one
1704221167Sgnn		 *	transfer code condition occurred.
1705221167Sgnn		 */
1706221167Sgnn		break;
1707221167Sgnn	default:
1708221167Sgnn		vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
1709221167Sgnn		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
1710221167Sgnn		return (VXGE_HAL_ERR_INVALID_TCODE);
1711221167Sgnn	}
1712221167Sgnn
1713221167Sgnn	vp->vpath->sw_stats->ring_stats.rxd_t_code_err_cnt[t_code]++;
1714221167Sgnn
1715221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: %d",
1716221167Sgnn	    __FILE__, __func__, __LINE__, VXGE_HAL_OK);
1717221167Sgnn	return (VXGE_HAL_OK);
1718221167Sgnn}
1719221167Sgnn
1720221167Sgnn
1721221167Sgnn/*
1722221167Sgnn * vxge_hal_ring_rxd_private_get - Get ULD private per-descriptor data.
1723221167Sgnn * @vpath_handle: Virtual Path handle.
1724221167Sgnn * @rxdh: Descriptor handle.
1725221167Sgnn *
1726221167Sgnn * Returns: private ULD	info associated	with the descriptor.
1727221167Sgnn * ULD requests	per-descriptor space via vxge_hal_ring_attr.
1728221167Sgnn *
1729221167Sgnn */
1730221167Sgnnvoid *
1731221167Sgnnvxge_hal_ring_rxd_private_get(
1732221167Sgnn    vxge_hal_vpath_h vpath_handle,
1733221167Sgnn    vxge_hal_rxd_h rxdh)
1734221167Sgnn{
1735221167Sgnn	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1736221167Sgnn
1737221167Sgnn	return (VXGE_HAL_RING_ULD_PRIV(
1738221167Sgnn	    ((__hal_ring_t *) vp->vpath->ringh), rxdh));
1739221167Sgnn
1740221167Sgnn}
1741221167Sgnn
1742221167Sgnn/*
1743221167Sgnn * vxge_hal_ring_rxd_free - Free descriptor.
1744221167Sgnn * @vpath_handle: Virtual Path handle.
1745221167Sgnn * @rxdh: Descriptor handle.
1746221167Sgnn *
1747221167Sgnn * Free	the reserved descriptor. This operation is "symmetrical" to
1748221167Sgnn * vxge_hal_ring_rxd_reserve. The "free-ing" completes the descriptor's
1749221167Sgnn * lifecycle.
1750221167Sgnn *
1751221167Sgnn * After free-ing (see vxge_hal_ring_rxd_free()) the descriptor again can
1752221167Sgnn * be:
1753221167Sgnn *
1754221167Sgnn * - reserved (vxge_hal_ring_rxd_reserve);
1755221167Sgnn *
1756221167Sgnn * - posted	(vxge_hal_ring_rxd_post);
1757221167Sgnn *
1758221167Sgnn * - completed (vxge_hal_ring_rxd_next_completed);
1759221167Sgnn *
1760221167Sgnn * - and recycled again	(vxge_hal_ring_rxd_free).
1761221167Sgnn *
1762221167Sgnn * For alternative state transitions and more details please refer to
1763221167Sgnn * the design doc.
1764221167Sgnn *
1765221167Sgnn */
1766221167Sgnnvoid
1767221167Sgnnvxge_hal_ring_rxd_free(
1768221167Sgnn    vxge_hal_vpath_h vpath_handle,
1769221167Sgnn    vxge_hal_rxd_h rxdh)
1770221167Sgnn{
1771221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1772221167Sgnn	unsigned long flags;
1773221167Sgnn
1774221167Sgnn#endif
1775221167Sgnn	__hal_ring_t *ring;
1776221167Sgnn	__hal_device_t *hldev;
1777221167Sgnn	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1778221167Sgnn
1779221167Sgnn	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));
1780221167Sgnn
1781221167Sgnn	hldev = (__hal_device_t *) vp->vpath->hldev;
1782221167Sgnn
1783221167Sgnn	vxge_hal_trace_log_ring("==> %s:%s:%d",
1784221167Sgnn	    __FILE__, __func__, __LINE__);
1785221167Sgnn
1786221167Sgnn	vxge_hal_trace_log_ring(
1787221167Sgnn	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
1788221167Sgnn	    (ptr_t) vpath_handle, (ptr_t) rxdh);
1789221167Sgnn
1790221167Sgnn	ring = (__hal_ring_t *) vp->vpath->ringh;
1791221167Sgnn
1792221167Sgnn	vxge_assert(ring != NULL);
1793221167Sgnn
1794221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1795221167Sgnn	vxge_os_spin_lock(&ring->channel.post_lock);
1796221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1797221167Sgnn	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
1798221167Sgnn#endif
1799221167Sgnn
1800221167Sgnn	__hal_channel_dtr_free(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
1801221167Sgnn#if defined(VXGE_OS_MEMORY_CHECK)
1802221167Sgnn	VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->allocated = 0;
1803221167Sgnn#endif
1804221167Sgnn
1805221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST)
1806221167Sgnn	vxge_os_spin_unlock(&ring->channel.post_lock);
1807221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
1808221167Sgnn	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
1809221167Sgnn#endif
1810221167Sgnn
1811221167Sgnn	vxge_hal_trace_log_ring("<== %s:%s:%d  Result: 0",
1812221167Sgnn	    __FILE__, __func__, __LINE__);
1813221167Sgnn}
1814