/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef XGE_DEBUG_FP
#include <dev/nxge/include/xgehal-ring.h>
#endif

__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{

	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_rxd_priv_t *rxd_priv;

	xge_assert(rxdp);

#if defined(XGE_HAL_USE_5B_MODE)
	xge_assert(ring);
	if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	    xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
#if defined (XGE_OS_PLATFORM_64BIT)
	    int memblock_idx = rxdp_5->host_control >> 16;
	    int i = rxdp_5->host_control & 0xFFFF;
	    rxd_priv = (xge_hal_ring_rxd_priv_t *)
	        ((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i);
#else
	    /* 32-bit case */
	    rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
	} else
#endif
	{
	    rxd_priv = (xge_hal_ring_rxd_priv_t *)
	            (ulong_t)rxdp->host_control;
	}

	xge_assert(rxd_priv);
	xge_assert(rxd_priv->dma_object);

	xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);

	xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
	                        rxd_priv->dma_addr);

	return rxd_priv;
}

__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
{
	   return (int)*((u64 *)(void *)((char *)block +
	                           XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t*block, int memblock_idx)
{
	   *((u64 *)(void *)((char *)block +
	                   XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
	                   memblock_idx;
}


__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
{
	return (dma_addr_t)*((u64 *)(void *)((char *)block +
	        XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
	        dma_addr_t dma_next)
{
	*((u64 *)(void *)((char *)block +
	          XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/**
 * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Returns: private ULD info associated with the descriptor.
 * ULD requests per-descriptor space via xge_hal_channel_open().
 *
 * See also: xge_hal_fifo_dtr_private().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh) +
	                sizeof(xge_hal_ring_rxd_priv_t);
}
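
/*
 * Illustrative sketch (not part of the HAL API): if the ULD requested
 * per-descriptor space when it called xge_hal_channel_open(), that space can
 * be used to remember, e.g., the OS buffer attached to this RxD. The type
 * my_rxd_ctx_t and its mbuf field are hypothetical.
 *
 *	my_rxd_ctx_t *ctx = (my_rxd_ctx_t *)
 *	        xge_hal_ring_dtr_private(channelh, dtrh);
 *	ctx->mbuf = mbuf;	// stash the buffer mapped into buffer0_ptr
 */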

/**
 * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
 * driver (ULD)) and posting on the corresponding channel (@channelh)
 * via xge_hal_ring_dtr_post().
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
 * xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	xge_hal_status_e status;
#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
	flags);
#endif

	status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
	             flags);
#endif

	if (status == XGE_HAL_OK) {
	    xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;

	    /* instead of memset: reset this RxD */
	    rxdp->control_1 = rxdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
	    __hal_ring_rxd_priv((xge_hal_ring_t *) channelh, rxdp)->allocated = 1;
#endif
	}

	return status;
}

/**
 * xge_hal_ring_dtr_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 * xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	        xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way.. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);

	/* Herc only; a few extra cycles are imposed on Xena and/or
	 * when RTH is not enabled.
	 * Alternatively, one could check
	 * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en. */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
	        XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
	        XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
}
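
/*
 * Illustrative sketch: using the returned xge_hal_dtr_info_t{} to decide
 * whether hardware checksum verification can be trusted for a completed 1b
 * RxD. The checksum-ok test shown is a typical ULD policy, not mandated by
 * HAL; it assumes the XGE_HAL_FRAME_PROTO_TCP_OR_UDP, XGE_HAL_L3_CKSUM_OK
 * and XGE_HAL_L4_CKSUM_OK definitions from the HAL headers.
 *
 *	xge_hal_dtr_info_t ext_info;
 *
 *	xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);
 *	if ((ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
 *	    ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
 *	    ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK) {
 *	    // checksums verified by the adapter; skip software verification
 *	}
 */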

/**
 * xge_hal_ring_dtr_info_nb_get - Get extended information associated
 * with a completed receive descriptor for 3b or 5b
 * modes.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 *           xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	        xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way.. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
	/* Herc only; a few extra cycles are imposed on Xena and/or
	 * when RTH is not enabled. Same comment as above. */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
	        XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
	        XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = (u32)rxdp->buffer0_ptr;
}

/**
 * xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 *               should carry. Note that by the time
 *               xge_hal_ring_dtr_1b_set
 *               is called, the receive buffer should be already mapped
 *               to the corresponding Xframe device.
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via xge_hal_ring_dtr_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);

	xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_1b_set: rxdp %p control_2 %p buffer0_ptr %p",
	            (xge_hal_ring_rxd_1_t *)dtrh,
	            rxdp->control_2,
	            rxdp->buffer0_ptr);
}

/**
 * xge_hal_ring_dtr_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
 *               carries. Returned by HAL.
 * @pkt_length: Length (in bytes) of the data in the buffer pointed by
 *              @dma_pointer. Returned by HAL.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses completed descriptor to populate receive
 * buffer pointer and other "out" parameters. The function always succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    dma_addr_t *dma_pointer, int *pkt_length)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	*pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
	*dma_pointer = rxdp->buffer0_ptr;

	((xge_hal_channel_t *)channelh)->poll_bytes += *pkt_length;
}

/**
 * xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
 *               _this_ descriptor should carry.
 *               Note that by the time xge_hal_ring_dtr_3b_set
 *               is called, the receive buffers should be mapped
 *               to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 3-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
	        int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
}

/**
 * xge_hal_ring_dtr_3b_get - Get data from the completed 3-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
 *                carries. The first two buffers contain ethernet and
 *                (IP + transport) headers. The 3rd buffer contains packet
 *                data.
 *                Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
 * This inline helper-function uses completed descriptor to populate receive
 * buffer pointer and other "out" parameters. The function always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);

	((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
	    sizes[2];
}

/**
 * xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
 *               _this_ descriptor should carry.
 *               Note that by the time xge_hal_ring_dtr_5b_set
 *               is called, the receive buffers should be mapped
 *               to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 5-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
	        int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
	rxdp->buffer3_ptr = dma_pointers[3];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
	rxdp->buffer4_ptr = dma_pointers[4];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
}

/**
 * xge_hal_ring_dtr_5b_get - Get data from the completed 5-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
 *                carries. The first 4 buffers contain L2 (ethernet) through
 *                L5 headers. The 5th buffer contains received (application)
 *                data. Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
 * This inline helper-function uses completed descriptor to populate receive
 * buffer pointer and other "out" parameters. The function always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);

	dma_pointers[3] = rxdp->buffer3_ptr;
	sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);

	dma_pointers[4] = rxdp->buffer4_ptr;
	sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);

	((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
	    sizes[2] + sizes[3] + sizes[4];
}


/**
 * xge_hal_ring_dtr_pre_post - Prepare and enqueue a receive descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Enqueue the descriptor on the channel's work array without yet
 * transferring ownership to the Xframe device. Together with
 * xge_hal_ring_dtr_post_post() (or xge_hal_ring_dtr_post_post_wmb())
 * this forms the two-step equivalent of xge_hal_ring_dtr_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif

	rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;

#ifdef XGE_DEBUG_ASSERT
	    /* make sure Xena overwrites the (illegal) t_code on completion */
	    XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
#endif

	xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_pre_post: rxd 0x"XGE_OS_LLXFMT" posted %d  post_qid %d",
	        (unsigned long long)(ulong_t)dtrh,
	        ((xge_hal_ring_t *)channelh)->channel.post_index,
	        ((xge_hal_ring_t *)channelh)->channel.post_qid);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	flags);
#endif

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
	{
	    xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	    if (channel->post_index != 0) {
	        xge_hal_dtr_h prev_dtrh;
	        xge_hal_ring_rxd_priv_t *rxdp_priv;

	        rxdp_priv = __hal_ring_rxd_priv((xge_hal_ring_t*)channel, rxdp);
	        prev_dtrh = channel->work_arr[channel->post_index - 1];

	        if (prev_dtrh != NULL &&
	            (rxdp_priv->dma_offset & (~0xFFF)) !=
	                    rxdp_priv->dma_offset) {
	            xge_assert((char *)prev_dtrh +
	                ((xge_hal_ring_t*)channel)->rxd_size == dtrh);
	        }
	    }
	}
#endif

	__hal_channel_dtr_post(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	               flags);
#endif
}


/**
 * xge_hal_ring_dtr_post_post - Complete posting of a receive descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Transfer ownership of a previously pre-posted descriptor (see
 * xge_hal_ring_dtr_pre_post()) to the Xframe device and, on platforms that
 * require it, DMA-sync the descriptor to the device. This is the second
 * half of xge_hal_ring_dtr_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif
	/* do POST */
	rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
	              priv->dma_handle, priv->dma_addr,
	          priv->dma_offset, ring->rxd_size,
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post: rxdp %p control_1 %p",
	              (xge_hal_ring_rxd_1_t *)dtrh,
	              rxdp->control_1);

	if (ring->channel.usage_cnt > 0)
	    ring->channel.usage_cnt--;
}

/**
 * xge_hal_ring_dtr_post_post_wmb.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Same as xge_hal_ring_dtr_post_post(), but additionally issues a write
 * memory barrier before transferring ownership of the descriptor.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif
	/* Do memory barrier before changing the ownership */
	xge_os_wmb();

	/* do POST */
	rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
	              priv->dma_handle, priv->dma_addr,
	          priv->dma_offset, ring->rxd_size,
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	if (ring->channel.usage_cnt > 0)
	    ring->channel.usage_cnt--;

	xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post_wmb: rxdp %p control_1 %p rxds_with_host %d",
	              (xge_hal_ring_rxd_1_t *)dtrh,
	              rxdp->control_1, ring->channel.usage_cnt);

}
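
/*
 * Illustrative sketch: the two calls below are together equivalent to
 * xge_hal_ring_dtr_post(), except that the _wmb variant orders the
 * descriptor writes before the ownership transfer. An ULD that needs to do
 * OS-specific work between queuing and ownership transfer can use the
 * split form:
 *
 *	xge_hal_ring_dtr_pre_post(channelh, dtrh);
 *	// ... OS-specific preparation of the receive buffer, if any ...
 *	xge_hal_ring_dtr_post_post_wmb(channelh, dtrh);
 */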

/**
 * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
 * @channelh: Channel handle.
 * @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
 *
 * Post descriptor on the 'ring' type channel.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Xframe interface specification for a given service (LL, etc.).
 *
 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_dtr_pre_post(channelh, dtrh);
	xge_hal_ring_dtr_post_post(channelh, dtrh);
}
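
/*
 * Illustrative sketch in the spirit of ex_post_all_rx{}: replenishing the
 * ring with 1-buffer-mode descriptors. Buffer allocation and DMA mapping are
 * OS/ULD specific; alloc_and_map() is a hypothetical helper that returns a
 * bus address and buffer size.
 *
 *	xge_hal_dtr_h dtrh;
 *	dma_addr_t dma_addr;
 *	int size;
 *
 *	while (xge_hal_ring_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
 *	    if (alloc_and_map(&dma_addr, &size) != 0) {
 *	        xge_hal_ring_dtr_free(channelh, dtrh);
 *	        break;
 *	    }
 *	    xge_hal_ring_dtr_1b_set(dtrh, dma_addr, size);
 *	    xge_hal_ring_dtr_post(channelh, dtrh);
 *	}
 */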

/**
 * xge_hal_ring_dtr_next_completed - Get the _next_ completed
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 *          Receive Descriptor Format. Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses channel callback (*xge_hal_channel_callback_f) to notify
 * upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_ring_dtr_next_completed to retrieve the rest of
 * the completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_ring_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Xframe will complete the descriptor and indicate
 * to the host that the received data is not to be used.
 * For details please refer to Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
	            u8 *t_code)
{
	xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif

	__hal_channel_dtr_try_complete(ring, dtrh);
	rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
	if (rxdp == NULL) {
	    return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* Note: 24 bytes at most means:
	 *  - Control_3 in case of 5-buffer mode
	 *  - Control_1 and Control_2
	 *
	 * This is the only length that needs to be invalidated
	 * for this type of channel. */
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
	              priv->dma_handle, priv->dma_addr,
	          priv->dma_offset, 24,
	          XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/* check whether it is not the end */
	if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
	    !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
#ifndef XGE_HAL_IRQ_POLLING
	    if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
	        /* reset it. since we don't want to return
	         * garbage to the ULD */
	        *dtrh = 0;
	        return XGE_HAL_COMPLETIONS_REMAIN;
	    }
#endif

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
	    if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	        xge_assert(((xge_hal_ring_rxd_5_t *)
	                rxdp)->host_control!=0);
	    }
#endif

#else
	    xge_assert(rxdp->host_control!=0);
#endif
#endif

	    __hal_channel_dtr_complete(ring);

	    *t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);

	            /* see XGE_HAL_RXD_SET_T_CODE() above.. */
	    xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);

	    xge_debug_ring(XGE_TRACE,
	        "compl_index %d post_qid %d t_code %d rxd 0x"XGE_OS_LLXFMT,
	        ((xge_hal_channel_t*)ring)->compl_index,
	        ((xge_hal_channel_t*)ring)->post_qid, *t_code,
	        (unsigned long long)(ulong_t)rxdp);

	    ring->channel.usage_cnt++;
	    if (ring->channel.stats.usage_max < ring->channel.usage_cnt)
	        ring->channel.stats.usage_max = ring->channel.usage_cnt;

	    return XGE_HAL_OK;
	}

	/* reset it. since we don't want to return
	 * garbage to the ULD */
	*dtrh = 0;
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
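
/*
 * Illustrative sketch in the spirit of ex_rx_compl{}: draining completions,
 * e.g. from inside the channel callback. Handing the data to the stack and
 * replenishing the ring are ULD specific and omitted; as documented above,
 * a non-zero t_code indicates the received data must not be used.
 *
 *	xge_hal_dtr_h dtrh;
 *	dma_addr_t dma_addr;
 *	int pkt_length;
 *	u8 t_code;
 *
 *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh, &t_code) ==
 *	       XGE_HAL_OK) {
 *	    xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma_addr, &pkt_length);
 *	    if (t_code != 0) {
 *	        // bad completion: drop/recycle the buffer instead of using it
 *	    }
 *	    // ... pass the buffer up the stack, then recycle the descriptor
 *	    xge_hal_ring_dtr_free(channelh, dtrh);
 *	}
 */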

/**
 * xge_hal_ring_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
 * be:
 *
 * - reserved (xge_hal_ring_dtr_reserve);
 *
 * - posted (xge_hal_ring_dtr_post);
 *
 * - completed (xge_hal_ring_dtr_next_completed);
 *
 * - and recycled again (xge_hal_ring_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif

	__hal_channel_dtr_free(channelh, dtrh);
#if defined(XGE_OS_MEMORY_CHECK)
	__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh)->allocated = 0;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif
}

/**
 * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed
 * @channelh: Channel handle.
 *
 * Checks if the _next_ completed descriptor is in host memory.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh)
{
	xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_dtr_h dtrh;

	__hal_channel_dtr_try_complete(ring, &dtrh);
	rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	if (rxdp == NULL) {
	    return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

	/* check whether it is not the end */
	if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
	    !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
	    if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	        xge_assert(((xge_hal_ring_rxd_5_t *)
	                rxdp)->host_control!=0);
	    }
#endif

#else
	    xge_assert(rxdp->host_control!=0);
#endif
#endif
	    return XGE_HAL_OK;
	}

	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
