xgehal-fifo-fp.c revision 331722
1/*-
2 * Copyright (c) 2002-2007 Neterion, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: stable/11/sys/dev/nxge/xgehal/xgehal-fifo-fp.c 331722 2018-03-29 02:50:57Z eadler $
27 */
28
29#ifdef XGE_DEBUG_FP
30#include <dev/nxge/include/xgehal-fifo.h>
31#endif
32
33__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t*
34__hal_fifo_txdl_priv(xge_hal_dtr_h dtrh)
35{
36	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t*)dtrh;
37	xge_hal_fifo_txdl_priv_t *txdl_priv;
38
39	xge_assert(txdp);
40	txdl_priv = (xge_hal_fifo_txdl_priv_t *)
41	            (ulong_t)txdp->host_control;
42
43	xge_assert(txdl_priv);
44	xge_assert(txdl_priv->dma_object);
45	xge_assert(txdl_priv->dma_addr);
46
47	xge_assert(txdl_priv->dma_object->handle == txdl_priv->dma_handle);
48
49	return txdl_priv;
50}
51
/*
 * __hal_fifo_dtr_post_single - Hand one TxDL to the hardware.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle (first TxD of the list).
 * @ctrl_1: Extra list-control bits (FIRST/LAST list, special func) to be
 *          OR-ed into the list_control doorbell word.
 *
 * Sets the XENA ownership bit, syncs the TxDL to the device when the
 * platform requires it, then performs the mandatory two-write doorbell
 * sequence (pointer first, then list control word) and updates channel
 * usage statistics.  Caller is expected to hold the fifo post lock.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	        u64 ctrl_1)
{
	xge_hal_fifo_t            *fifo    = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_hw_pair_t    *hw_pair = fifo->hw_pair;
	xge_hal_fifo_txd_t        *txdp    = (xge_hal_fifo_txd_t *)dtrh;
	xge_hal_fifo_txdl_priv_t  *txdl_priv;
	u64           ctrl;

	/* pass ownership of the descriptor list to the adapter */
	txdp->control_1 |= XGE_HAL_TXD_LIST_OWN_XENA;

#ifdef XGE_DEBUG_ASSERT
	    /* make sure Xena overwrites the (illegal) t_code value on completion */
	    XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5);
#endif

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* sync the TxDL to device */
	xge_os_dma_sync(fifo->channel.pdev,
	              txdl_priv->dma_handle,
	          txdl_priv->dma_addr,
	          txdl_priv->dma_offset,
	          txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */,
	          XGE_OS_DMA_DIR_TODEVICE);
#endif
	/* write the pointer first */
	xge_os_pio_mem_write64(fifo->channel.pdev,
	             fifo->channel.regh1,
	                     txdl_priv->dma_addr,
	                     &hw_pair->txdl_pointer);

	/* spec: 0x00 = 1 TxD in the list */
	ctrl = XGE_HAL_TX_FIFO_LAST_TXD_NUM(txdl_priv->frags - 1);
	ctrl |= ctrl_1;
	ctrl |= fifo->no_snoop_bits;

	/* LSO requires the special-function bit in the list control word */
	if (txdp->control_1 & XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO)) {
	    ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
	}

	/*
	 * according to the XENA spec:
	 *
	 * It is important to note that pointers and list control words are
	 * always written in pairs: in the first write, the host must write a
	 * pointer, and in the second write, it must write the list control
	 * word. Any other access will result in an error. Also, all 16 bytes
	 * of the pointer/control structure must be written, including any
	 * reserved bytes.
	 */
	xge_os_wmb();

	/*
	 * we want touch work_arr in order with ownership bit set to HW
	 */
	__hal_channel_dtr_post(channelh, dtrh);

	/* second doorbell write: the list control word completes the post */
	xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
	        ctrl, &hw_pair->list_control);

	xge_debug_fifo(XGE_TRACE, "posted txdl 0x"XGE_OS_LLXFMT" ctrl 0x"XGE_OS_LLXFMT" "
	    "into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr,
	    (unsigned long long)ctrl,
	    (unsigned long long)(ulong_t)&hw_pair->txdl_pointer);

#ifdef XGE_HAL_FIFO_DUMP_TXD
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
	    XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT,
	    txdp->control_1, txdp->control_2, txdp->buffer_pointer,
	    txdp->host_control, txdl_priv->dma_addr);
#endif

	/* book-keeping: track total posts and high-water usage mark */
	fifo->channel.stats.total_posts++;
	fifo->channel.usage_cnt++;
	if (fifo->channel.stats.usage_max < fifo->channel.usage_cnt)
	    fifo->channel.stats.usage_max = fifo->channel.usage_cnt;
}
132
133__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
134__hal_fifo_txdl_free_many(xge_hal_channel_h channelh,
135	          xge_hal_fifo_txd_t *txdp, int list_size, int frags)
136{
137	xge_hal_fifo_txdl_priv_t *current_txdl_priv;
138	xge_hal_fifo_txdl_priv_t *next_txdl_priv;
139	int invalid_frags = frags % list_size;
140	if (invalid_frags){
141	    xge_debug_fifo(XGE_ERR,
142	        "freeing corrupt dtrh %p, fragments %d list size %d",
143	        txdp, frags, list_size);
144	    xge_assert(invalid_frags == 0);
145	}
146	while(txdp){
147	    xge_debug_fifo(XGE_TRACE,
148	        "freeing linked dtrh %p, fragments %d list size %d",
149	        txdp, frags, list_size);
150	    current_txdl_priv = __hal_fifo_txdl_priv(txdp);
151#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
152	    current_txdl_priv->allocated = 0;
153#endif
154	    __hal_channel_dtr_free(channelh, txdp);
155	    next_txdl_priv = current_txdl_priv->next_txdl_priv;
156	    xge_assert(frags);
157	    frags -= list_size;
158	    if (next_txdl_priv) {
159	        current_txdl_priv->next_txdl_priv = NULL;
160	        txdp = next_txdl_priv->first_txdp;
161	    }
162	    else {
163	        xge_debug_fifo(XGE_TRACE,
164	        "freed linked dtrh fragments %d list size %d",
165	        frags, list_size);
166	        break;
167	    }
168	}
169	xge_assert(frags == 0)
170}
171
/*
 * __hal_fifo_txdl_restore_many - Return a chain of reserved TxDLs to the
 * channel's reserve array.
 * @channelh: Channel handle.
 * @txdp: First TxD of the first TxDL in the chain.
 * @txdl_count: Number of TxDLs in the chain.
 *
 * Used on the reserve-failure path: each linked TxDL is unlinked and
 * handed back via __hal_channel_dtr_restore() at decreasing offsets
 * (txdl_count-1 .. 0); the trailing NULL call lets the channel finalize
 * the restore of @txdl_count descriptors.
 */
__HAL_STATIC_FIFO  __HAL_INLINE_FIFO void
__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
	          xge_hal_fifo_txd_t *txdp, int txdl_count)
{
	xge_hal_fifo_txdl_priv_t *current_txdl_priv;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv;
	int i = txdl_count;

	/* restoring must not exceed the channel's initial reserve size */
	xge_assert(((xge_hal_channel_t *)channelh)->reserve_length +
	    txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial);

	current_txdl_priv = __hal_fifo_txdl_priv(txdp);
	do{
	    xge_assert(i);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	    current_txdl_priv->allocated = 0;
#endif
	    /* remember the link before clearing it, then restore this TxDL
	     * at offset --i (counts down toward zero) */
	    next_txdl_priv = current_txdl_priv->next_txdl_priv;
	    txdp = current_txdl_priv->first_txdp;
	    current_txdl_priv->next_txdl_priv = NULL;
	    __hal_channel_dtr_restore(channelh, (xge_hal_dtr_h )txdp, --i);
	    xge_debug_fifo(XGE_TRACE,
	        "dtrh %p restored at offset %d", txdp, i);
	    current_txdl_priv = next_txdl_priv;
	} while(current_txdl_priv);
	/* NULL dtrh: commit the restore of txdl_count descriptors */
	__hal_channel_dtr_restore(channelh, NULL, txdl_count);
}
199/**
200 * xge_hal_fifo_dtr_private - Retrieve per-descriptor private data.
201 * @channelh: Channel handle.
202 * @dtrh: Descriptor handle.
203 *
204 * Retrieve per-descriptor private data.
205 * Note that ULD requests per-descriptor space via
206 * xge_hal_channel_open().
207 *
208 * Returns: private ULD data associated with the descriptor.
209 * Usage: See ex_xmit{} and ex_tx_compl{}.
210 */
211__HAL_STATIC_FIFO __HAL_INLINE_FIFO void*
212xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh)
213{
214	xge_hal_fifo_txd_t *txdp    = (xge_hal_fifo_txd_t *)dtrh;
215
216	return ((char *)(ulong_t)txdp->host_control) +
217	                sizeof(xge_hal_fifo_txdl_priv_t);
218}
219
220/**
221 * xge_hal_fifo_dtr_buffer_cnt - Get number of buffers carried by the
222 * descriptor.
223 * @dtrh: Descriptor handle.
224 *
225 * Returns: Number of buffers stored in the given descriptor. Can be used
226 * _after_ the descriptor is set up for posting (see
227 * xge_hal_fifo_dtr_post()) and _before_ it is deallocated (see
228 * xge_hal_fifo_dtr_free()).
229 *
230 */
231__HAL_STATIC_FIFO __HAL_INLINE_FIFO int
232xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh)
233{
234	xge_hal_fifo_txdl_priv_t  *txdl_priv;
235
236	txdl_priv = __hal_fifo_txdl_priv(dtrh);
237
238	return txdl_priv->frags;
239}
240/**
241 * xge_hal_fifo_dtr_reserve_many- Reserve fifo descriptors which span more
242 *  than single txdl.
243 * @channelh: Channel handle.
244 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
245 *        with a valid handle.
246 * @frags: minimum number of fragments to be reserved.
247 *
248 * Reserve TxDL(s) (that is, fifo descriptor)
 * for the subsequent filling-in by the upper layer driver (ULD)
250 * and posting on the corresponding channel (@channelh)
251 * via xge_hal_fifo_dtr_post().
252 *
253 * Returns: XGE_HAL_OK - success;
254 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
255 *
256 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
257 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
258 * Usage: See ex_xmit{}.
259 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
	            xge_hal_dtr_h *dtrh, const int frags)
{
	xge_hal_status_e status = XGE_HAL_OK;
	/* alloc_frags: fragments gathered so far in the current chain;
	 * dang_frags: fragments in a "dangling" chain that could not be
	 * linked contiguously (different memblock) */
	int alloc_frags = 0, dang_frags = 0;
	xge_hal_fifo_txd_t *curr_txdp = NULL;
	xge_hal_fifo_txd_t *next_txdp;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv, *curr_txdl_priv = NULL;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	int max_frags = fifo->config->max_frags;
	xge_hal_dtr_h dang_dtrh = NULL;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags=0;
#endif
	xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d",
	    frags);
	xge_assert(frags < (fifo->txdl_per_memblock * max_frags));
#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags);
#endif
	/* keep allocating whole TxDLs (max_frags each) until the request
	 * is satisfied or the channel runs out of descriptors */
	while(alloc_frags < frags) {
	    status = __hal_channel_dtr_alloc(channelh,
	            (xge_hal_dtr_h *)(void*)&next_txdp);
	    if (status != XGE_HAL_OK){
	        xge_debug_fifo(XGE_ERR,
	            "failed to allocate linked fragments rc %d",
	             status);
	        xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
	        /* undo any partial work: restore both the current chain
	         * and any dangling chain before bailing out */
	        if (*dtrh) {
	            xge_assert(alloc_frags/max_frags);
	            __hal_fifo_txdl_restore_many(channelh,
	                (xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags);
	        }
	        if (dang_dtrh) {
	            xge_assert(dang_frags/max_frags);
	            __hal_fifo_txdl_restore_many(channelh,
	                (xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags);
	        }
	        break;
	    }
	    xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
	        " for frags %d", next_txdp, frags);
	    next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
	    xge_assert(next_txdl_priv);
	    xge_assert(next_txdl_priv->first_txdp == next_txdp);
	    next_txdl_priv->dang_txdl = NULL;
	    next_txdl_priv->dang_frags = 0;
	    next_txdl_priv->next_txdl_priv = NULL;
#if defined(XGE_OS_MEMORY_CHECK)
	    next_txdl_priv->allocated = 1;
#endif
	    /* first TxDL: start the chain and report it via *dtrh */
	    if (!curr_txdp || !curr_txdl_priv) {
	        curr_txdp = next_txdp;
	        curr_txdl_priv = next_txdl_priv;
	        *dtrh = (xge_hal_dtr_h)next_txdp;
	        alloc_frags = max_frags;
	        continue;
	    }
	    /* same memblock: TxDLs are contiguous, extend the chain */
	    if (curr_txdl_priv->memblock ==
	        next_txdl_priv->memblock) {
	        xge_debug_fifo(XGE_TRACE,
	            "linking dtrh %p, with %p",
	            *dtrh, next_txdp);
	        xge_assert (next_txdp ==
	            curr_txdp + max_frags);
	        alloc_frags += max_frags;
	        curr_txdl_priv->next_txdl_priv = next_txdl_priv;
	    }
	    else {
	        /* memblock boundary crossed: the chain built so far cannot
	         * be extended contiguously.  Park it as "dangling" on the
	         * new TxDL and start a fresh chain from here. */
	        xge_assert(*dtrh);
	        xge_assert(dang_dtrh == NULL);
	        dang_dtrh = *dtrh;
	        dang_frags = alloc_frags;
	        xge_debug_fifo(XGE_TRACE,
	            "dangling dtrh %p, linked with dtrh %p",
	            *dtrh, next_txdp);
	        next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh;
	        next_txdl_priv->dang_frags = alloc_frags;
	        alloc_frags = max_frags;
	        *dtrh  = next_txdp;
	    }
	    curr_txdp = next_txdp;
	    curr_txdl_priv = next_txdl_priv;
	}

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_unlock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&fifo->channel.reserve_lock, flags);
#endif

	if (status == XGE_HAL_OK) {
	    xge_hal_fifo_txdl_priv_t * txdl_priv;
	    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
	    xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;
	    txdl_priv = __hal_fifo_txdl_priv(txdp);
	    /* reset the TxDL's private */
	    txdl_priv->align_dma_offset = 0;
	    txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
	    txdl_priv->align_used_frags = 0;
	    txdl_priv->frags = 0;
	    txdl_priv->bytes_sent = 0;
	    txdl_priv->alloc_frags = alloc_frags;
	    /* reset TxD0 */
	    txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
	    txdl_priv->allocated = 1;
#endif
	    /* update statistics */
	    statsp->total_posts_dtrs_many++;
	    statsp->total_posts_frags_many += txdl_priv->alloc_frags;
	    if (txdl_priv->dang_frags){
	        statsp->total_posts_dang_dtrs++;
	        statsp->total_posts_dang_frags += txdl_priv->dang_frags;
	    }
	}

	return status;
}
383
384/**
385 * xge_hal_fifo_dtr_reserve - Reserve fifo descriptor.
386 * @channelh: Channel handle.
387 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
388 *        with a valid handle.
389 *
390 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the upper layer driver (ULD)
392 * and posting on the corresponding channel (@channelh)
393 * via xge_hal_fifo_dtr_post().
394 *
395 * Note: it is the responsibility of ULD to reserve multiple descriptors
396 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
397 * carries up to configured number (fifo.max_frags) of contiguous buffers.
398 *
399 * Returns: XGE_HAL_OK - success;
400 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
401 *
402 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
403 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
404 * Usage: See ex_xmit{}.
405 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	xge_hal_status_e status;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags=0;
#endif

	/* the channel's reserve array is shared; serialize per the
	 * compile-time-selected locking strategy */
#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
	                     flags);
#endif

	status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
	                       flags);
#endif

	if (status == XGE_HAL_OK) {
	    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
	    xge_hal_fifo_txdl_priv_t *txdl_priv;

	    txdl_priv = __hal_fifo_txdl_priv(txdp);

	    /* reset the TxDL's private */
	    txdl_priv->align_dma_offset = 0;
	    txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
	    txdl_priv->align_used_frags = 0;
	    txdl_priv->frags = 0;
	    /* a single-TxDL reservation carries up to max_frags buffers */
	    txdl_priv->alloc_frags =
	        ((xge_hal_fifo_t *)channelh)->config->max_frags;
	    txdl_priv->dang_txdl = NULL;
	    txdl_priv->dang_frags = 0;
	    txdl_priv->next_txdl_priv = NULL;
	    txdl_priv->bytes_sent = 0;

	    /* reset TxD0 */
	    txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
	    txdl_priv->allocated = 1;
#endif
	}

	return status;
}
458
459/**
460 * xge_hal_fifo_dtr_reserve_sp - Reserve fifo descriptor and store it in
461 * the ULD-provided "scratch" memory.
462 * @channelh: Channel handle.
463 * @dtr_sp_size: Size of the %dtr_sp "scratch pad" that HAL can use for TxDL.
464 * @dtr_sp: "Scratch pad" supplied by upper-layer driver (ULD).
465 *
466 * Reserve TxDL and fill-in ULD supplied "scratch pad". The difference
467 * between this API and xge_hal_fifo_dtr_reserve() is (possibly) -
468 * performance.
469 *
470 * If upper-layer uses ULP-defined commands, and if those commands have enough
 * space for HAL/Xframe descriptors - then it is better (read: faster) to fit
472 * all the per-command information into one command, which is typically
473 * one contiguous block.
474 *
475 * Note: Unlike xge_hal_fifo_dtr_reserve(), this function can be used to
476 * allocate a single descriptor for transmit operation.
477 *
478 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_free(),
479 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
480 */
481__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
482xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channelh, int dtr_sp_size,
483	        xge_hal_dtr_h dtr_sp)
484{
485	/* FIXME: implement */
486	return XGE_HAL_OK;
487}
488
489/**
490 * xge_hal_fifo_dtr_post - Post descriptor on the fifo channel.
491 * @channelh: Channel handle.
492 * @dtrh: Descriptor obtained via xge_hal_fifo_dtr_reserve() or
493 * xge_hal_fifo_dtr_reserve_sp()
494 * @frags: Number of contiguous buffers that are part of a single
495 *         transmit operation.
496 *
497 * Post descriptor on the 'fifo' type channel for transmission.
498 * Prior to posting the descriptor should be filled in accordance with
499 * Host/Xframe interface specification for a given service (LL, etc.).
500 *
501 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_ring_dtr_post().
502 * Usage: See ex_xmit{}.
503 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp_last;
	xge_hal_fifo_txd_t *txdp_first;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

	/* mark the first TxD of the list and set the per-fifo interrupt
	 * type in its control_2 */
	txdp_first = (xge_hal_fifo_txd_t *)dtrh;
	txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
	txdp_first->control_2 |= fifo->interrupt_type;

	/* mark the last filled-in TxD (frags were counted during setup) */
	txdp_last = (xge_hal_fifo_txd_t *)dtrh + (txdl_priv->frags - 1);
	txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;

	/* serialize the two-write doorbell sequence against other posters */
#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock(fifo->post_lock_ptr);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(fifo->post_lock_ptr, flags);
#endif

	/* single TxDL: it is both the first and the last list */
	__hal_fifo_dtr_post_single(channelh, dtrh,
	     (u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST));

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_unlock(fifo->post_lock_ptr);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(fifo->post_lock_ptr, flags);
#endif
}
539
540/**
541 * xge_hal_fifo_dtr_post_many - Post multiple descriptors on fifo
542 * channel.
543 * @channelh: Channel to post descriptor.
544 * @num: Number of descriptors (i.e., fifo TxDLs) in the %dtrs[].
545 * @dtrs: Descriptors obtained via xge_hal_fifo_dtr_reserve().
546 * @frags_arr: Number of fragments carried @dtrs descriptors.
547 * Note that frag_arr[i] corresponds to descriptor dtrs[i].
548 *
549 * Post multi-descriptor on the fifo channel. The operation is atomic:
 * all descriptors are posted on the channel "back-to-back" without
551 * letting other posts (possibly driven by multiple transmitting threads)
552 * to interleave.
553 *
554 * See also: xge_hal_fifo_dtr_post(), xge_hal_ring_dtr_post().
555 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
	        xge_hal_dtr_h dtrs[])
{
	int i;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txd_t *txdp_last;
	xge_hal_fifo_txd_t *txdp_first;
	xge_hal_fifo_txdl_priv_t *txdl_priv_last;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	/* a single-descriptor post goes through xge_hal_fifo_dtr_post() */
	xge_assert(num > 1);

	/* GATHER_CODE_FIRST goes on the very first TxD of the first TxDL */
	txdp_first = (xge_hal_fifo_txd_t *)dtrs[0];
	txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
	txdp_first->control_2 |= fifo->interrupt_type;

	/* GATHER_CODE_LAST goes on the last filled TxD of the last TxDL */
	txdl_priv_last = __hal_fifo_txdl_priv(dtrs[num-1]);
	txdp_last = (xge_hal_fifo_txd_t *)dtrs[num-1] +
	                (txdl_priv_last->frags - 1);
	txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;

	/* hold the post lock across ALL posts so they go back-to-back */
#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	flags);
#endif

	for (i=0; i<num; i++) {
	    xge_hal_fifo_txdl_priv_t *txdl_priv;
	    u64 val64;
	    xge_hal_dtr_h dtrh = dtrs[i];

	    txdl_priv = __hal_fifo_txdl_priv(dtrh);
	    txdl_priv = txdl_priv; /* Cheat lint */

	    /* flag the first/last list in the sequence; every list in a
	     * multi-post carries the special-function bit */
	    val64 = 0;
	    if (i == 0) {
	         val64 |= XGE_HAL_TX_FIFO_FIRST_LIST;
	    } else if (i == num -1) {
	         val64 |= XGE_HAL_TX_FIFO_LAST_LIST;
	    }

	    val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
	    __hal_fifo_dtr_post_single(channelh, dtrh, val64);
	}

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	flags);
#endif

	fifo->channel.stats.total_posts_many++;
}
615
616/**
617 * xge_hal_fifo_dtr_next_completed - Retrieve next completed descriptor.
618 * @channelh: Channel handle.
619 * @dtrh: Descriptor handle. Returned by HAL.
620 * @t_code: Transfer code, as per Xframe User Guide,
621 *          Transmit Descriptor Format.
622 *          Returned by HAL.
623 *
624 * Retrieve the _next_ completed descriptor.
 * HAL uses channel callback (*xge_hal_channel_callback_f) to notify
626 * upper-layer driver (ULD) of new completed descriptors. After that
627 * the ULD can use xge_hal_fifo_dtr_next_completed to retrieve the rest
628 * completions (the very first completion is passed by HAL via
629 * xge_hal_channel_callback_f).
630 *
631 * Implementation-wise, the upper-layer driver is free to call
632 * xge_hal_fifo_dtr_next_completed either immediately from inside the
633 * channel callback, or in a deferred fashion and separate (from HAL)
634 * context.
635 *
636 * Non-zero @t_code means failure to process the descriptor.
637 * The failure could happen, for instance, when the link is
638 * down, in which case Xframe completes the descriptor because it
639 * is not able to send the data out.
640 *
641 * For details please refer to Xframe User Guide.
642 *
643 * Returns: XGE_HAL_OK - success.
644 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
645 * are currently available for processing.
646 *
647 * See also: xge_hal_channel_callback_f{},
648 * xge_hal_ring_dtr_next_completed().
649 * Usage: See ex_tx_compl{}.
650 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
	        xge_hal_dtr_h *dtrh, u8 *t_code)
{
	xge_hal_fifo_txd_t        *txdp;
	xge_hal_fifo_t            *fifo    = (xge_hal_fifo_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_fifo_txdl_priv_t  *txdl_priv;
#endif

	/* peek at the oldest posted descriptor; NULL means nothing posted */
	__hal_channel_dtr_try_complete(channelh, dtrh);
	txdp = (xge_hal_fifo_txd_t *)*dtrh;
	if (txdp == NULL) {
	    return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	txdl_priv = __hal_fifo_txdl_priv(txdp);

	/* sync TxDL to read the ownership
	 *
	 * Note: 16bytes means Control_1 & Control_2 */
	xge_os_dma_sync(fifo->channel.pdev,
	              txdl_priv->dma_handle,
	          txdl_priv->dma_addr,
	          txdl_priv->dma_offset,
	          16,
	          XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/* check whether host owns it */
	if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {

	    xge_assert(txdp->host_control!=0);

	    /* ownership returned to host: the descriptor is done */
	    __hal_channel_dtr_complete(channelh);

	    *t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1);

	            /* see XGE_HAL_SET_TXD_T_CODE() above.. */
	            xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);

	    /* usage_cnt tracks in-flight descriptors; never underflow */
	    if (fifo->channel.usage_cnt > 0)
	        fifo->channel.usage_cnt--;

	    return XGE_HAL_OK;
	}

	/* no more completions */
	*dtrh = 0;
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
703
704/**
705 * xge_hal_fifo_dtr_free - Free descriptor.
706 * @channelh: Channel handle.
707 * @dtr: Descriptor handle.
708 *
709 * Free the reserved descriptor. This operation is "symmetrical" to
710 * xge_hal_fifo_dtr_reserve or xge_hal_fifo_dtr_reserve_sp.
711 * The "free-ing" completes the descriptor's lifecycle.
712 *
713 * After free-ing (see xge_hal_fifo_dtr_free()) the descriptor again can
714 * be:
715 *
716 * - reserved (xge_hal_fifo_dtr_reserve);
717 *
718 * - posted (xge_hal_fifo_dtr_post);
719 *
720 * - completed (xge_hal_fifo_dtr_next_completed);
721 *
722 * - and recycled again (xge_hal_fifo_dtr_free).
723 *
724 * For alternative state transitions and more details please refer to
725 * the design doc.
726 *
727 * See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
728 * Usage: See ex_tx_compl{}.
729 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
{
#if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	unsigned long flags = 0;
#endif
	xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv(
	                (xge_hal_fifo_txd_t *)dtr);
	int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags;
#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif

	/* alloc_frags > max_frags means this reservation spans multiple
	 * linked TxDLs (made by xge_hal_fifo_dtr_reserve_many()) */
	if (txdl_priv->alloc_frags > max_frags) {
	    xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
	                    txdl_priv->dang_txdl;
	    int dang_frags = txdl_priv->dang_frags;
	    int alloc_frags = txdl_priv->alloc_frags;
	    /* clear the bookkeeping before walking the chains */
	    txdl_priv->dang_txdl = NULL;
	    txdl_priv->dang_frags = 0;
	    txdl_priv->alloc_frags = 0;
	    /* dtrh must have a linked list of dtrh */
	    xge_assert(txdl_priv->next_txdl_priv);

	    /* free any dangling dtrh first */
	    if (dang_txdp) {
	        xge_debug_fifo(XGE_TRACE,
	            "freeing dangled dtrh %p for %d fragments",
	            dang_txdp, dang_frags);
	        __hal_fifo_txdl_free_many(channelh, dang_txdp,
	            max_frags, dang_frags);
	    }

	    /* now free the reserved dtrh list */
	    xge_debug_fifo(XGE_TRACE,
	            "freeing dtrh %p list of %d fragments", dtr,
	            alloc_frags);
	    __hal_fifo_txdl_free_many(channelh,
	            (xge_hal_fifo_txd_t *)dtr, max_frags,
	            alloc_frags);
	}
	else
	    /* single-TxDL reservation: return it directly */
	    __hal_channel_dtr_free(channelh, dtr);

	/* credit the bytes carried by this TxDL toward channel polling */
	((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent;

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	__hal_fifo_txdl_priv(dtr)->allocated = 0;
#endif

#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif
}
790
791
792/**
793 * xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
794 * in fifo descriptor.
795 * @channelh: Channel handle.
796 * @dtrh: Descriptor handle.
797 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
798 *            (of buffers).
799 * @vaddr: Virtual address of the data buffer.
800 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
801 * @size: Size of the data buffer (in bytes).
802 * @misaligned_size: Size (in bytes) of the misaligned portion of the
803 * data buffer. Calculated by the caller, based on the platform/OS/other
804 * specific criteria, which is outside of HAL's domain. See notes below.
805 *
806 * This API is part of the transmit descriptor preparation for posting
807 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
808 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
809 * All three APIs fill in the fields of the fifo descriptor,
810 * in accordance with the Xframe specification.
811 * On the PCI-X based systems aligning transmit data typically provides better
812 * transmit performance. The typical alignment granularity: L2 cacheline size.
813 * However, HAL does not make assumptions in terms of the alignment granularity;
814 * this is specified via additional @misaligned_size parameter described above.
815 * Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(),
816 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
817 * provides a separate xge_hal_check_alignment() API sufficient to cover
818 * most (but not all) possible alignment criteria.
819 * If the buffer appears to be aligned, the ULD calls
820 * xge_hal_fifo_dtr_buffer_set().
821 * Otherwise, ULD calls xge_hal_fifo_dtr_buffer_set_aligned().
822 *
823 * Note; This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
824 * addition to filling in the specified descriptor it aligns transmit data on
825 * the specified boundary.
826 * Note: Decision on whether to align or not to align a given contiguous
827 * transmit buffer is outside of HAL's domain. To this end ULD can use any
828 * programmable criteria, which can help to 1) boost transmit performance,
829 * and/or 2) provide a workaround for PCI bridge bugs, if any.
830 *
831 * See also: xge_hal_fifo_dtr_buffer_set(),
832 * xge_hal_check_alignment().
833 *
834 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
835 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
836 */
837__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
838xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
839	        xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
840	        dma_addr_t dma_pointer, int size, int misaligned_size)
841{
842	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
843	xge_hal_fifo_txdl_priv_t *txdl_priv;
844	xge_hal_fifo_txd_t *txdp;
845	int remaining_size;
846	ptrdiff_t prev_boff;
847
848	txdl_priv = __hal_fifo_txdl_priv(dtrh);
849	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
850
851	if (frag_idx != 0) {
852	    txdp->control_1 = txdp->control_2 = 0;
853	}
854
855	/* On some systems buffer size could be zero.
856	 * It is the responsibility of ULD and *not HAL* to
857	 * detect it and skip it. */
858	xge_assert(size > 0);
859	xge_assert(frag_idx < txdl_priv->alloc_frags);
860	xge_assert(misaligned_size != 0 &&
861	        misaligned_size <= fifo->config->alignment_size);
862
863	remaining_size = size - misaligned_size;
864	xge_assert(remaining_size >= 0);
865
866	xge_os_memcpy((char*)txdl_priv->align_vaddr_start,
867	                  vaddr, misaligned_size);
868
869	    if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
870	        return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
871	    }
872
873	/* setup new buffer */
874	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
875	txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
876	txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size);
877	txdl_priv->bytes_sent += misaligned_size;
878	fifo->channel.stats.total_buffers++;
879	txdl_priv->frags++;
880	txdl_priv->align_used_frags++;
881	txdl_priv->align_vaddr_start += fifo->config->alignment_size;
882	    txdl_priv->align_dma_offset = 0;
883
884#if defined(XGE_OS_DMA_REQUIRES_SYNC)
885	/* sync new buffer */
886	xge_os_dma_sync(fifo->channel.pdev,
887	          txdl_priv->align_dma_handle,
888	          txdp->buffer_pointer,
889	          0,
890	          misaligned_size,
891	          XGE_OS_DMA_DIR_TODEVICE);
892#endif
893
894	if (remaining_size) {
895	    xge_assert(frag_idx < txdl_priv->alloc_frags);
896	    txdp++;
897	    txdp->buffer_pointer = (u64)dma_pointer +
898	                misaligned_size;
899	    txdp->control_1 =
900	        XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
901	    txdl_priv->bytes_sent += remaining_size;
902	    txdp->control_2 = 0;
903	    fifo->channel.stats.total_buffers++;
904	    txdl_priv->frags++;
905	}
906
907	return XGE_HAL_OK;
908}
909
910/**
911 * xge_hal_fifo_dtr_buffer_append - Append the contents of virtually
912 * contiguous data buffer to a single physically contiguous buffer.
913 * @channelh: Channel handle.
914 * @dtrh: Descriptor handle.
915 * @vaddr: Virtual address of the data buffer.
916 * @size: Size of the data buffer (in bytes).
917 *
918 * This API is part of the transmit descriptor preparation for posting
919 * (via xge_hal_fifo_dtr_post()).
920 * The main difference of this API wrt to the APIs
921 * xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
922 * contents of virtually contiguous data buffers received from
923 * upper layer into a single physically contiguous data buffer and the
924 * device will do a DMA from this buffer.
925 *
926 * See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
927 * xge_hal_fifo_dtr_buffer_set_aligned().
928 */
929__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
930xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
931	    void *vaddr, int size)
932{
933	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
934	xge_hal_fifo_txdl_priv_t *txdl_priv;
935	ptrdiff_t used;
936
937	xge_assert(size > 0);
938
939	txdl_priv = __hal_fifo_txdl_priv(dtrh);
940
941	used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
942	used += txdl_priv->align_dma_offset;
943	if (used + (unsigned int)size > (unsigned int)fifo->align_size)
944	        return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
945
946	xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
947	    txdl_priv->align_dma_offset, vaddr, size);
948
949	fifo->channel.stats.copied_frags++;
950
951	txdl_priv->align_dma_offset += size;
952	return XGE_HAL_OK;
953}
954
955/**
956 * xge_hal_fifo_dtr_buffer_finalize - Prepares a descriptor that contains the
957 * single physically contiguous buffer.
958 *
959 * @channelh: Channel handle.
960 * @dtrh: Descriptor handle.
961 * @frag_idx: Index of the data buffer in the Txdl list.
962 *
963 * This API in conjuction with xge_hal_fifo_dtr_buffer_append() prepares
964 * a descriptor that consists of a single physically contiguous buffer
965 * which inturn contains the contents of one or more virtually contiguous
966 * buffers received from the upper layer.
967 *
968 * See Also: xge_hal_fifo_dtr_buffer_append().
969*/
970__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
971xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
972	    int frag_idx)
973{
974	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
975	xge_hal_fifo_txdl_priv_t *txdl_priv;
976	xge_hal_fifo_txd_t *txdp;
977	ptrdiff_t prev_boff;
978
979	xge_assert(frag_idx < fifo->config->max_frags);
980
981	txdl_priv = __hal_fifo_txdl_priv(dtrh);
982	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
983
984	if (frag_idx != 0) {
985	    txdp->control_1 = txdp->control_2 = 0;
986	}
987
988	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
989	txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
990	txdp->control_1 |=
991	            XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
992	txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
993	fifo->channel.stats.total_buffers++;
994	fifo->channel.stats.copied_buffers++;
995	txdl_priv->frags++;
996	txdl_priv->align_used_frags++;
997
998#if defined(XGE_OS_DMA_REQUIRES_SYNC)
999	/* sync pre-mapped buffer */
1000	xge_os_dma_sync(fifo->channel.pdev,
1001	          txdl_priv->align_dma_handle,
1002	          txdp->buffer_pointer,
1003	          0,
1004	          txdl_priv->align_dma_offset,
1005	          XGE_OS_DMA_DIR_TODEVICE);
1006#endif
1007
1008	/* increment vaddr_start for the next buffer_append() iteration */
1009	txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
1010	    txdl_priv->align_dma_offset = 0;
1011}
1012
1013/**
1014 * xge_hal_fifo_dtr_buffer_set - Set transmit buffer pointer in the
1015 * descriptor.
1016 * @channelh: Channel handle.
1017 * @dtrh: Descriptor handle.
1018 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1019 *            (of buffers).
1020 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1021 * @size: Size of the data buffer (in bytes).
1022 *
1023 * This API is part of the preparation of the transmit descriptor for posting
1024 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
1025 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
1026 * All three APIs fill in the fields of the fifo descriptor,
1027 * in accordance with the Xframe specification.
1028 *
1029 * See also: xge_hal_fifo_dtr_buffer_set_aligned(),
1030 * xge_hal_check_alignment().
1031 *
1032 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
1033 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
1034 * Prepare transmit descriptor for transmission (via
1035 * xge_hal_fifo_dtr_post()).
1036 * See also: xge_hal_fifo_dtr_vlan_set().
1037 * Note: Compare with xge_hal_fifo_dtr_buffer_set_aligned().
1038 *
1039 * Usage: See ex_xmit{}.
1040 */
1041__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
1042xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1043	    int frag_idx, dma_addr_t dma_pointer, int size)
1044{
1045	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
1046	xge_hal_fifo_txdl_priv_t *txdl_priv;
1047	xge_hal_fifo_txd_t *txdp;
1048
1049	txdl_priv = __hal_fifo_txdl_priv(dtrh);
1050	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
1051
1052	if (frag_idx != 0) {
1053	    txdp->control_1 = txdp->control_2 = 0;
1054	}
1055
1056	/* Note:
1057	 * it is the responsibility of upper layers and not HAL
1058	 * detect it and skip zero-size fragment
1059	 */
1060	xge_assert(size > 0);
1061	xge_assert(frag_idx < txdl_priv->alloc_frags);
1062
1063	txdp->buffer_pointer = (u64)dma_pointer;
1064	txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(size);
1065	txdl_priv->bytes_sent += size;
1066	fifo->channel.stats.total_buffers++;
1067	txdl_priv->frags++;
1068}
1069
1070/**
1071 * xge_hal_fifo_dtr_mss_set - Set MSS.
1072 * @dtrh: Descriptor handle.
1073 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
1074 *       ULD, which in turn inserts the MSS into the @dtrh.
1075 *
1076 * This API is part of the preparation of the transmit descriptor for posting
1077 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
1078 * xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
1079 * and xge_hal_fifo_dtr_cksum_set_bits().
1080 * All these APIs fill in the fields of the fifo descriptor,
1081 * in accordance with the Xframe specification.
1082 *
1083 * See also: xge_hal_fifo_dtr_reserve(),
1084 * xge_hal_fifo_dtr_post(), xge_hal_fifo_dtr_vlan_set().
1085 * Usage: See ex_xmit{}.
1086 */
1087__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
1088xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss)
1089{
1090	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
1091
1092	txdp->control_1 |= XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO);
1093	txdp->control_1 |= XGE_HAL_TXD_TCP_LSO_MSS(mss);
1094}
1095
1096/**
1097 * xge_hal_fifo_dtr_cksum_set_bits - Offload checksum.
1098 * @dtrh: Descriptor handle.
1099 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
1100 *              and/or TCP and/or UDP.
1101 *
1102 * Ask Xframe to calculate IPv4 & transport checksums for _this_ transmit
1103 * descriptor.
1104 * This API is part of the preparation of the transmit descriptor for posting
1105 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
1106 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
1107 * and xge_hal_fifo_dtr_buffer_set().
1108 * All these APIs fill in the fields of the fifo descriptor,
1109 * in accordance with the Xframe specification.
1110 *
1111 * See also: xge_hal_fifo_dtr_reserve(),
1112 * xge_hal_fifo_dtr_post(), XGE_HAL_TXD_TX_CKO_IPV4_EN,
1113 * XGE_HAL_TXD_TX_CKO_TCP_EN.
1114 * Usage: See ex_xmit{}.
1115 */
1116__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
1117xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits)
1118{
1119	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
1120
1121	txdp->control_2 |= cksum_bits;
1122}
1123
1124
1125/**
1126 * xge_hal_fifo_dtr_vlan_set - Set VLAN tag.
1127 * @dtrh: Descriptor handle.
1128 * @vlan_tag: 16bit VLAN tag.
1129 *
1130 * Insert VLAN tag into specified transmit descriptor.
1131 * The actual insertion of the tag into outgoing frame is done by the hardware.
1132 * See also: xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_mss_set().
1133 */
1134__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
1135xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag)
1136{
1137	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
1138
1139	txdp->control_2 |= XGE_HAL_TXD_VLAN_ENABLE;
1140	txdp->control_2 |= XGE_HAL_TXD_VLAN_TAG(vlan_tag);
1141}
1142
1143/**
1144 * xge_hal_fifo_is_next_dtr_completed - Checks if the next dtr is completed
1145 * @channelh: Channel handle.
1146 */
1147__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
1148xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh)
1149{
1150	xge_hal_fifo_txd_t *txdp;
1151	xge_hal_dtr_h dtrh;
1152
1153	__hal_channel_dtr_try_complete(channelh, &dtrh);
1154	txdp = (xge_hal_fifo_txd_t *)dtrh;
1155	if (txdp == NULL) {
1156	    return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1157	}
1158
1159	/* check whether host owns it */
1160	if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {
1161	    xge_assert(txdp->host_control!=0);
1162	    return XGE_HAL_OK;
1163	}
1164
1165	/* no more completions */
1166	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1167}
1168