vxgehal-fifo.c revision 330897
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright(c) 2002-2011 Exar Corp.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification are permitted provided the following conditions are met:
9 *
10 *    1. Redistributions of source code must retain the above copyright notice,
11 *       this list of conditions and the following disclaimer.
12 *
13 *    2. Redistributions in binary form must reproduce the above copyright
14 *       notice, this list of conditions and the following disclaimer in the
15 *       documentation and/or other materials provided with the distribution.
16 *
17 *    3. Neither the name of the Exar Corporation nor the names of its
18 *       contributors may be used to endorse or promote products derived from
19 *       this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33/*$FreeBSD: stable/11/sys/dev/vxge/vxgehal/vxgehal-fifo.c 330897 2018-03-14 03:19:51Z eadler $*/
34
35#include <dev/vxge/vxgehal/vxgehal.h>
36
37/*
38 * __hal_fifo_mempool_item_alloc - Allocate List blocks for TxD list callback
39 * @mempoolh: Handle to memory pool
40 * @memblock: Address of this memory block
41 * @memblock_index: Index of this memory block
42 * @dma_object: dma object for this block
43 * @item: Pointer to this item
44 * @index: Index of this item in memory block
45 * @is_last: If this is last item in the block
46 * @userdata: Specific data of user
47 *
48 * This function is callback passed to __hal_mempool_create to create memory
49 * pool for TxD list
50 */
static vxge_hal_status_e
__hal_fifo_mempool_item_alloc(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
	u32 i;
	void *block_priv;
	u32 memblock_item_idx;

	/* userdata carries the fifo handed to __hal_mempool_create() */
	__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

	vxge_assert(fifo != NULL);
	vxge_assert(item);

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("==> %s:%s:%d",
		    __FILE__, __func__, __LINE__);

		vxge_hal_trace_log_pool(
		    "mempoolh = 0x"VXGE_OS_STXFMT", "
		    "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, "
		    "dma_object = 0x"VXGE_OS_STXFMT", \
		    item = 0x"VXGE_OS_STXFMT", "
		    "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT,
		    (ptr_t) mempoolh, (ptr_t) memblock, memblock_index,
		    (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
		    (ptr_t) userdata);
	}
#endif

	/*
	 * Private area reserved for this memory block; it holds the
	 * back-to-back per-TxDL ULD and HAL private structures.
	 */
	block_priv = __hal_mempool_item_priv((vxge_hal_mempool_t *) mempoolh,
	    memblock_index, item, &memblock_item_idx);

	vxge_assert(block_priv != NULL);

	/* Wire up every TxDL that fits into this memory block */
	for (i = 0; i < fifo->txdl_per_memblock; i++) {

		__hal_fifo_txdl_priv_t *txdl_priv;
		vxge_hal_fifo_txd_t *txdp;

		/* Global descriptor index across all memory blocks */
		int dtr_index = item_index * fifo->txdl_per_memblock + i;

		txdp = (vxge_hal_fifo_txd_t *) ((void *)
		    ((char *) item + i * fifo->txdl_size));

		/* host_control maps a TxD back to its descriptor index */
		txdp->host_control = dtr_index;

		fifo->channel.dtr_arr[dtr_index].dtr = txdp;

		/* ULD private area first ... */
		fifo->channel.dtr_arr[dtr_index].uld_priv = (void *)
		    ((char *) block_priv + fifo->txdl_priv_size * i);

		/* ... HAL private area follows, per_txdl_space bytes after */
		fifo->channel.dtr_arr[dtr_index].hal_priv = (void *)
		    (((char *) fifo->channel.dtr_arr[dtr_index].uld_priv) +
		    fifo->per_txdl_space);

		txdl_priv = (__hal_fifo_txdl_priv_t *)
		    fifo->channel.dtr_arr[dtr_index].hal_priv;

		vxge_assert(txdl_priv);

		/* pre-format HAL's TxDL's private */
		/* LINTED */
		txdl_priv->dma_offset = (char *) txdp - (char *) memblock;
		txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
		txdl_priv->dma_handle = dma_object->handle;
		txdl_priv->memblock = memblock;
		txdl_priv->first_txdp = (vxge_hal_fifo_txd_t *) txdp;
		txdl_priv->next_txdl_priv = NULL;
		txdl_priv->dang_txdl = NULL;
		txdl_priv->dang_frags = 0;
		txdl_priv->alloc_frags = 0;

#if defined(VXGE_DEBUG_ASSERT)
		txdl_priv->dma_object = dma_object;
#endif

#if defined(VXGE_HAL_ALIGN_XMIT)
		txdl_priv->align_vaddr = NULL;
		txdl_priv->align_dma_addr = (dma_addr_t) 0;

#ifndef	VXGE_HAL_ALIGN_XMIT_ALLOC_RT
		/* CONSTCOND */
		if (TRUE) {
			vxge_hal_status_e status;

			/* Pre-allocate the alignment bounce buffer up front */
			if (fifo->config->alignment_size) {
				status = __hal_fifo_txdl_align_alloc_map(fifo,
				    txdp);
				if (status != VXGE_HAL_OK) {

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
					__hal_device_t *hldev;
					hldev = (__hal_device_t *)
					    fifo->channel.devh;

					vxge_hal_err_log_pool(
					    "align buffer[%d] %d bytes, \
					    status %d",
					    (item_index * fifo->txdl_per_memblock + i),
					    fifo->align_size, status);

					vxge_hal_trace_log_pool(
					    "<== %s:%s:%d  Result: 0",
					    __FILE__, __func__, __LINE__);
#endif
					return (status);
				}
			}
		}
#endif
#endif
		/* Let the ULD initialize its per-TxDL private area */
		if (fifo->txdl_init) {
			fifo->txdl_init(fifo->channel.vph,
			    (vxge_hal_txdl_h) txdp,
			    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp),
			    VXGE_HAL_FIFO_TXDL_INDEX(txdp),
			    fifo->channel.userdata, VXGE_HAL_OPEN_NORMAL);
		}
	}

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
		    __FILE__, __func__, __LINE__);
	}
#endif

	return (VXGE_HAL_OK);
}
192
193
194/*
195 * __hal_fifo_mempool_item_free - Free List blocks for TxD list callback
196 * @mempoolh: Handle to memory pool
197 * @memblock: Address of this memory block
198 * @memblock_index: Index of this memory block
199 * @dma_object: dma object for this block
200 * @item: Pointer to this item
201 * @index: Index of this item in memory block
202 * @is_last: If this is last item in the block
203 * @userdata: Specific data of user
204 *
205 * This function is callback passed to __hal_mempool_free to destroy memory
206 * pool for TxD list
207 */
static vxge_hal_status_e
__hal_fifo_mempool_item_free(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
	/* Per-item destructor callback invoked by the mempool on teardown */
	vxge_assert(item);

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

		vxge_assert(fifo != NULL);

		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("==> %s:%s:%d",
		    __FILE__, __func__, __LINE__);

		vxge_hal_trace_log_pool("mempoolh = 0x"VXGE_OS_STXFMT", "
		    "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, "
		    "dma_object = 0x"VXGE_OS_STXFMT", \
		    item = 0x"VXGE_OS_STXFMT", "
		    "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT,
		    (ptr_t) mempoolh, (ptr_t) memblock, memblock_index,
		    (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
		    (ptr_t) userdata);
	}
#endif

#if defined(VXGE_HAL_ALIGN_XMIT)
	{
		__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

		vxge_assert(fifo != NULL);
		/* Release the alignment bounce buffer of every TxDL here */
		if (fifo->config->alignment_size) {

			int i;
			vxge_hal_fifo_txd_t *txdp;

			for (i = 0; i < fifo->txdl_per_memblock; i++) {
				txdp = (void *)
				    ((char *) item + i * fifo->txdl_size);
				__hal_fifo_txdl_align_free_unmap(fifo, txdp);
			}
		}
	}
#endif

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

		vxge_assert(fifo != NULL);

		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
		    __FILE__, __func__, __LINE__);
	}
#endif

	return (VXGE_HAL_OK);
}
277
278/*
279 * __hal_fifo_create - Create a FIFO
280 * @vpath_handle: Handle returned by virtual path open
281 * @attr: FIFO configuration parameters structure
282 *
283 * This function creates FIFO and initializes it.
284 *
285 */
286vxge_hal_status_e
287__hal_fifo_create(
288    vxge_hal_vpath_h vpath_handle,
289    vxge_hal_fifo_attr_t *attr)
290{
291	vxge_hal_status_e status;
292	__hal_fifo_t *fifo;
293	vxge_hal_fifo_config_t *config;
294	u32 txdl_size, memblock_size, txdl_per_memblock;
295	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
296	__hal_device_t *hldev;
297
298	vxge_assert((vpath_handle != NULL) && (attr != NULL));
299
300	hldev = (__hal_device_t *) vp->vpath->hldev;
301
302	vxge_hal_trace_log_fifo("==> %s:%s:%d",
303	    __FILE__, __func__, __LINE__);
304
305	vxge_hal_trace_log_fifo(
306	    "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
307	    (ptr_t) vpath_handle, (ptr_t) attr);
308
309	if ((vpath_handle == NULL) || (attr == NULL)) {
310		vxge_hal_err_log_fifo("null pointer passed == > %s : %d",
311		    __func__, __LINE__);
312		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
313		    __FILE__, __func__, __LINE__,
314		    VXGE_HAL_ERR_INVALID_HANDLE);
315		return (VXGE_HAL_ERR_INVALID_HANDLE);
316	}
317
318	config =
319	    &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].fifo;
320
321	txdl_size = config->max_frags * sizeof(vxge_hal_fifo_txd_t);
322
323	if (txdl_size <= VXGE_OS_HOST_PAGE_SIZE)
324		memblock_size = VXGE_OS_HOST_PAGE_SIZE;
325	else
326		memblock_size = txdl_size;
327
328	txdl_per_memblock = memblock_size / txdl_size;
329
330	config->fifo_length = ((config->fifo_length + txdl_per_memblock - 1) /
331	    txdl_per_memblock) * txdl_per_memblock;
332
333	fifo = (__hal_fifo_t *) vxge_hal_channel_allocate(
334	    (vxge_hal_device_h) vp->vpath->hldev,
335	    vpath_handle,
336	    VXGE_HAL_CHANNEL_TYPE_FIFO,
337	    config->fifo_length,
338	    attr->per_txdl_space,
339	    attr->userdata);
340
341	if (fifo == NULL) {
342		vxge_hal_err_log_fifo("Memory allocation failed == > %s : %d",
343		    __func__, __LINE__);
344		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
345		    __FILE__, __func__, __LINE__,
346		    VXGE_HAL_ERR_OUT_OF_MEMORY);
347		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
348	}
349
350	vp->vpath->fifoh = fifo;
351
352	fifo->stats = &vp->vpath->sw_stats->fifo_stats;
353
354	fifo->config = config;
355
356	fifo->memblock_size = memblock_size;
357
358#if defined(VXGE_HAL_TX_MULTI_POST)
359	vxge_os_spin_lock_init(&fifo->channel.post_lock,
360	    vp->vpath->hldev->header.pdev);
361#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
362	vxge_os_spin_lock_init_irq(&fifo->channel.post_lock,
363	    vp->vpath->hldev->header.irqh);
364#endif
365
366	fifo->align_size =
367	    fifo->config->alignment_size * fifo->config->max_aligned_frags;
368
369	/* apply "interrupts per txdl" attribute */
370	fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_UTILZ;
371	if (fifo->config->intr) {
372		fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_PER_LIST;
373	}
374
375	fifo->no_snoop_bits = config->no_snoop_bits;
376
377	/*
378	 * FIFO memory management strategy:
379	 *
380	 * TxDL splitted into three independent parts:
381	 *	- set of TxD's
382	 *	- TxD HAL private part
383	 *	- upper layer private part
384	 *
385	 * Adaptative memory allocation used. i.e. Memory allocated on
386	 * demand with the size which will fit into one memory block.
387	 * One memory block may contain more than one TxDL. In simple case
388	 * memory block size can be equal to CPU page size. On more
389	 * sophisticated OS's memory block can be contiguous across
390	 * several pages.
391	 *
392	 * During "reserve" operations more memory can be allocated on demand
393	 * for example due to FIFO full condition.
394	 *
395	 * Pool of memory memblocks never shrinks except __hal_fifo_close
396	 * routine which will essentially stop channel and free the resources.
397	 */
398
399	/* TxDL common private size == TxDL private + ULD private */
400	fifo->txdl_priv_size =
401	    sizeof(__hal_fifo_txdl_priv_t) + attr->per_txdl_space;
402	fifo->txdl_priv_size =
403	    ((fifo->txdl_priv_size + __vxge_os_cacheline_size - 1) /
404	    __vxge_os_cacheline_size) * __vxge_os_cacheline_size;
405
406	fifo->per_txdl_space = attr->per_txdl_space;
407
408	/* recompute txdl size to be cacheline aligned */
409	fifo->txdl_size = txdl_size;
410	fifo->txdl_per_memblock = txdl_per_memblock;
411
412	/*
413	 * since txdl_init() callback will be called from item_alloc(),
414	 * the same way channels userdata might be used prior to
415	 * channel_initialize()
416	 */
417	fifo->txdl_init = attr->txdl_init;
418	fifo->txdl_term = attr->txdl_term;
419	fifo->callback = attr->callback;
420
421	if (fifo->txdl_per_memblock == 0) {
422		__hal_fifo_delete(vpath_handle);
423		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
424		    __FILE__, __func__, __LINE__,
425		    VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
426		return (VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
427	}
428
429	/* calculate actual TxDL block private size */
430	fifo->txdlblock_priv_size =
431	    fifo->txdl_priv_size * fifo->txdl_per_memblock;
432
433	fifo->mempool =
434	    vxge_hal_mempool_create((vxge_hal_device_h) vp->vpath->hldev,
435	    fifo->memblock_size,
436	    fifo->memblock_size,
437	    fifo->txdlblock_priv_size,
438	    fifo->config->fifo_length /
439	    fifo->txdl_per_memblock,
440	    fifo->config->fifo_length /
441	    fifo->txdl_per_memblock,
442	    __hal_fifo_mempool_item_alloc,
443	    __hal_fifo_mempool_item_free,
444	    fifo);
445
446	if (fifo->mempool == NULL) {
447		__hal_fifo_delete(vpath_handle);
448		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
449		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
450		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
451	}
452
453	status = vxge_hal_channel_initialize(&fifo->channel);
454	if (status != VXGE_HAL_OK) {
455		__hal_fifo_delete(vpath_handle);
456		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
457		    __FILE__, __func__, __LINE__, status);
458		return (status);
459	}
460
461	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
462	    __FILE__, __func__, __LINE__);
463	return (VXGE_HAL_OK);
464}
465
466/*
467 * __hal_fifo_abort - Returns the TxD
468 * @fifoh: Fifo to be reset
469 * @reopen: See  vxge_hal_reopen_e {}.
470 *
471 * This function terminates the TxDs of fifo
472 */
void
__hal_fifo_abort(
    vxge_hal_fifo_h fifoh,
    vxge_hal_reopen_e reopen)
{
	u32 i = 0;
	__hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;
	__hal_device_t *hldev;
	vxge_hal_txdl_h txdlh;

	vxge_assert(fifoh != NULL);

	hldev = (__hal_device_t *) fifo->channel.devh;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT", reopen = %d",
	    (ptr_t) fifoh, reopen);

	/*
	 * First pass: run the ULD terminate callback (state FREED) on
	 * every descriptor that is NOT currently posted.
	 */
	if (fifo->txdl_term) {
		__hal_channel_for_each_dtr(&fifo->channel, txdlh, i) {
			if (!__hal_channel_is_posted_dtr(&fifo->channel,
			    i)) {
				fifo->txdl_term(fifo->channel.vph, txdlh,
				    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
				    VXGE_HAL_TXDL_STATE_FREED,
				    fifo->channel.userdata,
				    reopen);
			}
		}
	}

	/*
	 * Second pass: drain the posted descriptors one at a time --
	 * complete, terminate (state POSTED), and free each until the
	 * channel has none left.
	 */
	for (;;) {
		__hal_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		__hal_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(fifo->channel.vph, txdlh,
			    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
			    VXGE_HAL_TXDL_STATE_POSTED,
			    fifo->channel.userdata,
			    reopen);
		}

		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
529
530/*
531 * __hal_fifo_reset - Resets the fifo
532 * @fifoh: Fifo to be reset
533 *
534 * This function resets the fifo during vpath reset operation
535 */
536vxge_hal_status_e
537__hal_fifo_reset(
538    vxge_hal_fifo_h fifoh)
539{
540	vxge_hal_status_e status;
541	__hal_device_t *hldev;
542	__hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;
543
544	vxge_assert(fifoh != NULL);
545
546	hldev = (__hal_device_t *) fifo->channel.devh;
547
548	vxge_hal_trace_log_fifo("==> %s:%s:%d",
549	    __FILE__, __func__, __LINE__);
550
551	vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT,
552	    (ptr_t) fifoh);
553
554	__hal_fifo_abort(fifoh, VXGE_HAL_RESET_ONLY);
555
556	status = __hal_channel_reset(&fifo->channel);
557
558	if (status != VXGE_HAL_OK) {
559
560		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
561		    __FILE__, __func__, __LINE__, status);
562		return (status);
563
564	}
565
566	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
567	    __FILE__, __func__, __LINE__);
568
569	return (VXGE_HAL_OK);
570}
571
572/*
573 * vxge_hal_fifo_doorbell_reset - Resets the doorbell fifo
 * @vpath_handle: Vpath Handle
575 *
576 * This function resets the doorbell fifo during if fifo error occurs
577 */
578vxge_hal_status_e
579vxge_hal_fifo_doorbell_reset(
580    vxge_hal_vpath_h vpath_handle)
581{
582	u32 i;
583	vxge_hal_txdl_h txdlh;
584	__hal_fifo_t *fifo;
585	__hal_virtualpath_t *vpath;
586	__hal_fifo_txdl_priv_t *txdl_priv;
587	__hal_device_t *hldev;
588	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
589	vxge_hal_status_e status = VXGE_HAL_OK;
590
591	vxge_assert(vpath_handle != NULL);
592
593	hldev = vp->vpath->hldev;
594
595	vxge_hal_trace_log_fifo("==> %s:%s:%d",
596	    __FILE__, __func__, __LINE__);
597
598	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
599	    (ptr_t) vpath_handle);
600
601	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
602
603	vpath = ((__hal_vpath_handle_t *) fifo->channel.vph)->vpath;
604
605	status = __hal_non_offload_db_reset(fifo->channel.vph);
606
607	if (status != VXGE_HAL_OK) {
608		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
609		    __FILE__, __func__, __LINE__);
610		return (status);
611	}
612
613	__hal_channel_for_each_posted_dtr(&fifo->channel, txdlh, i) {
614
615		txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
616
617		__hal_non_offload_db_post(fifo->channel.vph,
618		    ((VXGE_HAL_FIFO_TXD_NO_BW_LIMIT_GET(
619		    ((vxge_hal_fifo_txd_t *) txdlh)->control_1)) ?
620		    (((u64) txdl_priv->dma_addr) | 0x1) :
621		    (u64) txdl_priv->dma_addr),
622		    txdl_priv->frags - 1,
623		    vpath->vp_config->fifo.no_snoop_bits);
624	}
625
626	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
627	    __FILE__, __func__, __LINE__);
628
629	return (status);
630}
631
632/*
633 * __hal_fifo_delete - Removes the FIFO
634 * @vpath_handle: Virtual path handle to which this queue belongs
635 *
636 * This function freeup the memory pool and removes the FIFO
637 */
void
__hal_fifo_delete(
    vxge_hal_vpath_h vpath_handle)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;

	vxge_assert(vpath_handle != NULL);

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* Abort outstanding TxDLs before destroying their backing pool */
	if (fifo->mempool) {
		__hal_fifo_abort(vp->vpath->fifoh, VXGE_HAL_OPEN_NORMAL);
		vxge_hal_mempool_destroy(fifo->mempool);
	}

	vxge_hal_channel_terminate(&fifo->channel);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock_destroy(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.pdev);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	/*
	 * NOTE(review): __hal_fifo_create() initializes this lock with
	 * header.irqh, but it is destroyed here with header.pdev --
	 * verify the IRQ destroy macro really expects pdev.
	 */
	vxge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.pdev);
#endif

	vxge_hal_channel_free(&fifo->channel);

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
680
681#if defined(VXGE_HAL_ALIGN_XMIT)
682/*
683 * __hal_fifo_txdl_align_free_unmap - Unmap the alignement buffers
684 * @fifo: Fifo
685 * @txdp: txdl
686 *
687 * This function unmaps dma memory for the alignment buffers
688 */
689void
690__hal_fifo_txdl_align_free_unmap(
691    __hal_fifo_t *fifo,
692    vxge_hal_fifo_txd_t *txdp)
693{
694	__hal_device_t *hldev;
695	__hal_fifo_txdl_priv_t *txdl_priv;
696
697	vxge_assert((fifo != NULL) && (txdp != NULL));
698
699	hldev = (__hal_device_t *) fifo->channel.devh;
700
701	vxge_hal_trace_log_fifo("==> %s:%s:%d",
702	    __FILE__, __func__, __LINE__);
703
704	vxge_hal_trace_log_fifo(
705	    "fifo = 0x"VXGE_OS_STXFMT",  txdp = 0x"VXGE_OS_STXFMT,
706	    (ptr_t) fifo, (ptr_t) txdp);
707
708	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
709
710	if (txdl_priv->align_vaddr != NULL) {
711		__hal_blockpool_free(fifo->channel.devh,
712		    txdl_priv->align_vaddr,
713		    fifo->align_size,
714		    &txdl_priv->align_dma_addr,
715		    &txdl_priv->align_dma_handle,
716		    &txdl_priv->align_dma_acch);
717
718		txdl_priv->align_vaddr = NULL;
719		txdl_priv->align_dma_addr = 0;
720	}
721
722	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
723	    __FILE__, __func__, __LINE__);
724}
725
726/*
727 * __hal_fifo_txdl_align_alloc_map - Maps the alignement buffers
728 * @fifo: Fifo
729 * @txdp: txdl
730 *
731 * This function maps dma memory for the alignment buffers
732 */
733vxge_hal_status_e
734__hal_fifo_txdl_align_alloc_map(
735    __hal_fifo_t *fifo,
736    vxge_hal_fifo_txd_t *txdp)
737{
738	__hal_device_t *hldev;
739	__hal_fifo_txdl_priv_t *txdl_priv;
740
741	vxge_assert((fifo != NULL) && (txdp != NULL));
742
743	hldev = (__hal_device_t *) fifo->channel.devh;
744
745	vxge_hal_trace_log_fifo("==> %s:%s:%d",
746	    __FILE__, __func__, __LINE__);
747
748	vxge_hal_trace_log_fifo(
749	    "fifo = 0x"VXGE_OS_STXFMT",  txdp = 0x"VXGE_OS_STXFMT,
750	    (ptr_t) fifo, (ptr_t) txdp);
751
752	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
753
754	/* allocate alignment DMA-buffer */
755	txdl_priv->align_vaddr =
756	    (u8 *) __hal_blockpool_malloc(fifo->channel.devh,
757	    fifo->align_size,
758	    &txdl_priv->align_dma_addr,
759	    &txdl_priv->align_dma_handle,
760	    &txdl_priv->align_dma_acch);
761	if (txdl_priv->align_vaddr == NULL) {
762		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
763		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
764		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
765	}
766
767	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
768	    __FILE__, __func__, __LINE__);
769	return (VXGE_HAL_OK);
770}
771#endif
772/*
773 * vxge_hal_fifo_free_txdl_count_get - returns the number of txdls
774 *                               available in the fifo
775 * @vpath_handle: Virtual path handle.
776 */
777u32
778vxge_hal_fifo_free_txdl_count_get(vxge_hal_vpath_h vpath_handle)
779{
780	return __hal_channel_free_dtr_count(&((__hal_fifo_t *)
781	    ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh)->channel);
782}
783
784/*
785 * vxge_hal_fifo_txdl_private_get - Retrieve per-descriptor private data.
786 * @vpath_handle: Virtual path handle.
787 * @txdlh: Descriptor handle.
788 *
789 * Retrieve per-descriptor private data.
790 * Note that ULD requests per-descriptor space via
791 * vxge_hal_fifo_attr_t passed to
792 * vxge_hal_vpath_open().
793 *
794 * Returns: private ULD data associated with the descriptor.
795 */
796void *
797vxge_hal_fifo_txdl_private_get(
798    vxge_hal_vpath_h vpath_handle,
799    vxge_hal_txdl_h txdlh)
800{
801	return (VXGE_HAL_FIFO_ULD_PRIV(((__hal_fifo_t *)
802	    ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh), txdlh));
803}
804
805/*
806 * vxge_hal_fifo_txdl_reserve - Reserve fifo descriptor.
 * @vpath_handle: virtual path handle.
808 * @txdlh: Reserved descriptor. On success HAL fills this "out" parameter
809 *	with a valid handle.
810 * @txdl_priv: Buffer to return the pointer to per txdl space
811 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by upper layer driver (ULD)
814 * and posting on the corresponding channel (@channelh)
815 * via vxge_hal_fifo_txdl_post().
816 *
817 * Note: it is the responsibility of ULD to reserve multiple descriptors
818 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
819 * carries up to configured number (fifo.max_frags) of contiguous buffers.
820 *
821 * Returns: VXGE_HAL_OK - success;
822 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
823 *
824 */
vxge_hal_status_e
vxge_hal_fifo_txdl_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h *txdlh,
    void **txdl_priv)
{
	u32 i;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_fifo_t *fifo;
	vxge_hal_status_e status;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;

#endif

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "vpath_handle = 0x"VXGE_OS_STXFMT",  txdlh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) txdlh);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* Serialize the reserve against concurrent reserve/post paths */
#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	status = __hal_channel_dtr_reserve(&fifo->channel, txdlh);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	if (status == VXGE_HAL_OK) {
		vxge_hal_fifo_txd_t *txdp = (vxge_hal_fifo_txd_t *)*txdlh;
		__hal_fifo_txdl_priv_t *priv;

		priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->dang_txdl = NULL;
		priv->dang_frags = 0;
		priv->next_txdl_priv = NULL;
		priv->bytes_sent = 0;

		/* hand the ULD its private area for this TxDL */
		*txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);

		/* clear the control words of every TxD in the list */
		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((vxge_hal_fifo_txd_t *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}

#if defined(VXGE_OS_MEMORY_CHECK)
		priv->allocated = 1;
#endif
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
	return (status);
}
904
905/*
906 * vxge_hal_fifo_txdl_buffer_set - Set transmit buffer pointer in the
907 * descriptor.
908 * @vpath_handle: virtual path handle.
909 * @txdlh: Descriptor handle.
910 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
911 *	   (of buffers).
912 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
913 * @size: Size of the data buffer (in bytes).
914 *
915 * This API is part of the preparation of the transmit descriptor for posting
916 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
917 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
918 * All three APIs fill in the fields of the fifo descriptor,
919 * in accordance with the X3100 specification.
920 *
921 */
922void
923vxge_hal_fifo_txdl_buffer_set(
924    vxge_hal_vpath_h vpath_handle,
925    vxge_hal_txdl_h txdlh,
926    u32 frag_idx,
927    dma_addr_t dma_pointer,
928    unsigned long size)
929{
930	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
931	__hal_fifo_t *fifo;
932	__hal_device_t *hldev;
933	__hal_fifo_txdl_priv_t *txdl_priv;
934	vxge_hal_fifo_txd_t *txdp;
935
936	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
937	    (dma_pointer != 0) && (size != 0));
938
939	hldev = vp->vpath->hldev;
940
941	vxge_hal_trace_log_fifo("==> %s:%s:%d",
942	    __FILE__, __func__, __LINE__);
943
944	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
945	    "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d, "
946	    "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %lu",
947	    (ptr_t) vpath_handle, (ptr_t) txdlh,
948	    frag_idx, (u64) dma_pointer, size);
949
950	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
951
952	vxge_assert(fifo != NULL);
953
954	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
955
956	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
957
958	/*
959	 * Note:
960	 * it is the responsibility of upper layers and not HAL
961	 * detect it and skip zero-size fragment
962	 */
963	vxge_assert(size > 0);
964	vxge_assert(frag_idx < txdl_priv->alloc_frags);
965
966	txdp->buffer_pointer = (u64) dma_pointer;
967	txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(size);
968	txdl_priv->bytes_sent += size;
969	fifo->stats->total_buffers++;
970	txdl_priv->frags++;
971
972	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
973	    __FILE__, __func__, __LINE__);
974}
975
976/*
977 * vxge_hal_fifo_txdl_buffer_set_aligned - Align transmit buffer and fill
978 * in fifo descriptor.
979 * @vpath_handle: Virtual path handle.
980 * @txdlh: Descriptor handle.
981 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
982 *	   (of buffers).
983 * @vaddr: Virtual address of the data buffer.
984 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
985 * @size: Size of the data buffer (in bytes).
986 * @misaligned_size: Size (in bytes) of the misaligned portion of the
987 * data buffer. Calculated by the caller, based on the platform/OS/other
988 * specific criteria, which is outside of HAL's domain. See notes below.
989 *
990 * This API is part of the transmit descriptor preparation for posting
991 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
992 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
993 * All three APIs fill in the fields of the fifo descriptor,
994 * in accordance with the X3100 specification.
995 * On the PCI-X based systems aligning transmit data typically provides better
996 * transmit performance. The typical alignment granularity: L2 cacheline size.
997 * However, HAL does not make assumptions in terms of the alignment granularity;
998 * this is specified via additional @misaligned_size parameter described above.
999 * Prior to calling vxge_hal_fifo_txdl_buffer_set_aligned(),
1000 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
1001 * provides a separate vxge_hal_check_alignment() API sufficient to cover
1002 * most (but not all) possible alignment criteria.
1003 * If the buffer appears to be aligned, the ULD calls
1004 * vxge_hal_fifo_txdl_buffer_set().
1005 * Otherwise, ULD calls vxge_hal_fifo_txdl_buffer_set_aligned().
1006 *
 * Note: This API is a "superset" of vxge_hal_fifo_txdl_buffer_set(). In
1008 * addition to filling in the specified descriptor it aligns transmit data on
1009 * the specified boundary.
1010 * Note: Decision on whether to align or not to align a given contiguous
1011 * transmit buffer is outside of HAL's domain. To this end ULD can use any
1012 * programmable criteria, which can help to 1) boost transmit performance,
1013 * and/or 2) provide a workaround for PCI bridge bugs, if any.
1014 *
1015 */
1016vxge_hal_status_e
1017vxge_hal_fifo_txdl_buffer_set_aligned(
1018    vxge_hal_vpath_h vpath_handle,
1019    vxge_hal_txdl_h txdlh,
1020    u32 frag_idx,
1021    void *vaddr,
1022    dma_addr_t dma_pointer,
1023    u32 size,
1024    u32 misaligned_size)
1025{
1026	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1027	__hal_fifo_t *fifo;
1028	__hal_device_t *hldev;
1029	__hal_fifo_txdl_priv_t *txdl_priv;
1030	vxge_hal_fifo_txd_t *txdp;
1031	int remaining_size;
1032	ptrdiff_t prev_boff;
1033
1034	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
1035	    (vaddr != NULL) && (dma_pointer != 0) &&
1036	    (size != 0) && (misaligned_size != 0));
1037
1038	hldev = vp->vpath->hldev;
1039
1040	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1041	    __FILE__, __func__, __LINE__);
1042
1043	vxge_hal_trace_log_fifo(
1044	    "vpath_handle = 0x"VXGE_OS_STXFMT", txdlh = 0x"VXGE_OS_STXFMT", "
1045	    "frag_idx = %d, vaddr = 0x"VXGE_OS_STXFMT", "
1046	    "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %d, "
1047	    "misaligned_size = %d", (ptr_t) vpath_handle,
1048	    (ptr_t) txdlh, frag_idx, (ptr_t) vaddr, (u64) dma_pointer, size,
1049	    misaligned_size);
1050
1051	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1052
1053	vxge_assert(fifo != NULL);
1054
1055	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1056
1057	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
1058
1059	/*
1060	 * On some systems buffer size could be zero.
1061	 * It is the responsibility of ULD and *not HAL* to
1062	 * detect it and skip it.
1063	 */
1064	vxge_assert(size > 0);
1065	vxge_assert(frag_idx < txdl_priv->alloc_frags);
1066	vxge_assert(misaligned_size != 0 &&
1067	    misaligned_size <= fifo->config->alignment_size);
1068
1069	remaining_size = size - misaligned_size;
1070	vxge_assert(remaining_size >= 0);
1071
1072	vxge_os_memcpy((char *) txdl_priv->align_vaddr_start,
1073	    vaddr, misaligned_size);
1074
1075	if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
1076		return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS);
1077	}
1078
1079	/* setup new buffer */
1080	/* LINTED */
1081	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1082	txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff;
1083	txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(misaligned_size);
1084	txdl_priv->bytes_sent += misaligned_size;
1085	fifo->stats->total_buffers++;
1086	txdl_priv->frags++;
1087	txdl_priv->align_used_frags++;
1088	txdl_priv->align_vaddr_start += fifo->config->alignment_size;
1089	txdl_priv->align_dma_offset = 0;
1090
1091#if defined(VXGE_OS_DMA_REQUIRES_SYNC)
1092	/* sync new buffer */
1093	vxge_os_dma_sync(fifo->channel.pdev,
1094	    txdl_priv->align_dma_handle,
1095	    txdp->buffer_pointer,
1096	    0,
1097	    misaligned_size,
1098	    VXGE_OS_DMA_DIR_TODEVICE);
1099#endif
1100
1101	if (remaining_size) {
1102		vxge_assert(frag_idx < txdl_priv->alloc_frags);
1103		txdp++;
1104		txdp->buffer_pointer = (u64) dma_pointer + misaligned_size;
1105		txdp->control_0 |=
1106		    VXGE_HAL_FIFO_TXD_BUFFER_SIZE(remaining_size);
1107		txdl_priv->bytes_sent += remaining_size;
1108		fifo->stats->total_buffers++;
1109		txdl_priv->frags++;
1110	}
1111
1112	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1113	    __FILE__, __func__, __LINE__);
1114	return (VXGE_HAL_OK);
1115}
1116
1117/*
1118 * vxge_hal_fifo_txdl_buffer_append - Append the contents of virtually
1119 *		contiguous data buffer to a single physically contiguous buffer.
1120 * @vpath_handle: Virtual path handle.
1121 * @txdlh: Descriptor handle.
1122 * @vaddr: Virtual address of the data buffer.
1123 * @size: Size of the data buffer (in bytes).
1124 *
1125 * This API is part of the transmit descriptor preparation for posting
1126 * (via vxge_hal_fifo_txdl_post()).
 * The main difference of this API with respect to
1128 * vxge_hal_fifo_txdl_buffer_set_aligned() is that this API appends the
1129 * contents of virtually contiguous data buffers received from
1130 * upper layer into a single physically contiguous data buffer and the
1131 * device will do a DMA from this buffer.
1132 *
1133 * See Also: vxge_hal_fifo_txdl_buffer_finalize(),
1134 * vxge_hal_fifo_txdl_buffer_set(),
1135 * vxge_hal_fifo_txdl_buffer_set_aligned().
1136 */
1137vxge_hal_status_e
1138vxge_hal_fifo_txdl_buffer_append(
1139    vxge_hal_vpath_h vpath_handle,
1140    vxge_hal_txdl_h txdlh,
1141    void *vaddr,
1142    u32 size)
1143{
1144	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1145	__hal_fifo_t *fifo;
1146	__hal_device_t *hldev;
1147	__hal_fifo_txdl_priv_t *txdl_priv;
1148	ptrdiff_t used;
1149
1150	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
1151	    (vaddr != NULL) && (size == 0));
1152
1153	hldev = vp->vpath->hldev;
1154
1155	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1156	    __FILE__, __func__, __LINE__);
1157
1158	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1159	    "txdlh = 0x"VXGE_OS_STXFMT", vaddr = 0x"VXGE_OS_STXFMT", "
1160	    "size = %d", (ptr_t) vpath_handle, (ptr_t) txdlh,
1161	    (ptr_t) vaddr, size);
1162
1163	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1164
1165	vxge_assert(fifo != NULL);
1166
1167	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1168
1169	/* LINTED */
1170	used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1171	used += txdl_priv->align_dma_offset;
1172
1173	if (used + (unsigned int)size > (unsigned int)fifo->align_size)
1174		return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS);
1175
1176	vxge_os_memcpy((char *) txdl_priv->align_vaddr_start +
1177	    txdl_priv->align_dma_offset, vaddr, size);
1178
1179	fifo->stats->copied_frags++;
1180
1181	txdl_priv->align_dma_offset += size;
1182
1183	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1184	    __FILE__, __func__, __LINE__);
1185	return (VXGE_HAL_OK);
1186}
1187
1188/*
1189 * vxge_hal_fifo_txdl_buffer_finalize - Prepares a descriptor that contains the
1190 * single physically contiguous buffer.
1191 *
1192 * @vpath_handle: Virtual path handle.
1193 * @txdlh: Descriptor handle.
1194 * @frag_idx: Index of the data buffer in the Txdl list.
1195 *
1196 * This API in conjunction with vxge_hal_fifo_txdl_buffer_append() prepares
1197 * a descriptor that consists of a single physically contiguous buffer
 * which in turn contains the contents of one or more virtually contiguous
1199 * buffers received from the upper layer.
1200 *
1201 * See Also: vxge_hal_fifo_txdl_buffer_append().
1202 */
void
vxge_hal_fifo_txdl_buffer_finalize(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    u32 frag_idx)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	vxge_hal_fifo_txd_t *txdp;
	ptrdiff_t prev_boff;

	/*
	 * NOTE(review): frag_idx appears only in this assert and the trace
	 * below; the TxD is located via txdl_priv->frags instead — confirm
	 * the parameter is intentionally informational.
	 */
	vxge_assert((vpath_handle != NULL) &&
	    (txdlh != NULL) && (frag_idx != 0));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d", (ptr_t) vpath_handle,
	    (ptr_t) txdlh, frag_idx);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
	/* next unused TxD slot receives the accumulated appended data */
	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;

	/*
	 * Point the TxD at the region of the pre-mapped alignment buffer
	 * filled by prior vxge_hal_fifo_txdl_buffer_append() calls:
	 * base DMA address plus the offset of the current slot.
	 */
	/* LINTED */
	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff;
	txdp->control_0 |=
	    VXGE_HAL_FIFO_TXD_BUFFER_SIZE(txdl_priv->align_dma_offset);
	txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
	fifo->stats->total_buffers++;
	fifo->stats->copied_buffers++;
	txdl_priv->frags++;
	txdl_priv->align_used_frags++;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC)
	/* sync pre-mapped buffer */
	vxge_os_dma_sync(fifo->channel.pdev,
	    txdl_priv->align_dma_handle,
	    txdp->buffer_pointer,
	    0,
	    txdl_priv->align_dma_offset,
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif

	/* increment vaddr_start for the next buffer_append() iteration */
	txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
	txdl_priv->align_dma_offset = 0;

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1263
1264/*
1265 * vxge_hal_fifo_txdl_new_frame_set - Start the new packet by setting TXDL flags
1266 * @vpath_handle: virtual path handle.
1267 * @txdlh: Descriptor handle.
1268 * @tagged: Is the frame tagged
1269 *
1270 * This API is part of the preparation of the transmit descriptor for posting
1271 * (via vxge_hal_fifo_txdl_post()). This api is used to mark the end of previous
1272 * frame and start of a new frame.
1273 *
1274 */
1275void
1276vxge_hal_fifo_txdl_new_frame_set(
1277    vxge_hal_vpath_h vpath_handle,
1278    vxge_hal_txdl_h txdlh,
1279    u32 tagged)
1280{
1281	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1282	__hal_fifo_t *fifo;
1283	__hal_device_t *hldev;
1284	__hal_fifo_txdl_priv_t *txdl_priv;
1285	vxge_hal_fifo_txd_t *txdp;
1286
1287	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
1288
1289	hldev = vp->vpath->hldev;
1290
1291	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1292	    __FILE__, __func__, __LINE__);
1293
1294	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1295	    "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
1296	    (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);
1297
1298	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1299
1300	vxge_assert(fifo != NULL);
1301
1302	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1303
1304	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
1305
1306	txdp->control_0 |=
1307	    VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
1308	txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
1309	    VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
1310	txdp->control_1 |= fifo->interrupt_type;
1311	txdp->control_1 |= VXGE_HAL_FIFO_TXD_INT_NUMBER(
1312	    vp->vpath->tx_intr_num);
1313	if (tagged)
1314		txdp->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;
1315	if (txdl_priv->frags) {
1316
1317		txdp = (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
1318
1319		txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
1320		    VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);
1321
1322	}
1323
1324	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1325	    __FILE__, __func__, __LINE__);
1326}
1327
1328/*
1329 * vxge_hal_fifo_txdl_post - Post descriptor on the fifo channel.
1330 * @vpath_handle: Virtual path handle.
1331 * @txdlh: Descriptor obtained via vxge_hal_fifo_txdl_reserve()
1332 * @tagged: Is the frame tagged
1333 *
1334 * Post descriptor on the 'fifo' type channel for transmission.
1335 * Prior to posting the descriptor should be filled in accordance with
1336 * Host/X3100 interface specification for a given service (LL, etc.).
1337 *
1338 */
void
vxge_hal_fifo_txdl_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    u32 tagged)
{
	/* doorbell list pointer; bit 0 is additionally set for tagged frames */
	u64 list_ptr;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	vxge_hal_fifo_txd_t *txdp_last;
	vxge_hal_fifo_txd_t *txdp_first;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;

#endif

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	/* mark the first TxD: steering, FIRST gather code, interrupt routing */
	txdp_first = (vxge_hal_fifo_txd_t *) txdlh;
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
	txdp_first->control_1 |=
	    VXGE_HAL_FIFO_TXD_INT_NUMBER(vp->vpath->tx_intr_num);
	txdp_first->control_1 |= fifo->interrupt_type;
	list_ptr = (u64) txdl_priv->dma_addr;
	if (tagged) {
		txdp_first->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;
		list_ptr |= 0x1;
	}

	/* mark the last filled TxD so the device knows the frame boundary */
	txdp_last =
	    (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
	    VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/* hand ownership to the adapter; done under the post lock */
	txdp_first->control_0 |= VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER;

#if defined(VXGE_DEBUG_ASSERT)
	/* make sure device overwrites the t_code value on completion */
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_T_CODE(VXGE_HAL_FIFO_TXD_T_CODE_UNUSED);
#endif

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
	/* sync the TxDL to device */
	vxge_os_dma_sync(fifo->channel.pdev,
	    txdl_priv->dma_handle,
	    txdl_priv->dma_addr,
	    txdl_priv->dma_offset,
	    txdl_priv->frags << 5, /* sizeof(vxge_hal_fifo_txd_t) */
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif
	/*
	 * we want touch dtr_arr in order with ownership bit set to HW
	 */
	__hal_channel_dtr_post(&fifo->channel, VXGE_HAL_FIFO_TXDL_INDEX(txdlh));

	/* ring the non-offload doorbell to notify the device */
	__hal_non_offload_db_post(vpath_handle,
	    list_ptr,
	    txdl_priv->frags - 1,
	    vp->vpath->vp_config->fifo.no_snoop_bits);

#if defined(VXGE_HAL_FIFO_DUMP_TXD)
	vxge_hal_info_log_fifo(
	    ""VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"
	    VXGE_OS_LLXFMT" dma "VXGE_OS_LLXFMT,
	    txdp_first->control_0, txdp_first->control_1,
	    txdp_first->buffer_pointer, VXGE_HAL_FIFO_TXDL_INDEX(txdp_first),
	    txdl_priv->dma_addr);
#endif

	/* update post statistics and track the usage high-water mark */
	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
	    fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
		    fifo->stats->common_stats.usage_cnt;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1452
1453/*
1454 * vxge_hal_fifo_is_next_txdl_completed - Checks if the next txdl is completed
1455 * @vpath_handle: Virtual path handle.
1456 */
1457vxge_hal_status_e
1458vxge_hal_fifo_is_next_txdl_completed(vxge_hal_vpath_h vpath_handle)
1459{
1460	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1461	__hal_fifo_t *fifo;
1462	__hal_device_t *hldev;
1463	vxge_hal_fifo_txd_t *txdp;
1464	vxge_hal_txdl_h txdlh;
1465	vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1466
1467#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1468	unsigned long flags = 0;
1469
1470#endif
1471
1472
1473	vxge_assert(vpath_handle != NULL);
1474
1475	hldev = vp->vpath->hldev;
1476
1477	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1478	    __FILE__, __func__, __LINE__);
1479
1480	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
1481	    (ptr_t) vpath_handle);
1482
1483	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1484
1485	vxge_assert(fifo != NULL);
1486
1487#if defined(VXGE_HAL_TX_MULTI_POST)
1488	vxge_os_spin_lock(&fifo->channel.post_lock);
1489#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1490	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
1491#endif
1492
1493	__hal_channel_dtr_try_complete(&fifo->channel, &txdlh);
1494
1495	txdp = (vxge_hal_fifo_txd_t *) txdlh;
1496	if ((txdp != NULL) &&
1497	    (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER))) {
1498		status = VXGE_HAL_OK;
1499	}
1500
1501#if defined(VXGE_HAL_TX_MULTI_POST)
1502	vxge_os_spin_unlock(&fifo->channel.post_lock);
1503#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1504	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
1505#endif
1506
1507	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
1508	    __FILE__, __func__, __LINE__, status);
1509
1510	/* no more completions */
1511	return (status);
1512}
1513
1514/*
1515 * vxge_hal_fifo_txdl_next_completed - Retrieve next completed descriptor.
1516 * @vpath_handle: Virtual path handle.
1517 * @txdlh: Descriptor handle. Returned by HAL.
1518 * @txdl_priv: Buffer to return the pointer to per txdl space
1519 * @t_code: Transfer code, as per X3100 User Guide,
1520 *	 Transmit Descriptor Format.
1521 *	 Returned by HAL.
1522 *
1523 * Retrieve the _next_ completed descriptor.
 * HAL uses channel callback (*vxge_hal_channel_callback_f) to notify
1525 * upper-layer driver (ULD) of new completed descriptors. After that
1526 * the ULD can use vxge_hal_fifo_txdl_next_completed to retrieve the rest
1527 * completions (the very first completion is passed by HAL via
1528 * vxge_hal_channel_callback_f).
1529 *
1530 * Implementation-wise, the upper-layer driver is free to call
1531 * vxge_hal_fifo_txdl_next_completed either immediately from inside the
1532 * channel callback, or in a deferred fashion and separate (from HAL)
1533 * context.
1534 *
1535 * Non-zero @t_code means failure to process the descriptor.
1536 * The failure could happen, for instance, when the link is
1537 * down, in which case X3100 completes the descriptor because it
1538 * is not able to send the data out.
1539 *
1540 * For details please refer to X3100 User Guide.
1541 *
1542 * Returns: VXGE_HAL_OK - success.
1543 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1544 * are currently available for processing.
1545 *
1546 */
vxge_hal_status_e
vxge_hal_fifo_txdl_next_completed(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h * txdlh,
    void **txdl_priv,
    vxge_hal_fifo_tcode_e * t_code)
{
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	vxge_hal_fifo_txd_t *txdp;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
	__hal_fifo_txdl_priv_t *priv;

#endif
#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;

#endif

	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;

	vxge_assert((vpath_handle != NULL) &&
	    (txdlh != NULL) && (t_code != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) txdlh, (ptr_t) t_code);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* default: no completed descriptor to hand back */
	*txdlh = 0;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/* peek at the next descriptor in completion order */
	__hal_channel_dtr_try_complete(&fifo->channel, txdlh);

	txdp = (vxge_hal_fifo_txd_t *) * txdlh;
	if (txdp != NULL) {

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
		priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

		/*
		 * sync TxDL to read the ownership
		 *
		 * Note: 16bytes means Control_1 & Control_2
		 */
		vxge_os_dma_sync(fifo->channel.pdev,
		    priv->dma_handle,
		    priv->dma_addr,
		    priv->dma_offset,
		    16,
		    VXGE_OS_DMA_DIR_FROMDEVICE);
#endif

		/* check whether host owns it */
		if (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER)) {

			/* consume the descriptor from the channel ring */
			__hal_channel_dtr_complete(&fifo->channel);

			*txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);

			/* report the device's completion code to the ULD */
			*t_code = (vxge_hal_fifo_tcode_e)
			    VXGE_HAL_FIFO_TXD_T_CODE_GET(txdp->control_0);

			if (fifo->stats->common_stats.usage_cnt > 0)
				fifo->stats->common_stats.usage_cnt--;

			status = VXGE_HAL_OK;
		}
	}

	/* no more completions */
#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}
1644
1645/*
1646 * vxge_hal_fifo_handle_tcode - Handle transfer code.
1647 * @vpath_handle: Virtual Path handle.
1648 * @txdlh: Descriptor handle.
1649 * @t_code: One of the enumerated (and documented in the X3100 user guide)
1650 *	 "transfer codes".
1651 *
1652 * Handle descriptor's transfer code. The latter comes with each completed
1653 * descriptor.
1654 *
1655 * Returns: one of the vxge_hal_status_e {} enumerated types.
1656 * VXGE_HAL_OK			- for success.
1657 * VXGE_HAL_ERR_CRITICAL	- when encounters critical error.
1658 */
vxge_hal_status_e
vxge_hal_fifo_handle_tcode(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    vxge_hal_fifo_tcode_e t_code)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x%d",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, t_code);

	/*
	 * Validate the low 3 bits of the transfer code; only values 0-4
	 * are defined, everything else is rejected as invalid.
	 */
	switch ((t_code & 0x7)) {
	case 0:
		/* 000: Transfer operation completed successfully. */
		break;
	case 1:
		/*
		 * 001: a PCI read transaction (either TxD or frame data)
		 *	returned with corrupt data.
		 */
		break;
	case 2:
		/* 010: a PCI read transaction was returned with no data. */
		break;
	case 3:
		/*
		 * 011: The host attempted to send either a frame or LSO
		 *	MSS that was too long (>9800B).
		 */
		break;
	case 4:
		/*
		 * 100: Error detected during TCP/UDP Large Send
		 *	Offload operation, due to improper header template,
		 *	unsupported protocol, etc.
		 */
		break;
	default:
		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
		return (VXGE_HAL_ERR_INVALID_TCODE);
	}

	/*
	 * NOTE(review): the array is indexed with the unmasked t_code while
	 * validation above uses (t_code & 0x7) — confirm callers never pass
	 * values >= the txd_t_code_err_cnt array size. Also note the counter
	 * is incremented even for t_code 0 (success) — verify intended.
	 */
	vp->vpath->sw_stats->fifo_stats.txd_t_code_err_cnt[t_code]++;

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
	    __FILE__, __func__, __LINE__, VXGE_HAL_OK);
	return (VXGE_HAL_OK);
}
1717
1718/*
1719 * __hal_fifo_txdl_free_many - Free the fragments
1720 * @fifo: FIFO
 * @txdp: Pointer to a TxD
1722 * @list_size: List size
1723 * @frags: Number of fragments
1724 *
 * This routine frees the fragments in a txdl
1726 */
void
__hal_fifo_txdl_free_many(
    __hal_fifo_t *fifo,
    vxge_hal_fifo_txd_t * txdp,
    u32 list_size,
    u32 frags)
{
	__hal_fifo_txdl_priv_t *current_txdl_priv;
	__hal_fifo_txdl_priv_t *next_txdl_priv;
	/* a sane chain carries a whole multiple of list_size fragments */
	u32 invalid_frags = frags % list_size;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) fifo->channel.vph;
	__hal_device_t *hldev;

	vxge_assert((fifo != NULL) && (txdp != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT", "
	    "list_size = %d, frags = %d", (ptr_t) fifo, (ptr_t) txdp,
	    list_size, frags);

	if (invalid_frags) {
		vxge_hal_trace_log_fifo(
		    "freeing corrupt txdlh 0x"VXGE_OS_STXFMT", "
		    "fragments %d list size %d",
		    (ptr_t) txdp, frags, list_size);
		vxge_assert(invalid_frags == 0);
	}
	/* walk the linked chain of TxD lists, releasing one list per pass */
	while (txdp) {
		vxge_hal_trace_log_fifo("freeing linked txdlh 0x"VXGE_OS_STXFMT
		    ", " "fragments %d list size %d",
		    (ptr_t) txdp, frags, list_size);
		current_txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
		current_txdl_priv->allocated = 0;
#endif
		/* return this list's slot to the channel free pool */
		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdp));
		next_txdl_priv = current_txdl_priv->next_txdl_priv;
		vxge_assert(frags);
		frags -= list_size;
		if (next_txdl_priv) {
			/* unlink before advancing to the next list */
			current_txdl_priv->next_txdl_priv = NULL;
			txdp = next_txdl_priv->first_txdp;
		} else {
			vxge_hal_trace_log_fifo(
			    "freed linked txdlh fragments %d list size %d",
			    frags, list_size);
			break;
		}
	}

	/* every fragment must have been accounted for */
	vxge_assert(frags == 0);

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1788
1789/*
1790 * vxge_hal_fifo_txdl_free - Free descriptor.
1791 * @vpath_handle: Virtual path handle.
1792 * @txdlh: Descriptor handle.
1793 *
1794 * Free the reserved descriptor. This operation is "symmetrical" to
1795 * vxge_hal_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1796 * lifecycle.
1797 *
1798 * After free-ing (see vxge_hal_fifo_txdl_free()) the descriptor again can
1799 * be:
1800 *
1801 * - reserved (vxge_hal_fifo_txdl_reserve);
1802 *
1803 * - posted (vxge_hal_fifo_txdl_post);
1804 *
1805 * - completed (vxge_hal_fifo_txdl_next_completed);
1806 *
1807 * - and recycled again (vxge_hal_fifo_txdl_free).
1808 *
1809 * For alternative state transitions and more details please refer to
1810 * the design doc.
1811 *
1812 */
void
vxge_hal_fifo_txdl_free(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	u32 max_frags;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	u32 flags = 0;

#endif
	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle, (ptr_t) txdlh);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	max_frags = fifo->config->max_frags;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/*
	 * alloc_frags > max_frags means the reservation spans a linked
	 * chain of TxD lists; release the whole chain (and any dangling
	 * list) via __hal_fifo_txdl_free_many(). Otherwise a single slot
	 * is returned to the channel free pool.
	 */
	if (txdl_priv->alloc_frags > max_frags) {
		vxge_hal_fifo_txd_t *dang_txdp = (vxge_hal_fifo_txd_t *)
		txdl_priv->dang_txdl;
		u32 dang_frags = txdl_priv->dang_frags;
		u32 alloc_frags = txdl_priv->alloc_frags;
		/* clear the bookkeeping before freeing to avoid reuse */
		txdl_priv->dang_txdl = NULL;
		txdl_priv->dang_frags = 0;
		txdl_priv->alloc_frags = 0;
		/* txdlh must have a linked list of txdlh */
		vxge_assert(txdl_priv->next_txdl_priv);

		/* free any dangling txdlh first */
		if (dang_txdp) {
			vxge_hal_info_log_fifo(
			    "freeing dangled txdlh 0x"VXGE_OS_STXFMT" for %d "
			    "fragments", (ptr_t) dang_txdp, dang_frags);
			__hal_fifo_txdl_free_many(fifo, dang_txdp,
			    max_frags, dang_frags);
		}

		/* now free the reserved txdlh list */
		vxge_hal_info_log_fifo(
		    "freeing txdlh 0x"VXGE_OS_STXFMT" list of %d fragments",
		    (ptr_t) txdlh, alloc_frags);
		__hal_fifo_txdl_free_many(fifo,
		    (vxge_hal_fifo_txd_t *) txdlh, max_frags,
		    alloc_frags);
	} else {
		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
	}

	/* account the bytes carried by this descriptor toward polling */
	fifo->channel.poll_bytes += txdl_priv->bytes_sent;

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
	txdl_priv->allocated = 0;
#endif

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1899