10Sduke/*-
212278Syan * Copyright(c) 2002-2011 Exar Corp.
30Sduke * All rights reserved.
40Sduke *
50Sduke * Redistribution and use in source and binary forms, with or without
60Sduke * modification are permitted provided the following conditions are met:
70Sduke *
80Sduke *    1. Redistributions of source code must retain the above copyright notice,
90Sduke *       this list of conditions and the following disclaimer.
100Sduke *
110Sduke *    2. Redistributions in binary form must reproduce the above copyright
120Sduke *       notice, this list of conditions and the following disclaimer in the
130Sduke *       documentation and/or other materials provided with the distribution.
140Sduke *
150Sduke *    3. Neither the name of the Exar Corporation nor the names of its
160Sduke *       contributors may be used to endorse or promote products derived from
170Sduke *       this software without specific prior written permission.
180Sduke *
192362Sohair * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
202362Sohair * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
212362Sohair * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
220Sduke * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
230Sduke * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
240Sduke * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
250Sduke * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
260Sduke * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
270Sduke * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
280Sduke * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2912278Syan * POSSIBILITY OF SUCH DAMAGE.
3012278Syan */
3112278Syan/*$FreeBSD$*/
320Sduke
330Sduke#include <dev/vxge/vxgehal/vxgehal.h>
340Sduke
/*
 * __hal_fifo_mempool_item_alloc - Allocate List blocks for TxD list callback
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @item_index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user; here, the __hal_fifo_t being built
 *
 * This function is callback passed to __hal_mempool_create to create memory
 * pool for TxD list. For every TxDL that fits in this memory block it wires
 * up the channel dtr_arr entry (TxD pointer, ULD private area, HAL private
 * area), pre-formats the HAL private state and, when alignment buffers are
 * configured at pool-creation time, maps the alignment DMA buffer.
 */
static vxge_hal_status_e
__hal_fifo_mempool_item_alloc(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
	u32 i;
	void *block_priv;
	u32 memblock_item_idx;

	__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

	vxge_assert(fifo != NULL);
	vxge_assert(item);

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("==> %s:%s:%d",
		    __FILE__, __func__, __LINE__);

		vxge_hal_trace_log_pool(
		    "mempoolh = 0x"VXGE_OS_STXFMT", "
		    "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, "
		    "dma_object = 0x"VXGE_OS_STXFMT", \
		    item = 0x"VXGE_OS_STXFMT", "
		    "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT,
		    (ptr_t) mempoolh, (ptr_t) memblock, memblock_index,
		    (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
		    (ptr_t) userdata);
	}
#endif

	/*
	 * Per-block private area that backs the ULD and HAL private
	 * sections of every TxDL in this memory block.
	 */
	block_priv = __hal_mempool_item_priv((vxge_hal_mempool_t *) mempoolh,
	    memblock_index, item, &memblock_item_idx);

	vxge_assert(block_priv != NULL);

	for (i = 0; i < fifo->txdl_per_memblock; i++) {

		__hal_fifo_txdl_priv_t *txdl_priv;
		vxge_hal_fifo_txd_t *txdp;

		/* Global descriptor index across all memory blocks */
		int dtr_index = item_index * fifo->txdl_per_memblock + i;

		/* i-th TxDL inside this block */
		txdp = (vxge_hal_fifo_txd_t *) ((void *)
		    ((char *) item + i * fifo->txdl_size));

		/* host_control carries the descriptor index back from hw */
		txdp->host_control = dtr_index;

		fifo->channel.dtr_arr[dtr_index].dtr = txdp;

		/*
		 * Layout of each private slot: ULD private area first,
		 * then the HAL private area (per_txdl_space bytes in).
		 */
		fifo->channel.dtr_arr[dtr_index].uld_priv = (void *)
		    ((char *) block_priv + fifo->txdl_priv_size * i);

		fifo->channel.dtr_arr[dtr_index].hal_priv = (void *)
		    (((char *) fifo->channel.dtr_arr[dtr_index].uld_priv) +
		    fifo->per_txdl_space);

		txdl_priv = (__hal_fifo_txdl_priv_t *)
		    fifo->channel.dtr_arr[dtr_index].hal_priv;

		vxge_assert(txdl_priv);

		/* pre-format HAL's TxDL's private */
		/* LINTED */
		txdl_priv->dma_offset = (char *) txdp - (char *) memblock;
		txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
		txdl_priv->dma_handle = dma_object->handle;
		txdl_priv->memblock = memblock;
		txdl_priv->first_txdp = (vxge_hal_fifo_txd_t *) txdp;
		txdl_priv->next_txdl_priv = NULL;
		txdl_priv->dang_txdl = NULL;
		txdl_priv->dang_frags = 0;
		txdl_priv->alloc_frags = 0;

#if defined(VXGE_DEBUG_ASSERT)
		txdl_priv->dma_object = dma_object;
#endif

#if defined(VXGE_HAL_ALIGN_XMIT)
		txdl_priv->align_vaddr = NULL;
		txdl_priv->align_dma_addr = (dma_addr_t) 0;

#ifndef	VXGE_HAL_ALIGN_XMIT_ALLOC_RT
		/*
		 * Alignment buffers mapped up-front (not at run time);
		 * a failure here aborts pool creation with the error.
		 */
		/* CONSTCOND */
		if (TRUE) {
			vxge_hal_status_e status;

			if (fifo->config->alignment_size) {
				status = __hal_fifo_txdl_align_alloc_map(fifo,
				    txdp);
				if (status != VXGE_HAL_OK) {

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
					__hal_device_t *hldev;
					hldev = (__hal_device_t *)
					    fifo->channel.devh;

					vxge_hal_err_log_pool(
					    "align buffer[%d] %d bytes, \
					    status %d",
					    (item_index * fifo->txdl_per_memblock + i),
					    fifo->align_size, status);

					vxge_hal_trace_log_pool(
					    "<== %s:%s:%d  Result: 0",
					    __FILE__, __func__, __LINE__);
#endif
					return (status);
				}
			}
		}
#endif
#endif
		/* Give the ULD a chance to pre-initialize each TxDL */
		if (fifo->txdl_init) {
			fifo->txdl_init(fifo->channel.vph,
			    (vxge_hal_txdl_h) txdp,
			    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp),
			    VXGE_HAL_FIFO_TXDL_INDEX(txdp),
			    fifo->channel.userdata, VXGE_HAL_OPEN_NORMAL);
		}
	}

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
		    __FILE__, __func__, __LINE__);
	}
#endif

	return (VXGE_HAL_OK);
}
190
191
/*
 * __hal_fifo_mempool_item_free - Free List blocks for TxD list callback
 * @mempoolh: Handle to memory pool
 * @memblock: Address of this memory block
 * @memblock_index: Index of this memory block
 * @dma_object: dma object for this block
 * @item: Pointer to this item
 * @item_index: Index of this item in memory block
 * @is_last: If this is last item in the block
 * @userdata: Specific data of user; here, the owning __hal_fifo_t
 *
 * This function is callback passed to __hal_mempool_free to destroy memory
 * pool for TxD list. Its only real work is unmapping the per-TxDL
 * alignment buffers (when VXGE_HAL_ALIGN_XMIT is compiled in); the block
 * memory itself is released by the mempool.
 */
static vxge_hal_status_e
__hal_fifo_mempool_item_free(
    vxge_hal_mempool_h mempoolh,
    void *memblock,
    u32 memblock_index,
    vxge_hal_mempool_dma_t *dma_object,
    void *item,
    u32 item_index,
    u32 is_last,
    void *userdata)
{
	vxge_assert(item);

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

		vxge_assert(fifo != NULL);

		/* declaration after statement: requires C99 or later */
		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("==> %s:%s:%d",
		    __FILE__, __func__, __LINE__);

		vxge_hal_trace_log_pool("mempoolh = 0x"VXGE_OS_STXFMT", "
		    "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, "
		    "dma_object = 0x"VXGE_OS_STXFMT", \
		    item = 0x"VXGE_OS_STXFMT", "
		    "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT,
		    (ptr_t) mempoolh, (ptr_t) memblock, memblock_index,
		    (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
		    (ptr_t) userdata);
	}
#endif

#if defined(VXGE_HAL_ALIGN_XMIT)
	{
		__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

		vxge_assert(fifo != NULL);
		if (fifo->config->alignment_size) {

			int i;
			vxge_hal_fifo_txd_t *txdp;

			/* Unmap the alignment buffer of every TxDL that
			 * was carved out of this memory block. */
			for (i = 0; i < fifo->txdl_per_memblock; i++) {
				txdp = (void *)
				    ((char *) item + i * fifo->txdl_size);
				__hal_fifo_txdl_align_free_unmap(fifo, txdp);
			}
		}
	}
#endif

#if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
	{
		__hal_fifo_t *fifo = (__hal_fifo_t *) userdata;

		vxge_assert(fifo != NULL);

		__hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;

		vxge_hal_trace_log_pool("<== %s:%s:%d  Result: 0",
		    __FILE__, __func__, __LINE__);
	}
#endif

	return (VXGE_HAL_OK);
}
275
276/*
277 * __hal_fifo_create - Create a FIFO
278 * @vpath_handle: Handle returned by virtual path open
279 * @attr: FIFO configuration parameters structure
280 *
281 * This function creates FIFO and initializes it.
282 *
283 */
284vxge_hal_status_e
285__hal_fifo_create(
286    vxge_hal_vpath_h vpath_handle,
287    vxge_hal_fifo_attr_t *attr)
288{
289	vxge_hal_status_e status;
290	__hal_fifo_t *fifo;
291	vxge_hal_fifo_config_t *config;
292	u32 txdl_size, memblock_size, txdl_per_memblock;
293	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
294	__hal_device_t *hldev;
295
296	vxge_assert((vpath_handle != NULL) && (attr != NULL));
297
298	hldev = (__hal_device_t *) vp->vpath->hldev;
299
300	vxge_hal_trace_log_fifo("==> %s:%s:%d",
301	    __FILE__, __func__, __LINE__);
302
303	vxge_hal_trace_log_fifo(
304	    "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
305	    (ptr_t) vpath_handle, (ptr_t) attr);
306
307	if ((vpath_handle == NULL) || (attr == NULL)) {
308		vxge_hal_err_log_fifo("null pointer passed == > %s : %d",
309		    __func__, __LINE__);
310		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
311		    __FILE__, __func__, __LINE__,
312		    VXGE_HAL_ERR_INVALID_HANDLE);
313		return (VXGE_HAL_ERR_INVALID_HANDLE);
314	}
315
316	config =
317	    &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].fifo;
318
319	txdl_size = config->max_frags * sizeof(vxge_hal_fifo_txd_t);
320
321	if (txdl_size <= VXGE_OS_HOST_PAGE_SIZE)
322		memblock_size = VXGE_OS_HOST_PAGE_SIZE;
323	else
324		memblock_size = txdl_size;
325
326	txdl_per_memblock = memblock_size / txdl_size;
327
328	config->fifo_length = ((config->fifo_length + txdl_per_memblock - 1) /
329	    txdl_per_memblock) * txdl_per_memblock;
330
331	fifo = (__hal_fifo_t *) vxge_hal_channel_allocate(
332	    (vxge_hal_device_h) vp->vpath->hldev,
333	    vpath_handle,
334	    VXGE_HAL_CHANNEL_TYPE_FIFO,
335	    config->fifo_length,
336	    attr->per_txdl_space,
337	    attr->userdata);
338
339	if (fifo == NULL) {
340		vxge_hal_err_log_fifo("Memory allocation failed == > %s : %d",
341		    __func__, __LINE__);
342		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
343		    __FILE__, __func__, __LINE__,
344		    VXGE_HAL_ERR_OUT_OF_MEMORY);
345		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
346	}
347
348	vp->vpath->fifoh = fifo;
349
350	fifo->stats = &vp->vpath->sw_stats->fifo_stats;
351
352	fifo->config = config;
353
354	fifo->memblock_size = memblock_size;
355
356#if defined(VXGE_HAL_TX_MULTI_POST)
357	vxge_os_spin_lock_init(&fifo->channel.post_lock,
358	    vp->vpath->hldev->header.pdev);
359#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
360	vxge_os_spin_lock_init_irq(&fifo->channel.post_lock,
361	    vp->vpath->hldev->header.irqh);
362#endif
363
364	fifo->align_size =
365	    fifo->config->alignment_size * fifo->config->max_aligned_frags;
366
367	/* apply "interrupts per txdl" attribute */
368	fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_UTILZ;
369	if (fifo->config->intr) {
370		fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_PER_LIST;
371	}
372
373	fifo->no_snoop_bits = config->no_snoop_bits;
374
375	/*
376	 * FIFO memory management strategy:
377	 *
378	 * TxDL splitted into three independent parts:
379	 *	- set of TxD's
380	 *	- TxD HAL private part
381	 *	- upper layer private part
382	 *
383	 * Adaptative memory allocation used. i.e. Memory allocated on
384	 * demand with the size which will fit into one memory block.
385	 * One memory block may contain more than one TxDL. In simple case
386	 * memory block size can be equal to CPU page size. On more
387	 * sophisticated OS's memory block can be contigious across
388	 * several pages.
389	 *
390	 * During "reserve" operations more memory can be allocated on demand
391	 * for example due to FIFO full condition.
392	 *
393	 * Pool of memory memblocks never shrinks except __hal_fifo_close
394	 * routine which will essentially stop channel and free the resources.
395	 */
396
397	/* TxDL common private size == TxDL private + ULD private */
398	fifo->txdl_priv_size =
399	    sizeof(__hal_fifo_txdl_priv_t) + attr->per_txdl_space;
400	fifo->txdl_priv_size =
401	    ((fifo->txdl_priv_size + __vxge_os_cacheline_size - 1) /
402	    __vxge_os_cacheline_size) * __vxge_os_cacheline_size;
403
404	fifo->per_txdl_space = attr->per_txdl_space;
405
406	/* recompute txdl size to be cacheline aligned */
407	fifo->txdl_size = txdl_size;
408	fifo->txdl_per_memblock = txdl_per_memblock;
409
410	/*
411	 * since txdl_init() callback will be called from item_alloc(),
412	 * the same way channels userdata might be used prior to
413	 * channel_initialize()
414	 */
415	fifo->txdl_init = attr->txdl_init;
416	fifo->txdl_term = attr->txdl_term;
417	fifo->callback = attr->callback;
418
419	if (fifo->txdl_per_memblock == 0) {
420		__hal_fifo_delete(vpath_handle);
421		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
422		    __FILE__, __func__, __LINE__,
423		    VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
424		return (VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
425	}
426
427	/* calculate actual TxDL block private size */
428	fifo->txdlblock_priv_size =
429	    fifo->txdl_priv_size * fifo->txdl_per_memblock;
430
431	fifo->mempool =
432	    vxge_hal_mempool_create((vxge_hal_device_h) vp->vpath->hldev,
433	    fifo->memblock_size,
434	    fifo->memblock_size,
435	    fifo->txdlblock_priv_size,
436	    fifo->config->fifo_length /
437	    fifo->txdl_per_memblock,
438	    fifo->config->fifo_length /
439	    fifo->txdl_per_memblock,
440	    __hal_fifo_mempool_item_alloc,
441	    __hal_fifo_mempool_item_free,
442	    fifo);
443
444	if (fifo->mempool == NULL) {
445		__hal_fifo_delete(vpath_handle);
446		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
447		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
448		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
449	}
450
451	status = vxge_hal_channel_initialize(&fifo->channel);
452	if (status != VXGE_HAL_OK) {
453		__hal_fifo_delete(vpath_handle);
454		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
455		    __FILE__, __func__, __LINE__, status);
456		return (status);
457	}
458
459	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
460	    __FILE__, __func__, __LINE__);
461	return (VXGE_HAL_OK);
462}
463
/*
 * __hal_fifo_abort - Returns the TxD
 * @fifoh: Fifo to be reset
 * @reopen: See  vxge_hal_reopen_e {}.
 *
 * This function terminates the TxDs of fifo: it first runs the ULD's
 * txdl_term callback over every descriptor that is NOT posted (state
 * FREED), then drains the posted descriptors one by one, terminating
 * each (state POSTED) and returning it to the channel free list.
 */
void
__hal_fifo_abort(
    vxge_hal_fifo_h fifoh,
    vxge_hal_reopen_e reopen)
{
	u32 i = 0;
	__hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;
	__hal_device_t *hldev;
	vxge_hal_txdl_h txdlh;

	vxge_assert(fifoh != NULL);

	hldev = (__hal_device_t *) fifo->channel.devh;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT", reopen = %d",
	    (ptr_t) fifoh, reopen);

	/* Pass 1: terminate descriptors that were never posted */
	if (fifo->txdl_term) {
		__hal_channel_for_each_dtr(&fifo->channel, txdlh, i) {
			if (!__hal_channel_is_posted_dtr(&fifo->channel,
			    i)) {
				fifo->txdl_term(fifo->channel.vph, txdlh,
				    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
				    VXGE_HAL_TXDL_STATE_FREED,
				    fifo->channel.userdata,
				    reopen);
			}
		}
	}

	/* Pass 2: drain and free everything still posted to hardware */
	for (;;) {
		__hal_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		__hal_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(fifo->channel.vph, txdlh,
			    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
			    VXGE_HAL_TXDL_STATE_POSTED,
			    fifo->channel.userdata,
			    reopen);
		}

		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
527
528/*
529 * __hal_fifo_reset - Resets the fifo
530 * @fifoh: Fifo to be reset
531 *
532 * This function resets the fifo during vpath reset operation
533 */
534vxge_hal_status_e
535__hal_fifo_reset(
536    vxge_hal_fifo_h fifoh)
537{
538	vxge_hal_status_e status;
539	__hal_device_t *hldev;
540	__hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;
541
542	vxge_assert(fifoh != NULL);
543
544	hldev = (__hal_device_t *) fifo->channel.devh;
545
546	vxge_hal_trace_log_fifo("==> %s:%s:%d",
547	    __FILE__, __func__, __LINE__);
548
549	vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT,
550	    (ptr_t) fifoh);
551
552	__hal_fifo_abort(fifoh, VXGE_HAL_RESET_ONLY);
553
554	status = __hal_channel_reset(&fifo->channel);
555
556	if (status != VXGE_HAL_OK) {
557
558		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
559		    __FILE__, __func__, __LINE__, status);
560		return (status);
561
562	}
563
564	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
565	    __FILE__, __func__, __LINE__);
566
567	return (VXGE_HAL_OK);
568}
569
/*
 * vxge_hal_fifo_doorbell_reset - Resets the doorbell fifo
 * @vpath_handle: Vpath Handle
 *
 * This function resets the doorbell fifo when a fifo error occurs:
 * it resets the non-offload doorbell and then re-posts every TxDL
 * that is still marked as posted in the channel.
 */
vxge_hal_status_e
vxge_hal_fifo_doorbell_reset(
    vxge_hal_vpath_h vpath_handle)
{
	u32 i;
	vxge_hal_txdl_h txdlh;
	__hal_fifo_t *fifo;
	__hal_virtualpath_t *vpath;
	__hal_fifo_txdl_priv_t *txdl_priv;
	__hal_device_t *hldev;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	vxge_hal_status_e status = VXGE_HAL_OK;

	vxge_assert(vpath_handle != NULL);

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vpath = ((__hal_vpath_handle_t *) fifo->channel.vph)->vpath;

	status = __hal_non_offload_db_reset(fifo->channel.vph);

	if (status != VXGE_HAL_OK) {
		/* NOTE(review): trace says "Result: 0" here even though
		 * a non-OK status is returned */
		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
		    __FILE__, __func__, __LINE__);
		return (status);
	}

	/* Re-post every still-posted TxDL through the fresh doorbell.
	 * Bit 0 of the address is set when the TxD requests no
	 * bandwidth limiting. */
	__hal_channel_for_each_posted_dtr(&fifo->channel, txdlh, i) {

		txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

		__hal_non_offload_db_post(fifo->channel.vph,
		    ((VXGE_HAL_FIFO_TXD_NO_BW_LIMIT_GET(
		    ((vxge_hal_fifo_txd_t *) txdlh)->control_1)) ?
		    (((u64) txdl_priv->dma_addr) | 0x1) :
		    (u64) txdl_priv->dma_addr),
		    txdl_priv->frags - 1,
		    vpath->vp_config->fifo.no_snoop_bits);
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);

	return (status);
}
629
/*
 * __hal_fifo_delete - Removes the FIFO
 * @vpath_handle: Virtual path handle to which this queue belongs
 *
 * This function freeup the memory pool and removes the FIFO: it aborts
 * outstanding descriptors, destroys the backing mempool, terminates and
 * frees the channel, and tears down the post lock.
 */
void
__hal_fifo_delete(
    vxge_hal_vpath_h vpath_handle)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;

	vxge_assert(vpath_handle != NULL);

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	if (fifo->mempool) {
		/* abort first so txdl_term runs before memory goes away */
		__hal_fifo_abort(vp->vpath->fifoh, VXGE_HAL_OPEN_NORMAL);
		vxge_hal_mempool_destroy(fifo->mempool);
	}

	vxge_hal_channel_terminate(&fifo->channel);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock_destroy(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.pdev);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	/* NOTE(review): init_irq in __hal_fifo_create passes header.irqh
	 * but destroy_irq here passes header.pdev — confirm the OS pal
	 * macro ignores this argument, or align the two. */
	vxge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.pdev);
#endif

	vxge_hal_channel_free(&fifo->channel);

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
678
679#if defined(VXGE_HAL_ALIGN_XMIT)
680/*
681 * __hal_fifo_txdl_align_free_unmap - Unmap the alignement buffers
682 * @fifo: Fifo
683 * @txdp: txdl
684 *
685 * This function unmaps dma memory for the alignment buffers
686 */
687void
688__hal_fifo_txdl_align_free_unmap(
689    __hal_fifo_t *fifo,
690    vxge_hal_fifo_txd_t *txdp)
691{
692	__hal_device_t *hldev;
693	__hal_fifo_txdl_priv_t *txdl_priv;
694
695	vxge_assert((fifo != NULL) && (txdp != NULL));
696
697	hldev = (__hal_device_t *) fifo->channel.devh;
698
699	vxge_hal_trace_log_fifo("==> %s:%s:%d",
700	    __FILE__, __func__, __LINE__);
701
702	vxge_hal_trace_log_fifo(
703	    "fifo = 0x"VXGE_OS_STXFMT",  txdp = 0x"VXGE_OS_STXFMT,
704	    (ptr_t) fifo, (ptr_t) txdp);
705
706	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
707
708	if (txdl_priv->align_vaddr != NULL) {
709		__hal_blockpool_free(fifo->channel.devh,
710		    txdl_priv->align_vaddr,
711		    fifo->align_size,
712		    &txdl_priv->align_dma_addr,
713		    &txdl_priv->align_dma_handle,
714		    &txdl_priv->align_dma_acch);
715
716		txdl_priv->align_vaddr = NULL;
717		txdl_priv->align_dma_addr = 0;
718	}
719
720	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
721	    __FILE__, __func__, __LINE__);
722}
723
724/*
725 * __hal_fifo_txdl_align_alloc_map - Maps the alignement buffers
726 * @fifo: Fifo
727 * @txdp: txdl
728 *
729 * This function maps dma memory for the alignment buffers
730 */
731vxge_hal_status_e
732__hal_fifo_txdl_align_alloc_map(
733    __hal_fifo_t *fifo,
734    vxge_hal_fifo_txd_t *txdp)
735{
736	__hal_device_t *hldev;
737	__hal_fifo_txdl_priv_t *txdl_priv;
738
739	vxge_assert((fifo != NULL) && (txdp != NULL));
740
741	hldev = (__hal_device_t *) fifo->channel.devh;
742
743	vxge_hal_trace_log_fifo("==> %s:%s:%d",
744	    __FILE__, __func__, __LINE__);
745
746	vxge_hal_trace_log_fifo(
747	    "fifo = 0x"VXGE_OS_STXFMT",  txdp = 0x"VXGE_OS_STXFMT,
748	    (ptr_t) fifo, (ptr_t) txdp);
749
750	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
751
752	/* allocate alignment DMA-buffer */
753	txdl_priv->align_vaddr =
754	    (u8 *) __hal_blockpool_malloc(fifo->channel.devh,
755	    fifo->align_size,
756	    &txdl_priv->align_dma_addr,
757	    &txdl_priv->align_dma_handle,
758	    &txdl_priv->align_dma_acch);
759	if (txdl_priv->align_vaddr == NULL) {
760		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
761		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
762		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
763	}
764
765	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
766	    __FILE__, __func__, __LINE__);
767	return (VXGE_HAL_OK);
768}
769#endif
770/*
771 * vxge_hal_fifo_free_txdl_count_get - returns the number of txdls
772 *                               available in the fifo
773 * @vpath_handle: Virtual path handle.
774 */
775u32
776vxge_hal_fifo_free_txdl_count_get(vxge_hal_vpath_h vpath_handle)
777{
778	return __hal_channel_free_dtr_count(&((__hal_fifo_t *)
779	    ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh)->channel);
780}
781
782/*
783 * vxge_hal_fifo_txdl_private_get - Retrieve per-descriptor private data.
784 * @vpath_handle: Virtual path handle.
785 * @txdlh: Descriptor handle.
786 *
787 * Retrieve per-descriptor private data.
788 * Note that ULD requests per-descriptor space via
789 * vxge_hal_fifo_attr_t passed to
790 * vxge_hal_vpath_open().
791 *
792 * Returns: private ULD data associated with the descriptor.
793 */
794void *
795vxge_hal_fifo_txdl_private_get(
796    vxge_hal_vpath_h vpath_handle,
797    vxge_hal_txdl_h txdlh)
798{
799	return (VXGE_HAL_FIFO_ULD_PRIV(((__hal_fifo_t *)
800	    ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh), txdlh));
801}
802
/*
 * vxge_hal_fifo_txdl_reserve - Reserve fifo descriptor.
 * @vpath_handle: virtual path handle.
 * @txdlh: Reserved descriptor. On success HAL fills this "out" parameter
 *	with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by upper layer driver (ULD)
 * and posting on the corresponding channel (@channelh)
 * via vxge_hal_fifo_txdl_post().
 *
 * Note: it is the responsibility of ULD to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HAL_OK - success;
 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
vxge_hal_status_e
vxge_hal_fifo_txdl_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h *txdlh,
    void **txdl_priv)
{
	u32 i;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_fifo_t *fifo;
	vxge_hal_status_e status;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;

#endif

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "vpath_handle = 0x"VXGE_OS_STXFMT",  txdlh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) txdlh);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* only the reserve itself is done under the post lock; the
	 * descriptor is private to the caller after that */
#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	status = __hal_channel_dtr_reserve(&fifo->channel, txdlh);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	if (status == VXGE_HAL_OK) {
		vxge_hal_fifo_txd_t *txdp = (vxge_hal_fifo_txd_t *)*txdlh;
		__hal_fifo_txdl_priv_t *priv;

		priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->dang_txdl = NULL;
		priv->dang_frags = 0;
		priv->next_txdl_priv = NULL;
		priv->bytes_sent = 0;

		*txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);

		/* clear the control words of every TxD in the list */
		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((vxge_hal_fifo_txd_t *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}

#if defined(VXGE_OS_MEMORY_CHECK)
		priv->allocated = 1;
#endif
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
	return (status);
}
902
903/*
904 * vxge_hal_fifo_txdl_buffer_set - Set transmit buffer pointer in the
905 * descriptor.
906 * @vpath_handle: virtual path handle.
907 * @txdlh: Descriptor handle.
908 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
909 *	   (of buffers).
910 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
911 * @size: Size of the data buffer (in bytes).
912 *
913 * This API is part of the preparation of the transmit descriptor for posting
914 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
915 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
916 * All three APIs fill in the fields of the fifo descriptor,
917 * in accordance with the X3100 specification.
918 *
919 */
920void
921vxge_hal_fifo_txdl_buffer_set(
922    vxge_hal_vpath_h vpath_handle,
923    vxge_hal_txdl_h txdlh,
924    u32 frag_idx,
925    dma_addr_t dma_pointer,
926    unsigned long size)
927{
928	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
929	__hal_fifo_t *fifo;
930	__hal_device_t *hldev;
931	__hal_fifo_txdl_priv_t *txdl_priv;
932	vxge_hal_fifo_txd_t *txdp;
933
934	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
935	    (dma_pointer != 0) && (size != 0));
936
937	hldev = vp->vpath->hldev;
938
939	vxge_hal_trace_log_fifo("==> %s:%s:%d",
940	    __FILE__, __func__, __LINE__);
941
942	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
943	    "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d, "
944	    "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %lu",
945	    (ptr_t) vpath_handle, (ptr_t) txdlh,
946	    frag_idx, (u64) dma_pointer, size);
947
948	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
949
950	vxge_assert(fifo != NULL);
951
952	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
953
954	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
955
956	/*
957	 * Note:
958	 * it is the responsibility of upper layers and not HAL
959	 * detect it and skip zero-size fragment
960	 */
961	vxge_assert(size > 0);
962	vxge_assert(frag_idx < txdl_priv->alloc_frags);
963
964	txdp->buffer_pointer = (u64) dma_pointer;
965	txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(size);
966	txdl_priv->bytes_sent += size;
967	fifo->stats->total_buffers++;
968	txdl_priv->frags++;
969
970	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
971	    __FILE__, __func__, __LINE__);
972}
973
974/*
975 * vxge_hal_fifo_txdl_buffer_set_aligned - Align transmit buffer and fill
976 * in fifo descriptor.
977 * @vpath_handle: Virtual path handle.
978 * @txdlh: Descriptor handle.
979 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
980 *	   (of buffers).
981 * @vaddr: Virtual address of the data buffer.
982 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
983 * @size: Size of the data buffer (in bytes).
984 * @misaligned_size: Size (in bytes) of the misaligned portion of the
985 * data buffer. Calculated by the caller, based on the platform/OS/other
986 * specific criteria, which is outside of HAL's domain. See notes below.
987 *
988 * This API is part of the transmit descriptor preparation for posting
989 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
990 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
991 * All three APIs fill in the fields of the fifo descriptor,
992 * in accordance with the X3100 specification.
993 * On the PCI-X based systems aligning transmit data typically provides better
994 * transmit performance. The typical alignment granularity: L2 cacheline size.
995 * However, HAL does not make assumptions in terms of the alignment granularity;
996 * this is specified via additional @misaligned_size parameter described above.
997 * Prior to calling vxge_hal_fifo_txdl_buffer_set_aligned(),
998 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
999 * provides a separate vxge_hal_check_alignment() API sufficient to cover
1000 * most (but not all) possible alignment criteria.
1001 * If the buffer appears to be aligned, the ULD calls
1002 * vxge_hal_fifo_txdl_buffer_set().
1003 * Otherwise, ULD calls vxge_hal_fifo_txdl_buffer_set_aligned().
1004 *
 * Note: This API is a "superset" of vxge_hal_fifo_txdl_buffer_set(). In
1006 * addition to filling in the specified descriptor it aligns transmit data on
1007 * the specified boundary.
1008 * Note: Decision on whether to align or not to align a given contiguous
1009 * transmit buffer is outside of HAL's domain. To this end ULD can use any
1010 * programmable criteria, which can help to 1) boost transmit performance,
1011 * and/or 2) provide a workaround for PCI bridge bugs, if any.
1012 *
1013 */
1014vxge_hal_status_e
1015vxge_hal_fifo_txdl_buffer_set_aligned(
1016    vxge_hal_vpath_h vpath_handle,
1017    vxge_hal_txdl_h txdlh,
1018    u32 frag_idx,
1019    void *vaddr,
1020    dma_addr_t dma_pointer,
1021    u32 size,
1022    u32 misaligned_size)
1023{
1024	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1025	__hal_fifo_t *fifo;
1026	__hal_device_t *hldev;
1027	__hal_fifo_txdl_priv_t *txdl_priv;
1028	vxge_hal_fifo_txd_t *txdp;
1029	int remaining_size;
1030	ptrdiff_t prev_boff;
1031
1032	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
1033	    (vaddr != 0) && (dma_pointer != 0) &&
1034	    (size != 0) && (misaligned_size != 0));
1035
1036	hldev = vp->vpath->hldev;
1037
1038	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1039	    __FILE__, __func__, __LINE__);
1040
1041	vxge_hal_trace_log_fifo(
1042	    "vpath_handle = 0x"VXGE_OS_STXFMT", txdlh = 0x"VXGE_OS_STXFMT", "
1043	    "frag_idx = %d, vaddr = 0x"VXGE_OS_STXFMT", "
1044	    "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %d, "
1045	    "misaligned_size = %d", (ptr_t) vpath_handle,
1046	    (ptr_t) txdlh, frag_idx, (ptr_t) vaddr, (u64) dma_pointer, size,
1047	    misaligned_size);
1048
1049	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1050
1051	vxge_assert(fifo != NULL);
1052
1053	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1054
1055	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
1056
1057	/*
1058	 * On some systems buffer size could be zero.
1059	 * It is the responsibility of ULD and *not HAL* to
1060	 * detect it and skip it.
1061	 */
1062	vxge_assert(size > 0);
1063	vxge_assert(frag_idx < txdl_priv->alloc_frags);
1064	vxge_assert(misaligned_size != 0 &&
1065	    misaligned_size <= fifo->config->alignment_size);
1066
1067	remaining_size = size - misaligned_size;
1068	vxge_assert(remaining_size >= 0);
1069
1070	vxge_os_memcpy((char *) txdl_priv->align_vaddr_start,
1071	    vaddr, misaligned_size);
1072
1073	if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
1074		return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS);
1075	}
1076
1077	/* setup new buffer */
1078	/* LINTED */
1079	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1080	txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff;
1081	txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(misaligned_size);
1082	txdl_priv->bytes_sent += misaligned_size;
1083	fifo->stats->total_buffers++;
1084	txdl_priv->frags++;
1085	txdl_priv->align_used_frags++;
1086	txdl_priv->align_vaddr_start += fifo->config->alignment_size;
1087	txdl_priv->align_dma_offset = 0;
1088
1089#if defined(VXGE_OS_DMA_REQUIRES_SYNC)
1090	/* sync new buffer */
1091	vxge_os_dma_sync(fifo->channel.pdev,
1092	    txdl_priv->align_dma_handle,
1093	    txdp->buffer_pointer,
1094	    0,
1095	    misaligned_size,
1096	    VXGE_OS_DMA_DIR_TODEVICE);
1097#endif
1098
1099	if (remaining_size) {
1100		vxge_assert(frag_idx < txdl_priv->alloc_frags);
1101		txdp++;
1102		txdp->buffer_pointer = (u64) dma_pointer + misaligned_size;
1103		txdp->control_0 |=
1104		    VXGE_HAL_FIFO_TXD_BUFFER_SIZE(remaining_size);
1105		txdl_priv->bytes_sent += remaining_size;
1106		fifo->stats->total_buffers++;
1107		txdl_priv->frags++;
1108	}
1109
1110	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1111	    __FILE__, __func__, __LINE__);
1112	return (VXGE_HAL_OK);
1113}
1114
1115/*
1116 * vxge_hal_fifo_txdl_buffer_append - Append the contents of virtually
1117 *		contiguous data buffer to a single physically contiguous buffer.
1118 * @vpath_handle: Virtual path handle.
1119 * @txdlh: Descriptor handle.
1120 * @vaddr: Virtual address of the data buffer.
1121 * @size: Size of the data buffer (in bytes).
1122 *
1123 * This API is part of the transmit descriptor preparation for posting
1124 * (via vxge_hal_fifo_txdl_post()).
1125 * The main difference of this API wrt to the APIs
1126 * vxge_hal_fifo_txdl_buffer_set_aligned() is that this API appends the
1127 * contents of virtually contiguous data buffers received from
1128 * upper layer into a single physically contiguous data buffer and the
1129 * device will do a DMA from this buffer.
1130 *
1131 * See Also: vxge_hal_fifo_txdl_buffer_finalize(),
1132 * vxge_hal_fifo_txdl_buffer_set(),
1133 * vxge_hal_fifo_txdl_buffer_set_aligned().
1134 */
1135vxge_hal_status_e
1136vxge_hal_fifo_txdl_buffer_append(
1137    vxge_hal_vpath_h vpath_handle,
1138    vxge_hal_txdl_h txdlh,
1139    void *vaddr,
1140    u32 size)
1141{
1142	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1143	__hal_fifo_t *fifo;
1144	__hal_device_t *hldev;
1145	__hal_fifo_txdl_priv_t *txdl_priv;
1146	ptrdiff_t used;
1147
1148	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) && (vaddr != 0) &&
1149	    (size == 0));
1150
1151	hldev = vp->vpath->hldev;
1152
1153	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1154	    __FILE__, __func__, __LINE__);
1155
1156	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1157	    "txdlh = 0x"VXGE_OS_STXFMT", vaddr = 0x"VXGE_OS_STXFMT", "
1158	    "size = %d", (ptr_t) vpath_handle, (ptr_t) txdlh,
1159	    (ptr_t) vaddr, size);
1160
1161	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1162
1163	vxge_assert(fifo != NULL);
1164
1165	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1166
1167	/* LINTED */
1168	used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1169	used += txdl_priv->align_dma_offset;
1170
1171	if (used + (unsigned int)size > (unsigned int)fifo->align_size)
1172		return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS);
1173
1174	vxge_os_memcpy((char *) txdl_priv->align_vaddr_start +
1175	    txdl_priv->align_dma_offset, vaddr, size);
1176
1177	fifo->stats->copied_frags++;
1178
1179	txdl_priv->align_dma_offset += size;
1180
1181	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1182	    __FILE__, __func__, __LINE__);
1183	return (VXGE_HAL_OK);
1184}
1185
1186/*
1187 * vxge_hal_fifo_txdl_buffer_finalize - Prepares a descriptor that contains the
1188 * single physically contiguous buffer.
1189 *
1190 * @vpath_handle: Virtual path handle.
1191 * @txdlh: Descriptor handle.
1192 * @frag_idx: Index of the data buffer in the Txdl list.
1193 *
 * This API in conjunction with vxge_hal_fifo_txdl_buffer_append() prepares
1195 * a descriptor that consists of a single physically contiguous buffer
1196 * which inturn contains the contents of one or more virtually contiguous
1197 * buffers received from the upper layer.
1198 *
1199 * See Also: vxge_hal_fifo_txdl_buffer_append().
1200 */
1201void
1202vxge_hal_fifo_txdl_buffer_finalize(
1203    vxge_hal_vpath_h vpath_handle,
1204    vxge_hal_txdl_h txdlh,
1205    u32 frag_idx)
1206{
1207	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1208	__hal_fifo_t *fifo;
1209	__hal_device_t *hldev;
1210	__hal_fifo_txdl_priv_t *txdl_priv;
1211	vxge_hal_fifo_txd_t *txdp;
1212	ptrdiff_t prev_boff;
1213
1214	vxge_assert((vpath_handle != NULL) &&
1215	    (txdlh != NULL) && (frag_idx != 0));
1216
1217	hldev = vp->vpath->hldev;
1218
1219	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1220	    __FILE__, __func__, __LINE__);
1221
1222	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1223	    "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d", (ptr_t) vpath_handle,
1224	    (ptr_t) txdlh, frag_idx);
1225
1226	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1227
1228	vxge_assert(fifo != NULL);
1229
1230	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1231	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
1232
1233	/* LINTED */
1234	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1235	txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff;
1236	txdp->control_0 |=
1237	    VXGE_HAL_FIFO_TXD_BUFFER_SIZE(txdl_priv->align_dma_offset);
1238	txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
1239	fifo->stats->total_buffers++;
1240	fifo->stats->copied_buffers++;
1241	txdl_priv->frags++;
1242	txdl_priv->align_used_frags++;
1243
1244#if defined(VXGE_OS_DMA_REQUIRES_SYNC)
1245	/* sync pre-mapped buffer */
1246	vxge_os_dma_sync(fifo->channel.pdev,
1247	    txdl_priv->align_dma_handle,
1248	    txdp->buffer_pointer,
1249	    0,
1250	    txdl_priv->align_dma_offset,
1251	    VXGE_OS_DMA_DIR_TODEVICE);
1252#endif
1253
1254	/* increment vaddr_start for the next buffer_append() iteration */
1255	txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
1256	txdl_priv->align_dma_offset = 0;
1257
1258	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1259	    __FILE__, __func__, __LINE__);
1260}
1261
1262/*
1263 * vxge_hal_fifo_txdl_new_frame_set - Start the new packet by setting TXDL flags
1264 * @vpath_handle: virtual path handle.
1265 * @txdlh: Descriptor handle.
1266 * @tagged: Is the frame tagged
1267 *
1268 * This API is part of the preparation of the transmit descriptor for posting
1269 * (via vxge_hal_fifo_txdl_post()). This api is used to mark the end of previous
1270 * frame and start of a new frame.
1271 *
1272 */
1273void
1274vxge_hal_fifo_txdl_new_frame_set(
1275    vxge_hal_vpath_h vpath_handle,
1276    vxge_hal_txdl_h txdlh,
1277    u32 tagged)
1278{
1279	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1280	__hal_fifo_t *fifo;
1281	__hal_device_t *hldev;
1282	__hal_fifo_txdl_priv_t *txdl_priv;
1283	vxge_hal_fifo_txd_t *txdp;
1284
1285	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
1286
1287	hldev = vp->vpath->hldev;
1288
1289	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1290	    __FILE__, __func__, __LINE__);
1291
1292	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1293	    "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
1294	    (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);
1295
1296	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1297
1298	vxge_assert(fifo != NULL);
1299
1300	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1301
1302	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
1303
1304	txdp->control_0 |=
1305	    VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
1306	txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
1307	    VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
1308	txdp->control_1 |= fifo->interrupt_type;
1309	txdp->control_1 |= VXGE_HAL_FIFO_TXD_INT_NUMBER(
1310	    vp->vpath->tx_intr_num);
1311	if (tagged)
1312		txdp->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;
1313	if (txdl_priv->frags) {
1314
1315		txdp = (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
1316
1317		txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
1318		    VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);
1319
1320	}
1321
1322	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1323	    __FILE__, __func__, __LINE__);
1324}
1325
1326/*
1327 * vxge_hal_fifo_txdl_post - Post descriptor on the fifo channel.
1328 * @vpath_handle: Virtual path handle.
1329 * @txdlh: Descriptor obtained via vxge_hal_fifo_txdl_reserve()
1330 * @tagged: Is the frame tagged
1331 *
1332 * Post descriptor on the 'fifo' type channel for transmission.
1333 * Prior to posting the descriptor should be filled in accordance with
1334 * Host/X3100 interface specification for a given service (LL, etc.).
1335 *
1336 */
void
vxge_hal_fifo_txdl_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    u32 tagged)
{
	u64 list_ptr;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	vxge_hal_fifo_txd_t *txdp_last;
	vxge_hal_fifo_txd_t *txdp_first;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;

#endif

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	/*
	 * Mark the first TxD of the list: steer to the configured wire
	 * port, flag it as the FIRST of the gather list, and select the
	 * TX interrupt number and type for completion notification.
	 */
	txdp_first = (vxge_hal_fifo_txd_t *) txdlh;
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
	txdp_first->control_1 |=
	    VXGE_HAL_FIFO_TXD_INT_NUMBER(vp->vpath->tx_intr_num);
	txdp_first->control_1 |= fifo->interrupt_type;
	list_ptr = (u64) txdl_priv->dma_addr;
	/*
	 * For tagged frames: bypass bandwidth limiting and set bit 0 of
	 * the doorbell list pointer.
	 */
	if (tagged) {
		txdp_first->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;
		list_ptr |= 0x1;
	}

	/* mark the last fragment's TxD as the end of the gather list */
	txdp_last =
	    (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
	    VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/* hand ownership of the list to the adapter (under post_lock) */
	txdp_first->control_0 |= VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER;

#if defined(VXGE_DEBUG_ASSERT)
	/* make sure device overwrites the t_code value on completion */
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_T_CODE(VXGE_HAL_FIFO_TXD_T_CODE_UNUSED);
#endif

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
	/* sync the TxDL to device */
	vxge_os_dma_sync(fifo->channel.pdev,
	    txdl_priv->dma_handle,
	    txdl_priv->dma_addr,
	    txdl_priv->dma_offset,
	    txdl_priv->frags << 5, /* sizeof(vxge_hal_fifo_txd_t) */
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif
	/*
	 * we want touch dtr_arr in order with ownership bit set to HW
	 */
	__hal_channel_dtr_post(&fifo->channel, VXGE_HAL_FIFO_TXDL_INDEX(txdlh));

	/* ring the non-offload doorbell to start the transmit */
	__hal_non_offload_db_post(vpath_handle,
	    list_ptr,
	    txdl_priv->frags - 1,
	    vp->vpath->vp_config->fifo.no_snoop_bits);

#if defined(VXGE_HAL_FIFO_DUMP_TXD)
	vxge_hal_info_log_fifo(
	    ""VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"
	    VXGE_OS_LLXFMT" dma "VXGE_OS_LLXFMT,
	    txdp_first->control_0, txdp_first->control_1,
	    txdp_first->buffer_pointer, VXGE_HAL_FIFO_TXDL_INDEX(txdp_first),
	    txdl_priv->dma_addr);
#endif

	/* bookkeeping: post count and high-water usage mark */
	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
	    fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
		    fifo->stats->common_stats.usage_cnt;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1450
1451/*
1452 * vxge_hal_fifo_is_next_txdl_completed - Checks if the next txdl is completed
1453 * @vpath_handle: Virtual path handle.
1454 */
1455vxge_hal_status_e
1456vxge_hal_fifo_is_next_txdl_completed(vxge_hal_vpath_h vpath_handle)
1457{
1458	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1459	__hal_fifo_t *fifo;
1460	__hal_device_t *hldev;
1461	vxge_hal_fifo_txd_t *txdp;
1462	vxge_hal_txdl_h txdlh;
1463	vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1464
1465#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1466	unsigned long flags = 0;
1467
1468#endif
1469
1470
1471	vxge_assert(vpath_handle != NULL);
1472
1473	hldev = vp->vpath->hldev;
1474
1475	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1476	    __FILE__, __func__, __LINE__);
1477
1478	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
1479	    (ptr_t) vpath_handle);
1480
1481	fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1482
1483	vxge_assert(fifo != NULL);
1484
1485#if defined(VXGE_HAL_TX_MULTI_POST)
1486	vxge_os_spin_lock(&fifo->channel.post_lock);
1487#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1488	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
1489#endif
1490
1491	__hal_channel_dtr_try_complete(&fifo->channel, &txdlh);
1492
1493	txdp = (vxge_hal_fifo_txd_t *) txdlh;
1494	if ((txdp != NULL) &&
1495	    (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER))) {
1496		status = VXGE_HAL_OK;
1497	}
1498
1499#if defined(VXGE_HAL_TX_MULTI_POST)
1500	vxge_os_spin_unlock(&fifo->channel.post_lock);
1501#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1502	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
1503#endif
1504
1505	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
1506	    __FILE__, __func__, __LINE__, status);
1507
1508	/* no more completions */
1509	return (status);
1510}
1511
1512/*
1513 * vxge_hal_fifo_txdl_next_completed - Retrieve next completed descriptor.
1514 * @vpath_handle: Virtual path handle.
1515 * @txdlh: Descriptor handle. Returned by HAL.
1516 * @txdl_priv: Buffer to return the pointer to per txdl space
1517 * @t_code: Transfer code, as per X3100 User Guide,
1518 *	 Transmit Descriptor Format.
1519 *	 Returned by HAL.
1520 *
1521 * Retrieve the _next_ completed descriptor.
 * HAL uses channel callback (*vxge_hal_channel_callback_f) to notify
1523 * upper-layer driver (ULD) of new completed descriptors. After that
1524 * the ULD can use vxge_hal_fifo_txdl_next_completed to retrieve the rest
1525 * completions (the very first completion is passed by HAL via
1526 * vxge_hal_channel_callback_f).
1527 *
1528 * Implementation-wise, the upper-layer driver is free to call
1529 * vxge_hal_fifo_txdl_next_completed either immediately from inside the
1530 * channel callback, or in a deferred fashion and separate (from HAL)
1531 * context.
1532 *
1533 * Non-zero @t_code means failure to process the descriptor.
1534 * The failure could happen, for instance, when the link is
1535 * down, in which case X3100 completes the descriptor because it
1536 * is not able to send the data out.
1537 *
1538 * For details please refer to X3100 User Guide.
1539 *
1540 * Returns: VXGE_HAL_OK - success.
1541 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1542 * are currently available for processing.
1543 *
1544 */
vxge_hal_status_e
vxge_hal_fifo_txdl_next_completed(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h * txdlh,
    void **txdl_priv,
    vxge_hal_fifo_tcode_e * t_code)
{
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	vxge_hal_fifo_txd_t *txdp;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
	__hal_fifo_txdl_priv_t *priv;

#endif
#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;

#endif

	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;

	vxge_assert((vpath_handle != NULL) &&
	    (txdlh != NULL) && (t_code != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) txdlh, (ptr_t) t_code);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* default out-param: no descriptor returned */
	*txdlh = 0;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/* peek at the oldest outstanding TxDL (does not consume it yet) */
	__hal_channel_dtr_try_complete(&fifo->channel, txdlh);

	txdp = (vxge_hal_fifo_txd_t *) * txdlh;
	if (txdp != NULL) {

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
		priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

		/*
		 * sync TxDL to read the ownership
		 *
		 * Note: 16bytes means Control_1 & Control_2
		 */
		vxge_os_dma_sync(fifo->channel.pdev,
		    priv->dma_handle,
		    priv->dma_addr,
		    priv->dma_offset,
		    16,
		    VXGE_OS_DMA_DIR_FROMDEVICE);
#endif

		/* check whether host owns it */
		if (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER)) {

			/* consume the descriptor only once it is ours */
			__hal_channel_dtr_complete(&fifo->channel);

			*txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);

			/* extract the device-reported transfer code */
			*t_code = (vxge_hal_fifo_tcode_e)
			    VXGE_HAL_FIFO_TXD_T_CODE_GET(txdp->control_0);

			if (fifo->stats->common_stats.usage_cnt > 0)
				fifo->stats->common_stats.usage_cnt--;

			status = VXGE_HAL_OK;
		}
	}

	/* no more completions */
#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}
1642
1643/*
1644 * vxge_hal_fifo_handle_tcode - Handle transfer code.
1645 * @vpath_handle: Virtual Path handle.
1646 * @txdlh: Descriptor handle.
1647 * @t_code: One of the enumerated (and documented in the X3100 user guide)
1648 *	 "transfer codes".
1649 *
1650 * Handle descriptor's transfer code. The latter comes with each completed
1651 * descriptor.
1652 *
1653 * Returns: one of the vxge_hal_status_e {} enumerated types.
1654 * VXGE_HAL_OK			- for success.
1655 * VXGE_HAL_ERR_CRITICAL	- when encounters critical error.
1656 */
1657vxge_hal_status_e
1658vxge_hal_fifo_handle_tcode(
1659    vxge_hal_vpath_h vpath_handle,
1660    vxge_hal_txdl_h txdlh,
1661    vxge_hal_fifo_tcode_e t_code)
1662{
1663	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1664	__hal_device_t *hldev;
1665
1666	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
1667
1668	hldev = vp->vpath->hldev;
1669
1670	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1671	    __FILE__, __func__, __LINE__);
1672
1673	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1674	    "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x%d",
1675	    (ptr_t) vpath_handle, (ptr_t) txdlh, t_code);
1676
1677	switch ((t_code & 0x7)) {
1678	case 0:
1679		/* 000: Transfer operation completed successfully. */
1680		break;
1681	case 1:
1682		/*
1683		 * 001: a PCI read transaction (either TxD or frame data)
1684		 *	returned with corrupt data.
1685		 */
1686		break;
1687	case 2:
1688		/* 010: a PCI read transaction was returned with no data. */
1689		break;
1690	case 3:
1691		/*
1692		 * 011: The host attempted to send either a frame or LSO
1693		 *	MSS that was too long (>9800B).
1694		 */
1695		break;
1696	case 4:
1697		/*
1698		 * 100: Error detected during TCP/UDP Large Send
1699		 *	Offload operation, due to improper header template,
1700		 *	unsupported protocol, etc.
1701		 */
1702		break;
1703	default:
1704		vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
1705		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
1706		return (VXGE_HAL_ERR_INVALID_TCODE);
1707	}
1708
1709	vp->vpath->sw_stats->fifo_stats.txd_t_code_err_cnt[t_code]++;
1710
1711	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: %d",
1712	    __FILE__, __func__, __LINE__, VXGE_HAL_OK);
1713	return (VXGE_HAL_OK);
1714}
1715
1716/*
1717 * __hal_fifo_txdl_free_many - Free the fragments
1718 * @fifo: FIFO
 * @txdp: Pointer to a TxD
1720 * @list_size: List size
1721 * @frags: Number of fragments
1722 *
 * This routine frees the fragments in a txdl
1724 */
1725void
1726__hal_fifo_txdl_free_many(
1727    __hal_fifo_t *fifo,
1728    vxge_hal_fifo_txd_t * txdp,
1729    u32 list_size,
1730    u32 frags)
1731{
1732	__hal_fifo_txdl_priv_t *current_txdl_priv;
1733	__hal_fifo_txdl_priv_t *next_txdl_priv;
1734	u32 invalid_frags = frags % list_size;
1735	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) fifo->channel.vph;
1736	__hal_device_t *hldev;
1737
1738	vxge_assert((fifo != NULL) && (txdp != NULL));
1739
1740	hldev = vp->vpath->hldev;
1741
1742	vxge_hal_trace_log_fifo("==> %s:%s:%d",
1743	    __FILE__, __func__, __LINE__);
1744
1745	vxge_hal_trace_log_fifo(
1746	    "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT", "
1747	    "list_size = %d, frags = %d", (ptr_t) fifo, (ptr_t) txdp,
1748	    list_size, frags);
1749
1750	if (invalid_frags) {
1751		vxge_hal_trace_log_fifo(
1752		    "freeing corrupt txdlh 0x"VXGE_OS_STXFMT", "
1753		    "fragments %d list size %d",
1754		    (ptr_t) txdp, frags, list_size);
1755		vxge_assert(invalid_frags == 0);
1756	}
1757	while (txdp) {
1758		vxge_hal_trace_log_fifo("freeing linked txdlh 0x"VXGE_OS_STXFMT
1759		    ", " "fragments %d list size %d",
1760		    (ptr_t) txdp, frags, list_size);
1761		current_txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
1762#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
1763		current_txdl_priv->allocated = 0;
1764#endif
1765		__hal_channel_dtr_free(&fifo->channel,
1766		    VXGE_HAL_FIFO_TXDL_INDEX(txdp));
1767		next_txdl_priv = current_txdl_priv->next_txdl_priv;
1768		vxge_assert(frags);
1769		frags -= list_size;
1770		if (next_txdl_priv) {
1771			current_txdl_priv->next_txdl_priv = NULL;
1772			txdp = next_txdl_priv->first_txdp;
1773		} else {
1774			vxge_hal_trace_log_fifo(
1775			    "freed linked txdlh fragments %d list size %d",
1776			    frags, list_size);
1777			break;
1778		}
1779	}
1780
1781	vxge_assert(frags == 0);
1782
1783	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
1784	    __FILE__, __func__, __LINE__);
1785}
1786
1787/*
1788 * vxge_hal_fifo_txdl_free - Free descriptor.
1789 * @vpath_handle: Virtual path handle.
1790 * @txdlh: Descriptor handle.
1791 *
1792 * Free the reserved descriptor. This operation is "symmetrical" to
1793 * vxge_hal_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1794 * lifecycle.
1795 *
1796 * After free-ing (see vxge_hal_fifo_txdl_free()) the descriptor again can
1797 * be:
1798 *
1799 * - reserved (vxge_hal_fifo_txdl_reserve);
1800 *
1801 * - posted (vxge_hal_fifo_txdl_post);
1802 *
1803 * - completed (vxge_hal_fifo_txdl_next_completed);
1804 *
1805 * - and recycled again (vxge_hal_fifo_txdl_free).
1806 *
1807 * For alternative state transitions and more details please refer to
1808 * the design doc.
1809 *
1810 */
void
vxge_hal_fifo_txdl_free(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	u32 max_frags;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	u32 flags = 0;

#endif
	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle, (ptr_t) txdlh);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	max_frags = fifo->config->max_frags;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/*
	 * alloc_frags > max_frags means this TxDL is really a chain of
	 * linked lists (plus possibly a dangling one); walk and free
	 * them all. Otherwise a single dtr_free suffices.
	 */
	if (txdl_priv->alloc_frags > max_frags) {
		vxge_hal_fifo_txd_t *dang_txdp = (vxge_hal_fifo_txd_t *)
		txdl_priv->dang_txdl;
		u32 dang_frags = txdl_priv->dang_frags;
		u32 alloc_frags = txdl_priv->alloc_frags;
		/* clear the bookkeeping before releasing anything */
		txdl_priv->dang_txdl = NULL;
		txdl_priv->dang_frags = 0;
		txdl_priv->alloc_frags = 0;
		/* txdlh must have a linked list of txdlh */
		vxge_assert(txdl_priv->next_txdl_priv);

		/* free any dangling txdlh first */
		if (dang_txdp) {
			vxge_hal_info_log_fifo(
			    "freeing dangled txdlh 0x"VXGE_OS_STXFMT" for %d "
			    "fragments", (ptr_t) dang_txdp, dang_frags);
			__hal_fifo_txdl_free_many(fifo, dang_txdp,
			    max_frags, dang_frags);
		}

		/* now free the reserved txdlh list */
		vxge_hal_info_log_fifo(
		    "freeing txdlh 0x"VXGE_OS_STXFMT" list of %d fragments",
		    (ptr_t) txdlh, alloc_frags);
		__hal_fifo_txdl_free_many(fifo,
		    (vxge_hal_fifo_txd_t *) txdlh, max_frags,
		    alloc_frags);
	} else {
		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
	}

	/* account the bytes carried by this TxDL toward channel polling */
	fifo->channel.poll_bytes += txdl_priv->bytes_sent;

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
	txdl_priv->allocated = 0;
#endif

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d  Result: 0",
	    __FILE__, __func__, __LINE__);
}
1897