1221167Sgnn/*- 2221167Sgnn * Copyright(c) 2002-2011 Exar Corp. 3221167Sgnn * All rights reserved. 4221167Sgnn * 5221167Sgnn * Redistribution and use in source and binary forms, with or without 6221167Sgnn * modification are permitted provided the following conditions are met: 7221167Sgnn * 8221167Sgnn * 1. Redistributions of source code must retain the above copyright notice, 9221167Sgnn * this list of conditions and the following disclaimer. 10221167Sgnn * 11221167Sgnn * 2. Redistributions in binary form must reproduce the above copyright 12221167Sgnn * notice, this list of conditions and the following disclaimer in the 13221167Sgnn * documentation and/or other materials provided with the distribution. 14221167Sgnn * 15221167Sgnn * 3. Neither the name of the Exar Corporation nor the names of its 16221167Sgnn * contributors may be used to endorse or promote products derived from 17221167Sgnn * this software without specific prior written permission. 18221167Sgnn * 19221167Sgnn * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20221167Sgnn * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21221167Sgnn * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22221167Sgnn * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23221167Sgnn * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24221167Sgnn * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25221167Sgnn * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26221167Sgnn * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27221167Sgnn * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28221167Sgnn * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29221167Sgnn * POSSIBILITY OF SUCH DAMAGE. 
30221167Sgnn */ 31221167Sgnn/*$FreeBSD$*/ 32221167Sgnn 33221167Sgnn#include <dev/vxge/vxgehal/vxgehal.h> 34221167Sgnn 35221167Sgnn/* 36221167Sgnn * __hal_ring_block_memblock_idx - Return the memblock index 37221167Sgnn * @block: Virtual address of memory block 38221167Sgnn * 39221167Sgnn * This function returns the index of memory block 40221167Sgnn */ 41221167Sgnnstatic inline u32 42221167Sgnn__hal_ring_block_memblock_idx( 43221167Sgnn vxge_hal_ring_block_t block) 44221167Sgnn{ 45221167Sgnn return (u32)*((u64 *) ((void *)((u8 *) block + 46221167Sgnn VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET))); 47221167Sgnn} 48221167Sgnn 49221167Sgnn/* 50221167Sgnn * __hal_ring_block_memblock_idx_set - Sets the memblock index 51221167Sgnn * @block: Virtual address of memory block 52221167Sgnn * @memblock_idx: Index of memory block 53221167Sgnn * 54221167Sgnn * This function sets index to a memory block 55221167Sgnn */ 56221167Sgnnstatic inline void 57221167Sgnn__hal_ring_block_memblock_idx_set( 58221167Sgnn vxge_hal_ring_block_t block, 59221167Sgnn u32 memblock_idx) 60221167Sgnn{ 61221167Sgnn *((u64 *) ((void *)((u8 *) block + 62221167Sgnn VXGE_HAL_RING_MEMBLOCK_IDX_OFFSET))) = memblock_idx; 63221167Sgnn} 64221167Sgnn 65260286Sdim#if 0 66221167Sgnn/* 67221167Sgnn * __hal_ring_block_next_pointer - Returns the dma address of next block 68221167Sgnn * @block: RxD block 69221167Sgnn * 70221167Sgnn * Returns the dma address of next block stored in the RxD block 71221167Sgnn */ 72221167Sgnnstatic inline dma_addr_t 73221167Sgnn/* LINTED */ 74221167Sgnn__hal_ring_block_next_pointer( 75221167Sgnn vxge_hal_ring_block_t *block) 76221167Sgnn{ 77221167Sgnn return (dma_addr_t)*((u64 *) ((void *)((u8 *) block + 78221167Sgnn VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET))); 79221167Sgnn} 80260286Sdim#endif 81221167Sgnn 82221167Sgnn/* 83221167Sgnn * __hal_ring_block_next_pointer_set - Sets the next block pointer in RxD block 84221167Sgnn * @block: RxD block 85221167Sgnn * @dma_next: dma address of next block 
86221167Sgnn * 87221167Sgnn * Sets the next block pointer in RxD block 88221167Sgnn */ 89221167Sgnnstatic inline void 90221167Sgnn__hal_ring_block_next_pointer_set( 91221167Sgnn vxge_hal_ring_block_t *block, 92221167Sgnn dma_addr_t dma_next) 93221167Sgnn{ 94221167Sgnn *((u64 *) ((void *)((u8 *) block + 95221167Sgnn VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET))) = dma_next; 96221167Sgnn} 97221167Sgnn 98221167Sgnn/* 99221167Sgnn * __hal_ring_first_block_address_get - Returns the dma address of the 100221167Sgnn * first block 101221167Sgnn * @ringh: Handle to the ring 102221167Sgnn * 103221167Sgnn * Returns the dma address of the first RxD block 104221167Sgnn */ 105221167Sgnnu64 106221167Sgnn__hal_ring_first_block_address_get( 107221167Sgnn vxge_hal_ring_h ringh) 108221167Sgnn{ 109221167Sgnn __hal_ring_t *ring = (__hal_ring_t *) ringh; 110221167Sgnn vxge_hal_mempool_dma_t *dma_object; 111221167Sgnn 112221167Sgnn dma_object = __hal_mempool_memblock_dma(ring->mempool, 0); 113221167Sgnn 114221167Sgnn vxge_assert(dma_object != NULL); 115221167Sgnn 116221167Sgnn return (dma_object->addr); 117221167Sgnn} 118221167Sgnn 119221167Sgnn 120221167Sgnn#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING) 121221167Sgnn/* 122221167Sgnn * __hal_ring_item_dma_offset - Return the dma offset of an item 123221167Sgnn * @mempoolh: Handle to the memory pool of the ring 124221167Sgnn * @item: Item for which to get the dma offset 125221167Sgnn * 126221167Sgnn * This function returns the dma offset of a given item 127221167Sgnn */ 128221167Sgnnstatic ptrdiff_t 129221167Sgnn__hal_ring_item_dma_offset( 130221167Sgnn vxge_hal_mempool_h mempoolh, 131221167Sgnn void *item) 132221167Sgnn{ 133221167Sgnn u32 memblock_idx; 134221167Sgnn void *memblock; 135221167Sgnn vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh; 136221167Sgnn __hal_device_t *hldev; 137221167Sgnn 138221167Sgnn vxge_assert((mempoolh != NULL) && (item != NULL) && 139221167Sgnn (dma_handle != NULL)); 
140221167Sgnn 141221167Sgnn hldev = (__hal_device_t *) mempool->devh; 142221167Sgnn 143221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 144221167Sgnn __FILE__, __func__, __LINE__); 145221167Sgnn 146221167Sgnn vxge_hal_trace_log_ring( 147221167Sgnn "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT, 148221167Sgnn (ptr_t) mempoolh, (ptr_t) item); 149221167Sgnn 150221167Sgnn /* get owner memblock index */ 151221167Sgnn memblock_idx = __hal_ring_block_memblock_idx(item); 152221167Sgnn 153221167Sgnn /* get owner memblock by memblock index */ 154221167Sgnn memblock = __hal_mempool_memblock(mempoolh, memblock_idx); 155221167Sgnn 156221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 157221167Sgnn __FILE__, __func__, __LINE__); 158221167Sgnn 159221167Sgnn return ((u8 *) item - (u8 *) memblock); 160221167Sgnn} 161221167Sgnn#endif 162221167Sgnn 163221167Sgnn/* 164221167Sgnn * __hal_ring_item_dma_addr - Return the dma address of an item 165221167Sgnn * @mempoolh: Handle to the memory pool of the ring 166221167Sgnn * @item: Item for which to get the dma offset 167221167Sgnn * @dma_handle: dma handle 168221167Sgnn * 169221167Sgnn * This function returns the dma address of a given item 170221167Sgnn */ 171221167Sgnnstatic dma_addr_t 172221167Sgnn__hal_ring_item_dma_addr( 173221167Sgnn vxge_hal_mempool_h mempoolh, 174221167Sgnn void *item, 175221167Sgnn pci_dma_h *dma_handle) 176221167Sgnn{ 177221167Sgnn u32 memblock_idx; 178221167Sgnn void *memblock; 179221167Sgnn vxge_hal_mempool_dma_t *memblock_dma_object; 180221167Sgnn vxge_hal_mempool_t *mempool = (vxge_hal_mempool_t *) mempoolh; 181221167Sgnn __hal_device_t *hldev; 182221167Sgnn ptrdiff_t dma_item_offset; 183221167Sgnn 184221167Sgnn vxge_assert((mempoolh != NULL) && (item != NULL) && 185221167Sgnn (dma_handle != NULL)); 186221167Sgnn 187221167Sgnn hldev = (__hal_device_t *) mempool->devh; 188221167Sgnn 189221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 190221167Sgnn __FILE__, __func__, __LINE__); 
191221167Sgnn 192221167Sgnn vxge_hal_trace_log_ring( 193221167Sgnn "mempoolh = 0x"VXGE_OS_STXFMT", item = 0x"VXGE_OS_STXFMT", " 194221167Sgnn "dma_handle = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, 195221167Sgnn (ptr_t) item, (ptr_t) dma_handle); 196221167Sgnn 197221167Sgnn /* get owner memblock index */ 198221167Sgnn memblock_idx = __hal_ring_block_memblock_idx((u8 *) item); 199221167Sgnn 200221167Sgnn /* get owner memblock by memblock index */ 201221167Sgnn memblock = __hal_mempool_memblock( 202221167Sgnn (vxge_hal_mempool_t *) mempoolh, memblock_idx); 203221167Sgnn 204221167Sgnn /* get memblock DMA object by memblock index */ 205221167Sgnn memblock_dma_object = __hal_mempool_memblock_dma( 206221167Sgnn (vxge_hal_mempool_t *) mempoolh, memblock_idx); 207221167Sgnn 208221167Sgnn /* calculate offset in the memblock of this item */ 209221167Sgnn /* LINTED */ 210221167Sgnn dma_item_offset = (u8 *) item - (u8 *) memblock; 211221167Sgnn 212221167Sgnn *dma_handle = memblock_dma_object->handle; 213221167Sgnn 214221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 215221167Sgnn __FILE__, __func__, __LINE__); 216221167Sgnn 217221167Sgnn return (memblock_dma_object->addr + dma_item_offset); 218221167Sgnn} 219221167Sgnn 220221167Sgnn/* 221221167Sgnn * __hal_ring_rxdblock_link - Link the RxD blocks 222221167Sgnn * @mempoolh: Handle to the memory pool of the ring 223221167Sgnn * @ring: ring 224221167Sgnn * @from: RxD block from which to link 225221167Sgnn * @to: RxD block to which to link to 226221167Sgnn * 227221167Sgnn * This function returns the dma address of a given item 228221167Sgnn */ 229221167Sgnnstatic void 230221167Sgnn__hal_ring_rxdblock_link( 231221167Sgnn vxge_hal_mempool_h mempoolh, 232221167Sgnn __hal_ring_t *ring, 233221167Sgnn u32 from, 234221167Sgnn u32 to) 235221167Sgnn{ 236221167Sgnn vxge_hal_ring_block_t *to_item, *from_item; 237221167Sgnn dma_addr_t to_dma, from_dma; 238221167Sgnn pci_dma_h to_dma_handle, from_dma_handle; 239221167Sgnn __hal_device_t 
*hldev; 240221167Sgnn 241221167Sgnn vxge_assert((mempoolh != NULL) && (ring != NULL)); 242221167Sgnn 243221167Sgnn hldev = (__hal_device_t *) ring->channel.devh; 244221167Sgnn 245221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 246221167Sgnn __FILE__, __func__, __LINE__); 247221167Sgnn 248221167Sgnn vxge_hal_trace_log_ring( 249221167Sgnn "mempoolh = 0x"VXGE_OS_STXFMT", ring = 0x"VXGE_OS_STXFMT", " 250221167Sgnn "from = %d, to = %d", (ptr_t) mempoolh, (ptr_t) ring, from, to); 251221167Sgnn 252221167Sgnn /* get "from" RxD block */ 253221167Sgnn from_item = (vxge_hal_ring_block_t *) __hal_mempool_item( 254221167Sgnn (vxge_hal_mempool_t *) mempoolh, from); 255221167Sgnn vxge_assert(from_item); 256221167Sgnn 257221167Sgnn /* get "to" RxD block */ 258221167Sgnn to_item = (vxge_hal_ring_block_t *) __hal_mempool_item( 259221167Sgnn (vxge_hal_mempool_t *) mempoolh, to); 260221167Sgnn vxge_assert(to_item); 261221167Sgnn 262221167Sgnn /* return address of the beginning of previous RxD block */ 263221167Sgnn to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle); 264221167Sgnn 265221167Sgnn /* 266221167Sgnn * set next pointer for this RxD block to point on 267221167Sgnn * previous item's DMA start address 268221167Sgnn */ 269221167Sgnn __hal_ring_block_next_pointer_set(from_item, to_dma); 270221167Sgnn 271221167Sgnn /* return "from" RxD block's DMA start address */ 272221167Sgnn from_dma = __hal_ring_item_dma_addr( 273221167Sgnn mempoolh, from_item, &from_dma_handle); 274221167Sgnn 275221167Sgnn#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING) 276221167Sgnn /* we must sync "from" RxD block, so hardware will see it */ 277221167Sgnn vxge_os_dma_sync(ring->channel.pdev, 278221167Sgnn from_dma_handle, 279221167Sgnn from_dma + VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET, 280221167Sgnn __hal_ring_item_dma_offset(mempoolh, from_item) + 281221167Sgnn VXGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET, 282221167Sgnn sizeof(u64), 283221167Sgnn 
VXGE_OS_DMA_DIR_TODEVICE); 284221167Sgnn#endif 285221167Sgnn 286221167Sgnn vxge_hal_info_log_ring( 287221167Sgnn "block%d:0x"VXGE_OS_STXFMT" => block%d:0x"VXGE_OS_STXFMT, 288221167Sgnn from, (ptr_t) from_dma, to, (ptr_t) to_dma); 289221167Sgnn 290221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 291221167Sgnn __FILE__, __func__, __LINE__); 292221167Sgnn 293221167Sgnn} 294221167Sgnn 295221167Sgnn/* 296221167Sgnn * __hal_ring_mempool_item_alloc - Allocate List blocks for RxD block callback 297221167Sgnn * @mempoolh: Handle to memory pool 298221167Sgnn * @memblock: Address of this memory block 299221167Sgnn * @memblock_index: Index of this memory block 300221167Sgnn * @dma_object: dma object for this block 301221167Sgnn * @item: Pointer to this item 302221167Sgnn * @index: Index of this item in memory block 303221167Sgnn * @is_last: If this is last item in the block 304221167Sgnn * @userdata: Specific data of user 305221167Sgnn * 306221167Sgnn * This function is callback passed to __hal_mempool_create to create memory 307221167Sgnn * pool for RxD block 308221167Sgnn */ 309221167Sgnnstatic vxge_hal_status_e 310221167Sgnn__hal_ring_mempool_item_alloc( 311221167Sgnn vxge_hal_mempool_h mempoolh, 312221167Sgnn void *memblock, 313221167Sgnn u32 memblock_index, 314221167Sgnn vxge_hal_mempool_dma_t *dma_object, 315221167Sgnn void *item, 316221167Sgnn u32 item_index, 317221167Sgnn u32 is_last, 318221167Sgnn void *userdata) 319221167Sgnn{ 320221167Sgnn u32 i; 321221167Sgnn __hal_ring_t *ring = (__hal_ring_t *) userdata; 322221167Sgnn __hal_device_t *hldev; 323221167Sgnn 324221167Sgnn vxge_assert((item != NULL) && (ring != NULL)); 325221167Sgnn 326221167Sgnn hldev = (__hal_device_t *) ring->channel.devh; 327221167Sgnn 328221167Sgnn vxge_hal_trace_log_pool("==> %s:%s:%d", 329221167Sgnn __FILE__, __func__, __LINE__); 330221167Sgnn 331221167Sgnn vxge_hal_trace_log_pool( 332221167Sgnn "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", " 333221167Sgnn 
"memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", " 334221167Sgnn "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, " 335221167Sgnn "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock, 336221167Sgnn memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last, 337221167Sgnn (ptr_t) userdata); 338221167Sgnn 339221167Sgnn /* format rxds array */ 340221167Sgnn for (i = 0; i < ring->rxds_per_block; i++) { 341221167Sgnn 342221167Sgnn void *uld_priv; 343221167Sgnn void *rxdblock_priv; 344221167Sgnn __hal_ring_rxd_priv_t *rxd_priv; 345221167Sgnn vxge_hal_ring_rxd_1_t *rxdp; 346221167Sgnn u32 memblock_item_idx; 347221167Sgnn u32 dtr_index = item_index * ring->rxds_per_block + i; 348221167Sgnn 349221167Sgnn ring->channel.dtr_arr[dtr_index].dtr = 350221167Sgnn ((u8 *) item) + i * ring->rxd_size; 351221167Sgnn 352221167Sgnn /* 353221167Sgnn * Note: memblock_item_idx is index of the item within 354221167Sgnn * the memblock. For instance, in case of three RxD-blocks 355221167Sgnn * per memblock this value can be 0, 1 or 2. 
356221167Sgnn */ 357221167Sgnn rxdblock_priv = __hal_mempool_item_priv( 358221167Sgnn (vxge_hal_mempool_t *) mempoolh, 359221167Sgnn memblock_index, 360221167Sgnn item, 361221167Sgnn &memblock_item_idx); 362221167Sgnn 363221167Sgnn rxdp = (vxge_hal_ring_rxd_1_t *) 364221167Sgnn ring->channel.dtr_arr[dtr_index].dtr; 365221167Sgnn 366221167Sgnn uld_priv = ((u8 *) rxdblock_priv + ring->rxd_priv_size * i); 367221167Sgnn rxd_priv = 368221167Sgnn (__hal_ring_rxd_priv_t *) ((void *)(((char *) uld_priv) + 369221167Sgnn ring->per_rxd_space)); 370221167Sgnn 371221167Sgnn ((vxge_hal_ring_rxd_5_t *) rxdp)->host_control = dtr_index; 372221167Sgnn 373221167Sgnn ring->channel.dtr_arr[dtr_index].uld_priv = (void *)uld_priv; 374221167Sgnn ring->channel.dtr_arr[dtr_index].hal_priv = (void *)rxd_priv; 375221167Sgnn 376221167Sgnn /* pre-format per-RxD Ring's private */ 377221167Sgnn /* LINTED */ 378221167Sgnn rxd_priv->dma_offset = (u8 *) rxdp - (u8 *) memblock; 379221167Sgnn rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset; 380221167Sgnn rxd_priv->dma_handle = dma_object->handle; 381221167Sgnn#if defined(VXGE_DEBUG_ASSERT) 382221167Sgnn rxd_priv->dma_object = dma_object; 383221167Sgnn#endif 384221167Sgnn rxd_priv->db_bytes = ring->rxd_size; 385221167Sgnn 386221167Sgnn if (i == (ring->rxds_per_block - 1)) { 387221167Sgnn rxd_priv->db_bytes += 388221167Sgnn (((vxge_hal_mempool_t *) mempoolh)->memblock_size - 389221167Sgnn (ring->rxds_per_block * ring->rxd_size)); 390221167Sgnn } 391221167Sgnn } 392221167Sgnn 393221167Sgnn __hal_ring_block_memblock_idx_set((u8 *) item, memblock_index); 394221167Sgnn if (is_last) { 395221167Sgnn /* link last one with first one */ 396221167Sgnn __hal_ring_rxdblock_link(mempoolh, ring, item_index, 0); 397221167Sgnn } 398221167Sgnn 399221167Sgnn if (item_index > 0) { 400221167Sgnn /* link this RxD block with previous one */ 401221167Sgnn __hal_ring_rxdblock_link(mempoolh, ring, item_index - 1, item_index); 402221167Sgnn } 403221167Sgnn 
404221167Sgnn vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0", 405221167Sgnn __FILE__, __func__, __LINE__); 406221167Sgnn 407221167Sgnn return (VXGE_HAL_OK); 408221167Sgnn} 409221167Sgnn 410221167Sgnn/* 411221167Sgnn * __hal_ring_mempool_item_free - Free RxD blockt callback 412221167Sgnn * @mempoolh: Handle to memory pool 413221167Sgnn * @memblock: Address of this memory block 414221167Sgnn * @memblock_index: Index of this memory block 415221167Sgnn * @dma_object: dma object for this block 416221167Sgnn * @item: Pointer to this item 417221167Sgnn * @index: Index of this item in memory block 418221167Sgnn * @is_last: If this is last item in the block 419221167Sgnn * @userdata: Specific data of user 420221167Sgnn * 421221167Sgnn * This function is callback passed to __hal_mempool_free to destroy memory 422221167Sgnn * pool for RxD block 423221167Sgnn */ 424221167Sgnnstatic vxge_hal_status_e 425221167Sgnn__hal_ring_mempool_item_free( 426221167Sgnn vxge_hal_mempool_h mempoolh, 427221167Sgnn void *memblock, 428221167Sgnn u32 memblock_index, 429221167Sgnn vxge_hal_mempool_dma_t *dma_object, 430221167Sgnn void *item, 431221167Sgnn u32 item_index, 432221167Sgnn u32 is_last, 433221167Sgnn void *userdata) 434221167Sgnn{ 435221167Sgnn __hal_ring_t *ring = (__hal_ring_t *) userdata; 436221167Sgnn __hal_device_t *hldev; 437221167Sgnn 438221167Sgnn vxge_assert((item != NULL) && (ring != NULL)); 439221167Sgnn 440221167Sgnn hldev = (__hal_device_t *) ring->channel.devh; 441221167Sgnn 442221167Sgnn vxge_hal_trace_log_pool("==> %s:%s:%d", 443221167Sgnn __FILE__, __func__, __LINE__); 444221167Sgnn 445221167Sgnn vxge_hal_trace_log_pool( 446221167Sgnn "mempoolh = 0x"VXGE_OS_STXFMT", memblock = 0x"VXGE_OS_STXFMT", " 447221167Sgnn "memblock_index = %d, dma_object = 0x"VXGE_OS_STXFMT", " 448221167Sgnn "item = 0x"VXGE_OS_STXFMT", item_index = %d, is_last = %d, " 449221167Sgnn "userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock, 450221167Sgnn memblock_index, (ptr_t) 
dma_object, (ptr_t) item, item_index, is_last, 451221167Sgnn (ptr_t) userdata); 452221167Sgnn 453221167Sgnn vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0", 454221167Sgnn __FILE__, __func__, __LINE__); 455221167Sgnn 456221167Sgnn return (VXGE_HAL_OK); 457221167Sgnn} 458221167Sgnn 459221167Sgnn/* 460221167Sgnn * __hal_ring_initial_replenish - Initial replenish of RxDs 461221167Sgnn * @ring: ring 462221167Sgnn * @reopen: Flag to denote if it is open or repopen 463221167Sgnn * 464221167Sgnn * This function replenishes the RxDs from reserve array to work array 465221167Sgnn */ 466221167Sgnnstatic vxge_hal_status_e 467221167Sgnn__hal_ring_initial_replenish( 468221167Sgnn __hal_ring_t *ring, 469221167Sgnn vxge_hal_reopen_e reopen) 470221167Sgnn{ 471221167Sgnn vxge_hal_rxd_h rxd; 472221167Sgnn void *uld_priv; 473221167Sgnn __hal_device_t *hldev; 474221167Sgnn vxge_hal_status_e status; 475221167Sgnn 476221167Sgnn vxge_assert(ring != NULL); 477221167Sgnn 478221167Sgnn hldev = (__hal_device_t *) ring->channel.devh; 479221167Sgnn 480221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 481221167Sgnn __FILE__, __func__, __LINE__); 482221167Sgnn 483221167Sgnn vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d", 484221167Sgnn (ptr_t) ring, reopen); 485221167Sgnn 486221167Sgnn while (vxge_hal_ring_rxd_reserve(ring->channel.vph, &rxd, &uld_priv) == 487221167Sgnn VXGE_HAL_OK) { 488221167Sgnn 489221167Sgnn if (ring->rxd_init) { 490221167Sgnn status = ring->rxd_init(ring->channel.vph, 491221167Sgnn rxd, 492221167Sgnn uld_priv, 493221167Sgnn VXGE_HAL_RING_RXD_INDEX(rxd), 494221167Sgnn ring->channel.userdata, 495221167Sgnn reopen); 496221167Sgnn if (status != VXGE_HAL_OK) { 497221167Sgnn vxge_hal_ring_rxd_free(ring->channel.vph, rxd); 498221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d \ 499221167Sgnn Result: %d", 500221167Sgnn __FILE__, __func__, __LINE__, status); 501221167Sgnn return (status); 502221167Sgnn } 503221167Sgnn } 504221167Sgnn 505221167Sgnn 
vxge_hal_ring_rxd_post(ring->channel.vph, rxd); 506221167Sgnn } 507221167Sgnn 508221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 509221167Sgnn __FILE__, __func__, __LINE__); 510221167Sgnn return (VXGE_HAL_OK); 511221167Sgnn} 512221167Sgnn 513221167Sgnn/* 514221167Sgnn * __hal_ring_create - Create a Ring 515221167Sgnn * @vpath_handle: Handle returned by virtual path open 516221167Sgnn * @attr: Ring configuration parameters structure 517221167Sgnn * 518221167Sgnn * This function creates Ring and initializes it. 519221167Sgnn * 520221167Sgnn */ 521221167Sgnnvxge_hal_status_e 522221167Sgnn__hal_ring_create( 523221167Sgnn vxge_hal_vpath_h vpath_handle, 524221167Sgnn vxge_hal_ring_attr_t *attr) 525221167Sgnn{ 526221167Sgnn vxge_hal_status_e status; 527221167Sgnn __hal_ring_t *ring; 528221167Sgnn __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; 529221167Sgnn vxge_hal_ring_config_t *config; 530221167Sgnn __hal_device_t *hldev; 531221167Sgnn 532221167Sgnn vxge_assert((vpath_handle != NULL) && (attr != NULL)); 533221167Sgnn 534221167Sgnn hldev = (__hal_device_t *) vp->vpath->hldev; 535221167Sgnn 536221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 537221167Sgnn __FILE__, __func__, __LINE__); 538221167Sgnn 539221167Sgnn vxge_hal_trace_log_ring( 540221167Sgnn "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT, 541221167Sgnn (ptr_t) vpath_handle, (ptr_t) attr); 542221167Sgnn 543221167Sgnn if ((vpath_handle == NULL) || (attr == NULL)) { 544221167Sgnn vxge_hal_err_log_ring("null pointer passed == > %s : %d", 545221167Sgnn __func__, __LINE__); 546221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result:1", 547221167Sgnn __FILE__, __func__, __LINE__); 548221167Sgnn return (VXGE_HAL_FAIL); 549221167Sgnn } 550221167Sgnn 551221167Sgnn config = 552221167Sgnn &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].ring; 553221167Sgnn 554221167Sgnn config->ring_length = ((config->ring_length + 555221167Sgnn 
vxge_hal_ring_rxds_per_block_get(config->buffer_mode) - 1) / 556221167Sgnn vxge_hal_ring_rxds_per_block_get(config->buffer_mode)) * 557221167Sgnn vxge_hal_ring_rxds_per_block_get(config->buffer_mode); 558221167Sgnn 559221167Sgnn ring = (__hal_ring_t *) vxge_hal_channel_allocate( 560221167Sgnn (vxge_hal_device_h) vp->vpath->hldev, 561221167Sgnn vpath_handle, 562221167Sgnn VXGE_HAL_CHANNEL_TYPE_RING, 563221167Sgnn config->ring_length, 564221167Sgnn attr->per_rxd_space, 565221167Sgnn attr->userdata); 566221167Sgnn 567221167Sgnn if (ring == NULL) { 568221167Sgnn vxge_hal_err_log_ring("Memory allocation failed == > %s : %d", 569221167Sgnn __func__, __LINE__); 570221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 571221167Sgnn __FILE__, __func__, __LINE__, 572221167Sgnn VXGE_HAL_ERR_OUT_OF_MEMORY); 573221167Sgnn return (VXGE_HAL_ERR_OUT_OF_MEMORY); 574221167Sgnn } 575221167Sgnn 576221167Sgnn vp->vpath->ringh = (vxge_hal_ring_h) ring; 577221167Sgnn 578221167Sgnn ring->stats = &vp->vpath->sw_stats->ring_stats; 579221167Sgnn 580221167Sgnn ring->config = config; 581221167Sgnn ring->callback = attr->callback; 582221167Sgnn ring->rxd_init = attr->rxd_init; 583221167Sgnn ring->rxd_term = attr->rxd_term; 584221167Sgnn 585221167Sgnn ring->indicate_max_pkts = config->indicate_max_pkts; 586221167Sgnn ring->buffer_mode = config->buffer_mode; 587221167Sgnn 588221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST) 589221167Sgnn vxge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev); 590221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ) 591221167Sgnn vxge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh); 592221167Sgnn#endif 593221167Sgnn 594221167Sgnn ring->rxd_size = vxge_hal_ring_rxd_size_get(config->buffer_mode); 595221167Sgnn ring->rxd_priv_size = 596221167Sgnn sizeof(__hal_ring_rxd_priv_t) + attr->per_rxd_space; 597221167Sgnn ring->per_rxd_space = attr->per_rxd_space; 598221167Sgnn 599221167Sgnn ring->rxd_priv_size = 600221167Sgnn ((ring->rxd_priv_size + 
__vxge_os_cacheline_size - 1) / 601221167Sgnn __vxge_os_cacheline_size) * __vxge_os_cacheline_size; 602221167Sgnn 603221167Sgnn /* 604221167Sgnn * how many RxDs can fit into one block. Depends on configured 605221167Sgnn * buffer_mode. 606221167Sgnn */ 607221167Sgnn ring->rxds_per_block = 608221167Sgnn vxge_hal_ring_rxds_per_block_get(config->buffer_mode); 609221167Sgnn 610221167Sgnn /* calculate actual RxD block private size */ 611221167Sgnn ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; 612221167Sgnn 613221167Sgnn ring->rxd_mem_avail = 614221167Sgnn ((__hal_vpath_handle_t *) ring->channel.vph)->vpath->rxd_mem_size; 615221167Sgnn 616221167Sgnn ring->db_byte_count = 0; 617221167Sgnn 618221167Sgnn ring->mempool = vxge_hal_mempool_create( 619221167Sgnn (vxge_hal_device_h) vp->vpath->hldev, 620221167Sgnn VXGE_OS_HOST_PAGE_SIZE, 621221167Sgnn VXGE_OS_HOST_PAGE_SIZE, 622221167Sgnn ring->rxdblock_priv_size, 623221167Sgnn ring->config->ring_length / ring->rxds_per_block, 624221167Sgnn ring->config->ring_length / ring->rxds_per_block, 625221167Sgnn __hal_ring_mempool_item_alloc, 626221167Sgnn __hal_ring_mempool_item_free, 627221167Sgnn ring); 628221167Sgnn 629221167Sgnn if (ring->mempool == NULL) { 630221167Sgnn __hal_ring_delete(vpath_handle); 631221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 632221167Sgnn __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY); 633221167Sgnn return (VXGE_HAL_ERR_OUT_OF_MEMORY); 634221167Sgnn } 635221167Sgnn 636221167Sgnn status = vxge_hal_channel_initialize(&ring->channel); 637221167Sgnn if (status != VXGE_HAL_OK) { 638221167Sgnn __hal_ring_delete(vpath_handle); 639221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 640221167Sgnn __FILE__, __func__, __LINE__, status); 641221167Sgnn return (status); 642221167Sgnn } 643221167Sgnn 644221167Sgnn 645221167Sgnn /* 646221167Sgnn * Note: 647221167Sgnn * Specifying rxd_init callback means two things: 648221167Sgnn * 1) rxds need to be initialized 
by ULD at channel-open time; 649221167Sgnn * 2) rxds need to be posted at channel-open time 650221167Sgnn * (that's what the initial_replenish() below does) 651221167Sgnn * Currently we don't have a case when the 1) is done without the 2). 652221167Sgnn */ 653221167Sgnn if (ring->rxd_init) { 654221167Sgnn if ((status = __hal_ring_initial_replenish( 655221167Sgnn ring, 656221167Sgnn VXGE_HAL_OPEN_NORMAL)) 657221167Sgnn != VXGE_HAL_OK) { 658221167Sgnn __hal_ring_delete(vpath_handle); 659221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 660221167Sgnn __FILE__, __func__, __LINE__, status); 661221167Sgnn return (status); 662221167Sgnn } 663221167Sgnn } 664221167Sgnn 665221167Sgnn /* 666221167Sgnn * initial replenish will increment the counter in its post() routine, 667221167Sgnn * we have to reset it 668221167Sgnn */ 669221167Sgnn ring->stats->common_stats.usage_cnt = 0; 670221167Sgnn 671221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 672221167Sgnn __FILE__, __func__, __LINE__); 673221167Sgnn return (VXGE_HAL_OK); 674221167Sgnn} 675221167Sgnn 676221167Sgnn/* 677221167Sgnn * __hal_ring_abort - Returns the RxD 678221167Sgnn * @ringh: Ring to be reset 679221167Sgnn * @reopen: See vxge_hal_reopen_e {}. 
680221167Sgnn * 681221167Sgnn * This function terminates the RxDs of ring 682221167Sgnn */ 683221167Sgnnvoid 684221167Sgnn__hal_ring_abort( 685221167Sgnn vxge_hal_ring_h ringh, 686221167Sgnn vxge_hal_reopen_e reopen) 687221167Sgnn{ 688221167Sgnn u32 i = 0; 689221167Sgnn vxge_hal_rxd_h rxdh; 690221167Sgnn 691221167Sgnn __hal_device_t *hldev; 692221167Sgnn __hal_ring_t *ring = (__hal_ring_t *) ringh; 693221167Sgnn 694221167Sgnn vxge_assert(ringh != NULL); 695221167Sgnn 696221167Sgnn hldev = (__hal_device_t *) ring->channel.devh; 697221167Sgnn 698221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 699221167Sgnn __FILE__, __func__, __LINE__); 700221167Sgnn 701221167Sgnn vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT", reopen = %d", 702221167Sgnn (ptr_t) ringh, reopen); 703221167Sgnn 704221167Sgnn if (ring->rxd_term) { 705221167Sgnn __hal_channel_for_each_dtr(&ring->channel, rxdh, i) { 706221167Sgnn if (!__hal_channel_is_posted_dtr(&ring->channel, i)) { 707221167Sgnn ring->rxd_term(ring->channel.vph, rxdh, 708221167Sgnn VXGE_HAL_RING_ULD_PRIV(ring, rxdh), 709221167Sgnn VXGE_HAL_RXD_STATE_FREED, 710221167Sgnn ring->channel.userdata, 711221167Sgnn reopen); 712221167Sgnn } 713221167Sgnn } 714221167Sgnn } 715221167Sgnn 716221167Sgnn for (;;) { 717221167Sgnn __hal_channel_dtr_try_complete(&ring->channel, &rxdh); 718221167Sgnn if (rxdh == NULL) 719221167Sgnn break; 720221167Sgnn 721221167Sgnn __hal_channel_dtr_complete(&ring->channel); 722221167Sgnn if (ring->rxd_term) { 723221167Sgnn ring->rxd_term(ring->channel.vph, rxdh, 724221167Sgnn VXGE_HAL_RING_ULD_PRIV(ring, rxdh), 725221167Sgnn VXGE_HAL_RXD_STATE_POSTED, 726221167Sgnn ring->channel.userdata, 727221167Sgnn reopen); 728221167Sgnn } 729221167Sgnn __hal_channel_dtr_free(&ring->channel, 730221167Sgnn VXGE_HAL_RING_RXD_INDEX(rxdh)); 731221167Sgnn } 732221167Sgnn 733221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 734221167Sgnn __FILE__, __func__, __LINE__); 735221167Sgnn} 736221167Sgnn 737221167Sgnn/* 
738221167Sgnn * __hal_ring_reset - Resets the ring 739221167Sgnn * @ringh: Ring to be reset 740221167Sgnn * 741221167Sgnn * This function resets the ring during vpath reset operation 742221167Sgnn */ 743221167Sgnnvxge_hal_status_e 744221167Sgnn__hal_ring_reset( 745221167Sgnn vxge_hal_ring_h ringh) 746221167Sgnn{ 747221167Sgnn __hal_ring_t *ring = (__hal_ring_t *) ringh; 748221167Sgnn __hal_device_t *hldev; 749221167Sgnn vxge_hal_status_e status; 750221167Sgnn __hal_vpath_handle_t *vph = (__hal_vpath_handle_t *) ring->channel.vph; 751221167Sgnn 752221167Sgnn vxge_assert(ringh != NULL); 753221167Sgnn 754221167Sgnn hldev = (__hal_device_t *) ring->channel.devh; 755221167Sgnn 756221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 757221167Sgnn __FILE__, __func__, __LINE__); 758221167Sgnn 759221167Sgnn vxge_hal_trace_log_ring("ring = 0x"VXGE_OS_STXFMT, 760221167Sgnn (ptr_t) ringh); 761221167Sgnn 762221167Sgnn __hal_ring_abort(ringh, VXGE_HAL_RESET_ONLY); 763221167Sgnn 764221167Sgnn status = __hal_channel_reset(&ring->channel); 765221167Sgnn 766221167Sgnn if (status != VXGE_HAL_OK) { 767221167Sgnn 768221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 769221167Sgnn __FILE__, __func__, __LINE__, status); 770221167Sgnn return (status); 771221167Sgnn 772221167Sgnn } 773221167Sgnn ring->rxd_mem_avail = vph->vpath->rxd_mem_size; 774221167Sgnn ring->db_byte_count = 0; 775221167Sgnn 776221167Sgnn 777221167Sgnn if (ring->rxd_init) { 778221167Sgnn if ((status = __hal_ring_initial_replenish( 779221167Sgnn ring, 780221167Sgnn VXGE_HAL_RESET_ONLY)) 781221167Sgnn != VXGE_HAL_OK) { 782221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 783221167Sgnn __FILE__, __func__, __LINE__, status); 784221167Sgnn return (status); 785221167Sgnn } 786221167Sgnn } 787221167Sgnn 788221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 789221167Sgnn __FILE__, __func__, __LINE__); 790221167Sgnn 791221167Sgnn return (VXGE_HAL_OK); 792221167Sgnn} 793221167Sgnn 794221167Sgnn/* 
 * __hal_ring_delete - Removes the ring
 * @vpath_handle: Virtual path handle to which this queue belongs
 *
 * This function freeup the memory pool and removes the ring:
 * all RxDs are aborted back to the ULD, the backing mempool is
 * destroyed, and the channel (including its post lock) is torn down.
 */
void
__hal_ring_delete(
    vxge_hal_vpath_h vpath_handle)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert(vpath_handle != NULL);

	/* hldev is used by the trace macros and the lock teardown below */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	vxge_assert(ring->channel.pdev);

	/* Hand every outstanding descriptor back to the ULD first */
	__hal_ring_abort(vp->vpath->ringh, VXGE_HAL_OPEN_NORMAL);


	if (ring->mempool) {
		vxge_hal_mempool_destroy(ring->mempool);
	}

	vxge_hal_channel_terminate(&ring->channel);

	/* The post lock flavor matches how it was created at ring open */
#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif

	vxge_hal_channel_free(&ring->channel);

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);

}

/*
 * __hal_ring_frame_length_set - Set the maximum frame length of recv frames.
 * @vpath: virtual Path
 * @new_frmlen: New frame length
 *
 * Programs RXMAC_VCFG0.RTS_MAX_FRM_LEN for the vpath. When the ring
 * configuration pins a frame length, that value wins; otherwise
 * @new_frmlen (typically the MTU) is used. The MAC header size is
 * added in either case.
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
vxge_hal_status_e
__hal_ring_frame_length_set(
    __hal_virtualpath_t *vpath,
    u32 new_frmlen)
{
	u64 val64;
	__hal_device_t *hldev;

	vxge_assert(vpath != NULL);

	/* hldev is referenced implicitly by the trace macros below */
	hldev = (__hal_device_t *) vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath = 0x"VXGE_OS_STXFMT", new_frmlen = %d",
	    (ptr_t) vpath, new_frmlen);

	if (vpath->vp_open == VXGE_HAL_VP_NOT_OPEN) {

		vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_VPATH_NOT_OPEN);
		return (VXGE_HAL_ERR_VPATH_NOT_OPEN);

	}

	/* Read-modify-write: only the 14-bit max-frame-length field changes */
	val64 = vxge_os_pio_mem_read64(
	    vpath->hldev->header.pdev,
	    vpath->hldev->header.regh0,
	    &vpath->vp_reg->rxmac_vcfg0);

	val64 &= ~VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);

	if (vpath->vp_config->ring.max_frm_len !=
	    VXGE_HAL_MAX_RING_FRM_LEN_USE_MTU) {

		/* Configuration overrides the caller-supplied length */
		val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
		    vpath->vp_config->ring.max_frm_len +
		    VXGE_HAL_MAC_HEADER_MAX_SIZE);

	} else {

		val64 |= VXGE_HAL_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_frmlen +
		    VXGE_HAL_MAC_HEADER_MAX_SIZE);
	}


	vxge_os_pio_mem_write64(
	    vpath->hldev->header.pdev,
	    vpath->hldev->header.regh0,
	    val64,
	    &vpath->vp_reg->rxmac_vcfg0);

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);

	return (VXGE_HAL_OK);
}

/*
 * vxge_hal_ring_rxd_reserve - Reserve ring descriptor.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Reserved descriptor. On success HAL fills this "out" parameter
 * with a valid handle.
 * @rxd_priv: Buffer to return pointer to per rxd private space
 *
 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
 * driver (ULD)) and posting on the corresponding channel (@channelh)
 * via vxge_hal_ring_rxd_post().
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
vxge_hal_status_e
vxge_hal_ring_rxd_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h * rxdh,
    void **rxd_priv)
{
	vxge_hal_status_e status;
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL) &&
	    (rxd_priv != NULL));

	/* hldev is referenced implicitly by the trace macros below */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", "
	    "rxd_priv = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle,
	    (ptr_t) rxdh, (ptr_t) rxd_priv);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	/* Reservation must be serialized with posting/completion */
#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

	status = __hal_channel_dtr_reserve(&ring->channel, rxdh);

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

	if (status == VXGE_HAL_OK) {
		vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *)*rxdh;

		/* instead of memset: reset this RxD */
		rxdp->control_0 = rxdp->control_1 = 0;

		*rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp);

#if defined(VXGE_OS_MEMORY_CHECK)
		VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->allocated = 1;
#endif
	}

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
	return (status);
}

/*
 * vxge_hal_ring_rxd_pre_post - Prepare rxd and post
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * This routine prepares a rxd and posts
 */
void
vxge_hal_ring_rxd_pre_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{

#if defined(VXGE_DEBUG_ASSERT)
	vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

#endif

#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;

#endif
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is referenced implicitly by the trace macros below */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

#if defined(VXGE_DEBUG_ASSERT)
	/* make sure device overwrites the (illegal) t_code on completion */
	rxdp->control_0 |=
	    VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
	/*
	 * Debug-only ordering check: within a 4KB page, consecutive RxDs
	 * are expected to be posted back-to-back in memory.
	 */
	if (TRUE) {
		if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {
			vxge_hal_rxd_h prev_rxdh;
			__hal_ring_rxd_priv_t *rxdp_priv;
			u32 index;

			rxdp_priv = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);

			/*
			 * NOTE(review): the == 0 branch is unreachable here
			 * (outer guard already excludes index 0) — verify
			 * whether wrap-around handling was intended.
			 */
			if (VXGE_HAL_RING_RXD_INDEX(rxdp) == 0)
				index = ring->channel.length;
			else
				index = VXGE_HAL_RING_RXD_INDEX(rxdp) - 1;

			prev_rxdh = ring->channel.dtr_arr[index].dtr;

			if (prev_rxdh != NULL &&
			    (rxdp_priv->dma_offset & (~0xFFF)) !=
			    rxdp_priv->dma_offset) {
				vxge_assert((char *) prev_rxdh +
				    ring->rxd_size == rxdh);
			}
		}
	}
#endif

	__hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

	/* Accumulate doorbell credit to be flushed by rxd_post_post_db() */
	ring->db_byte_count +=
	    VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post - Process rxd after post.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post: hands ownership of the descriptor to the
 * adapter and, when streaming DMA is configured, syncs it to the device.
 */
void
vxge_hal_ring_rxd_post_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
	vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
	__hal_ring_rxd_priv_t *priv;

#endif
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is referenced implicitly by the trace macros below */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	/* do POST: transfer descriptor ownership to the adapter */
	rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

	rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
	priv = __hal_ring_rxd_priv(ring, rxdp);

	/* flush the updated RxD to the device before it may DMA-read it */
	vxge_os_dma_sync(ring->channel.pdev,
	    priv->dma_handle,
	    priv->dma_addr,
	    priv->dma_offset,
	    ring->rxd_size,
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif
	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post - Post descriptor on the ring.
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor obtained via vxge_hal_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/X3100 interface specification for a given service (LL, etc.).
 *
 */
void
vxge_hal_ring_rxd_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
	vxge_hal_ring_rxd_1_t *rxdp = (vxge_hal_ring_rxd_1_t *) rxdh;

#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif

	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_ring_t *ring;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is referenced implicitly by the trace macros below */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	/* Based on Titan HW bugzilla # 3039, we need to reset the tcode */
	rxdp->control_0 = 0;

#if defined(VXGE_DEBUG_ASSERT)
	/* make sure device overwrites the (illegal) t_code on completion */
	rxdp->control_0 |=
	    VXGE_HAL_RING_RXD_T_CODE(VXGE_HAL_RING_RXD_T_CODE_UNUSED);
#endif

	/* hand ownership of the descriptor to the adapter */
	rxdp->control_1 |= VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER;
	rxdp->control_0 |= VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING)
	{
		__hal_ring_rxd_priv_t *rxdp_temp1;
		rxdp_temp1 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
		/* flush the RxD to the device before posting */
		vxge_os_dma_sync(ring->channel.pdev,
		    rxdp_temp1->dma_handle,
		    rxdp_temp1->dma_addr,
		    rxdp_temp1->dma_offset,
		    ring->rxd_size,
		    VXGE_OS_DMA_DIR_TODEVICE);
	}
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_HAL_RING_ENFORCE_ORDER)
	/*
	 * Debug-only ordering check: within a 4KB page, consecutive RxDs
	 * are expected to be posted back-to-back in memory.
	 */
	if (TRUE) {
		if (VXGE_HAL_RING_RXD_INDEX(rxdp) != 0) {

			vxge_hal_rxd_h prev_rxdh;
			__hal_ring_rxd_priv_t *rxdp_temp2;

			rxdp_temp2 = VXGE_HAL_RING_HAL_PRIV(ring, rxdp);
			prev_rxdh =
			    ring->channel.dtr_arr[VXGE_HAL_RING_RXD_INDEX(rxdp) - 1].dtr;

			if (prev_rxdh != NULL &&
			    (rxdp_temp2->dma_offset & (~0xFFF)) != rxdp_temp2->dma_offset)
				vxge_assert((char *) prev_rxdh + ring->rxd_size == rxdh);
		}
	}
#endif

	__hal_channel_dtr_post(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));

	/* Accumulate doorbell credit to be flushed by rxd_post_post_db() */
	ring->db_byte_count +=
	    VXGE_HAL_RING_HAL_PRIV(ring, rxdp)->db_bytes;

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post_wmb - Process rxd after post with memory barrier
 * @vpath_handle: virtual Path handle.
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void
vxge_hal_ring_rxd_post_post_wmb(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* hldev is referenced implicitly by the trace macros below */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	/*
	 * Do memory barrier before changing the ownership, so all prior
	 * writes to the RxD are visible before the adapter may consume it.
	 */
	vxge_os_wmb();

	vxge_hal_ring_rxd_post_post(vpath_handle, rxdh);

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_ring_rxd_post_post_db - Post Doorbell after posting the rxd(s).
 * @vpath_handle: virtual Path handle.
 *
 * Post Doorbell after posting the rxd(s).
1293221167Sgnn */ 1294221167Sgnnvoid 1295221167Sgnnvxge_hal_ring_rxd_post_post_db( 1296221167Sgnn vxge_hal_vpath_h vpath_handle) 1297221167Sgnn{ 1298221167Sgnn __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; 1299221167Sgnn __hal_device_t *hldev; 1300221167Sgnn __hal_ring_t *ring; 1301221167Sgnn 1302221167Sgnn vxge_assert(vpath_handle != NULL); 1303221167Sgnn 1304221167Sgnn hldev = (__hal_device_t *) vp->vpath->hldev; 1305221167Sgnn 1306221167Sgnn ring = (__hal_ring_t *) vp->vpath->ringh; 1307221167Sgnn 1308221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 1309221167Sgnn __FILE__, __func__, __LINE__); 1310221167Sgnn 1311221167Sgnn vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT, 1312221167Sgnn (ptr_t) vpath_handle); 1313221167Sgnn 1314221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST) 1315221167Sgnn vxge_os_spin_lock(&ring->channel.post_lock); 1316221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ) 1317221167Sgnn vxge_os_spin_lock_irq(&ring->channel.post_lock, flags); 1318221167Sgnn#endif 1319221167Sgnn 1320221167Sgnn if (ring->db_byte_count <= ring->rxd_mem_avail) { 1321221167Sgnn __hal_rxd_db_post(vpath_handle, ring->db_byte_count); 1322221167Sgnn ring->rxd_mem_avail -= ring->db_byte_count; 1323221167Sgnn ring->db_byte_count = 0; 1324221167Sgnn } else { 1325221167Sgnn __hal_rxd_db_post(vpath_handle, ring->rxd_mem_avail); 1326221167Sgnn ring->db_byte_count -= ring->rxd_mem_avail; 1327221167Sgnn ring->rxd_mem_avail = 0; 1328221167Sgnn } 1329221167Sgnn 1330221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST) 1331221167Sgnn vxge_os_spin_unlock(&ring->channel.post_lock); 1332221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ) 1333221167Sgnn vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags); 1334221167Sgnn#endif 1335221167Sgnn 1336221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0", 1337221167Sgnn __FILE__, __func__, __LINE__); 1338221167Sgnn} 1339221167Sgnn 1340221167Sgnn/* 1341221167Sgnn * vxge_hal_ring_is_next_rxd_completed - Check if 
the next rxd is completed 1342221167Sgnn * @vpath_handle: Virtual Path handle. 1343221167Sgnn * 1344226436Seadler * Checks if the _next_ completed descriptor is in host memory 1345221167Sgnn * 1346221167Sgnn * Returns: VXGE_HAL_OK - success. 1347221167Sgnn * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors 1348221167Sgnn * are currently available for processing. 1349221167Sgnn */ 1350221167Sgnnvxge_hal_status_e 1351221167Sgnnvxge_hal_ring_is_next_rxd_completed( 1352221167Sgnn vxge_hal_vpath_h vpath_handle) 1353221167Sgnn{ 1354221167Sgnn __hal_ring_t *ring; 1355221167Sgnn vxge_hal_rxd_h rxdh; 1356221167Sgnn vxge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */ 1357221167Sgnn __hal_device_t *hldev; 1358221167Sgnn vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; 1359221167Sgnn __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; 1360221167Sgnn 1361221167Sgnn vxge_assert(vpath_handle != NULL); 1362221167Sgnn 1363221167Sgnn hldev = (__hal_device_t *) vp->vpath->hldev; 1364221167Sgnn 1365221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 1366221167Sgnn __FILE__, __func__, __LINE__); 1367221167Sgnn 1368221167Sgnn vxge_hal_trace_log_ring("vpath_handle = 0x"VXGE_OS_STXFMT, 1369221167Sgnn (ptr_t) vpath_handle); 1370221167Sgnn 1371221167Sgnn ring = (__hal_ring_t *) vp->vpath->ringh; 1372221167Sgnn 1373221167Sgnn vxge_assert(ring != NULL); 1374221167Sgnn 1375221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST) 1376221167Sgnn vxge_os_spin_lock(&ring->channel.post_lock); 1377221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ) 1378221167Sgnn vxge_os_spin_lock_irq(&ring->channel.post_lock, flags); 1379221167Sgnn#endif 1380221167Sgnn 1381221167Sgnn __hal_channel_dtr_try_complete(&ring->channel, &rxdh); 1382221167Sgnn 1383221167Sgnn rxdp = (vxge_hal_ring_rxd_1_t *) rxdh; 1384221167Sgnn 1385221167Sgnn if (rxdp != NULL) { 1386221167Sgnn 1387221167Sgnn /* check whether it is not the end */ 1388221167Sgnn if ((!(rxdp->control_0 & 
VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER)) && 1389221167Sgnn (!(rxdp->control_1 & 1390221167Sgnn VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER))) { 1391221167Sgnn 1392221167Sgnn status = VXGE_HAL_OK; 1393221167Sgnn } 1394221167Sgnn } 1395221167Sgnn 1396221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST) 1397221167Sgnn vxge_os_spin_unlock(&ring->channel.post_lock); 1398221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ) 1399221167Sgnn vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags); 1400221167Sgnn#endif 1401221167Sgnn 1402221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 1403221167Sgnn __FILE__, __func__, __LINE__, status); 1404221167Sgnn return (status); 1405221167Sgnn} 1406221167Sgnn 1407221167Sgnn/* 1408221167Sgnn * vxge_hal_ring_rxd_next_completed - Get the _next_ completed descriptor. 1409221167Sgnn * @channelh: Channel handle. 1410221167Sgnn * @rxdh: Descriptor handle. Returned by HAL. 1411221167Sgnn * @rxd_priv: Buffer to return a pointer to the per rxd space allocated 1412221167Sgnn * @t_code: Transfer code, as per X3100 User Guide, 1413221167Sgnn * Receive Descriptor Format. Returned by HAL. 1414221167Sgnn * 1415221167Sgnn * Retrieve the _next_ completed descriptor. 1416221167Sgnn * HAL uses ring callback (*vxge_hal_ring_callback_f) to notifiy 1417221167Sgnn * upper-layer driver (ULD) of new completed descriptors. After that 1418221167Sgnn * the ULD can use vxge_hal_ring_rxd_next_completed to retrieve the rest 1419221167Sgnn * completions (the very first completion is passed by HAL via 1420221167Sgnn * vxge_hal_ring_callback_f). 1421221167Sgnn * 1422221167Sgnn * Implementation-wise, the upper-layer driver is free to call 1423221167Sgnn * vxge_hal_ring_rxd_next_completed either immediately from inside the 1424221167Sgnn * ring callback, or in a deferred fashion and separate (from HAL) 1425221167Sgnn * context. 1426221167Sgnn * 1427221167Sgnn * Non-zero @t_code means failure to fill-in receive buffer(s) 1428221167Sgnn * of the descriptor. 
1429221167Sgnn * For instance, parity error detected during the data transfer. 1430221167Sgnn * In this case X3100 will complete the descriptor and indicate 1431221167Sgnn * for the host that the received data is not to be used. 1432221167Sgnn * For details please refer to X3100 User Guide. 1433221167Sgnn * 1434221167Sgnn * Returns: VXGE_HAL_OK - success. 1435221167Sgnn * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors 1436221167Sgnn * are currently available for processing. 1437221167Sgnn * 1438221167Sgnn * See also: vxge_hal_ring_callback_f {}, 1439221167Sgnn * vxge_hal_fifo_rxd_next_completed(), vxge_hal_status_e {}. 1440221167Sgnn */ 1441221167Sgnnvxge_hal_status_e 1442221167Sgnnvxge_hal_ring_rxd_next_completed( 1443221167Sgnn vxge_hal_vpath_h vpath_handle, 1444221167Sgnn vxge_hal_rxd_h *rxdh, 1445221167Sgnn void **rxd_priv, 1446221167Sgnn u8 *t_code) 1447221167Sgnn{ 1448221167Sgnn __hal_ring_t *ring; 1449221167Sgnn vxge_hal_ring_rxd_5_t *rxdp; /* doesn't matter 1, 3 or 5... 
*/ 1450221167Sgnn#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING) 1451221167Sgnn __hal_ring_rxd_priv_t *priv; 1452221167Sgnn#endif 1453221167Sgnn __hal_device_t *hldev; 1454221167Sgnn vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; 1455221167Sgnn __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; 1456221167Sgnn u64 own, control_0, control_1; 1457221167Sgnn 1458221167Sgnn vxge_assert((vpath_handle != NULL) && (rxdh != NULL) && 1459221167Sgnn (rxd_priv != NULL) && (t_code != NULL)); 1460221167Sgnn 1461221167Sgnn hldev = (__hal_device_t *) vp->vpath->hldev; 1462221167Sgnn 1463221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 1464221167Sgnn __FILE__, __func__, __LINE__); 1465221167Sgnn 1466221167Sgnn vxge_hal_trace_log_ring( 1467221167Sgnn "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT", " 1468221167Sgnn "rxd_priv = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT, 1469221167Sgnn (ptr_t) vpath_handle, (ptr_t) rxdh, (ptr_t) rxd_priv, 1470221167Sgnn (ptr_t) t_code); 1471221167Sgnn 1472221167Sgnn ring = (__hal_ring_t *) vp->vpath->ringh; 1473221167Sgnn 1474221167Sgnn vxge_assert(ring != NULL); 1475221167Sgnn 1476221167Sgnn *rxdh = 0; 1477221167Sgnn *rxd_priv = NULL; 1478221167Sgnn 1479221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST) 1480221167Sgnn vxge_os_spin_lock(&ring->channel.post_lock); 1481221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ) 1482221167Sgnn vxge_os_spin_lock_irq(&ring->channel.post_lock, flags); 1483221167Sgnn#endif 1484221167Sgnn 1485221167Sgnn __hal_channel_dtr_try_complete(&ring->channel, rxdh); 1486221167Sgnn 1487221167Sgnn rxdp = (vxge_hal_ring_rxd_5_t *)*rxdh; 1488221167Sgnn if (rxdp != NULL) { 1489221167Sgnn 1490221167Sgnn#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_RXD_STREAMING) 1491221167Sgnn /* 1492221167Sgnn * Note: 24 bytes at most means: 1493221167Sgnn * - Control_3 in case of 5-buffer mode 1494221167Sgnn * - Control_1 and Control_2 
1495221167Sgnn * 1496221167Sgnn * This is the only length needs to be invalidated 1497221167Sgnn * type of channels. 1498221167Sgnn */ 1499221167Sgnn priv = __hal_ring_rxd_priv(ring, rxdp); 1500221167Sgnn vxge_os_dma_sync(ring->channel.pdev, 1501221167Sgnn priv->dma_handle, 1502221167Sgnn priv->dma_addr, 1503221167Sgnn priv->dma_offset, 1504221167Sgnn 24, 1505221167Sgnn VXGE_OS_DMA_DIR_FROMDEVICE); 1506221167Sgnn#endif 1507221167Sgnn *t_code = (u8) VXGE_HAL_RING_RXD_T_CODE_GET(rxdp->control_0); 1508221167Sgnn 1509221167Sgnn control_0 = rxdp->control_0; 1510221167Sgnn control_1 = rxdp->control_1; 1511221167Sgnn own = control_0 & VXGE_HAL_RING_RXD_LIST_OWN_ADAPTER; 1512221167Sgnn 1513221167Sgnn /* check whether it is not the end */ 1514221167Sgnn if ((!own && !(control_1 & VXGE_HAL_RING_RXD_LIST_TAIL_OWN_ADAPTER)) || 1515221167Sgnn (*t_code == VXGE_HAL_RING_RXD_T_CODE_FRM_DROP)) { 1516221167Sgnn 1517221167Sgnn#ifndef VXGE_HAL_IRQ_POLLING 1518221167Sgnn if (++ring->cmpl_cnt > ring->indicate_max_pkts) { 1519221167Sgnn /* 1520221167Sgnn * reset it. 
since we don't want to return 1521221167Sgnn * garbage to the ULD 1522221167Sgnn */ 1523221167Sgnn *rxdh = 0; 1524221167Sgnn status = VXGE_HAL_COMPLETIONS_REMAIN; 1525221167Sgnn } else { 1526221167Sgnn#endif 1527221167Sgnn __hal_channel_dtr_complete(&ring->channel); 1528221167Sgnn 1529221167Sgnn *rxd_priv = VXGE_HAL_RING_ULD_PRIV(ring, rxdp); 1530221167Sgnn 1531221167Sgnn ring->rxd_mem_avail += 1532221167Sgnn (VXGE_HAL_RING_HAL_PRIV(ring, rxdp))->db_bytes; 1533221167Sgnn 1534221167Sgnn ring->stats->common_stats.usage_cnt++; 1535221167Sgnn if (ring->stats->common_stats.usage_max < 1536221167Sgnn ring->stats->common_stats.usage_cnt) 1537221167Sgnn ring->stats->common_stats.usage_max = 1538221167Sgnn ring->stats->common_stats.usage_cnt; 1539221167Sgnn 1540221167Sgnn switch (ring->buffer_mode) { 1541221167Sgnn case VXGE_HAL_RING_RXD_BUFFER_MODE_1: 1542221167Sgnn ring->channel.poll_bytes += 1543221167Sgnn (u32) VXGE_HAL_RING_RXD_1_BUFFER0_SIZE_GET( 1544221167Sgnn rxdp->control_1); 1545221167Sgnn break; 1546221167Sgnn case VXGE_HAL_RING_RXD_BUFFER_MODE_3: 1547221167Sgnn ring->channel.poll_bytes += 1548221167Sgnn (u32) VXGE_HAL_RING_RXD_3_BUFFER0_SIZE_GET( 1549221167Sgnn rxdp->control_1) + 1550221167Sgnn (u32) VXGE_HAL_RING_RXD_3_BUFFER1_SIZE_GET( 1551221167Sgnn rxdp->control_1) + 1552221167Sgnn (u32) VXGE_HAL_RING_RXD_3_BUFFER2_SIZE_GET( 1553221167Sgnn rxdp->control_1); 1554221167Sgnn break; 1555221167Sgnn case VXGE_HAL_RING_RXD_BUFFER_MODE_5: 1556221167Sgnn ring->channel.poll_bytes += 1557221167Sgnn (u32) VXGE_HAL_RING_RXD_5_BUFFER0_SIZE_GET( 1558221167Sgnn rxdp->control_1) + 1559221167Sgnn (u32) VXGE_HAL_RING_RXD_5_BUFFER1_SIZE_GET( 1560221167Sgnn rxdp->control_1) + 1561221167Sgnn (u32) VXGE_HAL_RING_RXD_5_BUFFER2_SIZE_GET( 1562221167Sgnn rxdp->control_1) + 1563221167Sgnn (u32) VXGE_HAL_RING_RXD_5_BUFFER3_SIZE_GET( 1564221167Sgnn rxdp->control_2) + 1565221167Sgnn (u32) VXGE_HAL_RING_RXD_5_BUFFER4_SIZE_GET( 1566221167Sgnn rxdp->control_2); 1567221167Sgnn break; 
1568221167Sgnn } 1569221167Sgnn 1570221167Sgnn status = VXGE_HAL_OK; 1571221167Sgnn#ifndef VXGE_HAL_IRQ_POLLING 1572221167Sgnn } 1573221167Sgnn#endif 1574221167Sgnn } 1575221167Sgnn } 1576221167Sgnn 1577221167Sgnn#if defined(VXGE_HAL_RX_MULTI_POST) 1578221167Sgnn vxge_os_spin_unlock(&ring->channel.post_lock); 1579221167Sgnn#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ) 1580221167Sgnn vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags); 1581221167Sgnn#endif 1582221167Sgnn 1583221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 1584221167Sgnn __FILE__, __func__, __LINE__, status); 1585221167Sgnn return (status); 1586221167Sgnn} 1587221167Sgnn 1588221167Sgnn 1589221167Sgnn/* 1590221167Sgnn * vxge_hal_ring_handle_tcode - Handle transfer code. 1591221167Sgnn * @vpath_handle: Virtual Path handle. 1592221167Sgnn * @rxdh: Descriptor handle. 1593221167Sgnn * @t_code: One of the enumerated (and documented in the X3100 user guide) 1594221167Sgnn * "transfer codes". 1595221167Sgnn * 1596221167Sgnn * Handle descriptor's transfer code. The latter comes with each completed 1597221167Sgnn * descriptor. 1598221167Sgnn * 1599221167Sgnn * Returns: one of the vxge_hal_status_e {} enumerated types. 1600221167Sgnn * VXGE_HAL_OK - for success. 1601221167Sgnn * VXGE_HAL_ERR_CRITICAL - when encounters critical error. 
1602221167Sgnn */ 1603221167Sgnnvxge_hal_status_e 1604221167Sgnnvxge_hal_ring_handle_tcode( 1605221167Sgnn vxge_hal_vpath_h vpath_handle, 1606221167Sgnn vxge_hal_rxd_h rxdh, 1607221167Sgnn u8 t_code) 1608221167Sgnn{ 1609221167Sgnn __hal_device_t *hldev; 1610221167Sgnn __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; 1611221167Sgnn 1612221167Sgnn vxge_assert((vpath_handle != NULL) && (rxdh != NULL)); 1613221167Sgnn 1614221167Sgnn hldev = (__hal_device_t *) vp->vpath->hldev; 1615221167Sgnn 1616221167Sgnn vxge_hal_trace_log_ring("==> %s:%s:%d", 1617221167Sgnn __FILE__, __func__, __LINE__); 1618221167Sgnn 1619221167Sgnn vxge_hal_trace_log_ring( 1620221167Sgnn "vpath_handle = 0x"VXGE_OS_STXFMT", " 1621221167Sgnn "rxdh = 0x"VXGE_OS_STXFMT", t_code = 0x%d", 1622221167Sgnn (ptr_t) vpath_handle, (ptr_t) rxdh, t_code); 1623221167Sgnn 1624221167Sgnn switch (t_code) { 1625221167Sgnn case 0x0: 1626221167Sgnn /* 0x0: Transfer ok. */ 1627221167Sgnn break; 1628221167Sgnn case 0x1: 1629221167Sgnn /* 1630221167Sgnn * 0x1: Layer 3 checksum presentation 1631221167Sgnn * configuration mismatch. 1632221167Sgnn */ 1633221167Sgnn break; 1634221167Sgnn case 0x2: 1635221167Sgnn /* 1636221167Sgnn * 0x2: Layer 4 checksum presentation 1637221167Sgnn * configuration mismatch. 1638221167Sgnn */ 1639221167Sgnn break; 1640221167Sgnn case 0x3: 1641221167Sgnn /* 1642221167Sgnn * 0x3: Layer 3 and Layer 4 checksum 1643221167Sgnn * presentation configuration mismatch. 1644221167Sgnn */ 1645221167Sgnn break; 1646221167Sgnn case 0x4: 1647221167Sgnn /* 0x4: Reserved. */ 1648221167Sgnn break; 1649221167Sgnn case 0x5: 1650221167Sgnn /* 1651221167Sgnn * 0x5: Layer 3 error unparseable packet, 1652221167Sgnn * such as unknown IPv6 header. 1653221167Sgnn */ 1654221167Sgnn break; 1655221167Sgnn case 0x6: 1656221167Sgnn /* 1657221167Sgnn * 0x6: Layer 2 error frame integrity 1658221167Sgnn * error, such as FCS or ECC). 
1659221167Sgnn */ 1660221167Sgnn break; 1661221167Sgnn case 0x7: 1662221167Sgnn /* 1663221167Sgnn * 0x7: Buffer size error the RxD buffer(s) 1664221167Sgnn * were not appropriately sized and 1665221167Sgnn * data loss occurred. 1666221167Sgnn */ 1667221167Sgnn break; 1668221167Sgnn case 0x8: 1669221167Sgnn /* 0x8: Internal ECC error RxD corrupted. */ 1670221167Sgnn __hal_device_handle_error(vp->vpath->hldev, 1671221167Sgnn vp->vpath->vp_id, VXGE_HAL_EVENT_ECCERR); 1672221167Sgnn break; 1673221167Sgnn case 0x9: 1674221167Sgnn /* 1675221167Sgnn * 0x9: Benign overflow the contents of 1676221167Sgnn * Segment1 exceeded the capacity of 1677221167Sgnn * Buffer1 and the remainder was placed 1678221167Sgnn * in Buffer2. Segment2 now starts in 1679221167Sgnn * Buffer3. No data loss or errors occurred. 1680221167Sgnn */ 1681221167Sgnn break; 1682221167Sgnn case 0xA: 1683221167Sgnn /* 1684221167Sgnn * 0xA: Buffer size 0 one of the RxDs 1685221167Sgnn * assigned buffers has a size of 0 bytes. 1686221167Sgnn */ 1687221167Sgnn break; 1688221167Sgnn case 0xB: 1689221167Sgnn /* 0xB: Reserved. */ 1690221167Sgnn break; 1691221167Sgnn case 0xC: 1692221167Sgnn /* 1693221167Sgnn * 0xC: Frame dropped either due to 1694221167Sgnn * VPath Reset or because of a VPIN mismatch. 1695221167Sgnn */ 1696221167Sgnn break; 1697221167Sgnn case 0xD: 1698221167Sgnn /* 0xD: Reserved. */ 1699221167Sgnn break; 1700221167Sgnn case 0xE: 1701221167Sgnn /* 0xE: Reserved. */ 1702221167Sgnn break; 1703221167Sgnn case 0xF: 1704221167Sgnn /* 1705221167Sgnn * 0xF: Multiple errors more than one 1706221167Sgnn * transfer code condition occurred. 
1707221167Sgnn */ 1708221167Sgnn break; 1709221167Sgnn default: 1710221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 1711221167Sgnn __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE); 1712221167Sgnn return (VXGE_HAL_ERR_INVALID_TCODE); 1713221167Sgnn } 1714221167Sgnn 1715221167Sgnn vp->vpath->sw_stats->ring_stats.rxd_t_code_err_cnt[t_code]++; 1716221167Sgnn 1717221167Sgnn vxge_hal_trace_log_ring("<== %s:%s:%d Result: %d", 1718221167Sgnn __FILE__, __func__, __LINE__, VXGE_HAL_OK); 1719221167Sgnn return (VXGE_HAL_OK); 1720221167Sgnn} 1721221167Sgnn 1722221167Sgnn 1723221167Sgnn/* 1724221167Sgnn * vxge_hal_ring_rxd_private_get - Get ULD private per-descriptor data. 1725221167Sgnn * @vpath_handle: Virtual Path handle. 1726221167Sgnn * @rxdh: Descriptor handle. 1727221167Sgnn * 1728221167Sgnn * Returns: private ULD info associated with the descriptor. 1729221167Sgnn * ULD requests per-descriptor space via vxge_hal_ring_attr. 1730221167Sgnn * 1731221167Sgnn */ 1732221167Sgnnvoid * 1733221167Sgnnvxge_hal_ring_rxd_private_get( 1734221167Sgnn vxge_hal_vpath_h vpath_handle, 1735221167Sgnn vxge_hal_rxd_h rxdh) 1736221167Sgnn{ 1737221167Sgnn __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; 1738221167Sgnn 1739221167Sgnn return (VXGE_HAL_RING_ULD_PRIV( 1740221167Sgnn ((__hal_ring_t *) vp->vpath->ringh), rxdh)); 1741221167Sgnn 1742221167Sgnn} 1743221167Sgnn 1744221167Sgnn/* 1745221167Sgnn * vxge_hal_ring_rxd_free - Free descriptor. 1746221167Sgnn * @vpath_handle: Virtual Path handle. 1747221167Sgnn * @rxdh: Descriptor handle. 1748221167Sgnn * 1749221167Sgnn * Free the reserved descriptor. This operation is "symmetrical" to 1750221167Sgnn * vxge_hal_ring_rxd_reserve. The "free-ing" completes the descriptor's 1751221167Sgnn * lifecycle. 
 *
 * After free-ing (see vxge_hal_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hal_ring_rxd_reserve);
 *
 * - posted (vxge_hal_ring_rxd_post);
 *
 * - completed (vxge_hal_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hal_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void
vxge_hal_ring_rxd_free(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_rxd_h rxdh)
{
	/* flags only exists for the IRQ-locking build variant, where the
	 * spin_lock_irq macro saves the interrupt state into it by name. */
#if defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;

#endif
	__hal_ring_t *ring;
	__hal_device_t *hldev;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;

	vxge_assert((vpath_handle != NULL) && (rxdh != NULL));

	/* NOTE(review): hldev appears unused below; presumably the trace
	 * macros reference it implicitly — confirm before removing. */
	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_ring("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_ring(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", rxdh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) rxdh);

	ring = (__hal_ring_t *) vp->vpath->ringh;

	vxge_assert(ring != NULL);

	/* Serialize against concurrent posters; the lock flavor depends on
	 * the build's multi-post configuration (plain vs. IRQ-safe). */
#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_lock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&ring->channel.post_lock, flags);
#endif

	/* Return the descriptor slot to the channel's free pool. */
	__hal_channel_dtr_free(&ring->channel, VXGE_HAL_RING_RXD_INDEX(rxdh));
#if defined(VXGE_OS_MEMORY_CHECK)
	/* Debug-build bookkeeping: mark the HAL-private area unallocated so
	 * double-free / leak checks can detect misuse. */
	VXGE_HAL_RING_HAL_PRIV(ring, rxdh)->allocated = 0;
#endif

#if defined(VXGE_HAL_RX_MULTI_POST)
	vxge_os_spin_unlock(&ring->channel.post_lock);
#elif defined(VXGE_HAL_RX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&ring->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_ring("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}