Lines Matching refs:ring

29 #include <dev/nxge/include/xgehal-ring.h>
81 xge_hal_ring_t *ring, int from, int to)
110 xge_os_dma_sync(ring->channel.pdev,
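
The fragments at lines 81-110 belong to __hal_ring_rxdblock_link(), which writes the DMA address of block "to" into the link field of block "from" and then syncs that field so the device observes it. A minimal stand-alone sketch of the same chaining, assuming a 4 KiB block whose tail word holds the next-block DMA address (the layout is an assumption, not taken from xgehal-ring.h):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 4096

typedef struct rxd_block {
    uint8_t  rxds[BLOCK_SIZE - sizeof(uint64_t)];  /* RxD area */
    uint64_t next_block_dma;                       /* link field */
} rxd_block_t;

/* stand-in for __hal_ring_rxdblock_link(): block "from" points at "to",
 * following the (from, to) signature at line 81 */
static void rxdblock_link(rxd_block_t **blocks, const uint64_t *dma_addrs,
                          int from, int to)
{
    blocks[from]->next_block_dma = dma_addrs[to];
    /* the real HAL follows this store with xge_os_dma_sync() (line 110)
     * so the NIC sees the updated link */
}

int main(void)
{
    enum { NBLOCKS = 4 };
    rxd_block_t *blocks[NBLOCKS];
    uint64_t dma[NBLOCKS];
    int i;

    for (i = 0; i < NBLOCKS; i++) {
        blocks[i] = calloc(1, sizeof(rxd_block_t));  /* error checks omitted */
        dma[i] = 0x1000000ULL + (uint64_t)i * BLOCK_SIZE;  /* fake DMA addrs */
    }
    /* mirror the calls at lines 202/207: each new block links back to its
     * predecessor and block 0 is re-pointed at the newest block, so the
     * list stays circular after every allocation */
    for (i = 1; i < NBLOCKS; i++) {
        rxdblock_link(blocks, dma, 0, i);
        rxdblock_link(blocks, dma, i, i - 1);
    }
    for (i = 0; i < NBLOCKS; i++) {
        printf("block %d -> 0x%llx\n", i,
               (unsigned long long)blocks[i]->next_block_dma);
        free(blocks[i]);
    }
    return 0;
}
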
135 xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;
138 xge_assert(ring);
142 for (i=ring->rxds_per_block-1; i>=0; i--) {
146 int reserve_index = index * ring->rxds_per_block + i;
149 ring->reserved_rxds_arr[reserve_index] = (char *)item +
150 (ring->rxds_per_block - 1 - i) * ring->rxd_size;
160 ring->reserved_rxds_arr[reserve_index];
162 ((char*)rxdblock_priv + ring->rxd_priv_size * i);
174 if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
183 ring->rxds_per_block + i);
202 __hal_ring_rxdblock_link(mempoolh, ring, 0, index);
207 __hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
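
Lines 135-207 are the mempool item-init callback. For each new memblock it walks the RxDs in reverse (line 142) and records them in reserved_rxds_arr, so entry 0 of the array ends up pointing at the last RxD of block 0; 5-buffer mode (line 174) needs additional per-RxD setup. A sketch of just the index arithmetic, with illustrative sizes:

#include <stdio.h>

int main(void)
{
    const int rxds_per_block = 4;   /* real value depends on buffer_mode */
    const int rxd_size = 32;        /* bytes; illustrative only */
    char block[4 * 32];             /* one mempool item ("item" in the HAL) */
    char *reserved_rxds_arr[4];
    const int index = 0;            /* memblock number, as in the callback */
    int i;

    for (i = rxds_per_block - 1; i >= 0; i--) {
        int reserve_index = index * rxds_per_block + i;      /* line 146 */
        reserved_rxds_arr[reserve_index] = block +
            (rxds_per_block - 1 - i) * rxd_size;             /* lines 149-150 */
        printf("arr[%d] -> offset %d\n", reserve_index,
               (int)(reserved_rxds_arr[reserve_index] - block));
    }
    /* arr[0] now points at the *last* RxD of the block, which is exactly
     * what the assert at lines 320-322 checks after open */
    return 0;
}
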
249 xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
256 hldev = (xge_hal_device_t *)ring->channel.devh;
257 ring->config = &hldev->config.ring;
258 queue = &ring->config->queue[attr->post_qid];
259 ring->indicate_max_pkts = queue->indicate_max_pkts;
260 ring->buffer_mode = queue->buffer_mode;
265 xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
267 xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
270 xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
272 xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
275 ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
276 ring->rxd_priv_size =
281 ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);
284 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
286 ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
287 sizeof(void*) * queue->max * ring->rxds_per_block);
289 if (ring->reserved_rxds_arr == NULL) {
294 ring->mempool = __hal_mempool_create(
296 ring->config->memblock_size,
298 ring->rxdblock_priv_size,
302 ring);
303 if (ring->mempool == NULL) {
310 ring->reserved_rxds_arr,
311 queue->initial * ring->rxds_per_block,
312 queue->max * ring->rxds_per_block,
313 0 /* no threshold for ring! */);
320 xge_assert(ring->reserved_rxds_arr[0] ==
321 (char *)ring->mempool->items_arr[0] +
322 (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));
331 if (ring->channel.dtr_init) {
343 ring->channel.usage_cnt = 0;
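
The open path (lines 249-343) sizes everything from the queue's buffer_mode: per-RxD size and RxDs-per-block come from XGE_HAL_RING_RXD_SIZEOF() and XGE_HAL_RING_RXDS_PER_BLOCK(), the per-block private area is rxd_priv_size * rxds_per_block (line 284), and the reserve array holds one pointer per RxD across queue->max blocks (line 287). A sketch of that arithmetic with assumed values (the real constants live in xgehal-ring.h):

#include <stdio.h>

int main(void)
{
    /* illustrative stand-ins for the buffer_mode-keyed macros */
    int rxd_size       = 32;   /* XGE_HAL_RING_RXD_SIZEOF(mode), assumed */
    int rxds_per_block = 127;  /* XGE_HAL_RING_RXDS_PER_BLOCK(mode), assumed */
    int rxd_priv_size  = 64;   /* per-RxD driver-private bytes, assumed */
    int queue_max      = 16;   /* queue->max memblocks */

    printf("rxdblock_priv_size = %d bytes\n",
           rxd_priv_size * rxds_per_block);                  /* line 284 */
    printf("reserved_rxds_arr  = %zu bytes\n",
           sizeof(void *) * queue_max * rxds_per_block);     /* line 287 */
    printf("RxDs at queue->max = %d\n", queue_max * rxds_per_block);
    return 0;
}
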
351 xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
355 xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
358 xge_assert(ring->channel.pdev);
360 queue = &ring->config->queue[ring->channel.post_qid];
362 if (ring->mempool) {
363 __hal_mempool_destroy(ring->mempool);
366 if (ring->reserved_rxds_arr) {
367 xge_os_free(ring->channel.pdev,
368 ring->reserved_rxds_arr,
369 sizeof(void*) * queue->max * ring->rxds_per_block);
375 xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
377 xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
380 xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
382 xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
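
The close path (lines 351-382) releases resources in the reverse order of open and guards each pointer, since open can fail part-way through. A sketch of the same shape, with free()/malloc() standing in for the xge_os_* and __hal_mempool_* primitives:

#include <stdlib.h>

struct ring_sketch {
    void  *mempool;            /* stand-in for ring->mempool */
    void **reserved_rxds_arr;  /* stand-in for the reserve array */
};

static void ring_close(struct ring_sketch *ring)
{
    if (ring->mempool) {               /* __hal_mempool_destroy(), line 363 */
        free(ring->mempool);
        ring->mempool = NULL;
    }
    if (ring->reserved_rxds_arr) {     /* xge_os_free(), lines 367-369 */
        free(ring->reserved_rxds_arr);
        ring->reserved_rxds_arr = NULL;
    }
    /* reserve_lock and post_lock are destroyed last (lines 375-382) */
}

int main(void)
{
    struct ring_sketch r = { malloc(16), calloc(4, sizeof(void *)) };
    ring_close(&r);
    return 0;
}
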
389 xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
390 xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
398 xge_assert(ring);
399 xge_assert(ring->channel.pdev);
401 ((xge_hal_device_t *)ring->channel.devh)->bar0;
403 queue = &ring->config->queue[ring->channel.post_qid];
412 first_block = __hal_mempool_item(ring->mempool, block_num - 1);
413 val64 = __hal_ring_item_dma_addr(ring->mempool,
415 xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
416 val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);
418 xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
419 ring->channel.post_qid, (unsigned long long)val64);
421 val64 = xge_os_pio_mem_read64(ring->channel.pdev,
422 ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
445 xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
446 val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
449 val64 = xge_os_pio_mem_read64(ring->channel.pdev,
450 ring->channel.regh0, &bar0->rx_pa_cfg);
451 val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
455 val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);
457 xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
460 xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
461 ring->channel.post_qid, queue->buffer_mode);
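
Within __hal_ring_prc_enable() (lines 389-461), the rx_pa_cfg update at lines 449-457 is a single read-modify-write that folds in the configured scatter mode and VLAN-strip mode. A sketch of that update on plain memory; the bit positions in the two macros are assumptions, not the xgehal-regs.h values:

#include <stdint.h>
#include <stdio.h>

#define RX_PA_CFG_SCATTER_MODE(m)        ((uint64_t)(m) << 23)  /* assumed */
#define RX_PA_CFG_STRIP_VLAN_TAG_MODE(m) ((uint64_t)(m) << 18)  /* assumed */

static uint64_t fake_rx_pa_cfg;  /* stand-in for bar0->rx_pa_cfg */

int main(void)
{
    uint64_t val64 = fake_rx_pa_cfg;            /* xge_os_pio_mem_read64()      */
    val64 |= RX_PA_CFG_SCATTER_MODE(1);         /* ring->config->scatter_mode   */
    val64 |= RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);  /* ring->config->strip_vlan_tag */
    fake_rx_pa_cfg = val64;                     /* xge_os_pio_mem_write64()     */
    printf("rx_pa_cfg = 0x%016llx\n", (unsigned long long)fake_rx_pa_cfg);
    return 0;
}
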
467 xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
471 xge_assert(ring);
472 xge_assert(ring->channel.pdev);
474 ((xge_hal_device_t *)ring->channel.devh)->bar0;
476 val64 = xge_os_pio_mem_read64(ring->channel.pdev,
477 ring->channel.regh0,
478 &bar0->prc_ctrl_n[ring->channel.post_qid]);
480 xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
481 val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
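
Enable (lines 421-446) and disable (lines 476-481) share one pattern: read prc_ctrl_n for the queue, flip the receive-controller enable bit, write it back. A sketch of the toggle; the bit position is an assumption:

#include <stdint.h>
#include <stdio.h>

#define PRC_CTRL_RC_ENABLED (1ULL << 56)  /* assumed bit position */

static uint64_t fake_prc_ctrl;            /* stand-in for bar0->prc_ctrl_n[qid] */

int main(void)
{
    uint64_t val64;

    val64 = fake_prc_ctrl;                /* enable path, lines 421-446 */
    val64 |= PRC_CTRL_RC_ENABLED;
    fake_prc_ctrl = val64;

    val64 = fake_prc_ctrl;                /* disable path, lines 476-481 */
    val64 &= ~PRC_CTRL_RC_ENABLED;
    fake_prc_ctrl = val64;

    printf("prc_ctrl = 0x%016llx\n", (unsigned long long)fake_prc_ctrl);
    return 0;
}
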
496 if (!hldev->config.ring.queue[i].configured)
498 val64 |= vBIT(hldev->config.ring.queue[i].priority,
506 /* Configuring ring queues according to per-ring configuration */
509 if (!hldev->config.ring.queue[i].configured)
511 val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
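
Lines 496-511 pack eight per-queue byte-wide fields (priority, then DRAM share in MB) into single 64-bit registers via vBIT(). The definition below matches the usual Xframe HAL form of the macro, but treat it as an assumption here:

#include <stdint.h>
#include <stdio.h>

/* usual Xframe HAL form: value placed sz bits wide, loc bits from the MSB */
#define vBIT(val, loc, sz) (((uint64_t)(val)) << (64 - (loc) - (sz)))

int main(void)
{
    uint64_t val64 = 0;
    int dram_size_mb[8] = { 8, 8, 8, 8, 8, 8, 8, 8 };
    int i;

    for (i = 0; i < 8; i++)
        val64 |= vBIT(dram_size_mb[i], i * 8, 8);  /* line 511 */
    printf("queue cfg = 0x%016llx\n", (unsigned long long)val64);
    /* prints 0x0808080808080808: queue 0 lands in the top byte,
     * queue 7 in the bottom byte */
    return 0;
}
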
532 if (!hldev->config.ring.queue[i].configured)
534 if (!hldev->config.ring.queue[i].rth_en)
552 if (hldev->config.ring.queue[i].configured)
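
The RTH setup at lines 532-552 walks the queues, skipping any that are unconfigured or have rth_en off. A sketch that reduces that scan to building an eligibility mask (the mask itself is my framing; only the two skip tests come from the source):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    struct { int configured, rth_en; } queue[8] =
        { {1, 1}, {1, 0}, {0, 0}, {1, 1} };
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if (!queue[i].configured)   /* line 532 */
            continue;
        if (!queue[i].rth_en)       /* line 534 */
            continue;
        mask |= 1ULL << i;
    }
    printf("RTH-eligible rings: 0x%02llx\n", (unsigned long long)mask);
    return 0;
}
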
610 if (!hldev->config.ring.queue[i].configured ||
611 !hldev->config.ring.queue[i].intr_vector ||
629 hldev->config.ring.queue[i].intr_vector);
633 xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
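
Lines 610-629 bind each configured ring to its MSI-X vector, skipping rings with no vector assigned; once the loop finishes, the trace at line 633 reports the channels initialized. A sketch with a hypothetical bind_vector() stand-in:

#include <stdio.h>

struct queue_cfg { int configured; int intr_vector; };

static void bind_vector(int ring, int vector)  /* hypothetical stand-in */
{
    printf("ring%d -> MSI-X vector %d\n", ring, vector);
}

int main(void)
{
    struct queue_cfg queue[8] = { {1, 1}, {1, 2}, {1, 0}, {0, 0} };
    int i;

    for (i = 0; i < 8; i++) {
        if (!queue[i].configured || !queue[i].intr_vector)
            continue;                          /* lines 610-611 */
        bind_vector(i, queue[i].intr_vector);  /* line 629 */
    }
    return 0;
}
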
644 if (!hldev->config.ring.queue[i].configured)
646 if (hldev->config.ring.queue[i].max_frm_len !=
650 hldev->config.ring.queue[i].max_frm_len),
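
Lines 644-650 reprogram the per-queue receive frame length only for queues whose max_frm_len was explicitly configured; the rest keep the MTU-derived default. A sketch of that selection; the sentinel name is hypothetical:

#include <stdio.h>

enum { USE_DEV_MTU = 0 };  /* hypothetical "not configured" sentinel */

int main(void)
{
    int max_frm_len[8] = { USE_DEV_MTU, 9600, USE_DEV_MTU, 1522 };
    int i;

    for (i = 0; i < 8; i++) {
        if (max_frm_len[i] == USE_DEV_MTU)
            continue;      /* hardware keeps the MTU-derived default */
        printf("queue %d: program frame length %d\n", i, max_frm_len[i]);
    }
    return 0;
}
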