Lines Matching refs:ring

49 				 struct mlx4_en_rx_ring *ring,
53 ((struct mlx4_en_rx_desc *)ring->buf) + index;
61 * If the number of used fragments does not fill up the ring
74 mlx4_en_alloc_mbuf(struct mlx4_en_rx_ring *ring)
79 mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
81 mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;
92 if (mb_head->m_pkthdr.len >= ring->rx_mb_size)
110 mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc,
123 if (unlikely(ring->spare.mbuf == NULL)) {
124 mb = mlx4_en_alloc_mbuf(ring);
132 err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
133 mb, ring->spare.segs, &nsegs, BUS_DMA_NOWAIT);
140 ring->spare.mbuf = mb;
145 ring->spare.segs[i].ds_addr = 0;
146 ring->spare.segs[i].ds_len = 0;
149 bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
155 bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
157 bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
160 mb = mlx4_en_alloc_mbuf(ring);
167 err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
179 rx_desc->data[i].lkey = ring->rx_mr_key_be;
190 bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
196 mb_list->dma_map = ring->spare.dma_map;
197 ring->spare.dma_map = map;
200 mb_list->mbuf = ring->spare.mbuf;
201 ring->spare.mbuf = NULL;
205 rx_desc->data[0].addr = cpu_to_be64(ring->spare.segs[0].ds_addr);
208 if (ring->spare.segs[i].ds_len != 0) {
209 rx_desc->data[i].byte_count = cpu_to_be32(ring->spare.segs[i].ds_len);
210 rx_desc->data[i].lkey = ring->rx_mr_key_be;
211 rx_desc->data[i].addr = cpu_to_be64(ring->spare.segs[i].ds_addr);
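
The mlx4_en_alloc_buf() lines above (110-211) keep a preloaded spare buffer so a failed mbuf allocation or DMA load never leaves an RX slot empty: the freshly loaded buffer is used when it can be, otherwise the spare is swapped into the slot and replenished on a later call. A minimal userspace sketch of that fallback pattern, using hypothetical try_alloc()/refill() helpers rather than the driver's mbuf and busdma calls:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a loaded RX buffer. */
struct buf { void *data; };

static struct buf *
try_alloc(void)
{
        /* Fails now and then, like m_getjcl()/bus_dmamap_load_mbuf_sg(). */
        if (rand() % 4 == 0)
                return (NULL);
        return (calloc(1, sizeof(struct buf)));
}

/* Refill one slot; fall back to the preloaded spare on failure. */
static int
refill(struct buf **slot, struct buf **spare)
{
        struct buf *nb;

        /* Replenish the spare first, as the driver does at the top of alloc_buf. */
        if (*spare == NULL)
                *spare = try_alloc();

        nb = try_alloc();
        if (nb != NULL) {
                free(*slot);
                *slot = nb;
                return (0);
        }
        if (*spare == NULL)
                return (-1);            /* nothing to fall back on; slot keeps its old buffer */
        free(*slot);
        *slot = *spare;                 /* consume the spare ... */
        *spare = NULL;                  /* ... it is replenished on the next call */
        return (0);
}

int
main(void)
{
        struct buf *slot = calloc(1, sizeof(struct buf));
        struct buf *spare = calloc(1, sizeof(struct buf));
        int i;

        for (i = 0; i < 16; i++)
                if (refill(&slot, &spare) != 0)
                        printf("refill %d failed, old buffer kept\n", i);
        free(slot);
        free(spare);
        return (0);
}
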
223 mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
226 bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
227 bus_dmamap_unload(ring->dma_tag, map);
234 struct mlx4_en_rx_ring *ring, int index)
237 ((struct mlx4_en_rx_desc *)ring->buf) + index;
238 struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;
242 if (mlx4_en_alloc_buf(ring, rx_desc, mb_list)) {
250 mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
252 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
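
The doorbell update at line 252 publishes only the low 16 bits of the free-running producer counter, byte-swapped to big-endian for the hardware. A standalone sketch of the same encoding, with htonl() standing in for cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>          /* htonl() as a stand-in for cpu_to_be32() */

int
main(void)
{
        uint32_t prod = 0x12345;        /* free-running producer counter */
        volatile uint32_t db;           /* stand-in for ring->wqres.db.db */

        db = htonl(prod & 0xffff);      /* hardware reads only the low 16 bits */
        printf("prod=0x%x db=0x%08x\n", prod, (uint32_t)db);
        return (0);
}
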
257 struct mlx4_en_rx_ring *ring;
265 ring = priv->rx_ring[ring_ind];
267 err = mlx4_en_prepare_rx_desc(priv, ring,
268 ring->actual_size);
270 if (ring->actual_size == 0) {
276 rounddown_pow_of_two(ring->actual_size);
278 "reducing ring size to %d\n",
279 ring->actual_size, new_size);
283 ring->actual_size++;
284 ring->prod++;
291 ring = priv->rx_ring[ring_ind];
292 while (ring->actual_size > new_size) {
293 ring->actual_size--;
294 ring->prod--;
295 mlx4_en_free_buf(ring,
296 ring->mbuf + ring->actual_size);
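
Lines 270-296 handle a partially filled ring: the usable size is rounded down to a power of two and the extra buffers are freed, which keeps the mask-based indexing (line 522) valid. A sketch of that rounding, assuming a plain bit-clearing loop rather than the kernel's rounddown_pow_of_two():

#include <stdint.h>
#include <stdio.h>

/* Round down to the nearest power of two. */
static uint32_t
rounddown_pow2(uint32_t n)
{
        while (n & (n - 1))
                n &= n - 1;     /* clear the lowest set bit until one remains */
        return (n);
}

int
main(void)
{
        uint32_t actual_size = 700;     /* slots that were successfully filled */
        uint32_t new_size = rounddown_pow2(actual_size);

        printf("filled %u, reducing ring size to %u (mask 0x%x)\n",
            actual_size, new_size, new_size - 1);
        return (0);
}
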
304 struct mlx4_en_rx_ring *ring)
309 ring->cons, ring->prod);
312 BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
313 while (ring->cons != ring->prod) {
314 index = ring->cons & ring->size_mask;
316 mlx4_en_free_buf(ring, ring->mbuf + index);
317 ++ring->cons;
370 struct mlx4_en_rx_ring *ring;
375 ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
376 if (!ring) {
377 en_err(priv, "Failed to allocate RX ring structure\n");
394 &ring->dma_tag))) {
399 ring->prod = 0;
400 ring->cons = 0;
401 ring->size = size;
402 ring->size_mask = size - 1;
404 ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc));
405 ring->buf_size = (ring->size * sizeof(struct mlx4_en_rx_desc)) + TXBB_SIZE;
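
The sizing at lines 399-405 assumes a power-of-two ring, so slot indices come from masking the free-running counters and prod - cons is the fill level even across 32-bit wraparound (compare lines 312-314). A small sketch of that arithmetic with generic variables:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t size = 1024;                   /* must be a power of two */
        uint32_t size_mask = size - 1;
        uint32_t prod = 0, cons = 0;

        assert((size & (size - 1)) == 0);

        /* Post three buffers, consume one; the counters never wrap back. */
        prod += 3;
        cons += 1;

        printf("next slot to post:    %u\n", prod & size_mask);
        printf("next slot to consume: %u\n", cons & size_mask);
        /* Fill level stays correct even after the 32-bit counters wrap. */
        printf("outstanding buffers:  %u\n", (uint32_t)(prod - cons));
        return (0);
}
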
409 ring->mbuf = kzalloc(tmp, GFP_KERNEL);
410 if (ring->mbuf == NULL) {
415 err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
420 err = -bus_dmamap_create(ring->dma_tag, 0,
421 &ring->mbuf[x].dma_map);
424 bus_dmamap_destroy(ring->dma_tag,
425 ring->mbuf[x].dma_map);
429 en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
430 ring->mbuf, tmp);
432 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
433 ring->buf_size, 2 * PAGE_SIZE);
437 err = mlx4_en_map_buffer(&ring->wqres.buf);
442 ring->buf = ring->wqres.buf.direct.buf;
443 *pring = ring;
447 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
450 bus_dmamap_destroy(ring->dma_tag,
451 ring->mbuf[x].dma_map);
453 bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
455 vfree(ring->mbuf);
457 bus_dma_tag_destroy(ring->dma_tag);
459 kfree(ring);
465 struct mlx4_en_rx_ring *ring;
473 ring = priv->rx_ring[ring_ind];
475 ring->prod = 0;
476 ring->cons = 0;
477 ring->actual_size = 0;
478 ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
479 ring->rx_mb_size = priv->rx_mb_size;
483 __be32 *ptr = (__be32 *)ring->buf;
487 ring->buf += TXBB_SIZE;
490 ring->log_stride = ilog2(sizeof(struct mlx4_en_rx_desc));
491 ring->buf_size = ring->size * sizeof(struct mlx4_en_rx_desc);
493 memset(ring->buf, 0, ring->buf_size);
494 mlx4_en_update_rx_prod_db(ring);
498 for (i = 0; i < ring->size; i++)
499 mlx4_en_init_rx_desc(priv, ring, i);
501 ring->rx_mr_key_be = cpu_to_be32(priv->mdev->mr.key);
506 if (tcp_lro_init(&ring->lro))
509 ring->lro.ifp = priv->dev;
520 ring = priv->rx_ring[ring_ind];
522 ring->size_mask = ring->actual_size - 1;
523 mlx4_en_update_rx_prod_db(ring);
535 ring = priv->rx_ring[ring_ind];
537 ring->buf -= TXBB_SIZE;
550 struct mlx4_en_rx_ring *ring = *pring;
553 mlx4_en_unmap_buffer(&ring->wqres.buf);
554 mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * sizeof(struct mlx4_en_rx_desc) + TXBB_SIZE);
556 bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
558 if (ring->spare.mbuf != NULL) {
559 bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
561 bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
562 m_freem(ring->spare.mbuf);
564 bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
565 vfree(ring->mbuf);
566 bus_dma_tag_destroy(ring->dma_tag);
567 kfree(ring);
570 mlx4_en_cleanup_filters(priv, ring);
575 struct mlx4_en_rx_ring *ring)
578 tcp_lro_free(&ring->lro);
580 mlx4_en_free_rx_buf(priv, ring);
582 ring->buf -= TXBB_SIZE;
623 mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
642 bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
654 if (mlx4_en_alloc_buf(ring, rx_desc, mb_list))
742 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
752 u32 size_mask = ring->size_mask;
769 mb_list = ring->mbuf + index;
770 rx_desc = ((struct mlx4_en_rx_desc *)ring->buf) + index;
784 length -= ring->fcs_del;
786 mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
788 ring->errors++;
792 ring->bytes += length;
793 ring->packets++;
827 if (ring->lro.lro_cnt != 0 &&
828 tcp_lro_rx(&ring->lro, mb, 0) == 0)
853 tcp_lro_flush_all(&ring->lro);
859 ring->cons = mcq->cons_index;
860 ring->prod += polled; /* Polled descriptors were reallocated in place */
861 mlx4_en_update_rx_prod_db(ring);
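
In the completion path (lines 742-861) each polled descriptor is reposted in place, so the consumer counter snaps to the CQ's consumer index, the producer advances by the number of completions, and the doorbell is rung once at the end. A sketch of that accounting with hypothetical poll_one()/ring_doorbell() stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for CQE polling and the doorbell write. */
static int
poll_one(void)
{
        static int left = 5;

        return (left-- > 0);
}

static void
ring_doorbell(uint32_t prod)
{
        printf("db <- 0x%04x\n", prod & 0xffff);
}

int
main(void)
{
        uint32_t prod = 100, cons = 100, cq_cons = 100;
        uint32_t polled = 0;

        while (poll_one()) {            /* each completion is reposted in place */
                cq_cons++;
                polled++;
        }
        cons = cq_cons;                 /* ring->cons = mcq->cons_index        */
        prod += polled;                 /* ring->prod += polled                */
        ring_doorbell(prod);            /* mlx4_en_update_rx_prod_db(ring)     */
        printf("prod=%u cons=%u outstanding=%u\n", prod, cons, prod - cons);
        return (0);
}
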
917 struct mlx4_en_rx_ring *ring,
939 mlx4_en_fill_qp_context(priv, ring->actual_size, sizeof(struct mlx4_en_rx_desc), 0, 0,
940 qpn, ring->cqn, -1, context);
941 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
946 ring->fcs_del = ETH_FCS_LEN;
948 ring->fcs_del = 0;
950 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
955 mlx4_en_update_rx_prod_db(ring);