Lines matching refs: ring (mlx4_en TX ring code paths)

60 struct mlx4_en_tx_ring *ring;
65 ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
66 if (!ring) {
67 ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
68 if (!ring) {
69 en_err(priv, "Failed allocating TX ring\n");
87 &ring->dma_tag)))
90 ring->size = size;
91 ring->size_mask = size - 1;
92 ring->stride = stride;
93 ring->inline_thold = MAX(MIN_PKT_LEN, MIN(priv->prof->inline_thold, MAX_INLINE));
94 mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
95 mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);
98 ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
99 if (!ring->tx_info) {
100 ring->tx_info = kzalloc(tmp, GFP_KERNEL);
101 if (!ring->tx_info) {
109 err = -bus_dmamap_create(ring->dma_tag, 0,
110 &ring->tx_info[x].dma_map);
113 bus_dmamap_destroy(ring->dma_tag,
114 ring->tx_info[x].dma_map);
120 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
121 ring->tx_info, tmp);
123 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
126 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
133 err = mlx4_en_map_buffer(&ring->wqres.buf);
139 ring->buf = ring->wqres.buf.direct.buf;
141 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
142 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
143 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
145 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
148 en_err(priv, "failed reserving qp for TX ring\n");
152 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
154 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
157 ring->qp.event = mlx4_en_sqp_event;
159 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
162 ring->bf.uar = &mdev->priv_uar;
163 ring->bf.uar->map = mdev->uar_map;
164 ring->bf_enabled = false;
166 ring->bf_enabled = true;
167 ring->queue_index = queue_idx;
169 *pring = ring;
173 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
175 mlx4_en_unmap_buffer(&ring->wqres.buf);
177 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
180 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
182 vfree(ring->tx_info);
184 bus_dma_tag_destroy(ring->dma_tag);
186 kfree(ring);
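
The allocations at lines 65-68 and 98-101 follow a node-local-then-global pattern: try to place the ring's memory on the NUMA node that will service the queue, and only fall back to an allocation from any node if that fails. A minimal sketch of the pattern; the helper name is illustrative, not from the driver:

    #include <linux/slab.h>

    /* Try node-local memory first; any-node memory is still better than
     * failing ring creation outright. */
    static void *
    ring_zalloc_prefer_node(size_t size, int node)
    {
            void *p;

            p = kzalloc_node(size, GFP_KERNEL, node);
            if (p == NULL)
                    p = kzalloc(size, GFP_KERNEL);
            return (p);
    }
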
194 struct mlx4_en_tx_ring *ring = *pring;
196 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
198 if (ring->bf_enabled)
199 mlx4_bf_free(mdev->dev, &ring->bf);
200 mlx4_qp_remove(mdev->dev, &ring->qp);
201 mlx4_qp_free(mdev->dev, &ring->qp);
202 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
203 mlx4_en_unmap_buffer(&ring->wqres.buf);
204 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
205 for (x = 0; x != ring->size; x++)
206 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
207 vfree(ring->tx_info);
208 mtx_destroy(&ring->tx_lock.m);
209 mtx_destroy(&ring->comp_lock.m);
210 bus_dma_tag_destroy(ring->dma_tag);
211 kfree(ring);
216 struct mlx4_en_tx_ring *ring,
222 ring->cqn = cq;
223 ring->prod = 0;
224 ring->cons = 0xffffffff;
225 ring->last_nr_txbb = 1;
226 ring->poll_cnt = 0;
227 memset(ring->buf, 0, ring->buf_size);
228 ring->watchdog_time = 0;
230 ring->qp_state = MLX4_QP_STATE_RST;
231 ring->doorbell_qpn = ring->qp.qpn << 8;
233 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
234 ring->cqn, user_prio, &ring->context);
235 if (ring->bf_enabled)
236 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
238 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
239 &ring->qp, &ring->qp_state);
244 struct mlx4_en_tx_ring *ring)
248 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
249 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
272 struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
274 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
276 (ring->buf + (index * TXBB_SIZE));
291 struct mlx4_en_tx_ring *ring, u32 index)
296 tx_info = &ring->tx_info[index];
302 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
304 bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
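
Lines 296-304 show the busdma half of reclaiming a completed transmit descriptor. A hedged sketch of the full per-packet teardown, assuming the driver's usual headers, that the mbuf pointer lives in tx_info->mb (the field name is not shown in the listing), and that the sync direction is POSTWRITE as usual for transmit completion:

    static void
    tx_desc_cleanup(struct mlx4_en_tx_ring *ring, u32 index)
    {
            struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];

            /* hand the buffer back to the CPU, then drop the DMA mapping */
            bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
            m_freem(tx_info->mb);           /* release the mbuf chain */
    }
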
311 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
317 ring->cons += ring->last_nr_txbb;
319 ring->cons, ring->prod);
321 if ((u32) (ring->prod - ring->cons) > ring->size) {
326 while (ring->cons != ring->prod) {
327 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
328 ring->cons & ring->size_mask);
329 ring->cons += ring->last_nr_txbb;
340 mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
343 wqs = ring->size - (ring->prod - ring->cons);
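
Lines 90-91, 328 and 343 all rely on the same convention: ring->size is a power of two, prod and cons are free-running 32-bit counters that are only masked (size_mask = size - 1) when used as indices, and unsigned subtraction of the counters gives the occupancy even across wraparound. A self-contained sketch of the arithmetic; the constants are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdbool.h>

    #define RING_SIZE      1024u              /* must be a power of two */
    #define RING_SIZE_MASK (RING_SIZE - 1u)
    #define FREE_SLOTS_MIN 16u                /* stand-in for the driver's headroom */

    /* Counters are never wrapped explicitly, only masked when indexing. */
    static uint32_t ring_slot(uint32_t counter)
    {
            return (counter & RING_SIZE_MASK);
    }

    /* Fullness check in the style of line 343: wrap-safe unsigned math. */
    static bool ring_is_full(uint32_t prod, uint32_t cons)
    {
            return ((RING_SIZE - (prod - cons)) < FREE_SLOTS_MIN);
    }
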
352 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
360 u32 size_mask = ring->size_mask;
369 ring_index = ring->cons & size_mask;
393 txbbs_skipped += ring->last_nr_txbb;
394 ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
396 ring->last_nr_txbb = mlx4_en_free_tx_desc(
397 priv, ring, ring_index);
398 mlx4_en_stamp_wqe(priv, ring, stamp_index,
399 !!((ring->cons + txbbs_stamp) &
400 ring->size));
413 * the ring consumer.
418 ring->cons += txbbs_skipped;
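
The stamping at lines 398-400 and the owner_bit computation at line 682 use the same trick: for a power-of-two ring, ANDing a free-running counter with ring->size isolates the bit just above the index bits, and that bit flips every time the counter completes one pass over the ring, so producer and completion paths agree on which pass a descriptor belongs to. A minimal illustration:

    #include <stdint.h>
    #include <stdbool.h>

    /* ring_size must be a power of two; the returned flag alternates on
     * every full pass of the counter over the ring. */
    static bool wqe_owner_bit(uint32_t counter, uint32_t ring_size)
    {
            return ((counter & ring_size) != 0);
    }
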
427 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
429 if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock))
433 spin_unlock(&ring->comp_lock);
440 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
447 if (!spin_trylock(&ring->comp_lock)) {
452 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
460 spin_unlock(&ring->comp_lock);
466 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
477 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
478 if (spin_trylock(&ring->comp_lock)) {
480 spin_unlock(&ring->comp_lock);
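
Lines 477-480 show the transmit path reaping its own completions opportunistically: every MLX4_EN_TX_POLL_MODER packets it tries comp_lock with a trylock, so the hot path never blocks behind the interrupt handler that normally owns the lock. A sketch of the shape of that code; the completion-processing call itself sits on an unmatched line and is only indicated by a comment here:

    if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) {
            if (spin_trylock(&ring->comp_lock)) {
                    /* poll the TX completion queue for this ring */
                    spin_unlock(&ring->comp_lock);
            }
    }
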
485 mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
490 retval = MIN(ring->inline_thold, mb->m_len);
494 retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
642 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
663 /* check if TX ring is full */
664 if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
671 KASSERT(((~ring->prod) & ring->size_mask) >=
672 (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));
676 (u32) (ring->prod - ring->cons - 1));
682 owner_bit = (ring->prod & ring->size) ?
684 index = ring->prod & ring->size_mask;
686 (ring->buf + index * TXBB_SIZE);
687 tx_info = &ring->tx_info[index];
707 ring->tx_csum++;
742 ring->oversized_packets++;
752 ring->bytes += payload_len + (num_pkts * ihs);
753 ring->packets += num_pkts;
754 ring->tso_packets++;
763 ihs = mlx4_en_get_inline_hdr_size(ring, mb);
764 ring->bytes += max_t (unsigned int,
766 ring->packets++;
775 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
779 ring->defrag_attempts++;
782 ring->oversized_packets++;
787 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
792 ring->oversized_packets++;
798 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
802 bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
815 pad = (~(ring->prod + pad)) & ring->size_mask;
820 * pad in order to achieve a TX ring wraparound:
848 bf_prod = ring->prod;
888 ring->prod += tx_info->nr_txbb;
890 if (ring->bf_enabled && bf_size <= MAX_BF &&
894 *(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
906 mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
909 ring->bf.offset ^= ring->bf.buf_size;
918 writel(cpu_to_be32(ring->doorbell_qpn),
919 ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
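
Lines 890-919 cover the two ways of notifying the hardware. When BlueFlame is enabled and the descriptor is small enough (bf_size <= MAX_BF), the QP number is folded into the descriptor and the whole WQE is copied through the BlueFlame window, saving the device a DMA read of the descriptor; otherwise the classic doorbell is rung by writing the pre-shifted QP number (doorbell_qpn = qpn << 8, line 231). A hedged outline of the branch; memory barriers and the remaining qualifying checks are omitted:

    if (ring->bf_enabled && bf_size <= MAX_BF /* plus further checks */) {
            /* QPN goes into ctrl.vlan_tag so the BF copy is self-describing,
             * then the WQE is written through the BlueFlame window
             * (mlx4_bf_copy in the source). */
            ring->bf.offset ^= ring->bf.buf_size;   /* alternate BF buffers */
    } else {
            writel(cpu_to_be32(ring->doorbell_qpn),
                ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
    }
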
933 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
946 if (ring->watchdog_time == 0)
947 ring->watchdog_time = ticks + MLX4_EN_WATCHDOG_TIMEOUT;
949 ring->watchdog_time = 0;
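
Lines 946-949 arm and disarm a per-ring watchdog. The condition at line 946 is not fully shown; the sketch below assumes the usual pattern: the first time a transmit attempt cannot make progress a deadline is recorded, and any later successful transmit clears it, while a periodic task elsewhere in the driver (assumed, not in this listing) compares the deadline against ticks to detect a stuck queue:

    if (ring_full) {
            if (ring->watchdog_time == 0)
                    ring->watchdog_time = ticks + MLX4_EN_WATCHDOG_TIMEOUT;
    } else {
            ring->watchdog_time = 0;        /* progress was made, disarm */
    }
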
958 struct mlx4_en_tx_ring *ring;
974 ring = priv->tx_ring[i];
976 spin_lock(&ring->tx_lock);
979 spin_unlock(&ring->tx_lock);
992 * Flush ring buffers.
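
Finally, lines 974-979 and the "Flush ring buffers" comment at line 992 show the per-ring serialization used outside the fast path: each TX ring has its own tx_lock, so senders and the flush path only contend when they target the same ring. A hedged outline; how the ring index i is chosen is assumed (typically from the packet's flow id):

    ring = priv->tx_ring[i];
    spin_lock(&ring->tx_lock);
    /* enqueue or transmit on this ring, or drain it when flushing */
    spin_unlock(&ring->tx_lock);
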