Lines Matching refs:ring

71 struct mlx4_en_tx_ring *ring;
76 ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
77 if (!ring) {
78 ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
79 if (!ring) {
80 en_err(priv, "Failed allocating TX ring\n");
98 &ring->dma_tag)))
101 ring->size = size;
102 ring->size_mask = size - 1;
103 ring->stride = stride;
104 ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
105 mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
106 mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);
108 /* Allocate the buf ring */
109 ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
110 M_WAITOK, &ring->tx_lock.m);
111 if (ring->br == NULL) {
112 en_err(priv, "Failed allocating tx_info ring\n");
118 ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
119 if (!ring->tx_info) {
120 ring->tx_info = kzalloc(tmp, GFP_KERNEL);
121 if (!ring->tx_info) {
129 err = -bus_dmamap_create(ring->dma_tag, 0,
130 &ring->tx_info[x].dma_map);
133 bus_dmamap_destroy(ring->dma_tag,
134 ring->tx_info[x].dma_map);
140 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
141 ring->tx_info, tmp);
143 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
146 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
153 err = mlx4_en_map_buffer(&ring->wqres.buf);
159 ring->buf = ring->wqres.buf.direct.buf;
161 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
162 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
163 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
165 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
168 en_err(priv, "failed reserving qp for TX ring\n");
172 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
174 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
177 ring->qp.event = mlx4_en_sqp_event;
179 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
182 ring->bf.uar = &mdev->priv_uar;
183 ring->bf.uar->map = mdev->uar_map;
184 ring->bf_enabled = false;
186 ring->bf_enabled = true;
187 ring->queue_index = queue_idx;
189 CPU_SET(queue_idx, &ring->affinity_mask);
191 *pring = ring;
195 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
197 mlx4_en_unmap_buffer(&ring->wqres.buf);
199 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
202 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
204 vfree(ring->tx_info);
206 buf_ring_free(ring->br, M_DEVBUF);
208 bus_dma_tag_destroy(ring->dma_tag);
210 kfree(ring);
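
The creation path above (lines 71-210) stores size_mask = size - 1 next to size (lines 101-103), which only works as a wrap mask if the ring size is a power of two: prod and cons can then run freely as 32-bit counters and a single AND reduces either one to a slot index. A minimal user-space sketch of that indexing scheme (the struct and the values are illustrative, not taken from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the scheme on lines 101-102: size is a power of two,
     * size_mask = size - 1, prod/cons are free-running 32-bit counters. */
    struct tx_ring_model {
        uint32_t size;
        uint32_t size_mask;
        uint32_t prod;
        uint32_t cons;
    };

    int
    main(void)
    {
        struct tx_ring_model r = { .size = 1024, .size_mask = 1023 };

        r.prod = 0xfffffffeU;                 /* counters may wrap the u32 range */
        r.cons = 0xfffffff0U;

        printf("slot      = %u\n", r.prod & r.size_mask);  /* current slot */
        printf("in flight = %u\n", r.prod - r.cons);       /* still correct across wrap */
        return (0);
    }
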
218 struct mlx4_en_tx_ring *ring = *pring;
220 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
222 buf_ring_free(ring->br, M_DEVBUF);
223 if (ring->bf_enabled)
224 mlx4_bf_free(mdev->dev, &ring->bf);
225 mlx4_qp_remove(mdev->dev, &ring->qp);
226 mlx4_qp_free(mdev->dev, &ring->qp);
227 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
228 mlx4_en_unmap_buffer(&ring->wqres.buf);
229 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
230 for (x = 0; x != ring->size; x++)
231 bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
232 vfree(ring->tx_info);
233 mtx_destroy(&ring->tx_lock.m);
234 mtx_destroy(&ring->comp_lock.m);
235 bus_dma_tag_destroy(ring->dma_tag);
236 kfree(ring);
241 struct mlx4_en_tx_ring *ring,
247 ring->cqn = cq;
248 ring->prod = 0;
249 ring->cons = 0xffffffff;
250 ring->last_nr_txbb = 1;
251 ring->poll_cnt = 0;
252 ring->blocked = 0;
253 memset(ring->buf, 0, ring->buf_size);
255 ring->qp_state = MLX4_QP_STATE_RST;
256 ring->doorbell_qpn = ring->qp.qpn << 8;
258 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
259 ring->cqn, user_prio, &ring->context);
260 if (ring->bf_enabled)
261 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
263 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
264 &ring->qp, &ring->qp_state);
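
Activation (lines 247-264) resets the counters to prod = 0, cons = 0xffffffff and last_nr_txbb = 1, so the unsigned difference prod - cons starts at exactly the one TXBB that last_nr_txbb already accounts for, and doorbell_qpn is the QP number pre-shifted by 8 (line 256) so that line 981 can write cpu_to_be32(doorbell_qpn) straight to the doorbell register. A small standalone check of that arithmetic (the values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t prod = 0;
        uint32_t cons = 0xffffffffU;
        uint32_t last_nr_txbb = 1;
        uint32_t qpn = 0x123;

        /* prod - cons wraps to 1: the one TXBB claimed by last_nr_txbb */
        assert(prod - cons == last_nr_txbb);

        /* doorbell value carries the QP number shifted into the upper bits */
        uint32_t doorbell_qpn = qpn << 8;
        assert((doorbell_qpn >> 8) == qpn);
        return (0);
    }
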
269 struct mlx4_en_tx_ring *ring)
273 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
274 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
297 struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
299 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
301 (ring->buf + (index * TXBB_SIZE));
316 struct mlx4_en_tx_ring *ring, u32 index)
321 tx_info = &ring->tx_info[index];
327 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
329 bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
336 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
342 ring->cons += ring->last_nr_txbb;
344 ring->cons, ring->prod);
346 if ((u32) (ring->prod - ring->cons) > ring->size) {
351 while (ring->cons != ring->prod) {
352 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
353 ring->cons & ring->size_mask);
354 ring->cons += ring->last_nr_txbb;
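
mlx4_en_free_tx_buf (lines 336-354) drains whatever is still outstanding by walking cons toward prod, advancing by the TXBB count that mlx4_en_free_tx_desc returns for each entry; line 346's unsigned comparison is a sanity check that no more than one ring's worth of entries is outstanding. A user-space model of that drain loop, with the real per-descriptor free replaced by a stub (all names below are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for mlx4_en_free_tx_desc(); every entry is one TXBB here. */
    static uint32_t
    free_one_desc(uint32_t slot)
    {
        (void)slot;
        return (1);                      /* TXBBs occupied by this entry */
    }

    int
    main(void)
    {
        uint32_t size = 8, size_mask = 7;
        uint32_t cons = 0xfffffffdU, prod = 3;  /* outstanding entries span a wrap */
        uint32_t last_nr_txbb = 1;
        uint32_t freed = 0;

        cons += last_nr_txbb;            /* skip the entry already accounted for */
        if ((uint32_t)(prod - cons) > size)
            printf("corrupted indices\n");      /* sanity check, as on line 346 */

        while (cons != prod) {
            last_nr_txbb = free_one_desc(cons & size_mask);
            cons += last_nr_txbb;
            freed++;
        }
        printf("freed %u entries\n", freed);
        return (0);
    }
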
365 mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
368 wqs = ring->size - (ring->prod - ring->cons);
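
The fullness test (lines 365-368) computes the free work-queue slots as size - (prod - cons) and treats the ring as full when fewer slots remain than a reserve large enough for a maximum-sized WQE. The reserve itself is not visible in the matched lines, so the constant below is only a placeholder:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TX_RESERVE_SLOTS 32     /* placeholder; the real reserve is not shown above */

    static bool
    tx_ring_is_full(uint32_t size, uint32_t prod, uint32_t cons)
    {
        uint32_t wqs = size - (prod - cons);    /* free slots, as on line 368 */

        return (wqs < TX_RESERVE_SLOTS);
    }

    int
    main(void)
    {
        printf("1000/1024 used -> %s\n", tx_ring_is_full(1024, 1000, 0) ? "full" : "ok");
        printf(" 500/1024 used -> %s\n", tx_ring_is_full(1024, 500, 0) ? "full" : "ok");
        return (0);
    }
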
377 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
385 u32 size_mask = ring->size_mask;
394 ring_index = ring->cons & size_mask;
418 txbbs_skipped += ring->last_nr_txbb;
419 ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
421 ring->last_nr_txbb = mlx4_en_free_tx_desc(
422 priv, ring, ring_index);
423 mlx4_en_stamp_wqe(priv, ring, stamp_index,
424 !!((ring->cons + txbbs_stamp) &
425 ring->size));
438 * the ring consumer.
443 ring->cons += txbbs_skipped;
445 /* Wakeup Tx queue if it was stopped and ring is not full */
446 if (unlikely(ring->blocked) && !mlx4_en_tx_ring_is_full(ring)) {
447 ring->blocked = 0;
450 ring->wake_queue++;
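
mlx4_en_process_tx_cq (lines 377-450) starts at cons & size_mask, frees each completed descriptor, accumulates the TXBBs covered into txbbs_skipped, and only then advances ring->cons once (line 443); if the queue had been stopped (ring->blocked) and the ring is no longer full, it is woken (lines 446-450). A skeletal user-space model of that bookkeeping, with CQE parsing, WQE stamping and the real free all stubbed out (every name and constant is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for mlx4_en_free_tx_desc(); every entry is one TXBB here. */
    static uint32_t
    free_desc(uint32_t slot)
    {
        (void)slot;
        return (1);
    }

    int
    main(void)
    {
        uint32_t size = 64, size_mask = 63;
        uint32_t prod = 40, cons = 0;
        uint32_t last_nr_txbb = 1;
        bool blocked = true;
        uint32_t completed = 16;         /* entries the CQ reported as done */

        uint32_t ring_index = cons & size_mask;
        uint32_t txbbs_skipped = 0;

        /* free each completed descriptor; cons is only advanced afterwards */
        while (completed--) {
            txbbs_skipped += last_nr_txbb;
            ring_index = (ring_index + last_nr_txbb) & size_mask;
            last_nr_txbb = free_desc(ring_index);
        }
        cons += txbbs_skipped;

        /* wake the queue if it was stopped and the ring has room again */
        if (blocked && (size - (prod - cons)) >= 8 /* placeholder reserve */) {
            blocked = false;
            printf("queue woken, cons advanced to %u\n", cons);
        }
        return (0);
    }
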
460 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
462 if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock))
466 spin_unlock(&ring->comp_lock);
473 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
480 if (!spin_trylock(&ring->comp_lock)) {
485 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
493 spin_unlock(&ring->comp_lock);
499 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
510 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
511 if (spin_trylock(&ring->comp_lock)) {
513 spin_unlock(&ring->comp_lock);
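
mlx4_en_xmit_poll (lines 499-513) rate-limits completion reaping from the transmit side: poll_cnt is bumped on every call and the completion lock is only tried when the low bits reach zero, i.e. once per MLX4_EN_TX_POLL_MODER calls, which the mask test assumes is a power of two. A tiny illustration of that moderation test (the constant is a placeholder):

    #include <stdint.h>
    #include <stdio.h>

    #define TX_POLL_MODER 16    /* placeholder; must be a power of two for the mask */

    int
    main(void)
    {
        uint32_t poll_cnt = 0;

        for (int call = 0; call < 64; call++) {
            if ((++poll_cnt & (TX_POLL_MODER - 1)) == 0)
                printf("reap completions on call %d\n", call);
        }
        return (0);
    }
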
518 mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
523 retval = MIN(ring->inline_thold, mb->m_len);
527 retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
687 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
708 /* check if TX ring is full */
709 if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
710 /* every full native Tx ring stops queue */
711 if (ring->blocked == 0)
716 ring->blocked = 1;
718 ring->queue_stopped++;
726 KASSERT(((~ring->prod) & ring->size_mask) >=
727 (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));
731 (u32) (ring->prod - ring->cons - 1));
737 owner_bit = (ring->prod & ring->size) ?
739 index = ring->prod & ring->size_mask;
741 (ring->buf + index * TXBB_SIZE);
742 tx_info = &ring->tx_info[index];
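
In the transmit path (lines 737-742) the slot is prod & size_mask and the ownership bit is derived from prod & size: because size is a power of two, that bit flips once per full trip around the ring, so descriptors written on the current pass carry a different owner marking than stale ones left from the previous pass. A short demonstration of the toggle (the owner-bit value below is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define OWNER_BIT 0x80000000U   /* illustrative value for the set owner bit */

    int
    main(void)
    {
        uint32_t size = 8, size_mask = 7;

        for (uint32_t prod = 0; prod < 24; prod++) {
            uint32_t owner = (prod & size) ? OWNER_BIT : 0;
            uint32_t slot = prod & size_mask;

            printf("prod=%2u slot=%u owner=%s\n",
                prod, slot, owner ? "set" : "clear");
        }
        return (0);
    }
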
762 ring->tx_csum++;
787 ring->oversized_packets++;
797 ring->bytes += payload_len + (num_pkts * ihs);
798 ring->packets += num_pkts;
808 ihs = mlx4_en_get_inline_hdr_size(ring, mb);
809 ring->bytes += max_t (unsigned int,
811 ring->packets++;
830 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
836 ring->oversized_packets++;
841 err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
846 ring->oversized_packets++;
850 bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
862 pad = (~(ring->prod + pad)) & ring->size_mask;
867 * pad in order to achieve a TX ring wraparound:
895 bf_prod = ring->prod;
951 ring->prod += tx_info->nr_txbb;
953 if (ring->bf_enabled && bf_size <= MAX_BF &&
957 *(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
969 mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
972 ring->bf.offset ^= ring->bf.buf_size;
981 writel(cpu_to_be32(ring->doorbell_qpn),
982 ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
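
After the WQE is built, prod is advanced (line 951) and the hardware is notified in one of two ways (lines 953-982): a small descriptor can be pushed through the BlueFlame register with mlx4_bf_copy, toggling bf.offset between two halves of the BlueFlame buffer, while the general path writes the pre-shifted doorbell_qpn to the send doorbell. A schematic of that decision with the MMIO accesses reduced to printing stubs (every name and size here is illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BF_SKETCH 256   /* placeholder BlueFlame size limit */

    /* Stubs standing in for the MMIO accesses. */
    static void
    bf_copy(void *bf_reg, const void *wqe, size_t len)
    {
        (void)bf_reg; (void)wqe;
        printf("BlueFlame push, %zu bytes\n", len);
    }

    static void
    doorbell_write(uint32_t doorbell_qpn)
    {
        printf("doorbell write, qpn field 0x%x\n", doorbell_qpn >> 8);
    }

    int
    main(void)
    {
        uint8_t bf_buf[2 * MAX_BF_SKETCH];   /* two alternating halves */
        uint32_t bf_offset = 0;
        uint32_t doorbell_qpn = 0x123 << 8;
        uint8_t wqe[64] = { 0 };
        int bf_enabled = 1;

        if (bf_enabled && sizeof(wqe) <= MAX_BF_SKETCH) {
            bf_copy(bf_buf + bf_offset, wqe, sizeof(wqe));
            bf_offset ^= MAX_BF_SKETCH;      /* flip between the two halves */
        } else {
            doorbell_write(doorbell_qpn);
        }
        return (0);
    }
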
996 struct mlx4_en_tx_ring *ring;
1000 ring = priv->tx_ring[tx_ind];
1004 err = drbr_enqueue(dev, ring->br, m);
1014 err = drbr_enqueue(dev, ring->br, m);
1017 while ((next = drbr_peek(dev, ring->br)) != NULL) {
1020 drbr_advance(dev, ring->br);
1022 drbr_putback(dev, ring->br, next);
1026 drbr_advance(dev, ring->br);
1033 ring->watchdog_time = ticks;
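
mlx4_en_transmit_locked (lines 996-1033) follows the usual FreeBSD buf_ring shape: the mbuf is enqueued on ring->br, then entries are drbr_peek'ed, handed to the hardware, and either drbr_advance'd on success or drbr_putback'ed so they stay queued for a later attempt. A condensed user-space model of that loop, with a trivial FIFO standing in for the buf_ring and a stub for the hardware transmit:

    #include <stdio.h>

    #define QLEN 4

    /* Trivial FIFO standing in for the drbr buf_ring. */
    static const char *q[QLEN] = { "pkt0", "pkt1", "pkt2", "pkt3" };
    static int q_head = 0, q_count = QLEN;

    static const char *queue_peek(void) { return (q_count ? q[q_head] : NULL); }
    static void queue_advance(void) { q_head = (q_head + 1) % QLEN; q_count--; }

    /* Stand-in for the hardware transmit; fails after two sends. */
    static int
    try_transmit(const char *pkt)
    {
        static int budget = 2;

        if (budget == 0) {
            printf("xmit %s -> ring full\n", pkt);
            return (-1);
        }
        budget--;
        printf("xmit %s -> ok\n", pkt);
        return (0);
    }

    int
    main(void)
    {
        const char *next;

        /* peek, transmit, then advance on success or stop and retry later */
        while ((next = queue_peek()) != NULL) {
            if (try_transmit(next) != 0)
                break;      /* like drbr_putback(): the entry stays at the head */
            queue_advance();
        }
        return (0);
    }
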
1041 struct mlx4_en_tx_ring *ring;
1049 tx_ind = cq->ring;
1050 ring = priv->tx_ring[tx_ind];
1055 spin_lock(&ring->tx_lock);
1056 if (!drbr_empty(dev, ring->br))
1058 spin_unlock(&ring->tx_lock);
1066 struct mlx4_en_tx_ring *ring;
1083 ring = priv->tx_ring[i];
1084 if (spin_trylock(&ring->tx_lock)) {
1086 spin_unlock(&ring->tx_lock);
1090 err = drbr_enqueue(dev, ring->br, m);
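
The transmit entry point (lines 1066-1090) picks a ring and, if a trylock on ring->tx_lock succeeds, services the ring immediately; otherwise the mbuf is only enqueued on ring->br for whoever does hold the lock (the exact else-path is not visible in the matched lines). A compact model of that lock-avoidance pattern using a C11 atomic flag in place of the spin lock (the names are hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag tx_lock = ATOMIC_FLAG_INIT;

    static void
    submit(const char *pkt)
    {
        if (!atomic_flag_test_and_set(&tx_lock)) {
            /* got the lock: transmit (and drain the backlog) right now */
            printf("transmit %s directly\n", pkt);
            atomic_flag_clear(&tx_lock);
        } else {
            /* lock busy: just enqueue; the lock holder will send it */
            printf("enqueue %s for the lock holder\n", pkt);
        }
    }

    int
    main(void)
    {
        submit("pkt0");
        atomic_flag_test_and_set(&tx_lock);   /* simulate a busy lock */
        submit("pkt1");
        atomic_flag_clear(&tx_lock);
        return (0);
    }
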
1099 * Flush ring buffers.
1105 struct mlx4_en_tx_ring *ring;
1112 ring = priv->tx_ring[i];
1113 spin_lock(&ring->tx_lock);
1114 while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
1116 spin_unlock(&ring->tx_lock);