Searched refs:tx_ring (Results 1 - 25 of 53) sorted by relevance

/freebsd-10.1-release/sys/ofed/drivers/infiniband/ulp/sdp/
sdp_tx.c
52 if (!callout_pending(&ssk->tx_ring.timer))
53 callout_reset(&ssk->tx_ring.timer, SDP_TX_POLL_TIMEOUT,
57 if (force || (++ssk->tx_ring.poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0)
85 mseq = ring_head(ssk->tx_ring);
121 tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
157 atomic_inc(&ssk->tx_ring.head);
158 atomic_dec(&ssk->tx_ring.credits);
170 struct sdp_tx_ring *tx_ring = &ssk->tx_ring; local
172 if (unlikely(mseq != ring_tail(*tx_ring))) {
[all...]
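
The sdp_tx.c hits above follow the usual power-of-two ring idiom: head and tail are free-running counters (atomic_inc on the head, a credit decremented per post), and the buffer slot is selected by masking the sequence number with SDP_TX_SIZE - 1. A minimal user-space sketch of that idiom follows; the struct, the TX_SIZE constant, and the function names are illustrative stand-ins, not the actual SDP definitions.

/*
 * Sketch of the ring indexing seen in sdp_tx.c.  Head and tail are
 * free-running 32-bit counters and the buffer index is the counter
 * masked with (size - 1), so the ring size must be a power of two.
 * All names here are illustrative, not the SDP structures.
 */
#include <stddef.h>
#include <stdint.h>

#define TX_SIZE 256                                  /* power of two, like SDP_TX_SIZE */

struct tx_slot { void *mb; };

struct tx_ring_sketch {
	uint32_t head;                               /* next slot to post */
	uint32_t tail;                               /* next slot to complete */
	struct tx_slot buffer[TX_SIZE];
};

static int
tx_post(struct tx_ring_sketch *r, void *mb)
{
	if (r->head - r->tail >= TX_SIZE)            /* ring full */
		return (-1);
	r->buffer[r->head & (TX_SIZE - 1)].mb = mb;  /* cf. mseq & (SDP_TX_SIZE - 1) */
	r->head++;                                   /* the driver uses atomic_inc() here */
	return (0);
}

static void *
tx_complete(struct tx_ring_sketch *r)
{
	if (r->tail == r->head)                      /* nothing outstanding */
		return (NULL);
	return (r->buffer[r->tail++ & (TX_SIZE - 1)].mb);
}
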
sdp.h
147 #define tx_ring_posted(ssk) (ring_posted(ssk->tx_ring) + \
148 (ssk->tx_ring.rdma_inflight ? ssk->tx_ring.rdma_inflight->busy : 0))
150 #define tx_ring_posted(ssk) ring_posted(ssk->tx_ring)
320 #define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))
401 struct sdp_tx_ring tx_ring; member in struct:sdp_sock
498 ib_req_notify_cq(ssk->tx_ring.cq, IB_CQ_NEXT_COMP);
503 * - free slots in tx_ring (not including SDP_MIN_TX_CREDITS
sdp_cma.c
112 qp_init_attr.send_cq = ssk->tx_ring.cq;
173 atomic_set(&ssk->tx_ring.credits, ssk->max_bufs);
205 atomic_set(&ssk->tx_ring.credits, ssk->max_bufs);
/freebsd-10.1-release/sys/dev/qlxge/
qls_hw.c
645 txr_done = ha->tx_ring[txr_idx].txr_done;
646 txr_next = ha->tx_ring[txr_idx].txr_next;
649 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
651 ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
653 ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
657 if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
691 if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
695 ha->tx_ring[txr_idx].txr_free);
700 tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
712 ha->tx_ring[txr_id
[all...]
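
The qls_hw.c hits are the standard free-descriptor arithmetic for a hardware TX ring: txr_next is the producer index, txr_done the consumer index, and the free count is their difference with wraparound handled explicitly. The branch conditions themselves are truncated in the listing, so the sketch below fills them in with the common convention (equal indices meaning an empty ring) purely as an assumption; the descriptor count is likewise illustrative.

/*
 * Sketch of the txr_free computation in qls_hw.c: free = (done - next)
 * mod N, with equal indices assumed to mean a completely free ring.
 * NUM_TX_DESCRIPTORS is an arbitrary illustrative value.
 */
#include <stdint.h>

#define NUM_TX_DESCRIPTORS 1024

static uint32_t
txr_free_slots(uint32_t txr_done, uint32_t txr_next)
{
	if (txr_done == txr_next)                    /* nothing posted */
		return (NUM_TX_DESCRIPTORS);
	else if (txr_done > txr_next)
		return (txr_done - txr_next);
	else                                         /* producer wrapped past consumer */
		return (NUM_TX_DESCRIPTORS + txr_done - txr_next);
}
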
qls_isr.c
57 txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];
69 ha->tx_ring[txr_idx].txr_done++;
71 if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
72 ha->tx_ring[txr_idx].txr_done = 0;
qls_os.c
169 "%s: tx_ring[%d].tx_frames= %p\n",
171 (void *)ha->tx_ring[i].tx_frames);
174 "%s: tx_ring[%d].tx_tso_frames= %p\n",
176 (void *)ha->tx_ring[i].tx_tso_frames);
179 "%s: tx_ring[%d].tx_vlan_frames= %p\n",
181 (void *)ha->tx_ring[i].tx_vlan_frames);
184 "%s: tx_ring[%d].txr_free= 0x%08x\n",
186 ha->tx_ring[i].txr_free);
189 "%s: tx_ring[%d].txr_next= 0x%08x\n",
191 ha->tx_ring[
[all...]
/freebsd-10.1-release/sys/dev/et/
if_et.c
862 struct et_txdesc_ring *tx_ring; local
881 tx_ring = &sc->sc_tx_ring;
883 &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
884 &tx_ring->tr_paddr, "TX ring");
1041 struct et_txdesc_ring *tx_ring; local
1117 tx_ring = &sc->sc_tx_ring;
1118 et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring
1376 struct et_txdesc_ring *tx_ring; local
1499 struct et_txdesc_ring *tx_ring; local
1682 struct et_txdesc_ring *tx_ring; local
1810 struct et_txdesc_ring *tx_ring; local
2154 struct et_txdesc_ring *tx_ring; local
2251 struct et_txdesc_ring *tx_ring; local
[all...]
/freebsd-10.1-release/sys/ofed/drivers/infiniband/hw/mlx4/
mad.c
532 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
533 if (tun_qp->tx_ring[tun_tx_ix].ah)
534 ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
535 tun_qp->tx_ring[tun_tx_ix].ah = ah;
537 tun_qp->tx_ring[tun_tx_ix].buf.map,
554 tun_qp->tx_ring[tun_tx_ix].buf.map,
558 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
1357 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1358 if (sqp->tx_ring[wire_tx_ix].ah)
1359 ib_destroy_ah(sqp->tx_ring[wire_tx_i
[all...]
/freebsd-10.1-release/sys/ofed/drivers/net/mlx4/
en_netdev.c
1210 struct mlx4_en_tx_ring *tx_ring; local
1311 tx_ring = priv->tx_ring[i];
1313 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1325 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1326 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
1389 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1483 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1489 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
1523 ring = priv->tx_ring[
2404 struct mlx4_en_tx_ring *tx_ring; local
[all...]
en_tx.c
374 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
467 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
480 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
532 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
775 ring = priv->tx_ring[tx_ind];
1006 ring = priv->tx_ring[tx_ind];
1056 ring = priv->tx_ring[tx_ind];
1081 ring = priv->tx_ring[i];
1108 ring = priv->tx_ring[i];
/freebsd-10.1-release/sys/dev/xen/netback/
netback.c
164 const netif_tx_back_ring_t *tx_ring,
328 netif_tx_back_ring_t tx_ring; member in union:xnb_ring_config::__anon10064
551 &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
734 BACK_RING_INIT(&ring->back_ring.tx_ring,
1429 txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
1468 * \param[in] tx_ring Pointer to the Ring that is the input to this function
1473 xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, argument
1493 if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
1494 netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
1505 RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, id
[all...]
/freebsd-10.1-release/sys/contrib/octeon-sdk/
cvmx-mgmt-port.c
95 int tx_write_index; /* Where the next TX will write in the tx_ring and tx_buffers */
99 cvmx_mgmt_port_ring_entry_t tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS]; member in struct:__anon7183
183 if (oring1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS || cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
199 if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
286 state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;
287 state->tx_ring[i].s.addr = cvmx_ptr_to_phys(state->tx_buffers[i]);
292 oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring)>>3;
562 state->tx_ring[state->tx_write_index].s.len = packet_len;
564 state->tx_ring[state->tx_write_index].s.tstamp = 0;
617 state->tx_ring[stat
[all...]
/freebsd-10.1-release/sys/dev/e1000/
if_igb.h
282 struct tx_ring *txr;
292 struct tx_ring { struct
447 struct tx_ring *tx_rings;
if_em.h
277 struct tx_ring { struct
392 struct tx_ring *tx_rings;
if_em.c
210 struct tx_ring *, struct mbuf *);
214 static void em_start_locked(struct ifnet *, struct tx_ring *);
235 static int em_allocate_transmit_buffers(struct tx_ring *);
237 static void em_free_transmit_buffers(struct tx_ring *);
249 static void em_txeof(struct tx_ring *);
255 static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int,
257 static void em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *,
267 static int em_xmit(struct tx_ring *, struct mbuf **);
878 struct tx_ring *txr = adapter->tx_rings;
917 em_mq_start_locked(struct ifnet *ifp, struct tx_ring *tx
[all...]
if_igb.c
194 static int igb_mq_start_locked(struct ifnet *, struct tx_ring *);
199 static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
219 static int igb_allocate_transmit_buffers(struct tx_ring *);
221 static void igb_setup_transmit_ring(struct tx_ring *);
224 static void igb_free_transmit_buffers(struct tx_ring *);
237 static bool igb_txeof(struct tx_ring *);
245 static int igb_tx_ctx_setup(struct tx_ring *,
247 static int igb_tso_setup(struct tx_ring *,
259 static int igb_xmit(struct tx_ring *, struct mbuf **);
857 struct tx_ring *tx
[all...]
/freebsd-10.1-release/sys/dev/ixgbe/
ixgbe.h
282 struct tx_ring *txr;
292 struct tx_ring { struct
447 struct tx_ring *tx_rings;
ixv.h
241 struct tx_ring *txr;
251 struct tx_ring { struct
364 struct tx_ring *tx_rings;
ixgbe.c
107 static void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
110 static int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
132 static int ixgbe_allocate_transmit_buffers(struct tx_ring *);
134 static void ixgbe_setup_transmit_ring(struct tx_ring *);
137 static void ixgbe_free_transmit_buffers(struct tx_ring *);
150 static void ixgbe_txeof(struct tx_ring *);
157 static int ixgbe_xmit(struct tx_ring *, struct mbuf **);
164 static int ixgbe_tx_ctx_setup(struct tx_ring *,
166 static int ixgbe_tso_setup(struct tx_ring *,
203 static void ixgbe_atr(struct tx_ring *, struc
[all...]
ixv.c
79 static void ixv_start_locked(struct tx_ring *, struct ifnet *);
83 struct tx_ring *, struct mbuf *);
102 static int ixv_allocate_transmit_buffers(struct tx_ring *);
104 static void ixv_setup_transmit_ring(struct tx_ring *);
107 static void ixv_free_transmit_buffers(struct tx_ring *);
118 static bool ixv_txeof(struct tx_ring *);
124 static int ixv_xmit(struct tx_ring *, struct mbuf **);
133 static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
134 static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
512 ixv_start_locked(struct tx_ring *tx
[all...]
/freebsd-10.1-release/sys/dev/netmap/
if_em_netmap.h
48 struct tx_ring *txr = adapter->tx_rings;
69 struct tx_ring *txr = adapter->tx_rings;
131 struct tx_ring *txr = &adapter->tx_rings[kring->ring_id];
/freebsd-10.1-release/sys/dev/rt/
if_rt.c
354 error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
452 rt_free_tx_ring(sc, &sc->tx_ring[i]);
569 rt_free_tx_ring(sc, &sc->tx_ring[i]);
705 rt_reset_tx_ring(sc, &sc->tx_ring[i]);
710 sc->tx_ring[i].desc_phys_addr);
874 RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
877 ring = &sc->tx_ring[qid];
1016 RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
1018 if (sc->tx_ring[qid].data_queued >=
1020 RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qi
[all...]
/freebsd-10.1-release/sys/dev/ixl/
ixl_txrx.c
66 struct tx_ring *txr;
95 ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
136 struct tx_ring *txr = &que->txr;
156 struct tx_ring *txr = &que->txr;
208 struct tx_ring *txr = &que->txr;
385 struct tx_ring *txr = &que->txr;
460 struct tx_ring *txr = &que->txr;
509 struct tx_ring *txr = &que->txr;
675 struct tx_ring *txr = &que->txr;
776 struct tx_ring *tx
[all...]
/freebsd-10.1-release/sys/dev/qlxgbe/
ql_hw.c
99 (void *)ha->tx_ring[i].count);
394 if (ha->hw.dma_buf.flags.tx_ring) {
395 ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
396 ha->hw.dma_buf.flags.tx_ring = 0;
426 hw->dma_buf.tx_ring.alignment = 8;
427 hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
429 if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
434 vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
435 paddr = hw->dma_buf.tx_ring.dma_addr;
457 ha->hw.dma_buf.flags.tx_ring
[all...]
ql_def.h
178 qla_tx_ring_t tx_ring[NUM_TX_RINGS]; member in struct:qla_host

Completed in 331 milliseconds
