Lines matching refs:srp (bge driver, send-side code)

78  * This function must advance (srp->tc_next) AND adjust (srp->tx_free)
86 static boolean_t bge_recycle_ring(bge_t *bgep, send_ring_t *srp);
90 bge_recycle_ring(bge_t *bgep, send_ring_t *srp)
100 ASSERT(mutex_owned(srp->tc_lock));
109 ASSERT(srp->tx_free <= srp->desc.nslots);
112 for (n = 0, slot = srp->tc_next; slot != *srp->cons_index_p;
113 slot = NEXT(slot, srp->desc.nslots)) {
114 ssbdp = &srp->sw_sbds[slot];
139 bgep->watchdog = (slot == srp->tx_next) ? 0 : 1;
144 srp->tc_next = slot;
145 ASSERT(srp->tx_free + n <= srp->desc.nslots);
146 bge_atomic_renounce(&srp->tx_free, n);
151 txbuf_queue = srp->txbuf_push_queue;
161 if ((srp->txbuf_pop_queue->count < srp->tx_buffers_low) &&
162 (srp->txbuf_pop_queue->count < txbuf_queue->count)) {
163 srp->txbuf_push_queue = srp->txbuf_pop_queue;
164 srp->txbuf_pop_queue = txbuf_queue;
167 if (srp->tx_flow != 0 || bgep->tx_resched_needed)
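Read together, the bge_recycle_ring() matches above (source lines 78-167) describe the recycle contract stated in the comment at line 78: walk the software send descriptors from tc_next up to the consumer index the chip reports, then advance tc_next, hand the reclaimed slots back to tx_free, and swap the txbuf push/pop queues if the pop queue has run low. A minimal reconstruction of that shape, assuming the send_ring_t, sw_sbd_t and queue definitions and the NEXT()/bge_atomic_renounce() helpers from the driver's headers; everything between the matched lines is elided or guessed:

static boolean_t
bge_recycle_ring(bge_t *bgep, send_ring_t *srp)
{
    sw_sbd_t *ssbdp;
    bge_queue_t *txbuf_queue;
    uint64_t slot;
    uint64_t n;

    ASSERT(mutex_owned(srp->tc_lock));
    ASSERT(srp->tx_free <= srp->desc.nslots);

    /* Walk from the last recycled slot up to the chip's consumer index. */
    for (n = 0, slot = srp->tc_next; slot != *srp->cons_index_p;
        slot = NEXT(slot, srp->desc.nslots)) {
        ssbdp = &srp->sw_sbds[slot];
        /* ... return ssbdp's completed tx buffer to txbuf_push_queue ... */
        n++;
    }

    /* Only quiet the watchdog if we have caught up with the producer. */
    bgep->watchdog = (slot == srp->tx_next) ? 0 : 1;

    /* Advance tc_next AND adjust tx_free, per the contract at line 78. */
    srp->tc_next = slot;
    ASSERT(srp->tx_free + n <= srp->desc.nslots);
    bge_atomic_renounce(&srp->tx_free, n);

    /* If the pop queue is low and the push queue is fuller, swap their roles. */
    txbuf_queue = srp->txbuf_push_queue;
    if ((srp->txbuf_pop_queue->count < srp->tx_buffers_low) &&
        (srp->txbuf_pop_queue->count < txbuf_queue->count)) {
        srp->txbuf_push_queue = srp->txbuf_pop_queue;
        srp->txbuf_pop_queue = txbuf_queue;
    }

    /* Tell the caller whether the send side should be poked again (line 167). */
    return (srp->tx_flow != 0 || bgep->tx_resched_needed);
}
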
207 send_ring_t *srp;
214 srp = &bgep->send[ring];
217 * For each ring, (srp->cons_index_p) points to the
221 ASSERT(srp->cons_index_p == SEND_INDEX_P(bsp, ring));
223 if (*srp->cons_index_p == srp->tc_next)
225 if (mutex_tryenter(srp->tc_lock) == 0)
227 tx_done |= bge_recycle_ring(bgep, srp);
228 mutex_exit(srp->tc_lock);
243 } while (++srp, ++ring < tx_rings);
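bge_recycle() (source lines 207-243) then simply walks the send rings. Each ring's cons_index_p points at that ring's consumer index in the status block, so a ring is skipped when that index has not moved past tc_next, and also when its tc_lock cannot be taken without blocking. Roughly, with the signature, status-block pointer and ring count treated as assumptions:

static boolean_t
bge_recycle(bge_t *bgep, bge_status_t *bsp, uint64_t tx_rings)  /* params partly assumed */
{
    send_ring_t *srp;
    boolean_t tx_done = B_FALSE;
    uint64_t ring = 0;

    srp = &bgep->send[ring];
    do {
        /* cons_index_p is this ring's consumer index within the status block. */
        ASSERT(srp->cons_index_p == SEND_INDEX_P(bsp, ring));
        if (*srp->cons_index_p == srp->tc_next)
            continue;           /* nothing new has completed           */
        if (mutex_tryenter(srp->tc_lock) == 0)
            continue;           /* another thread is already recycling */
        tx_done |= bge_recycle_ring(bgep, srp);
        mutex_exit(srp->tc_lock);
    } while (++srp, ++ring < tx_rings);  /* "continue" still runs this advance */

    return (tx_done);
}
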
296 bge_get_txbuf(bge_t *bgep, send_ring_t *srp)
301 txbuf_queue = srp->txbuf_pop_queue;
305 txbuf_queue = srp->txbuf_push_queue;
310 if (srp->tx_array < srp->tx_array_max) {
311 mutex_enter(srp->tx_lock);
312 txbuf_item = bge_alloc_txbuf_array(bgep, srp);
313 mutex_exit(srp->tx_lock);
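bge_get_txbuf() (source lines 296-313) supplies a free transmit buffer for the copy path: take one from the pop queue if it has any, otherwise fall back to the push queue, and only if both are empty grow the buffer array under tx_lock, provided tx_array has not reached tx_array_max. A sketch of that order; the queue element type and its head/next/lock fields are assumptions beyond what the matches show:

static bge_queue_item_t *
bge_get_txbuf(bge_t *bgep, send_ring_t *srp)
{
    bge_queue_item_t *txbuf_item;
    bge_queue_t *txbuf_queue;

    /* Consumers normally take buffers from the pop queue. */
    txbuf_queue = srp->txbuf_pop_queue;
    mutex_enter(txbuf_queue->lock);
    if (txbuf_queue->count == 0) {
        mutex_exit(txbuf_queue->lock);

        /* Fall back to the push queue, where recycle returns buffers. */
        txbuf_queue = srp->txbuf_push_queue;
        mutex_enter(txbuf_queue->lock);
        if (txbuf_queue->count == 0) {
            mutex_exit(txbuf_queue->lock);

            /* Both empty: grow the txbuf array if we are still allowed to. */
            if (srp->tx_array < srp->tx_array_max) {
                mutex_enter(srp->tx_lock);
                txbuf_item = bge_alloc_txbuf_array(bgep, srp);
                mutex_exit(srp->tx_lock);
                return (txbuf_item);
            }
            return (NULL);
        }
    }

    /* Unlink and return the head item (queue layout assumed). */
    txbuf_item = txbuf_queue->head;
    txbuf_queue->head = txbuf_item->next;
    txbuf_queue->count--;
    mutex_exit(txbuf_queue->lock);
    return (txbuf_item);
}
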
358 bge_send_serial(bge_t *bgep, send_ring_t *srp)
378 mutex_enter(srp->tx_lock);
379 else if (mutex_tryenter(srp->tx_lock) == 0)
383 txfill_next = srp->txfill_next;
384 tx_next = srp->tx_next;
387 pktp = &srp->pktp[txfill_next];
390 srp->tx_block++;
397 if (srp->tx_free <= 1)
403 if (!bge_atomic_reserve(&srp->tx_free, 1)) {
404 srp->tx_nobd++;
426 ssbdp = &srp->sw_sbds[tx_next];
449 tx_next = NEXT(tx_next, srp->desc.nslots);
456 bge_atomic_sub64(&srp->tx_flow, count);
457 srp->txfill_next = txfill_next;
459 if (srp->tx_next > tx_next) {
461 (srp->desc.nslots - srp->tx_next) *
464 count -= srp->desc.nslots - srp->tx_next;
465 ssbdp = &srp->sw_sbds[0];
469 bge_mbx_put(bgep, srp->chip_mbx_reg, tx_next);
470 srp->tx_next = tx_next;
473 if (srp->tx_flow != 0 && srp->tx_free > 1)
477 mutex_exit(srp->tx_lock);
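bge_send_serial() (source lines 358-477) is the single drainer that moves queued packets into hardware descriptors. Under tx_lock (entered unconditionally on one path, with mutex_tryenter() on the other, lines 378-379), it walks pktp[] from txfill_next, reserving one descriptor per packet out of tx_free (counting tx_nobd when reservation fails), filling sw_sbds[tx_next] and advancing tx_next. Once a batch has been built it subtracts the batch from tx_flow, syncs the descriptor memory for the device (in two pieces if the batch wrapped past the end of the ring, lines 459-465), writes the new producer index into the ring's mailbox register, and records it in tx_next. A compressed sketch; the tx_ready flag, the descriptor fill, and the DMA-sync calls are assumptions outside the matched lines:

static void
bge_send_serial(bge_t *bgep, send_ring_t *srp)
{
    uint64_t txfill_next, tx_next, count;
    sw_sbd_t *ssbdp;
    send_pkt_t *pktp;                   /* element type of srp->pktp[]; name assumed */

    /*
     * Interrupt-path callers block for tx_lock; others just try it and
     * bail out if another thread is already draining (lines 378-379).
     * The test that distinguishes the two kinds of caller is elided here.
     */
    if (mutex_tryenter(srp->tx_lock) == 0)
        return;

    txfill_next = srp->txfill_next;
    tx_next = srp->tx_next;

    for (count = 0; ; count++) {
        pktp = &srp->pktp[txfill_next];
        if (!pktp->tx_ready) {          /* flag name assumed */
            if (count == 0)
                srp->tx_block++;        /* stalled: nothing ready to fill */
            break;
        }
        if (srp->tx_free <= 1)
            break;                      /* always keep one slot in reserve */
        if (!bge_atomic_reserve(&srp->tx_free, 1)) {
            srp->tx_nobd++;             /* lost the race for a descriptor */
            break;
        }
        ssbdp = &srp->sw_sbds[tx_next];
        /* ... build the hardware descriptor in ssbdp from *pktp, clear tx_ready ... */
        txfill_next = NEXT(txfill_next, BGE_SEND_BUF_MAX);
        tx_next = NEXT(tx_next, srp->desc.nslots);
    }

    if (count != 0) {
        bge_atomic_sub64(&srp->tx_flow, count);
        srp->txfill_next = txfill_next;

        if (srp->tx_next > tx_next) {
            /*
             * The batch wrapped past the end of the ring: DMA-sync the
             * (srp->desc.nslots - srp->tx_next) descriptors at the tail
             * for the device first, then continue from slot 0.
             */
            count -= srp->desc.nslots - srp->tx_next;
            ssbdp = &srp->sw_sbds[0];
        }
        /* ... DMA-sync the remaining 'count' descriptors starting at ssbdp ... */

        /* Publish the new producer index to the chip, then remember it. */
        bge_mbx_put(bgep, srp->chip_mbx_reg, tx_next);
        srp->tx_next = tx_next;
    }

    /* Line 473: if more packets arrived meanwhile and slots remain, go around again. */
    mutex_exit(srp->tx_lock);
}
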
483 send_ring_t *srp = arg;
484 bge_t *bgep = srp->bgep;
499 txbuf_item = bge_get_txbuf(bgep, srp);
502 srp->tx_nobuf++;
504 bge_send_serial(bgep, srp);
544 pkt_slot = bge_atomic_next(&srp->txpkt_next, BGE_SEND_BUF_MAX);
545 pktp = &srp->pktp[pkt_slot];
550 atomic_inc_64(&srp->tx_flow);
557 bge_send_serial(bgep, srp);
559 srp->pushed_bytes += MBLKL(mp);
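The per-ring transmit entry (source lines 483-559) is the producer side of that scheme: get a pre-mapped buffer with bge_get_txbuf() (counting tx_nobuf and attempting a drain when none is available), copy the mblk into it, claim the next pktp[] slot with bge_atomic_next(&srp->txpkt_next, BGE_SEND_BUF_MAX), publish the packet by bumping tx_flow, and then call bge_send_serial() to push the batch toward the chip. A sketch of that path; the function name/signature, the copy step, and the packet-slot fields are assumptions around the matched lines:

static mblk_t *
bge_ring_tx(void *arg, mblk_t *mp)      /* name and GLD-style signature assumed */
{
    send_ring_t *srp = arg;
    bge_t *bgep = srp->bgep;
    bge_queue_item_t *txbuf_item;
    send_pkt_t *pktp;
    uint64_t pkt_slot;

    /* Get a free, pre-mapped transmit buffer for the copy path. */
    txbuf_item = bge_get_txbuf(bgep, srp);
    if (txbuf_item == NULL) {
        srp->tx_nobuf++;
        bgep->tx_resched_needed = B_TRUE;   /* flag from line 167; placement assumed */
        bge_send_serial(bgep, srp);         /* try to drain what is already queued  */
        return (mp);                        /* not consumed: the stack should retry */
    }

    /* ... copy the mblk chain into txbuf_item's DMA buffer ... */

    /* Claim the next packet slot and publish it for the serial drainer. */
    pkt_slot = bge_atomic_next(&srp->txpkt_next, BGE_SEND_BUF_MAX);
    pktp = &srp->pktp[pkt_slot];
    /* ... record txbuf_item, length and checksum flags in *pktp, mark it ready ... */
    atomic_inc_64(&srp->tx_flow);

    /* Opportunistically hand the accumulated packets to the hardware. */
    bge_send_serial(bgep, srp);

    srp->pushed_bytes += MBLKL(mp);
    freemsg(mp);
    return (NULL);                          /* consumed */
}
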
583 send_ring_t *srp;
588 srp = &bgep->send[ring];
589 bge_send_serial(bgep, srp);
592 (srp->tx_flow < srp->tx_buffers_low) &&