Lines Matching defs:tbp

620	struct txbuf		*tbp;
701	for (i = 0, tbp = dp->tx_buf;
702	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {
708		    &tbp->txb_bdh)) != DDI_SUCCESS) {
717		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
722		    &tbp->txb_bah)) != DDI_SUCCESS) {
727			ddi_dma_free_handle(&tbp->txb_bdh);
731		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
740			ddi_dma_mem_free(&tbp->txb_bah);
741			ddi_dma_free_handle(&tbp->txb_bdh);
745		tbp->txb_buf = buf;
746		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
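The matches at lines 701-746 trace the standard three-step DDI DMA setup for each transmit bounce buffer: allocate a DMA handle, allocate DMA-able memory against it, then bind the memory to obtain its device address, unwinding in reverse order when a step fails. A minimal sketch of that shape follows, assuming <sys/ddi.h>/<sys/sunddi.h>; the helper name, attribute structures, flags, and buffer length are illustrative assumptions, not the driver's exact code.

/*
 * Hypothetical helper mirroring lines 701-746: handle -> memory ->
 * binding, with reverse-order cleanup on failure.  dma_attr, acc_attr
 * and len are assumed to be supplied by the caller.
 */
static int
txbuf_alloc_dma(dev_info_t *dip, struct txbuf *tbp,
    ddi_dma_attr_t *dma_attr, ddi_device_acc_attr_t *acc_attr, size_t len)
{
	caddr_t			buf;
	size_t			real_len;
	uint_t			count;
	ddi_dma_cookie_t	buf_cookie;
	int			err;

	if ((err = ddi_dma_alloc_handle(dip, dma_attr,
	    DDI_DMA_SLEEP, NULL, &tbp->txb_bdh)) != DDI_SUCCESS) {
		return (err);
	}
	if ((err = ddi_dma_mem_alloc(tbp->txb_bdh, len, acc_attr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &buf, &real_len, &tbp->txb_bah)) != DDI_SUCCESS) {
		ddi_dma_free_handle(&tbp->txb_bdh);
		return (err);
	}
	if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh, NULL,
	    buf, real_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, NULL, &buf_cookie, &count)) != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&tbp->txb_bah);
		ddi_dma_free_handle(&tbp->txb_bdh);
		return (DDI_FAILURE);
	}
	tbp->txb_buf = buf;
	tbp->txb_buf_dma = buf_cookie.dmac_laddress;
	return (DDI_SUCCESS);
}

The release path at lines 788-792 below undoes the same three steps in reverse: unbind, free the memory, free the handle.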
775	struct txbuf *tbp;
788	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
790		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
791		ddi_dma_mem_free(&tbp->txb_bah);
792		ddi_dma_free_handle(&tbp->txb_bdh);
966	gem_txbuf_free_dma_resources(struct txbuf *tbp)
968		if (tbp->txb_mp) {
969			freemsg(tbp->txb_mp);
970			tbp->txb_mp = NULL;
972		tbp->txb_nfrags = 0;
973		tbp->txb_flag = 0;
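Lines 966-973 show gem_txbuf_free_dma_resources() almost in full; only the braces, return type, and closing lines are elided by the match listing. Reassembled, with the assumed-from-context pieces marked:

/* Reconstruction of lines 966-973; "static void" and the braces are
 * assumptions, everything else is in the listing. */
static void
gem_txbuf_free_dma_resources(struct txbuf *tbp)
{
	if (tbp->txb_mp) {
		freemsg(tbp->txb_mp);
		tbp->txb_mp = NULL;
	}
	tbp->txb_nfrags = 0;
	tbp->txb_flag = 0;
}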
987	struct txbuf *tbp;
1010		tbp = GET_TXBUF(dp, head);
1012			gem_txbuf_free_dma_resources(tbp);
1013			ASSERT(tbp->txb_mp == NULL);
1015			tbp = tbp->txb_next;
1022		if (tbp->txb_mp || tbp->txb_nfrags) {
1027			    tbp->txb_mp, tbp->txb_nfrags));
1031		tbp = tbp->txb_next;
1036	    "gem_clean_tx_buf: tbp->txb_mp != NULL");
1056	struct txbuf *tbp;
1104	tbp = GET_TXBUF(dp, head);
1107	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1110		ASSERT(tbp->txb_desc == desc_head);
1112		ndescs = tbp->txb_ndescs;
1118		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1125	if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1137		if (now - tbp->txb_stime >= 50) {
1139			    dp->name, (now - tbp->txb_stime)*10);
1158	tbp = GET_TXBUF(dp, head);
1168	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1169		gem_txbuf_free_dma_resources(tbp);
1179	tbp = GET_TXBUF(dp, new_tail);
1181	if (tbp->txb_nfrags) {
1185		ASSERT(tbp->txb_mp == NULL);
1186		tbp = tbp->txb_next;
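Lines 1056-1186 all come from the transmit-completion path: the first loop advances tx_active_head over txbufs whose descriptors the hardware has retired, waking a blocked sender when a txbuf flagged GEM_TXFLAG_INTR completes, and a second loop then releases the DMA resources of the retired range. The stall check at lines 1137-1139 warns once a txbuf has been outstanding for 50 ticks and prints the latency multiplied by 10, which suggests a 10 ms tick, i.e. a 500 ms threshold. A hedged sketch of the first loop; hw_tx_done() is a stand-in for the chip-specific completion test, and the desc_head bookkeeping is an assumption.

/*
 * Hypothetical sketch of the completion loop around lines 1104-1118.
 */
static void
reclaim_txbuf_sketch(struct gem_dev *dp, uint_t head, uint_t tail,
    uint_t desc_head)
{
	struct txbuf	*tbp;
	uint_t		sn;

	tbp = GET_TXBUF(dp, head);
	for (sn = head; sn != tail;
	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
		if (!hw_tx_done(dp, tbp))
			break;		/* still in flight; stop here */
		ASSERT(tbp->txb_desc == desc_head);
		desc_head += tbp->txb_ndescs;	/* retire its descriptors */
	}
}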
1220	struct txbuf *tbp;
1229	tbp = GET_TXBUF(dp, sn);
1238		tbp->txb_desc = sn;
1239		tbp->txb_ndescs = (*tx_desc_write)(dp,
1241		    tbp->txb_dmacookie,
1242		    tbp->txb_nfrags, flags | tbp->txb_flag);
1243		tbp->txb_stime = now;
1244		ASSERT(tbp->txb_ndescs == 1);
1248		tbp = tbp->txb_next;
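Lines 1220-1248 hand prepared txbufs to the hardware: each txbuf records its descriptor slot, passes its DMA cookies to the chip driver's tx_desc_write entry point, and is timestamped for the watchdog. A sketch of the loop; nslots, flags, now, and the gc_tx_desc_write/gc_tx_ring_size member names are assumptions, while the callback's argument list follows the listing.

/*
 * Hypothetical sketch of the descriptor-writing loop at lines
 * 1229-1248.
 */
static void
tx_start_sketch(struct gem_dev *dp, uint_t sn, int nslots, uint64_t flags,
    clock_t now)
{
	struct txbuf	*tbp;
	uint_t		tx_ring_size = dp->gc.gc_tx_ring_size;	/* assumed */
	int		i;

	tbp = GET_TXBUF(dp, sn);
	for (i = 0; i < nslots; i++, sn++, tbp = tbp->txb_next) {
		tbp->txb_desc = sn;
		tbp->txb_ndescs = (*dp->gc.gc_tx_desc_write)(dp,
		    SLOT(sn, tx_ring_size),
		    tbp->txb_dmacookie,
		    tbp->txb_nfrags, flags | tbp->txb_flag);
		tbp->txb_stime = now;
		ASSERT(tbp->txb_ndescs == 1);
	}
}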
1254	gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1263	ASSERT(tbp->txb_mp == NULL);
1267	bp = tbp->txb_buf;
1271	flag = tbp->txb_flag;
1298	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1300	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1301	tbp->txb_dmacookie[0].dmac_size = off;
1306	    tbp->txb_dmacookie[0].dmac_laddress,
1307	    tbp->txb_dmacookie[0].dmac_size,
1312	tbp->txb_mp = mp;
1313	tbp->txb_nfrags = 1;
1316	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
1317		tbp->txb_dmacookie[1].dmac_laddress =
1318		    tbp->txb_dmacookie[0].dmac_laddress + 16;
1319		tbp->txb_dmacookie[2].dmac_laddress =
1320		    tbp->txb_dmacookie[1].dmac_laddress + 16;
1322		tbp->txb_dmacookie[2].dmac_size =
1323		    tbp->txb_dmacookie[0].dmac_size - 16*2;
1324		tbp->txb_dmacookie[1].dmac_size = 16;
1325		tbp->txb_dmacookie[0].dmac_size = 16;
1326		tbp->txb_nfrags = 3;
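gem_setup_txbuf_copy() (lines 1254-1326) copies the packet into the pre-bound bounce buffer, syncs it for the device, and publishes a single DMA cookie. The tail of the function (lines 1316-1326) then optionally re-describes that one contiguous cookie as three fragments of 16, 16, and the remaining bytes, presumably for controllers that want small leading fragments or a minimum fragment count; the "> 16*3" guard keeps the third fragment non-empty. Isolated as a hedged helper (the wrapper itself is an assumption, the arithmetic is verbatim from the listing):

/*
 * Hypothetical helper reproducing the 16/16/rest cookie split of
 * lines 1316-1326.  No data moves; only the cookie bookkeeping
 * changes, since all three fragments alias the same bounce buffer.
 */
static void
txbuf_split_cookie(struct txbuf *tbp)
{
	if (tbp->txb_dmacookie[0].dmac_size > 16*3) {
		tbp->txb_dmacookie[1].dmac_laddress =
		    tbp->txb_dmacookie[0].dmac_laddress + 16;
		tbp->txb_dmacookie[2].dmac_laddress =
		    tbp->txb_dmacookie[1].dmac_laddress + 16;
		tbp->txb_dmacookie[2].dmac_size =
		    tbp->txb_dmacookie[0].dmac_size - 16*2;
		tbp->txb_dmacookie[1].dmac_size = 16;
		tbp->txb_dmacookie[0].dmac_size = 16;
		tbp->txb_nfrags = 3;
	}
}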
1435	struct txbuf *tbp;
1501		tbp = GET_TXBUF(dp, head + avail - 1);
1502		tbp->txb_flag = GEM_TXFLAG_INTR;
1507	tbp = GET_TXBUF(dp, head);
1509	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1531		txflag = tbp->txb_flag;
1533		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1534		tbp->txb_flag = txflag;
1536		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1541	/* Append the tbp at the tail of the active tx buffer list */
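The send path (lines 1435-1541) reserves a run of txbufs, sets GEM_TXFLAG_INTR on the last txbuf it may use so a completion interrupt is guaranteed for the batch, then copies each packet into its txbuf. A sketch; the mblk array, nmblk, head, avail, and len_total are assumptions, and the txflag temporary of lines 1531-1534 is condensed into one |=.

/* Hypothetical sketch of the batch setup around lines 1501-1541. */
tbp = GET_TXBUF(dp, head + avail - 1);
tbp->txb_flag = GEM_TXFLAG_INTR;	/* one interrupt covers the batch */

tbp = GET_TXBUF(dp, head);
for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
	mblk_t	*mp = mp_list[nmblk - i];	/* assumed packet array */

	tbp->txb_flag |= gem_txbuf_options(dp, mp,
	    (uint8_t *)tbp->txb_buf);
	len_total += gem_setup_txbuf_copy(dp, mp, tbp);
}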
1653	struct txbuf *tbp;
1692	tbp = GET_TXBUF(dp, dp->tx_active_head);
1693	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
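Finally, the watchdog (lines 1653-1693) needs to examine only the txbuf at tx_active_head: txb_stime increases monotonically along the active list, so if the oldest in-flight packet is still within its budget, every newer one is too. A sketch of the test; the recovery action is outside the matched lines and left as a comment.

/* Hypothetical sketch of the watchdog test at lines 1692-1693. */
tbp = GET_TXBUF(dp, dp->tx_active_head);
if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
	return;		/* oldest in-flight packet not yet timed out */
}
/* otherwise fall through and reset the controller (not shown) */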