Lines matching refs:txd in FreeBSD's Hyper-V netvsc driver (sys/dev/hyperv/netvsc/if_hn.c); each matched line below is prefixed with its line number in that file.

659 hn_txpkt_sglist(struct hn_tx_ring *txr, struct hn_txdesc *txd)
662 KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID &&
663 txd->chim_size == 0, ("invalid rndis sglist txd"));
665 &txd->send_ctx, txr->hn_gpa, txr->hn_gpa_cnt));
669 hn_txpkt_chim(struct hn_tx_ring *txr, struct hn_txdesc *txd)
673 KASSERT(txd->chim_index != HN_NVS_CHIM_IDX_INVALID &&
674 txd->chim_size > 0, ("invalid rndis chim txd"));
678 rndis.nvs_chim_idx = txd->chim_index;
679 rndis.nvs_chim_sz = txd->chim_size;
682 &rndis, sizeof(rndis), &txd->send_ctx));
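
Taken together, the two functions above are the driver's two submission paths: hn_txpkt_sglist() hands the host a guest-physical-address scatter/gather array describing the frame in place, while hn_txpkt_chim() sends only a slot index and size because the data was already copied into a pre-shared "chimney" buffer. A minimal userland sketch of that dispatch follows; struct txdesc, CHIM_IDX_INVALID and the two send stubs are simplified stand-ins, not the driver's real definitions. In the driver the chosen path is recorded in txr->hn_sendpkt and invoked at file line 3250.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CHIM_IDX_INVALID ((uint32_t)-1)	/* ~ HN_NVS_CHIM_IDX_INVALID */

/* Simplified stand-in for struct hn_txdesc. */
struct txdesc {
	uint32_t chim_index;	/* CHIM_IDX_INVALID => sglist path */
	uint32_t chim_size;	/* bytes staged in the chimney buffer */
	int	 gpa_cnt;	/* guest-physical SG segment count */
};

static int
send_sglist(const struct txdesc *txd)
{
	printf("RNDIS pkt via %d GPA segment(s)\n", txd->gpa_cnt);
	return (0);
}

static int
send_chim(const struct txdesc *txd)
{
	printf("RNDIS pkt via chimney slot %u, %u bytes\n",
	    txd->chim_index, txd->chim_size);
	return (0);
}

static int
txpkt(const struct txdesc *txd)
{
	if (txd->chim_index != CHIM_IDX_INVALID) {
		/* Mirrors the KASSERT in hn_txpkt_chim(). */
		assert(txd->chim_size > 0);
		return (send_chim(txd));
	}
	/* Mirrors the KASSERT in hn_txpkt_sglist(). */
	assert(txd->chim_size == 0);
	return (send_sglist(txd));
}

int
main(void)
{
	struct txdesc small = { .chim_index = 3, .chim_size = 128 };
	struct txdesc large = { .chim_index = CHIM_IDX_INVALID, .gpa_cnt = 4 };

	txpkt(&small);
	txpkt(&large);
	return (0);
}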
2644 hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
2650 KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID, ("txd uses chim"));
2652 error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
2665 txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
2668 bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
2670 txd->flags |= HN_TXD_FLAG_DMAMAP;
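
The two bus_dmamap_load_mbuf_sg() calls above (file lines 2652 and 2665) are a load-then-retry pattern: when the mbuf chain has more segments than the tag allows, the first load fails with EFBIG, the chain is coalesced into fewer segments, and the load is tried once more before giving up. A rough userland model of that shape, assuming a toy chain and a made-up segment limit; every name below is a stand-in, not kernel API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define SEGCNT_MAX 2	/* stand-in for the ring's segment limit */

struct buf {		/* toy stand-in for one mbuf in a chain */
	struct buf *next;
	size_t	    len;
};

static int
chain_segs(const struct buf *b)
{
	int n = 0;

	for (; b != NULL; b = b->next)
		n++;
	return (n);
}

/* ~ bus_dmamap_load_mbuf_sg(): too many segments => EFBIG. */
static int
dmamap_load(const struct buf *b)
{
	return (chain_segs(b) > SEGCNT_MAX ? EFBIG : 0);
}

/* ~ m_collapse(): coalesce the whole chain into a single buffer. */
static struct buf *
collapse(struct buf *b)
{
	struct buf *one;
	size_t total = 0;

	for (const struct buf *p = b; p != NULL; p = p->next)
		total += p->len;
	while (b != NULL) {
		struct buf *n = b->next;

		free(b);
		b = n;
	}
	if ((one = malloc(sizeof(*one))) != NULL) {
		one->next = NULL;
		one->len = total;
	}
	return (one);
}

/* The load-or-coalesce-and-retry shape of hn_txdesc_dmamap_load(). */
static int
load_with_retry(struct buf **bp)
{
	int error = dmamap_load(*bp);

	if (error == EFBIG) {
		*bp = collapse(*bp);	/* old chain is gone either way */
		if (*bp == NULL)
			return (ENOBUFS);
		error = dmamap_load(*bp);
	}
	return (error);
}

int
main(void)
{
	struct buf *b = NULL;

	for (int i = 0; i < 3; i++) {	/* 3 segments > SEGCNT_MAX */
		struct buf *n = malloc(sizeof(*n));

		if (n == NULL)
			return (1);
		n->next = b;
		n->len = 512;
		b = n;
	}
	printf("load %s\n", load_with_retry(&b) == 0 ? "ok" : "failed");
	free(b);
	return (0);
}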
2676 hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
2679 KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
2680 ("put an onlist txd %#x", txd->flags));
2681 KASSERT((txd->flags & HN_TXD_FLAG_ONAGG) == 0,
2682 ("put an onagg txd %#x", txd->flags));
2684 KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
2685 if (atomic_fetchadd_int(&txd->refs, -1) != 1)
2688 if (!STAILQ_EMPTY(&txd->agg_list)) {
2691 while ((tmp_txd = STAILQ_FIRST(&txd->agg_list)) != NULL) {
2707 STAILQ_REMOVE_HEAD(&txd->agg_list, agg_link);
2714 if (txd->chim_index != HN_NVS_CHIM_IDX_INVALID) {
2715 KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0,
2716 ("chim txd uses dmamap"));
2717 hn_chim_free(txr->hn_sc, txd->chim_index);
2718 txd->chim_index = HN_NVS_CHIM_IDX_INVALID;
2719 txd->chim_size = 0;
2720 } else if (txd->flags & HN_TXD_FLAG_DMAMAP) {
2722 txd->data_dmap, BUS_DMASYNC_POSTWRITE);
2724 txd->data_dmap);
2725 txd->flags &= ~HN_TXD_FLAG_DMAMAP;
2728 if (txd->m != NULL) {
2729 m_freem(txd->m);
2730 txd->m = NULL;
2733 txd->flags |= HN_TXD_FLAG_ONLIST;
2738 ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
2740 SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
2746 buf_ring_enqueue(txr->hn_txdesc_br, txd);
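
hn_txdesc_put() is the release half of the descriptor lifecycle: atomic_fetchadd_int(&txd->refs, -1) drops one reference, and only the caller that moves the count from 1 to 0 drains the aggregation list, frees the chimney slot or unloads the DMA map, frees the mbuf, and returns the descriptor to the ring's free list (SLIST or buf_ring, depending on configuration). A compact C11 sketch of that release path, with stdatomic and <sys/queue.h> standing in for the kernel primitives and with the ring lock elided.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/queue.h>

#define FLAG_ONLIST 0x1	/* ~ HN_TXD_FLAG_ONLIST */

struct txdesc {
	SLIST_ENTRY(txdesc) link;
	atomic_int	    refs;
	int		    flags;
};

SLIST_HEAD(txlist, txdesc);

/*
 * Drop one reference; the caller that takes the count from 1 to 0
 * releases the descriptor's resources and puts it back on the free
 * list.  Returns true when the descriptor was actually freed.
 */
static bool
txdesc_put(struct txlist *freelist, struct txdesc *txd)
{
	assert((txd->flags & FLAG_ONLIST) == 0);  /* "put an onlist txd" */
	assert(atomic_load(&txd->refs) > 0);

	/* atomic_fetchadd_int(&txd->refs, -1) != 1 in the driver. */
	if (atomic_fetch_sub(&txd->refs, 1) != 1)
		return (false);		/* other holders remain */

	/* Last reference: drain agg_list, free the chim slot or unload
	 * the DMA map, m_freem() the mbuf... then back on-list. */
	txd->flags |= FLAG_ONLIST;
	SLIST_INSERT_HEAD(freelist, txd, link);
	return (true);
}

int
main(void)
{
	struct txlist freelist = SLIST_HEAD_INITIALIZER(freelist);
	struct txdesc txd = { .refs = 2 };

	assert(!txdesc_put(&freelist, &txd));	/* 2 -> 1: still held */
	assert(txdesc_put(&freelist, &txd));	/* 1 -> 0: freed */
	return (0);
}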
2755 struct hn_txdesc *txd;
2759 txd = SLIST_FIRST(&txr->hn_txlist);
2760 if (txd != NULL) {
2762 ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
2768 txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
2771 if (txd != NULL) {
2777 KASSERT(txd->m == NULL && txd->refs == 0 &&
2778 STAILQ_EMPTY(&txd->agg_list) &&
2779 txd->chim_index == HN_NVS_CHIM_IDX_INVALID &&
2780 txd->chim_size == 0 &&
2781 (txd->flags & HN_TXD_FLAG_ONLIST) &&
2782 (txd->flags & HN_TXD_FLAG_ONAGG) == 0 &&
2783 (txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("invalid txd"));
2784 txd->flags &= ~HN_TXD_FLAG_ONLIST;
2785 txd->refs = 1;
2787 return txd;
2791 hn_txdesc_hold(struct hn_txdesc *txd)
2795 KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
2796 atomic_add_int(&txd->refs, 1);
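
hn_txdesc_get() and hn_txdesc_hold() are the matching acquire half: a descriptor leaves the free list with exactly one reference and its ONLIST flag cleared, and anything that must keep it alive across an asynchronous operation bumps the count. The same sketch continued; the buf_ring variant the driver can use instead of the SLIST, and all locking, are omitted.

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>
#include <sys/queue.h>

#define FLAG_ONLIST 0x1

struct txdesc {
	SLIST_ENTRY(txdesc) link;
	atomic_int	    refs;
	int		    flags;
};

SLIST_HEAD(txlist, txdesc);

/* ~ hn_txdesc_get(): off the free list with exactly one reference. */
static struct txdesc *
txdesc_get(struct txlist *freelist)
{
	struct txdesc *txd = SLIST_FIRST(freelist);

	if (txd != NULL) {
		SLIST_REMOVE_HEAD(freelist, link);
		assert(txd->flags & FLAG_ONLIST);	/* "invalid txd" */
		assert(atomic_load(&txd->refs) == 0);
		txd->flags &= ~FLAG_ONLIST;
		atomic_store(&txd->refs, 1);		/* txd->refs = 1 */
	}
	return (txd);
}

/* ~ hn_txdesc_hold(): keep it alive across an async operation. */
static void
txdesc_hold(struct txdesc *txd)
{
	assert(atomic_load(&txd->refs) > 0);
	atomic_fetch_add(&txd->refs, 1);	/* atomic_add_int() */
}

int
main(void)
{
	struct txlist freelist = SLIST_HEAD_INITIALIZER(freelist);
	struct txdesc d = { .refs = 0, .flags = FLAG_ONLIST };

	SLIST_INSERT_HEAD(&freelist, &d, link);
	struct txdesc *txd = txdesc_get(&freelist);
	assert(txd == &d);
	txdesc_hold(txd);	/* refs: 1 -> 2 */
	return (0);
}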
2800 hn_txdesc_agg(struct hn_txdesc *agg_txd, struct hn_txdesc *txd)
2806 KASSERT((txd->flags & HN_TXD_FLAG_ONAGG) == 0,
2808 KASSERT(STAILQ_EMPTY(&txd->agg_list),
2811 txd->flags |= HN_TXD_FLAG_ONAGG;
2812 STAILQ_INSERT_TAIL(&agg_txd->agg_list, txd, agg_link);
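
hn_txdesc_agg() links a packet that was copied into another descriptor's chimney buffer under that aggregating descriptor: the aggregated txd keeps its own mbuf, is marked ONAGG, and is torn down only when the aggregate completes, which is what the STAILQ drain inside hn_txdesc_put() (file lines 2688-2707) does. A small sketch of the linking and the head-first drain, with illustrative field names.

#include <assert.h>
#include <stdio.h>
#include <sys/queue.h>

#define FLAG_ONAGG 0x2	/* ~ HN_TXD_FLAG_ONAGG */

struct txdesc {
	STAILQ_ENTRY(txdesc)	agg_link;
	STAILQ_HEAD(, txdesc)	agg_list;
	int			flags;
	int			id;
};

/* ~ hn_txdesc_agg(): chain txd under the aggregating descriptor. */
static void
txdesc_agg(struct txdesc *agg_txd, struct txdesc *txd)
{
	assert((txd->flags & FLAG_ONAGG) == 0);	/* not already aggregated */
	assert(STAILQ_EMPTY(&txd->agg_list));	/* aggregates don't nest */

	txd->flags |= FLAG_ONAGG;
	STAILQ_INSERT_TAIL(&agg_txd->agg_list, txd, agg_link);
}

int
main(void)
{
	struct txdesc agg = { .id = 0 }, a = { .id = 1 }, b = { .id = 2 };
	struct txdesc *p;

	STAILQ_INIT(&agg.agg_list);
	STAILQ_INIT(&a.agg_list);
	STAILQ_INIT(&b.agg_list);
	txdesc_agg(&agg, &a);
	txdesc_agg(&agg, &b);

	/* Completion drains head-first, as in hn_txdesc_put(). */
	while ((p = STAILQ_FIRST(&agg.agg_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&agg.agg_list, agg_link);
		printf("free aggregated txd %d\n", p->id);
	}
	return (0);
}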
2843 struct hn_txdesc *txd = sndc->hn_cbarg;
2846 txr = txd->txr;
2852 hn_txdesc_put(txr, txd);
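
hn_txpkt_done() is the send-completion callback: the descriptor is recovered from the send context's callback argument and its reference dropped with hn_txdesc_put(). Reduced to its essentials, this is a callback-plus-argument pair embedded in the descriptor; the real hn_nvs_sendctx callback takes more parameters than this simplification shows.

#include <stdio.h>

/* Modeled loosely on struct hn_nvs_sendctx: a hook plus its argument. */
struct sendctx {
	void	(*cb)(struct sendctx *, void *);
	void	 *cbarg;
};

struct txdesc {
	struct sendctx send_ctx;	/* embedded, as in struct hn_txdesc */
	int	       id;
};

static void
sendctx_init(struct sendctx *sndc, void (*cb)(struct sendctx *, void *),
    void *cbarg)
{
	sndc->cb = cb;
	sndc->cbarg = cbarg;
}

/* Shape of hn_txpkt_done(): recover the txdesc, drop its reference. */
static void
txpkt_done(struct sendctx *sndc, void *cbarg)
{
	struct txdesc *txd = cbarg;

	(void)sndc;
	printf("tx completion: put txd %d\n", txd->id);  /* hn_txdesc_put() */
}

int
main(void)
{
	struct txdesc txd = { .id = 7 };

	/* ~ hn_nvs_sendctx_init(&txd->send_ctx, hn_txpkt_done, txd); */
	sendctx_init(&txd.send_ctx, txpkt_done, &txd);

	/* The host's TX-done notification eventually fires the hook. */
	txd.send_ctx.cb(&txd.send_ctx, txd.send_ctx.cbarg);
	return (0);
}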
2923 struct hn_txdesc *txd;
2927 txd = txr->hn_agg_txd;
2928 KASSERT(txd != NULL, ("no aggregate txdesc"));
2938 * Since txd's mbuf will _not_ be freed upon hn_txpkt()
2942 m = txd->m;
2943 error = hn_txpkt(ifp, txr, txd);
2945 /* txd is freed, but m is not. */
2962 hn_try_txagg(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd,
2988 hn_txdesc_agg(agg_txd, txd);
3012 txd->chim_index = hn_chim_alloc(txr->hn_sc);
3013 if (txd->chim_index == HN_NVS_CHIM_IDX_INVALID)
3018 (txd->chim_index * txr->hn_sc->hn_chim_szmax);
3022 txr->hn_agg_txd = txd;
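
hn_flush_txagg() (file lines 2923-2945) pushes out the ring's open aggregate, and hn_try_txagg() either chains the new packet onto that aggregate via hn_txdesc_agg(), flushes it, or starts a new one by allocating a chimney slot and returning the address inside it where the caller builds the RNDIS packet (chim base plus chim_index * chim_szmax, file line 3018). A sketch of just that slot-allocation tail, with a toy bump allocator standing in for hn_chim_alloc(); a NULL return means the caller falls back to the sglist path.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHIM_IDX_INVALID ((uint32_t)-1)
#define CHIM_SZMAX	 4096	/* stand-in for sc->hn_chim_szmax */
#define CHIM_SLOTS	 4

static uint8_t	chim_buf[CHIM_SLOTS * CHIM_SZMAX];  /* shared w/ host */
static uint32_t chim_used;			    /* toy allocator */

struct txdesc {
	uint32_t chim_index;
	uint32_t chim_size;
};

static uint32_t
chim_alloc(void)
{
	return (chim_used < CHIM_SLOTS ? chim_used++ : CHIM_IDX_INVALID);
}

/*
 * The "start a new aggregate" tail of hn_try_txagg(): grab a chimney
 * slot and hand back the spot inside it where the RNDIS packet goes.
 */
static void *
try_txagg(struct txdesc *txd, size_t pkt_size)
{
	if (pkt_size > CHIM_SZMAX)
		return (NULL);		/* too big to copy; DMA instead */

	txd->chim_index = chim_alloc();
	if (txd->chim_index == CHIM_IDX_INVALID)
		return (NULL);		/* slots exhausted; fall back */

	/* chim_base + chim_index * chim_szmax, as in hn_try_txagg(). */
	return (chim_buf + (size_t)txd->chim_index * CHIM_SZMAX);
}

int
main(void)
{
	struct txdesc txd = { .chim_index = CHIM_IDX_INVALID };
	void *dst = try_txagg(&txd, 1500);

	if (dst != NULL) {
		memset(dst, 0, 1500);	/* caller builds the packet here */
		txd.chim_size = 1500;
		printf("packet staged in chimney slot %u\n", txd.chim_index);
	}
	return (0);
}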
3032 * If this function fails, then both txd and m_head0 will be freed.
3035 hn_encap(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd,
3046 pkt = txd->rndis_pkt;
3049 chim = hn_try_txagg(ifp, txr, txd, pkt_size);
3145 struct hn_txdesc *tgt_txd = txd;
3169 KASSERT(txd->chim_index == HN_NVS_CHIM_IDX_INVALID,
3171 KASSERT(pkt == txd->rndis_pkt, ("RNDIS pkt not in txdesc"));
3173 error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
3178 * This mbuf is not linked w/ the txd yet, so free it now.
3183 freed = hn_txdesc_put(txr, txd);
3185 ("fail to free txd upon txdma error"));
3197 txr->hn_gpa[0].gpa_page = atop(txd->rndis_pkt_paddr);
3198 txr->hn_gpa[0].gpa_ofs = txd->rndis_pkt_paddr & PAGE_MASK;
3213 txd->chim_index = HN_NVS_CHIM_IDX_INVALID;
3214 txd->chim_size = 0;
3217 txd->m = m_head;
3220 hn_nvs_sendctx_init(&txd->send_ctx, hn_txpkt_done, txd);
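
File lines 3197-3198 show how the RNDIS packet's CPU physical address becomes a scatter/gather entry for the host: atop() extracts the page frame number and the low bits give the byte offset inside that page. A standalone illustration with a made-up address; struct gpa approximates the vmbus GPA entry layout.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (PAGE_SIZE - 1)
#define atop(x)	   ((uint64_t)(x) >> PAGE_SHIFT)

/* One entry of the guest-physical-address array handed to the host. */
struct gpa {
	uint64_t gpa_page;	/* physical page frame number */
	uint32_t gpa_ofs;	/* offset of the data within the page */
	uint32_t gpa_len;	/* length of the segment */
};

int
main(void)
{
	/* Hypothetical physical address of a txd's RNDIS buffer. */
	uint64_t rndis_pkt_paddr = 0x12345678;
	struct gpa gpa0 = {
		/* As in hn_encap(): split paddr into page + offset. */
		.gpa_page = atop(rndis_pkt_paddr),
		.gpa_ofs = (uint32_t)(rndis_pkt_paddr & PAGE_MASK),
		.gpa_len = 200,
	};

	printf("page %#llx ofs %#x len %u\n",
	    (unsigned long long)gpa0.gpa_page, gpa0.gpa_ofs, gpa0.gpa_len);
	return (0);
}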
3233 * If this function fails, then txd will be freed, but the mbuf
3234 * associated w/ the txd will _not_ be freed.
3237 hn_txpkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
3245 * Make sure that this txd and any aggregated txds are not
3248 hn_txdesc_hold(txd);
3250 error = txr->hn_sendpkt(txr, txd);
3255 ETHER_BPF_MTAP(ifp, txd->m);
3256 STAILQ_FOREACH(tmp_txd, &txd->agg_list, agg_link)
3276 hn_txdesc_put(txr, txd);
3307 txd->m = NULL;
3308 freed = hn_txdesc_put(txr, txd);
3310 ("fail to free txd upon send error"));
5247 struct hn_txdesc *txd = &txr->hn_txdesc[i];
5249 txd->txr = txr;
5250 txd->chim_index = HN_NVS_CHIM_IDX_INVALID;
5251 STAILQ_INIT(&txd->agg_list);
5257 (void **)&txd->rndis_pkt,
5259 &txd->rndis_pkt_dmap);
5267 txd->rndis_pkt_dmap,
5268 txd->rndis_pkt, HN_RNDIS_PKT_LEN,
5269 hyperv_dma_map_paddr, &txd->rndis_pkt_paddr,
5275 txd->rndis_pkt, txd->rndis_pkt_dmap);
5281 &txd->data_dmap);
5286 txd->rndis_pkt_dmap);
5288 txd->rndis_pkt, txd->rndis_pkt_dmap);
5293 txd->flags |= HN_TXD_FLAG_ONLIST;
5295 SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
5297 buf_ring_enqueue(txr->hn_txdesc_br, txd);
5346 hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
5348 struct hn_tx_ring *txr = txd->txr;
5350 KASSERT(txd->m == NULL, ("still has mbuf installed"));
5351 KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));
5353 bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_pkt_dmap);
5354 bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_pkt,
5355 txd->rndis_pkt_dmap);
5356 bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
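
The setup loop at file lines 5247-5297 acquires several busdma resources per descriptor in a fixed order and, on any failure, releases exactly the ones already acquired, in reverse; hn_txdesc_dmamap_destroy() is the full-teardown mirror of that order. The same acquire/unwind shape, with plain malloc/free standing in for bus_dmamem_alloc()/bus_dmamap_create() and friends.

#include <stdio.h>
#include <stdlib.h>

struct txdesc {
	void *rndis_pkt;	/* per-descriptor RNDIS header buffer */
	void *data_map;		/* per-descriptor DMA map for the mbuf */
};

/* Each resource either succeeds or unwinds its predecessors. */
static int
txdesc_init(struct txdesc *txd)
{
	txd->rndis_pkt = malloc(128);	/* ~ bus_dmamem_alloc() */
	if (txd->rndis_pkt == NULL)
		return (-1);

	txd->data_map = malloc(64);	/* ~ bus_dmamap_create() */
	if (txd->data_map == NULL) {
		free(txd->rndis_pkt);	/* unwind step 1, in reverse */
		txd->rndis_pkt = NULL;
		return (-1);
	}
	return (0);
}

/* Mirror of hn_txdesc_dmamap_destroy(): reverse of the init order. */
static void
txdesc_fini(struct txdesc *txd)
{
	free(txd->data_map);
	free(txd->rndis_pkt);
}

int
main(void)
{
	struct txdesc txd;

	if (txdesc_init(&txd) == 0) {
		printf("txdesc ready\n");
		txdesc_fini(&txd);
	}
	return (0);
}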
5360 hn_txdesc_gc(struct hn_tx_ring *txr, struct hn_txdesc *txd)
5363 KASSERT(txd->refs == 0 || txd->refs == 1,
5364 ("invalid txd refs %d", txd->refs));
5366 /* Aggregated txds will be freed by their aggregating txd. */
5367 if (txd->refs > 0 && (txd->flags & HN_TXD_FLAG_ONAGG) == 0) {
5370 freed = hn_txdesc_put(txr, txd);
5386 * to the aggregating txd, two passes are used here:
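
The comment fragment at file line 5386 explains the teardown ordering: since an aggregated txd is freed by its aggregating txd, ring destruction first garbage-collects pending descriptors (hn_txdesc_gc(), which skips ONAGG ones) and only then, in a second pass, destroys the busdma resources. A much-simplified model of that two-pass ordering; the hand-off from the aggregating descriptor is faked with a direct assignment.

#include <assert.h>
#include <stdio.h>

#define NDESC	   4
#define FLAG_ONAGG 0x2

struct txdesc {
	int refs;
	int flags;
	int has_dma;	/* stand-in for the busdma maps/memory */
};

/* Pass 1 (~ hn_txdesc_gc): force-drop the last ref of a pending txd,
 * but leave aggregated ones alone; their aggregating txd frees them. */
static void
txdesc_gc(struct txdesc *txd)
{
	assert(txd->refs == 0 || txd->refs == 1);	/* "invalid txd refs" */
	if (txd->refs > 0 && (txd->flags & FLAG_ONAGG) == 0)
		txd->refs = 0;		/* hn_txdesc_put() until freed */
}

int
main(void)
{
	struct txdesc d[NDESC] = {
		{ .refs = 1, .has_dma = 1 },			/* pending */
		{ .refs = 1, .flags = FLAG_ONAGG, .has_dma = 1 },
		{ .refs = 0, .has_dma = 1 },			/* idle */
		{ .refs = 0, .has_dma = 1 },
	};

	/* Pass 1: GC pending descriptors... */
	for (int i = 0; i < NDESC; i++)
		txdesc_gc(&d[i]);
	/* ...the aggregating txd's teardown releases ONAGG children. */
	d[1].refs = 0;

	/* Pass 2: only now is it safe to destroy the DMA resources. */
	for (int i = 0; i < NDESC; i++) {
		d[i].has_dma = 0;	/* ~ hn_txdesc_dmamap_destroy() */
		printf("txd %d destroyed\n", i);
	}
	return (0);
}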
5683 struct hn_txdesc *txd;
5719 txd = hn_txdesc_get(txr);
5720 if (txd == NULL) {
5727 error = hn_encap(ifp, txr, txd, &m_head);
5729 /* Both txd and m_head are freed */
5747 error = hn_txpkt(ifp, txr, txd);
5749 /* txd is freed, but m_head is not */
5863 struct hn_txdesc *txd;
5877 txd = hn_txdesc_get(txr);
5878 if (txd == NULL) {
5885 error = hn_encap(ifp, txr, txd, &m_head);
5887 /* Both txd and m_head are freed; discard */
5905 error = hn_txpkt(ifp, txr, txd);
5907 /* txd is freed, but m_head is not */
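
Both transmit paths, hn_start_locked() (file lines 5683-5749) and hn_xmit() (5863-5907), run the same loop under the ownership contract their comments state: hn_txdesc_get() can fail (stop and requeue the mbuf), a failed hn_encap() frees both the descriptor and the mbuf, and a failed hn_txpkt() frees the descriptor but not the mbuf. A skeleton of one iteration; the stand-ins only model the success path, and the comments carry the real contract.

#include <stdio.h>
#include <stdlib.h>

struct txdesc {
	void *m;
};

/* Toy stand-ins for hn_txdesc_get()/hn_encap()/hn_txpkt(); the real
 * functions free things on failure per the rules in the comments. */
static struct txdesc *
txdesc_get(void)
{
	return (calloc(1, sizeof(struct txdesc)));
}

static int
encap(struct txdesc *txd, void *m_head)
{
	txd->m = m_head;	/* descriptor now owns the mbuf */
	return (0);
}

static int
txpkt(struct txdesc *txd)
{
	(void)txd;
	return (0);
}

/* The loop body shared by hn_start_locked() and hn_xmit(). */
static void
xmit_one(void *m_head)
{
	struct txdesc *txd = txdesc_get();

	if (txd == NULL)
		return;		/* ring exhausted: requeue m_head, stop */
	if (encap(txd, m_head) != 0) {
		/* hn_encap() failure: both txd and m_head already freed. */
		return;
	}
	if (txpkt(txd) != 0) {
		/* hn_txpkt() failure: txd freed, m_head NOT; the caller
		 * decides whether to retry or m_freem() it. */
		return;
	}
	printf("sent %p\n", m_head);
	free(txd);	/* toy: stands in for the async send completion */
}

int
main(void)
{
	int frame;

	xmit_one(&frame);
	return (0);
}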