Lines Matching defs:rxq (a symbol cross-reference of rxq in the FreeBSD Xen netfront driver; the leading number on each line is the line number in the driver source)

209 	struct netfront_rxq 	*rxq;
280 xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri)
286 m = rxq->mbufs[i];
287 rxq->mbufs[i] = NULL;
292 xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri)
295 grant_ref_t ref = rxq->grant_ref[i];
298 rxq->grant_ref[i] = GRANT_REF_INVALID;
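The two accessors at 280 and 292 implement a take-and-clear handoff: the free-running RING_IDX is masked down to a slot index, and the slot is emptied (NULL, GRANT_REF_INVALID) as the mbuf or grant reference is handed to the caller, so a slot can never be consumed twice. Below is a minimal userspace model of that pattern; the ring size, the stand-in types, and every model_ name are illustrative, not the driver's definitions.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define NET_RX_RING_SIZE  256            /* a power of two, as the ring macros require */
    #define GRANT_REF_INVALID ((uint32_t)~0u)

    typedef uint32_t RING_IDX;

    struct model_rxq {
        void     *mbufs[NET_RX_RING_SIZE];   /* stand-in for struct mbuf * */
        uint32_t  grant_ref[NET_RX_RING_SIZE];
    };

    /* Free-running index -> slot; correct only for power-of-two sizes. */
    static size_t
    model_rxidx(RING_IDX idx)
    {
        return (idx & (NET_RX_RING_SIZE - 1));
    }

    /* Take-and-clear: ownership moves to the caller, the slot is emptied. */
    static void *
    model_get_rx_mbuf(struct model_rxq *rxq, RING_IDX ri)
    {
        size_t i = model_rxidx(ri);
        void *m = rxq->mbufs[i];

        rxq->mbufs[i] = NULL;
        return (m);
    }

    static uint32_t
    model_get_rx_ref(struct model_rxq *rxq, RING_IDX ri)
    {
        size_t i = model_rxidx(ri);
        uint32_t ref = rxq->grant_ref[i];

        rxq->grant_ref[i] = GRANT_REF_INVALID;
        return (ref);
    }

    int
    main(void)
    {
        static struct model_rxq rxq;         /* zero-initialized */
        int stand_in;

        rxq.mbufs[model_rxidx(5)] = &stand_in;
        rxq.grant_ref[model_rxidx(5)] = 42;
        assert(model_get_rx_mbuf(&rxq, 5) == &stand_in);
        assert(model_get_rx_mbuf(&rxq, 5) == NULL);    /* cleared on first take */
        assert(model_get_rx_ref(&rxq, 5) == 42);
        assert(rxq.grant_ref[model_rxidx(5)] == GRANT_REF_INVALID);
        return (0);
    }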
421 XN_RX_LOCK(&np->rxq[i]);
426 XN_RX_UNLOCK(&np->rxq[i]);
446 XN_RX_LOCK(&info->rxq[i]);
451 XN_RX_UNLOCK(&info->rxq[i]);
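The lock/unlock pairs at 421-451 show the driver's locking convention: there is no single RX lock; whole-interface operations iterate the queues and take each queue's mutex (set up by the mtx_init at 719) in turn. A small pthread sketch of the same shape, assuming nothing beyond that pattern (NUM_QUEUES and the model_ names are made up):

    #include <pthread.h>

    #define NUM_QUEUES 4

    struct model_rxq {
        pthread_mutex_t lock;                /* stands in for the per-queue mtx */
    };

    static struct model_rxq rxqs[NUM_QUEUES] = {
        { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
        { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    };

    int
    main(void)
    {
        /* Whole-interface work takes each queue's lock in turn. */
        for (int i = 0; i < NUM_QUEUES; i++) {
            pthread_mutex_lock(&rxqs[i].lock);
            /* ...quiesce or restart queue i... */
            pthread_mutex_unlock(&rxqs[i].lock);
        }
        return (0);
    }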
463 struct netfront_rxq *rxq,
473 KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
475 KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
481 snprintf(path, path_size, "%s/queue-%u", node, rxq->id);
493 err = xs_printf(*xst, path, "rx-ring-ref","%u", rxq->ring_ref);
499 xen_intr_port(rxq->xen_intr_handle));
553 err = write_queue_xenstore_keys(dev, &info->rxq[0],
566 err = write_queue_xenstore_keys(dev, &info->rxq[i],
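Lines 463-499 publish each queue pair under a "queue-<id>" child of the device node, after asserting at 473-475 that the RX and TX halves share an id and an event channel. The key written at 493 is "rx-ring-ref"; the port captured at 499 is written under a key not shown in this listing (presumably the shared "event-channel" key of the Xen netif protocol). A hedged userspace sketch of just the path and key composition; the node string, key names beyond "rx-ring-ref", and all values are invented:

    #include <stdio.h>

    int
    main(void)
    {
        /* All names and values below are invented for illustration. */
        const char *node = "device/vif/0";
        unsigned int queue_id = 0;
        unsigned int rx_ring_ref = 8;        /* hypothetical grant reference */
        unsigned int evtchn_port = 12;       /* hypothetical event channel port */
        char path[64];

        /* Per-queue subdirectory, as built at 481. */
        snprintf(path, sizeof(path), "%s/queue-%u", node, queue_id);
        printf("%s/rx-ring-ref = %u\n", path, rx_ring_ref);
        printf("%s/event-channel = %u\n", path, evtchn_port);
        return (0);
    }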
624 xn_rxq_intr(struct netfront_rxq *rxq)
627 XN_RX_LOCK(rxq);
628 xn_rxeof(rxq);
629 XN_RX_UNLOCK(rxq);
665 disconnect_rxq(struct netfront_rxq *rxq)
668 xn_release_rx_bufs(rxq);
669 gnttab_free_grant_references(rxq->gref_head);
670 gnttab_end_foreign_access(rxq->ring_ref, NULL);
676 rxq->xen_intr_handle = 0;
680 destroy_rxq(struct netfront_rxq *rxq)
683 callout_drain(&rxq->rx_refill);
684 free(rxq->ring.sring, M_DEVBUF);
693 destroy_rxq(&np->rxq[i]);
695 free(np->rxq, M_DEVBUF);
696 np->rxq = NULL;
706 struct netfront_rxq *rxq;
708 info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
712 rxq = &info->rxq[q];
714 rxq->id = q;
715 rxq->info = info;
716 rxq->ring_ref = GRANT_REF_INVALID;
717 rxq->ring.sring = NULL;
718 snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
719 mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
723 rxq->mbufs[i] = NULL;
724 rxq->grant_ref[i] = GRANT_REF_INVALID;
730 &rxq->gref_head) != 0) {
739 FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE);
742 &rxq->ring_ref);
748 callout_init(&rxq->rx_refill, 1);
754 gnttab_free_grant_references(rxq->gref_head);
755 free(rxq->ring.sring, M_DEVBUF);
758 disconnect_rxq(&info->rxq[q]);
759 destroy_rxq(&info->rxq[q]);
762 free(info->rxq, M_DEVBUF);
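Lines 706-762 allocate the queue array and initialize each queue in order: identity and back-pointer (714-715), name and mutex (718-719), slot arrays (723-724), a grant-reference pool (730), the shared ring (739-742), and the refill callout (748). On failure the code unwinds in reverse: queue q's partial state is freed (754-755), every fully built queue is disconnected and destroyed (758-759), then the array itself is freed (762). A compilable model of that goto-chain unwind, with stub setup steps standing in for the grant and ring calls:

    #include <stdlib.h>

    struct model_rxq {
        int   id;
        void *sring;
    };

    static int  model_grab_refs(struct model_rxq *q)  { (void)q; return (0); }
    static void model_free_refs(struct model_rxq *q)  { (void)q; }
    static int  model_alloc_ring(struct model_rxq *q) { q->sring = malloc(64); return (q->sring == NULL); }
    static void model_destroy(struct model_rxq *q)    { model_free_refs(q); free(q->sring); }

    static int
    model_setup_rxqs(struct model_rxq **out, unsigned int num_queues)
    {
        struct model_rxq *rxqs;
        unsigned int q;
        int err;

        rxqs = calloc(num_queues, sizeof(*rxqs));
        if (rxqs == NULL)
            return (1);

        for (q = 0; q < num_queues; q++) {
            rxqs[q].id = (int)q;
            if ((err = model_grab_refs(&rxqs[q])) != 0)
                goto fail;
            if ((err = model_alloc_ring(&rxqs[q])) != 0)
                goto fail_refs;
        }
        *out = rxqs;
        return (0);

    fail_refs:
        model_free_refs(&rxqs[q]);           /* undo queue q's partial setup */
    fail:
        while (q-- > 0)                      /* tear down fully built queues */
            model_destroy(&rxqs[q]);
        free(rxqs);
        return (err);
    }

    int
    main(void)
    {
        struct model_rxq *rxqs;

        if (model_setup_rxqs(&rxqs, 4) != 0)
            return (1);
        for (unsigned int q = 4; q-- > 0;)
            model_destroy(&rxqs[q]);
        free(rxqs);
        return (0);
    }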
908 if (info->rxq)
924 info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;
1043 xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
1056 xn_alloc_rx_buffers(struct netfront_rxq *rxq)
1061 XN_RX_LOCK_ASSERT(rxq);
1063 if (__predict_false(rxq->info->carrier == 0))
1066 for (req_prod = rxq->ring.req_prod_pvt;
1067 req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
1075 m = xn_alloc_one_rx_buffer(rxq);
1081 KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
1082 rxq->mbufs[id] = m;
1084 ref = gnttab_claim_grant_reference(&rxq->gref_head);
1087 rxq->grant_ref[id] = ref;
1090 req = RING_GET_REQUEST(&rxq->ring, req_prod);
1093 xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
1098 rxq->ring.req_prod_pvt = req_prod;
1101 if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
1102 callout_reset_curcpu(&rxq->rx_refill, hz/10,
1103 xn_alloc_rx_buffers_callout, rxq);
1109 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
1111 xen_intr_signal(rxq->xen_intr_handle);
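xn_alloc_rx_buffers (1056) keeps the ring full by comparing free-running indices: req_prod - rxq->ring.rsp_cons (1066-1067) is the number of outstanding requests, and the comparison stays valid across 32-bit wraparound because both counters only grow. If fewer than NET_RX_SLOTS_MIN slots could be posted, the callout at 1102-1103 retries later; otherwise the batch is pushed and the backend signalled (1109-1111). The wraparound property is easy to check in isolation; this is a toy demonstration of the index arithmetic, not the ring macros:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t RING_IDX;
    #define NET_RX_RING_SIZE 256

    int
    main(void)
    {
        RING_IDX rsp_cons = UINT32_MAX - 2;  /* consumer just before wraparound */
        RING_IDX req_prod = rsp_cons;
        unsigned int filled = 0;

        /* Same guard as 1066-1067: stop once a full ring is outstanding. */
        while (req_prod - rsp_cons < NET_RX_RING_SIZE) {
            /* ...post a request at slot (req_prod & (NET_RX_RING_SIZE - 1))... */
            req_prod++;
            filled++;
        }
        assert(filled == NET_RX_RING_SIZE);  /* holds even across the wrap */
        return (0);
    }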
1116 struct netfront_rxq *rxq;
1118 rxq = (struct netfront_rxq *)arg;
1119 XN_RX_LOCK(rxq);
1120 xn_alloc_rx_buffers(rxq);
1121 XN_RX_UNLOCK(rxq);
1125 xn_release_rx_bufs(struct netfront_rxq *rxq)
1131 m = rxq->mbufs[i];
1136 ref = rxq->grant_ref[i];
1141 gnttab_release_grant_reference(&rxq->gref_head, ref);
1142 rxq->mbufs[i] = NULL;
1143 rxq->grant_ref[i] = GRANT_REF_INVALID;
1149 xn_rxeof(struct netfront_rxq *rxq)
1152 struct netfront_info *np = rxq->info;
1154 struct lro_ctrl *lro = &rxq->lro;
1164 XN_RX_LOCK_ASSERT(rxq);
1176 rp = rxq->ring.sring->rsp_prod;
1179 i = rxq->ring.rsp_cons;
1181 memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
1185 err = xn_get_responses(rxq, &rinfo, rp, &i, &m);
1220 rxq->ring.rsp_cons = i;
1222 xn_alloc_rx_buffers(rxq);
1224 RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
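xn_rxeof (1149) drains responses up to the snapshot of rsp_prod taken at 1176, refills the ring (1222), and then re-checks at 1224. That final check is the classic event-channel race closer: after advertising the next index of interest in rsp_event, the consumer must look at rsp_prod once more, or a response that landed between the drain and the update would sit unprocessed until some later interrupt. A minimal C11-atomics model of the idiom, with memory ordering simplified to seq_cst where the real ring macros use explicit barriers:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t RING_IDX;

    struct model_sring {
        _Atomic RING_IDX rsp_prod;    /* advanced by the backend */
        _Atomic RING_IDX rsp_event;   /* backend notifies when rsp_prod reaches this */
    };

    /*
     * After draining up to rsp_cons, advertise the next index we care
     * about, then check rsp_prod once more so a response that raced with
     * the rsp_event update is noticed now rather than never.
     */
    static bool
    model_final_check(struct model_sring *s, RING_IDX rsp_cons)
    {
        if (atomic_load(&s->rsp_prod) != rsp_cons)
            return (true);                       /* new work already visible */
        atomic_store(&s->rsp_event, rsp_cons + 1);
        return (atomic_load(&s->rsp_prod) != rsp_cons);
    }

    int
    main(void)
    {
        struct model_sring s = { 3, 0 };

        return (model_final_check(&s, 3) ? 1 : 0);   /* nothing unconsumed: 0 */
    }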
1341 struct netfront_rxq *rxq = &np->rxq[txq->id];
1344 xn_rxq_intr(rxq);
1349 xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
1352 int new = xn_rxidx(rxq->ring.req_prod_pvt);
1354 KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
1355 rxq->mbufs[new] = m;
1356 rxq->grant_ref[new] = ref;
1357 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
1358 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
1359 rxq->ring.req_prod_pvt++;
1363 xn_get_extras(struct netfront_rxq *rxq,
1380 RING_GET_RESPONSE(&rxq->ring, ++(*cons));
1389 m = xn_get_rx_mbuf(rxq, *cons);
1390 ref = xn_get_rx_ref(rxq, *cons);
1391 xn_move_rx_slot(rxq, m, ref);
1398 xn_get_responses(struct netfront_rxq *rxq,
1405 grant_ref_t ref = xn_get_rx_ref(rxq, *cons);
1411 m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);
1414 err = xn_get_extras(rxq, extras, rp, cons);
1430 xn_move_rx_slot(rxq, m, ref);
1452 gnttab_release_grant_reference(&rxq->gref_head, ref);
1480 rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
1481 m = xn_get_rx_mbuf(rxq, *cons + frags);
1497 ref = xn_get_rx_ref(rxq, *cons + frags);
1702 struct netfront_rxq *rxq;
1714 rxq = &np->rxq[i];
1715 XN_RX_LOCK(rxq);
1716 xn_alloc_rx_buffers(rxq);
1717 rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
1718 if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
1719 xn_rxeof(rxq);
1720 XN_RX_UNLOCK(rxq);
1886 xn_rebuild_rx_bufs(struct netfront_rxq *rxq)
1896 if (rxq->mbufs[i] == NULL)
1899 m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
1900 ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);
1902 req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
1906 xenbus_get_otherend_id(rxq->info->xbdev),
1915 rxq->ring.req_prod_pvt = requeue_idx;
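xn_rebuild_rx_bufs (1886) runs on resume, when the old ring is gone but some receive mbufs survived. The survivors sit at arbitrary slots, so the loop at 1896-1915 compacts them to slots 0..n-1, regrants each page to the new backend (1902-1906), and leaves req_prod_pvt equal to the count requeued (1915). The compaction step in isolation, on a tiny ring with stand-in pointers:

    #include <assert.h>
    #include <stddef.h>

    #define NET_RX_RING_SIZE 8               /* tiny ring for the example */

    /* Pack surviving buffers to the front; returns the new req_prod_pvt. */
    static size_t
    model_rebuild(void *mbufs[NET_RX_RING_SIZE])
    {
        size_t i, requeue_idx = 0;

        for (i = 0; i < NET_RX_RING_SIZE; i++) {
            if (mbufs[i] == NULL)
                continue;
            mbufs[requeue_idx] = mbufs[i];
            if (requeue_idx != i)
                mbufs[i] = NULL;
            /* ...regrant the page and post request requeue_idx... */
            requeue_idx++;
        }
        return (requeue_idx);
    }

    int
    main(void)
    {
        int a, b;
        void *mbufs[NET_RX_RING_SIZE] = { NULL, &a, NULL, NULL, &b };

        assert(model_rebuild(mbufs) == 2);
        assert(mbufs[0] == &a && mbufs[1] == &b);
        assert(mbufs[4] == NULL);
        return (0);
    }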
1924 struct netfront_rxq *rxq;
1953 rxq = &np->rxq[i];
1954 xn_rebuild_rx_bufs(rxq);
1971 struct netfront_rxq *rxq;
1977 rxq = &np->rxq[i];
1982 XN_RX_LOCK(rxq);
1983 xn_alloc_rx_buffers(rxq);
1984 XN_RX_UNLOCK(rxq);
2056 tcp_lro_free(&np->rxq[i].lro);
2061 err = tcp_lro_init(&np->rxq[i].lro);
2068 np->rxq[i].lro.ifp = ifp;
2272 free(np->rxq, M_DEVBUF);
2285 XN_RX_LOCK(&np->rxq[i]);
2290 XN_RX_UNLOCK(&np->rxq[i]);
2295 disconnect_rxq(&np->rxq[i]);