Lines Matching defs:queue (drivers/net/xen-netback/interface.c)

44 /* Number of bytes allowed on the internal guest Rx queue. */
52 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
56 atomic_inc(&queue->inflight_packets);
59 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
61 atomic_dec(&queue->inflight_packets);
67 wake_up(&queue->dealloc_wq);
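
The fragments at lines 52-67 above track zerocopy SKBs with an atomic in-flight counter: the prepare path increments queue->inflight_packets, the completion path decrements it and then wakes queue->dealloc_wq so the dealloc kthread can re-check the count. A minimal userspace sketch of the same counter-plus-wait pattern, with all names invented here (this is not the kernel code, which uses a kernel wait queue rather than a condition variable):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int inflight;                 /* cf. queue->inflight_packets */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;   /* cf. queue->dealloc_wq */

static void zerocopy_prepare(void)
{
	atomic_fetch_add(&inflight, 1);     /* packet handed to the zerocopy path */
}

static void zerocopy_complete(void)
{
	/* Decrement first, then wake the waiter, so a teardown path that is
	 * waiting for the count to reach zero cannot miss the last completion. */
	atomic_fetch_sub(&inflight, 1);
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

static void wait_for_completions(void)      /* cf. the dealloc kthread's wait */
{
	pthread_mutex_lock(&lock);
	while (atomic_load(&inflight) != 0)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	zerocopy_prepare();
	zerocopy_complete();
	wait_for_completions();
	puts("no packets in flight");
	return 0;
}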
77 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
81 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
83 napi_schedule(&queue->napi);
89 struct xenvif_queue *queue = dev_id;
92 old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
95 if (!xenvif_handle_tx_interrupt(queue)) {
96 atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
105 struct xenvif_queue *queue =
113 if (unlikely(queue->vif->disabled)) {
118 work_done = xenvif_tx_action(queue, budget);
122 /* If the queue is rate-limited, it shall be rescheduled in the timer callback. */
125 if (likely(!queue->rate_limited))
126 xenvif_napi_schedule_or_enable_events(queue);
132 static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
136 rc = xenvif_have_rx_work(queue, false);
138 xenvif_kick_thread(queue);
144 struct xenvif_queue *queue = dev_id;
147 old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
150 if (!xenvif_handle_rx_interrupt(queue)) {
151 atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
160 struct xenvif_queue *queue = dev_id;
164 old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
167 has_tx = xenvif_handle_tx_interrupt(queue);
168 has_rx = xenvif_handle_rx_interrupt(queue);
171 atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
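
The interrupt-handler fragments at lines 77-171 record an owed end-of-interrupt in queue->eoi_pending with atomic_fetch_or(), schedule the deferred work (napi_schedule() for TX, kicking the RX kthread for RX), and withdraw the bit again with atomic_andnot() when the ring turns out to have no work. A hedged userspace sketch of that flag handling follows; the names are invented, printf stands in for the actual end-of-interrupt notification, and C11 has no atomic_fetch_andnot, so the sketch uses atomic_fetch_and with the complement:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EOI_TX (1u << 0)                    /* cf. NETBK_TX_EOI */
#define EOI_RX (1u << 1)                    /* cf. NETBK_RX_EOI */

static atomic_uint eoi_pending;             /* cf. queue->eoi_pending */

/* Stand-in for RING_HAS_UNCONSUMED_REQUESTS() + napi_schedule(). */
static bool handle_tx_interrupt(void)
{
	return false;                       /* pretend the TX ring was empty */
}

static void tx_interrupt(void)
{
	/* Note that a TX end-of-interrupt is now owed. */
	unsigned int old = atomic_fetch_or(&eoi_pending, EOI_TX);

	if (old & EOI_TX)
		fprintf(stderr, "interrupt while EOI already pending\n");

	if (!handle_tx_interrupt()) {
		/* No work was queued: withdraw the pending bit and
		 * acknowledge the (spurious) event right away. */
		atomic_fetch_and(&eoi_pending, ~EOI_TX);
		puts("EOI sent immediately (spurious interrupt)");
	}
	/* Otherwise the deferred TX work sends the EOI once it is done. */
}

int main(void)
{
	tx_interrupt();
	printf("eoi_pending = %#x\n", atomic_load(&eoi_pending));
	return 0;
}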
208 struct xenvif_queue *queue = NULL;
223 /* Obtain the queue to be used to transmit this packet */
226 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
230 queue = &vif->queues[index];
232 /* Drop the packet if queue is not ready */
233 if (queue->task == NULL ||
234 queue->dealloc_task == NULL ||
258 if (!xenvif_rx_queue_tail(queue, skb))
261 xenvif_kick_thread(queue);
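
Lines 208-261 are from the transmit path: the packet's queue is looked up by index, an out-of-range index is warned about and the packet dropped, and packets are also dropped while the queue's RX and dealloc kthreads are not yet running; accepted packets are queued with xenvif_rx_queue_tail() and the RX kthread is kicked. A simplified userspace sketch of the selection and readiness check (the structures here are invented, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct queue {
	bool task_running;                  /* cf. queue->task */
	bool dealloc_task_running;          /* cf. queue->dealloc_task */
};

struct vif {
	struct queue *queues;
	size_t num_queues;
};

/* Returns the queue to use for a packet, or NULL to drop it. */
static struct queue *select_tx_queue(struct vif *vif, size_t index)
{
	struct queue *q;

	if (index >= vif->num_queues) {
		fprintf(stderr, "invalid queue %zu, dropping packet\n", index);
		return NULL;
	}

	q = &vif->queues[index];

	/* Drop while the queue is not ready (kthreads not started yet). */
	if (!q->task_running || !q->dealloc_task_running)
		return NULL;

	return q;
}

int main(void)
{
	struct queue queues[2] = { { true, true }, { false, false } };
	struct vif vif = { queues, 2 };

	printf("queue 0: %s\n", select_tx_queue(&vif, 0) ? "accepted" : "dropped");
	printf("queue 1: %s\n", select_tx_queue(&vif, 1) ? "accepted" : "dropped");
	printf("queue 9: %s\n", select_tx_queue(&vif, 9) ? "accepted" : "dropped");
	return 0;
}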
274 struct xenvif_queue *queue = NULL;
285 /* Aggregate tx and rx stats from each queue */
287 queue = &vif->queues[index];
288 rx_bytes += queue->stats.rx_bytes;
289 rx_packets += queue->stats.rx_packets;
290 tx_bytes += queue->stats.tx_bytes;
291 tx_packets += queue->stats.tx_packets;
306 struct xenvif_queue *queue = NULL;
311 queue = &vif->queues[queue_index];
312 napi_enable(&queue->napi);
313 enable_irq(queue->tx_irq);
314 if (queue->tx_irq != queue->rx_irq)
315 enable_irq(queue->rx_irq);
316 xenvif_napi_schedule_or_enable_events(queue);
322 struct xenvif_queue *queue = NULL;
327 queue = &vif->queues[queue_index];
328 disable_irq(queue->tx_irq);
329 if (queue->tx_irq != queue->rx_irq)
330 disable_irq(queue->rx_irq);
331 napi_disable(&queue->napi);
332 del_timer_sync(&queue->credit_timeout);
561 int xenvif_init_queue(struct xenvif_queue *queue)
565 queue->credit_bytes = queue->remaining_credit = ~0UL;
566 queue->credit_usec = 0UL;
567 timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
568 queue->credit_window_start = get_jiffies_64();
570 queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
572 skb_queue_head_init(&queue->rx_queue);
573 skb_queue_head_init(&queue->tx_queue);
575 queue->pending_cons = 0;
576 queue->pending_prod = MAX_PENDING_REQS;
578 queue->pending_ring[i] = i;
580 spin_lock_init(&queue->callback_lock);
581 spin_lock_init(&queue->response_lock);
588 queue->mmap_pages);
590 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
595 queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
599 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
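
xenvif_init_queue() (lines 561-599) starts the queue with effectively unlimited TX credit, initializes the credit timer with timer_setup(), and fills the pending ring with the identity mapping so every pending-request index starts out free; pending_cons and pending_prod then walk that ring as indices are taken and returned. A small sketch of that free-index ring, keeping the field and constant names from the listing but with invented helper functions:

#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS 256                /* same constant name as the driver */

static uint16_t pending_ring[MAX_PENDING_REQS];
static unsigned int pending_cons;           /* cf. queue->pending_cons */
static unsigned int pending_prod;           /* cf. queue->pending_prod */

static void pending_init(void)
{
	unsigned int i;

	pending_cons = 0;
	pending_prod = MAX_PENDING_REQS;    /* ring starts completely full of free slots */
	for (i = 0; i < MAX_PENDING_REQS; i++)
		pending_ring[i] = i;        /* slot i is initially free */
}

/* Take one free pending index; the caller must know one is available. */
static uint16_t pending_get(void)
{
	return pending_ring[pending_cons++ % MAX_PENDING_REQS];
}

/* Return a pending index once the request using it has completed. */
static void pending_put(uint16_t idx)
{
	pending_ring[pending_prod++ % MAX_PENDING_REQS] = idx;
}

int main(void)
{
	pending_init();
	uint16_t idx = pending_get();
	printf("got pending index %u, %u slots still free\n",
	       (unsigned int)idx, pending_prod - pending_cons);
	pending_put(idx);
	return 0;
}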
671 static void xenvif_disconnect_queue(struct xenvif_queue *queue)
673 if (queue->task) {
674 kthread_stop_put(queue->task);
675 queue->task = NULL;
678 if (queue->dealloc_task) {
679 kthread_stop(queue->dealloc_task);
680 queue->dealloc_task = NULL;
683 if (queue->napi.poll) {
684 netif_napi_del(&queue->napi);
685 queue->napi.poll = NULL;
688 if (queue->tx_irq) {
689 unbind_from_irqhandler(queue->tx_irq, queue);
690 if (queue->tx_irq == queue->rx_irq)
691 queue->rx_irq = 0;
692 queue->tx_irq = 0;
695 if (queue->rx_irq) {
696 unbind_from_irqhandler(queue->rx_irq, queue);
697 queue->rx_irq = 0;
700 xenvif_unmap_frontend_data_rings(queue);
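
xenvif_disconnect_queue() (lines 671-700) follows a check/release/clear pattern for every resource: each member is released only if it was set, and the field is then cleared, so the teardown is safe to run on a partially connected queue (the connect error path at line 785 relies on exactly this). A minimal sketch of that pattern with invented resource types:

#include <stdio.h>

struct worker { const char *name; };        /* stand-in for a kthread */

struct queue {
	struct worker *task;                /* cf. queue->task */
	struct worker *dealloc_task;        /* cf. queue->dealloc_task */
	int tx_irq;                         /* cf. queue->tx_irq, 0 = not bound */
	int rx_irq;                         /* cf. queue->rx_irq */
};

static void worker_stop(struct worker *w)
{
	printf("stopping %s\n", w->name);   /* cf. kthread_stop()/kthread_stop_put() */
}

/* Safe to call no matter how far the connect path got: every step checks
 * whether the resource exists and clears the field afterwards. */
static void disconnect_queue(struct queue *q)
{
	if (q->task) {
		worker_stop(q->task);
		q->task = NULL;
	}

	if (q->dealloc_task) {
		worker_stop(q->dealloc_task);
		q->dealloc_task = NULL;
	}

	if (q->tx_irq) {
		printf("unbinding irq %d\n", q->tx_irq);
		if (q->tx_irq == q->rx_irq)
			q->rx_irq = 0;      /* shared interrupt: only unbind once */
		q->tx_irq = 0;
	}

	if (q->rx_irq) {
		printf("unbinding irq %d\n", q->rx_irq);
		q->rx_irq = 0;
	}
}

int main(void)
{
	struct worker guest_rx = { "guest-rx" };
	struct queue q = { .task = &guest_rx, .tx_irq = 17, .rx_irq = 17 };

	disconnect_queue(&q);
	disconnect_queue(&q);               /* second call is a harmless no-op */
	return 0;
}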
703 int xenvif_connect_data(struct xenvif_queue *queue,
709 struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
713 BUG_ON(queue->tx_irq);
714 BUG_ON(queue->task);
715 BUG_ON(queue->dealloc_task);
717 err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
722 init_waitqueue_head(&queue->wq);
723 init_waitqueue_head(&queue->dealloc_wq);
724 atomic_set(&queue->inflight_packets, 0);
726 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);
728 queue->stalled = true;
730 task = kthread_run(xenvif_kthread_guest_rx, queue,
731 "%s-guest-rx", queue->name);
734 queue->task = task;
741 task = kthread_run(xenvif_dealloc_kthread, queue,
742 "%s-dealloc", queue->name);
745 queue->dealloc_task = task;
751 queue->name, queue);
754 queue->tx_irq = queue->rx_irq = err;
755 disable_irq(queue->tx_irq);
758 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
759 "%s-tx", queue->name);
762 queue->tx_irq_name, queue);
765 queue->tx_irq = err;
766 disable_irq(queue->tx_irq);
768 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
769 "%s-rx", queue->name);
772 queue->rx_irq_name, queue);
775 queue->rx_irq = err;
776 disable_irq(queue->rx_irq);
782 pr_warn("Could not allocate kthread for %s\n", queue->name);
785 xenvif_disconnect_queue(queue);
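
xenvif_connect_data() (lines 703-785) starts the guest-rx and dealloc kthreads with kthread_run(), records the task pointers in the queue, binds either one shared event-channel interrupt or separate tx/rx interrupts (left disabled until the interface is brought up), and on any failure falls through to xenvif_disconnect_queue(), which undoes exactly the steps that succeeded. A hedged userspace sketch of that start-and-record-then-unwind pattern, using pthread_create in place of kthread_run and invented helper names:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct queue {
	const char *name;
	pthread_t task;                     /* cf. queue->task */
	int task_started;
	pthread_t dealloc_task;             /* cf. queue->dealloc_task */
	int dealloc_task_started;
};

static void *guest_rx_thread(void *arg)  { (void)arg; return NULL; }
static void *dealloc_thread(void *arg)   { (void)arg; return NULL; }

/* Undo whatever connect_queue() managed to set up (cf. xenvif_disconnect_queue()). */
static void disconnect_queue(struct queue *q)
{
	if (q->task_started) {
		pthread_join(q->task, NULL);
		q->task_started = 0;
	}
	if (q->dealloc_task_started) {
		pthread_join(q->dealloc_task, NULL);
		q->dealloc_task_started = 0;
	}
}

static int connect_queue(struct queue *q)
{
	int err;

	err = pthread_create(&q->task, NULL, guest_rx_thread, q);
	if (err) {
		fprintf(stderr, "could not start %s-guest-rx: %s\n",
			q->name, strerror(err));
		goto err;
	}
	q->task_started = 1;

	err = pthread_create(&q->dealloc_task, NULL, dealloc_thread, q);
	if (err) {
		fprintf(stderr, "could not start %s-dealloc: %s\n",
			q->name, strerror(err));
		goto err;
	}
	q->dealloc_task_started = 1;

	return 0;

err:	/* cf. the err: label that ends in xenvif_disconnect_queue() */
	disconnect_queue(q);
	return err;
}

int main(void)
{
	struct queue q = { .name = "vif0.0-q0" };

	if (connect_queue(&q) == 0)
		puts("queue connected");
	disconnect_queue(&q);
	return 0;
}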
804 struct xenvif_queue *queue = NULL;
811 queue = &vif->queues[queue_index];
813 xenvif_disconnect_queue(queue);
835 * Used for queue teardown from xenvif_free(), and on the
838 void xenvif_deinit_queue(struct xenvif_queue *queue)
840 gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);