Lines matching refs: queue

42 static void xenvif_update_needed_slots(struct xenvif_queue *queue,
55 WRITE_ONCE(queue->rx_slots_needed, needed);
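
These two fragments are the ends of xenvif_update_needed_slots(): compute how many ring slots the packet at the head of the rx_queue will need, then publish the estimate. A sketch with the sizing in between filled in as an assumption (page-granular data slots plus one extra slot each for GSO and hash metadata):

	static void xenvif_update_needed_slots(struct xenvif_queue *queue,
					       const struct sk_buff *skb)
	{
		unsigned int needed = 0;

		if (skb) {
			/* Assumed: one slot per page of packet data. */
			needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
			if (skb_is_gso(skb))
				needed++;	/* extra info slot for GSO */
			if (skb->sw_hash)
				needed++;	/* extra info slot for the hash */
		}

		/* Pairs with READ_ONCE() in xenvif_rx_ring_slots_available(). */
		WRITE_ONCE(queue->rx_slots_needed, needed);
	}
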
58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
63 needed = READ_ONCE(queue->rx_slots_needed);
68 prod = queue->rx.sring->req_prod;
69 cons = queue->rx.req_cons;
74 queue->rx.sring->req_event = prod + 1;
80 } while (queue->rx.sring->req_prod != prod);
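
Lines 63-80 are the standard Xen shared-ring poll: if prod - cons does not cover the estimate, write req_event so the frontend raises an event when it posts more requests, then re-read req_prod to close the race with a frontend that posted in the meantime. A reconstruction; the slot comparison and the barrier are filled in, not shown above:

	static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
	{
		unsigned int needed = READ_ONCE(queue->rx_slots_needed);
		RING_IDX prod, cons;

		if (!needed)
			return false;

		do {
			prod = queue->rx.sring->req_prod;
			cons = queue->rx.req_cons;

			if (prod - cons >= needed)
				return true;

			/* Request an event once req_prod moves past prod. */
			queue->rx.sring->req_event = prod + 1;

			/* Assumed barrier: make the event write visible
			 * before re-checking the producer.
			 */
			mb();
		} while (queue->rx.sring->req_prod != prod);

		return false;
	}
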
85 bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
90 spin_lock_irqsave(&queue->rx_queue.lock, flags);
92 if (queue->rx_queue_len >= queue->rx_queue_max) {
93 struct net_device *dev = queue->vif->dev;
95 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
98 if (skb_queue_empty(&queue->rx_queue))
99 xenvif_update_needed_slots(queue, skb);
101 __skb_queue_tail(&queue->rx_queue, skb);
103 queue->rx_queue_len += skb->len;
106 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
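
xenvif_rx_queue_tail() (lines 85-106) queues a packet for the guest under the rx_queue lock: refuse and stop the matching netdev TX queue when the byte budget is full, refresh the slot estimate when the queue goes non-empty, and account the bytes. A sketch; the boolean return plumbing is assumed:

	bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
	{
		unsigned long flags;
		bool ret = true;

		spin_lock_irqsave(&queue->rx_queue.lock, flags);

		if (queue->rx_queue_len >= queue->rx_queue_max) {
			struct net_device *dev = queue->vif->dev;

			/* Backpressure: stop the paired TX queue. */
			netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
			ret = false;
		} else {
			/* New head: update the slot estimate for it. */
			if (skb_queue_empty(&queue->rx_queue))
				xenvif_update_needed_slots(queue, skb);

			__skb_queue_tail(&queue->rx_queue, skb);

			queue->rx_queue_len += skb->len;
		}

		spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

		return ret;
	}
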
111 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
115 spin_lock_irq(&queue->rx_queue.lock);
117 skb = __skb_dequeue(&queue->rx_queue);
119 xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
121 queue->rx_queue_len -= skb->len;
122 if (queue->rx_queue_len < queue->rx_queue_max) {
125 txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
130 spin_unlock_irq(&queue->rx_queue.lock);
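
The dequeue side mirrors it: re-estimate slots for the new head and wake the TX queue once the backlog drops below the cap. The netif_tx_wake_queue() call is assumed from the netdev_get_tx_queue() lookup on line 125:

	static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
	{
		struct sk_buff *skb;

		spin_lock_irq(&queue->rx_queue.lock);

		skb = __skb_dequeue(&queue->rx_queue);
		if (skb) {
			/* Estimate for the packet now at the head (or 0). */
			xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

			queue->rx_queue_len -= skb->len;
			if (queue->rx_queue_len < queue->rx_queue_max) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
				netif_tx_wake_queue(txq);	/* assumed */
			}
		}

		spin_unlock_irq(&queue->rx_queue.lock);

		return skb;
	}
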
135 static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
139 while ((skb = xenvif_rx_dequeue(queue)) != NULL)
143 static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
148 skb = skb_peek(&queue->rx_queue);
153 xenvif_rx_dequeue(queue);
155 queue->vif->dev->stats.rx_dropped++;
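
xenvif_rx_queue_drop_expired() walks the head of the queue and discards packets whose deadline has passed, bumping rx_dropped. A sketch assuming the deadline is stashed in the skb control block (the XENVIF_RX_CB(skb)->expires field is not shown in the fragments):

	static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
	{
		struct sk_buff *skb;

		for (;;) {
			skb = skb_peek(&queue->rx_queue);
			if (!skb)
				break;
			/* Assumed per-skb deadline in the skb cb. */
			if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
				break;
			xenvif_rx_dequeue(queue);
			kfree_skb(skb);
			queue->vif->dev->stats.rx_dropped++;
		}
	}
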
159 static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
164 gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
166 for (i = 0; i < queue->rx_copy.num; i++) {
169 op = &queue->rx_copy.op[i];
177 rsp = RING_GET_RESPONSE(&queue->rx,
178 queue->rx_copy.idx[i]);
183 queue->rx_copy.num = 0;
186 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
188 notify_remote_via_irq(queue->rx_irq);
190 __skb_queue_purge(queue->rx_copy.completed);
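
xenvif_rx_copy_flush() (lines 159-190) issues the batched grant copies in one hypercall, patches the status of any response whose copy failed, publishes the responses, kicks the frontend's event channel if needed, and frees the completed skbs. A reconstruction; the error check around lines 169-178 is assumed to compare against GNTST_okay:

	static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
	{
		unsigned int i;
		int notify;

		gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

		for (i = 0; i < queue->rx_copy.num; i++) {
			struct gnttab_copy *op = &queue->rx_copy.op[i];

			if (unlikely(op->status != GNTST_okay)) {
				struct xen_netif_rx_response *rsp;

				/* Report the failure in the matching response. */
				rsp = RING_GET_RESPONSE(&queue->rx,
							queue->rx_copy.idx[i]);
				rsp->status = op->status;
			}
		}

		queue->rx_copy.num = 0;

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
		if (notify)
			notify_remote_via_irq(queue->rx_irq);

		__skb_queue_purge(queue->rx_copy.completed);
	}
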
193 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
201 if (queue->rx_copy.num == COPY_BATCH_SIZE)
202 xenvif_rx_copy_flush(queue);
204 op = &queue->rx_copy.op[queue->rx_copy.num];
222 op->dest.domid = queue->vif->domid;
226 queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
227 queue->rx_copy.num++;
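
xenvif_rx_copy_add() appends one grant-copy op, flushing first when the batch is full, and records which ring index the op belongs to so a failure can be reported in the right response. A deliberately simplified sketch that assumes the source page is backend-local; the real function must also cope with foreign (grant-mapped) source pages:

	static void xenvif_rx_copy_add(struct xenvif_queue *queue,
				       struct xen_netif_rx_request *req,
				       unsigned int offset, void *data, size_t len)
	{
		struct gnttab_copy *op;

		if (queue->rx_copy.num == COPY_BATCH_SIZE)
			xenvif_rx_copy_flush(queue);

		op = &queue->rx_copy.op[queue->rx_copy.num];

		op->flags = GNTCOPY_dest_gref;
		op->len = len;

		/* Simplified: local source page, addressed by gfn. */
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
		op->source.offset = xen_offset_in_page(data);

		/* Destination is the guest's granted page for this request. */
		op->dest.u.ref = req->gref;
		op->dest.domid = queue->vif->domid;
		op->dest.offset = offset;

		queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
		queue->rx_copy.num++;
	}
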
252 static void xenvif_rx_next_skb(struct xenvif_queue *queue,
258 skb = xenvif_rx_dequeue(queue);
260 queue->stats.tx_bytes += skb->len;
261 queue->stats.tx_packets++;
272 if ((1 << gso_type) & queue->vif->gso_mask) {
287 if (queue->vif->xdp_headroom) {
293 extra->u.xdp.headroom = queue->vif->xdp_headroom;
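
xenvif_rx_next_skb() pops the next packet, charges it to the guest-facing TX stats (this path is the guest's receive, the backend's transmit), and prepares any extra-info slots: a GSO descriptor when the frontend advertised support via gso_mask, and an XDP headroom hint. A sketch; the xenvif_pkt_state fields are reconstructed, not shown in the fragments:

	static void xenvif_rx_next_skb(struct xenvif_queue *queue,
				       struct xenvif_pkt_state *pkt)
	{
		struct sk_buff *skb = xenvif_rx_dequeue(queue);
		unsigned int gso_type = XEN_NETIF_GSO_TYPE_NONE;

		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;

		memset(pkt, 0, sizeof(*pkt));
		pkt->skb = skb;
		pkt->remaining_len = skb->len;

		if (skb_is_gso(skb)) {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		}

		if ((1 << gso_type) & queue->vif->gso_mask) {
			struct xen_netif_extra_info *extra =
				&pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
			extra->u.gso.type = gso_type;
			extra->u.gso.size = skb_shinfo(skb)->gso_size;
			pkt->extra_count++;
		}

		if (queue->vif->xdp_headroom) {
			struct xen_netif_extra_info *extra =
				&pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

			extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
			extra->u.xdp.headroom = queue->vif->xdp_headroom;
			pkt->extra_count++;
		}
	}
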
328 static void xenvif_rx_complete(struct xenvif_queue *queue,
332 queue->rx.rsp_prod_pvt = queue->rx.req_cons;
334 __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
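
Completion is cheap because every consumed request had its response written in place: publishing is just advancing rsp_prod_pvt up to req_cons, and the skb is parked on the completed list until xenvif_rx_copy_flush() frees it:

	static void xenvif_rx_complete(struct xenvif_queue *queue,
				       struct xenvif_pkt_state *pkt)
	{
		/* All responses are ready to be pushed. */
		queue->rx.rsp_prod_pvt = queue->rx.req_cons;

		__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
	}
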
355 static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
393 static void xenvif_rx_data_slot(struct xenvif_queue *queue,
398 unsigned int offset = queue->vif->xdp_headroom;
405 xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
406 xenvif_rx_copy_add(queue, req, offset, data, len);
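
xenvif_rx_data_slot() fills one ring slot (one granted page) from as many packet chunks as fit, reserving the XDP headroom at the front of the page. A trimmed sketch of the copy loop; the loop bound and the response fields are assumptions based on the page/ring layout:

	unsigned int offset = queue->vif->xdp_headroom;

	do {
		size_t len;
		void *data;

		/* Next contiguous chunk of linear/frag data. */
		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;
	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	rsp->id = req->id;
	rsp->status = (s16)offset;	/* bytes placed in this slot */
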
437 static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
461 static void xenvif_rx_skb(struct xenvif_queue *queue)
465 xenvif_rx_next_skb(queue, &pkt);
467 queue->last_rx_time = jiffies;
473 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
474 rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
478 xenvif_rx_extra_slot(queue, &pkt, req, rsp);
480 xenvif_rx_data_slot(queue, &pkt, req, rsp);
482 queue->rx.req_cons++;
486 xenvif_rx_complete(queue, &pkt);
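
xenvif_rx_skb() drives one packet across the ring, consuming a request and writing a response per slot. Extra-info slots must follow the first data slot, hence the slot-0 special case; the loop condition is reconstructed here:

	static void xenvif_rx_skb(struct xenvif_queue *queue)
	{
		struct xenvif_pkt_state pkt;

		xenvif_rx_next_skb(queue, &pkt);

		queue->last_rx_time = jiffies;

		do {
			struct xen_netif_rx_request *req;
			struct xen_netif_rx_response *rsp;

			req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
			rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

			/* Extras go right after the first data slot. */
			if (pkt.slot != 0 && pkt.extra_count != 0)
				xenvif_rx_extra_slot(queue, &pkt, req, rsp);
			else
				xenvif_rx_data_slot(queue, &pkt, req, rsp);

			queue->rx.req_cons++;
			pkt.slot++;
		} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

		xenvif_rx_complete(queue, &pkt);
	}
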
491 static void xenvif_rx_action(struct xenvif_queue *queue)
497 queue->rx_copy.completed = &completed_skbs;
499 while (xenvif_rx_ring_slots_available(queue) &&
500 !skb_queue_empty(&queue->rx_queue) &&
502 xenvif_rx_skb(queue);
507 xenvif_rx_copy_flush(queue);
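
xenvif_rx_action() is the batching loop: keep emitting packets while the frontend has enough slots and the queue is non-empty, capped by a per-call budget, then flush once. The RX_BATCH_SIZE cap and work_done counter are assumed from the truncated loop condition:

	static void xenvif_rx_action(struct xenvif_queue *queue)
	{
		struct sk_buff_head completed_skbs;
		unsigned int work_done = 0;

		__skb_queue_head_init(&completed_skbs);
		queue->rx_copy.completed = &completed_skbs;

		while (xenvif_rx_ring_slots_available(queue) &&
		       !skb_queue_empty(&queue->rx_queue) &&
		       work_done < RX_BATCH_SIZE) {	/* assumed budget */
			xenvif_rx_skb(queue);
			work_done++;
		}

		/* One hypercall covers the whole batch. */
		xenvif_rx_copy_flush(queue);
	}
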
510 static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
514 prod = queue->rx.sring->req_prod;
515 cons = queue->rx.req_cons;
520 static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
522 unsigned int needed = READ_ONCE(queue->rx_slots_needed);
524 return !queue->stalled &&
525 xenvif_rx_queue_slots(queue) < needed &&
527 queue->last_rx_time + queue->vif->stall_timeout);
530 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
532 unsigned int needed = READ_ONCE(queue->rx_slots_needed);
534 return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
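
The three helpers on lines 510-534 implement stall detection: a queue stalls when the frontend has not posted enough slots for the head packet within stall_timeout, and becomes ready again once it has. Sketches; only the return expressions of the last two are shown above, the rest is filled in:

	static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
	{
		RING_IDX prod, cons;

		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		return prod - cons;
	}

	static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
	{
		unsigned int needed = READ_ONCE(queue->rx_slots_needed);

		return !queue->stalled &&
			xenvif_rx_queue_slots(queue) < needed &&
			time_after(jiffies,
				   queue->last_rx_time + queue->vif->stall_timeout);
	}

	static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
	{
		unsigned int needed = READ_ONCE(queue->rx_slots_needed);

		return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
	}
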
537 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
539 return xenvif_rx_ring_slots_available(queue) ||
540 (queue->vif->stall_timeout &&
541 (xenvif_rx_queue_stalled(queue) ||
542 xenvif_rx_queue_ready(queue))) ||
544 queue->vif->disabled;
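
xenvif_have_rx_work() is the kthread's wakeup predicate. The kthread_should_stop() term is assumed for the test_kthread case, which is otherwise unused in the fragments:

	bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
	{
		return xenvif_rx_ring_slots_available(queue) ||
			(queue->vif->stall_timeout &&
			 (xenvif_rx_queue_stalled(queue) ||
			  xenvif_rx_queue_ready(queue))) ||
			(test_kthread && kthread_should_stop()) ||	/* assumed */
			queue->vif->disabled;
	}
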
547 static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
552 skb = skb_peek(&queue->rx_queue);
563 * queue (and not just the head at the beginning). In particular, if
564 * the queue is initially empty an infinite timeout is used and this
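
Per the comment, the timeout tracks only the packet at the head: an empty queue sleeps forever, and expiry of the head wakes the thread to drop it and re-arm for the new head. A sketch assuming the same per-skb expires field as above:

	static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
	{
		struct sk_buff *skb;
		long timeout;

		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			return MAX_SCHEDULE_TIMEOUT;

		timeout = XENVIF_RX_CB(skb)->expires - jiffies;	/* assumed */

		return timeout < 0 ? 0 : timeout;
	}
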
570 static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
574 if (xenvif_have_rx_work(queue, true))
580 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
581 if (xenvif_have_rx_work(queue, true))
584 &queue->eoi_pending) &
586 xen_irq_lateeoi(queue->rx_irq, 0);
588 ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
592 finish_wait(&queue->wq, &wait);
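
xenvif_wait_for_rx_work() is a hand-rolled wait loop so the timeout can be re-derived from the queue head on every pass, and so a pending lateeoi can be delivered before sleeping (otherwise the frontend's irq would stay masked and no further work would arrive). The eoi flag names are assumptions; the fragments only show the atomic on eoi_pending and the xen_irq_lateeoi() call:

	static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
	{
		DEFINE_WAIT(wait);

		if (xenvif_have_rx_work(queue, true))
			return;

		for (;;) {
			long ret;

			prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
			if (xenvif_have_rx_work(queue, true))
				break;
			/* Flag names assumed from the netback eoi scheme. */
			if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
						&queue->eoi_pending) &
			    (NETBK_RX_EOI | NETBK_COMMON_EOI))
				xen_irq_lateeoi(queue->rx_irq, 0);

			ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
			if (!ret)
				break;
		}
		finish_wait(&queue->wq, &wait);
	}
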
595 static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
597 struct xenvif *vif = queue->vif;
599 queue->stalled = true;
601 /* At least one queue has stalled? Disable the carrier. */
610 static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
612 struct xenvif *vif = queue->vif;
614 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
615 queue->stalled = false;
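
The carrier helpers aggregate per-queue stall state into one link state for the vif, per the comment on line 601: the carrier drops when the first queue stalls and returns when the last one recovers. Sketches assuming a stalled_queues counter under vif->lock:

	static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
	{
		struct xenvif *vif = queue->vif;

		queue->stalled = true;

		/* At least one queue has stalled? Disable the carrier. */
		spin_lock(&vif->lock);
		if (vif->stalled_queues++ == 0)		/* assumed counter */
			netif_carrier_off(vif->dev);
		spin_unlock(&vif->lock);
	}

	static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
	{
		struct xenvif *vif = queue->vif;

		queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
		queue->stalled = false;

		/* All queues are ready? Enable the carrier. */
		spin_lock(&vif->lock);
		if (--vif->stalled_queues == 0)		/* assumed counter */
			netif_carrier_on(vif->dev);
		spin_unlock(&vif->lock);
	}
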
628 struct xenvif_queue *queue = data;
629 struct xenvif *vif = queue->vif;
632 xenvif_queue_carrier_on(queue);
635 xenvif_wait_for_rx_work(queue);
645 * associated with queue 0.
647 if (unlikely(vif->disabled && queue->id == 0)) {
652 if (!skb_queue_empty(&queue->rx_queue))
653 xenvif_rx_action(queue);
660 if (xenvif_rx_queue_stalled(queue))
661 xenvif_queue_carrier_off(queue);
662 else if (xenvif_rx_queue_ready(queue))
663 xenvif_queue_carrier_on(queue);
671 xenvif_rx_queue_drop_expired(queue);
677 xenvif_rx_queue_purge(queue);
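
The fragments from line 628 on belong to the per-queue guest-RX kthread, which ties everything above together: wait for work, honour a deferred disable of a rogue frontend (queue 0 only, since the disable cannot happen in softirq context), process the queue, update stall state, drop expired packets, and purge on exit. A reconstruction of the main loop; the exit tests and cond_resched() placement are assumptions:

	int xenvif_kthread_guest_rx(void *data)
	{
		struct xenvif_queue *queue = data;
		struct xenvif *vif = queue->vif;

		if (!vif->stall_timeout)
			xenvif_queue_carrier_on(queue);

		for (;;) {
			xenvif_wait_for_rx_work(queue);

			if (kthread_should_stop())
				break;

			/* Rogue frontend: disable from the thread
			 * associated with queue 0.
			 */
			if (unlikely(vif->disabled && queue->id == 0)) {
				xenvif_carrier_off(vif);
				break;
			}

			if (!skb_queue_empty(&queue->rx_queue))
				xenvif_rx_action(queue);

			if (vif->stall_timeout) {
				if (xenvif_rx_queue_stalled(queue))
					xenvif_queue_carrier_off(queue);
				else if (xenvif_rx_queue_ready(queue))
					xenvif_queue_carrier_on(queue);
			}

			xenvif_rx_queue_drop_expired(queue);

			cond_resched();
		}

		/* Bin any remaining skbs. */
		xenvif_rx_queue_purge(queue);

		return 0;
	}
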