Lines matching refs:sq (references to the send queue, sq, in virtio_net.c)

298 	/* Record whether sq is in reset state. */
381 struct send_queue *sq;
509 static void __free_old_xmit(struct send_queue *sq, bool in_napi,
515 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
670 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
958 static void free_old_xmit(struct send_queue *sq, bool in_napi)
962 __free_old_xmit(sq, in_napi, &stats);
970 u64_stats_update_begin(&sq->stats.syncp);
971 u64_stats_add(&sq->stats.bytes, stats.bytes);
972 u64_stats_add(&sq->stats.packets, stats.packets);
973 u64_stats_update_end(&sq->stats.syncp);
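
The lines at 958-973 show the reclaim path folding per-batch totals into the per-queue counters inside a u64_stats writer section. A minimal sketch of that writer-side pattern follows; the struct layout is an assumption for illustration, only the syncp/packets/bytes field names come from the lines above.

    #include <linux/u64_stats_sync.h>

    /* Illustrative only: per-queue TX counters updated under the
     * u64_stats writer section, mirroring lines 970-973 above.
     */
    struct sq_stats_sketch {
            struct u64_stats_sync syncp;
            u64_stats_t packets;
            u64_stats_t bytes;
    };

    static void sq_stats_account(struct sq_stats_sketch *stats,
                                 u64 packets, u64 bytes)
    {
            u64_stats_update_begin(&stats->syncp);
            u64_stats_add(&stats->packets, packets);
            u64_stats_add(&stats->bytes, bytes);
            u64_stats_update_end(&stats->syncp);
    }
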
988 struct send_queue *sq)
990 bool use_napi = sq->napi.weight;
993 qnum = sq - vi->sq;
1005 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1007 u64_stats_update_begin(&sq->stats.syncp);
1008 u64_stats_inc(&sq->stats.stop);
1009 u64_stats_update_end(&sq->stats.syncp);
1011 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
1012 virtqueue_napi_schedule(&sq->napi, sq->vq);
1013 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1015 free_old_xmit(sq, false);
1016 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1018 u64_stats_update_begin(&sq->stats.syncp);
1019 u64_stats_inc(&sq->stats.wake);
1020 u64_stats_update_end(&sq->stats.syncp);
1021 virtqueue_disable_cb(sq->vq);
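
Lines 988-1021 are the queue-full handling (check_sq_full_and_disable): when fewer than 2 + MAX_SKB_FRAGS descriptors remain, the subqueue is stopped, the stop counter bumped, and a delayed completion callback armed; if arming fails because completions are already pending, either TX NAPI is scheduled or, in the non-NAPI case, old buffers are reclaimed on the spot and the queue re-woken. A sketch of that flow, under the assumption that the unlisted lines stop and start the subqueue with netif_stop_subqueue()/netif_start_subqueue():

    /* Sketch of the stop/wake pattern at lines 1005-1021; sq, dev,
     * qnum and use_napi are assumed to be in scope as in the listing.
     */
    if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
            netif_stop_subqueue(dev, qnum);
            u64_stats_update_begin(&sq->stats.syncp);
            u64_stats_inc(&sq->stats.stop);
            u64_stats_update_end(&sq->stats.syncp);

            if (use_napi) {
                    /* Completions already pending: let TX NAPI clean up. */
                    if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
                            virtqueue_napi_schedule(&sq->napi, sq->vq);
            } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                    /* Reclaim inline and re-wake if space came back. */
                    free_old_xmit(sq, false);
                    if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
                            netif_start_subqueue(dev, qnum);
                            u64_stats_update_begin(&sq->stats.syncp);
                            u64_stats_inc(&sq->stats.wake);
                            u64_stats_update_end(&sq->stats.syncp);
                            virtqueue_disable_cb(sq->vq);
                    }
            }
    }
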
1028 struct send_queue *sq,
1058 sg_init_table(sq->sg, nr_frags + 1);
1059 sg_set_buf(sq->sg, xdpf->data, xdpf->len);
1063 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
1067 err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
1075 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1079 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
1099 v->sq + qp; \
1106 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
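
The comment at 1075-1079 and the macros around 1099/1106 cover how an XDP transmit picks its send queue: with more queue pairs than CPUs each CPU gets a dedicated XDP sq and no lock is needed, otherwise queues are shared and the txq lock serializes access. A rough sketch of that selection; apart from "v->sq + qp", the field names (curr_queue_pairs, xdp_queue_pairs) and the lock calls are assumptions about the unlisted macro body:

    int cpu = smp_processor_id();
    struct netdev_queue *txq;
    unsigned int qp;

    if (v->curr_queue_pairs > nr_cpu_ids) {
            /* One dedicated XDP TX queue per CPU: no lock needed. */
            qp = v->curr_queue_pairs - v->xdp_queue_pairs + cpu;
            txq = netdev_get_tx_queue(v->dev, qp);
            __netif_tx_acquire(txq);
    } else {
            /* Queues shared with the stack: serialize via the txq lock. */
            qp = cpu % v->curr_queue_pairs;
            txq = netdev_get_tx_queue(v->dev, qp);
            __netif_tx_lock(txq, cpu);
    }
    sq = v->sq + qp;
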
1120 struct send_queue *sq;
1133 sq = virtnet_xdp_get_sq(vi);
1141 __free_old_xmit(sq, false, &stats);
1146 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
1152 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1153 check_sq_full_and_disable(vi, dev, sq);
1156 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1160 u64_stats_update_begin(&sq->stats.syncp);
1161 u64_stats_add(&sq->stats.bytes, stats.bytes);
1162 u64_stats_add(&sq->stats.packets, stats.packets);
1163 u64_stats_add(&sq->stats.xdp_tx, n);
1164 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1165 u64_stats_add(&sq->stats.kicks, kicks);
1166 u64_stats_update_end(&sq->stats.syncp);
1168 virtnet_xdp_put_sq(vi, sq);
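
Lines 1120-1168 trace the ndo_xdp_xmit path: grab an sq, reclaim completed buffers, enqueue each frame, optionally stop the queue, kick the device and account the results before releasing the sq. A condensed sketch; the loop structure, the XDP_XMIT_FLUSH check and the local variables are assumptions, while every sq call appears in the listing:

    sq = virtnet_xdp_get_sq(vi);

    /* Free up transmitted frames before queueing new ones. */
    __free_old_xmit(sq, false, &stats);

    for (i = 0; i < n; i++) {
            struct xdp_frame *xdpf = frames[i];

            if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
                    break;
            nxmit++;
    }

    if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
            check_sq_full_and_disable(vi, dev, sq);

    if (flags & XDP_XMIT_FLUSH &&
        virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
            kicks = 1;

    /* Per-queue accounting as at lines 1160-1166, then release. */
    virtnet_xdp_put_sq(vi, sq);
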
2302 struct send_queue *sq = &vi->sq[index];
2305 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2309 if (sq->reset) {
2315 virtqueue_disable_cb(sq->vq);
2316 free_old_xmit(sq, true);
2317 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2319 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
2321 u64_stats_update_begin(&sq->stats.syncp);
2322 u64_stats_inc(&sq->stats.wake);
2323 u64_stats_update_end(&sq->stats.syncp);
2355 struct send_queue *sq;
2380 sq = virtnet_xdp_get_sq(vi);
2381 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2382 u64_stats_update_begin(&sq->stats.syncp);
2383 u64_stats_inc(&sq->stats.kicks);
2384 u64_stats_update_end(&sq->stats.syncp);
2386 virtnet_xdp_put_sq(vi, sq);
2394 virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2415 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2458 struct send_queue *sq = container_of(napi, struct send_queue, napi);
2459 struct virtnet_info *vi = sq->vq->vdev->priv;
2460 unsigned int index = vq2txq(sq->vq);
2473 virtqueue_disable_cb(sq->vq);
2474 free_old_xmit(sq, true);
2476 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
2478 u64_stats_update_begin(&sq->stats.syncp);
2479 u64_stats_inc(&sq->stats.wake);
2480 u64_stats_update_end(&sq->stats.syncp);
2485 opaque = virtqueue_enable_cb_prepare(sq->vq);
2490 virtqueue_disable_cb(sq->vq);
2495 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2498 virtqueue_disable_cb(sq->vq);
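
Lines 2458-2498 are the TX NAPI poll: callbacks stay disabled while completed skbs are reclaimed, the subqueue is re-woken once enough descriptors are free, and the callback is then re-armed with the prepare/poll pair so a completion racing with napi_complete_done() is not lost. A sketch of that re-arm step, condensing the txq locking; napi_complete_done()/napi_schedule() are assumed, the virtqueue calls appear in the listing:

    opaque = virtqueue_enable_cb_prepare(sq->vq);
    if (napi_complete_done(napi, 0)) {
            if (unlikely(virtqueue_poll(sq->vq, opaque))) {
                    /* A completion slipped in after the prepare:
                     * keep callbacks off and poll again.
                     */
                    virtqueue_disable_cb(sq->vq);
                    napi_schedule(napi);
            }
    } else {
            /* Still on the poll list: no callback needed yet. */
            virtqueue_disable_cb(sq->vq);
    }
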
2508 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2512 struct virtnet_info *vi = sq->vq->vdev->priv;
2537 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2540 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2546 sg_set_buf(sq->sg, hdr, hdr_len);
2547 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2552 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
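
Lines 2508-2552 show xmit_skb() building the scatterlist: when the virtio-net header can be pushed into the skb headroom it shares the first sg entry with the packet data, otherwise it gets an entry of its own and the data follows. A sketch of the two layouts; the __skb_push()/__skb_pull() handling and the error checks are filled in from memory of the unlisted lines and should be read as assumptions:

    sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
    if (can_push) {
            /* Header was built in the skb headroom; expose it to the
             * sgvec temporarily, then restore skb->data.
             */
            __skb_push(skb, hdr_len);
            num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
            if (unlikely(num_sg < 0))
                    return num_sg;
            __skb_pull(skb, hdr_len);
    } else {
            /* Header in its own sg entry, packet data after it. */
            sg_set_buf(sq->sg, hdr, hdr_len);
            num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
            if (unlikely(num_sg < 0))
                    return num_sg;
            num_sg++;
    }
    return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
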
2559 struct send_queue *sq = &vi->sq[qnum];
2563 bool use_napi = sq->napi.weight;
2568 virtqueue_disable_cb(sq->vq);
2570 free_old_xmit(sq, false);
2573 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2579 err = xmit_skb(sq, skb);
2599 check_sq_full_and_disable(vi, dev, sq);
2602 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2603 u64_stats_update_begin(&sq->stats.syncp);
2604 u64_stats_inc(&sq->stats.kicks);
2605 u64_stats_update_end(&sq->stats.syncp);
2638 struct send_queue *sq, u32 ring_num)
2644 qindex = sq - vi->sq;
2647 virtnet_napi_tx_disable(&sq->napi);
2656 /* Prevent rx poll from accessing sq. */
2657 sq->reset = true;
2664 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2669 sq->reset = false;
2674 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
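
Lines 2638-2674, together with the reset flag declared at 298 and tested at 2309, show the TX ring resize: TX NAPI goes quiet, sq->reset fences off the rx-side cleantx path, the virtqueue is resized with unused buffers recycled through virtnet_sq_free_unused_buf(), then the flag is cleared and NAPI re-enabled. A sketch of the sequence; the txq locking and the stop/wake calls are assumptions about the unlisted lines:

    qindex = sq - vi->sq;

    virtnet_napi_tx_disable(&sq->napi);
    txq = netdev_get_tx_queue(vi->dev, qindex);

    __netif_tx_lock_bh(txq);
    /* Prevent rx poll from accessing sq (see lines 298 and 2309). */
    sq->reset = true;
    netif_stop_subqueue(vi->dev, qindex);
    __netif_tx_unlock_bh(txq);

    err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);

    __netif_tx_lock_bh(txq);
    sq->reset = false;
    netif_tx_wake_queue(txq);
    __netif_tx_unlock_bh(txq);

    virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
    return err;
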
2801 struct send_queue *sq = &vi->sq[i];
2804 start = u64_stats_fetch_begin(&sq->stats.syncp);
2805 tpackets = u64_stats_read(&sq->stats.packets);
2806 tbytes = u64_stats_read(&sq->stats.bytes);
2807 terrors = u64_stats_read(&sq->stats.tx_timeouts);
2808 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
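
Lines 2801-2808 are the matching reader side of the stats pattern: the fetch_begin/fetch_retry loop repeats the reads until no writer section overlapped, which keeps the 64-bit counters consistent on 32-bit machines. A minimal sketch built only from the calls listed above:

    unsigned int start;
    u64 tpackets, tbytes;

    do {
            start = u64_stats_fetch_begin(&sq->stats.syncp);
            tpackets = u64_stats_read(&sq->stats.packets);
            tbytes   = u64_stats_read(&sq->stats.bytes);
    } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
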
3031 virtqueue_set_affinity(vi->sq[i].vq, NULL);
3068 virtqueue_set_affinity(vi->sq[i].vq, mask);
3177 vi->sq[queue].intr_coal.max_usecs = max_usecs;
3178 vi->sq[queue].intr_coal.max_packets = max_packets;
3191 ring->tx_max_pending = vi->sq[0].vq->num_max;
3193 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
3204 struct send_queue *sq;
3211 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
3220 if (ring->tx_pending > vi->sq[0].vq->num_max)
3225 sq = vi->sq + i;
3228 err = virtnet_tx_resize(vi, sq, ring->tx_pending);
3728 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4172 struct send_queue *sq = &vi->sq[i];
4180 stats_base = (const u8 *)&sq->stats;
4182 start = u64_stats_fetch_begin(&sq->stats.syncp);
4184 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
4247 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
4248 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
4466 vi->sq[queue_number].napi.weight,
4489 vi->sq[queue_number].napi.weight = napi_weight;
4511 if (vi->sq[0].napi.weight)
4532 vi->sq[queue].napi.weight,
4546 vi->sq[queue].napi.weight = napi_weight;
4563 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
4564 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
4571 if (vi->sq[queue].napi.weight)
4754 struct send_queue *sq = &vi->sq[i];
4760 virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
4970 virtnet_napi_tx_disable(&vi->sq[i].napi);
5008 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
5009 &vi->sq[i].napi);
5025 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
5026 &vi->sq[i].napi);
5099 struct send_queue *sq = &priv->sq[txqueue];
5102 u64_stats_update_begin(&sq->stats.syncp);
5103 u64_stats_inc(&sq->stats.tx_timeouts);
5104 u64_stats_update_end(&sq->stats.syncp);
5106 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
5107 txqueue, sq->name, sq->vq->index, sq->vq->name,
5175 __netif_napi_del(&vi->sq[i].napi);
5184 kfree(vi->sq);
5236 struct virtqueue *vq = vi->sq[i].vq;
5324 sprintf(vi->sq[i].name, "output.%u", i);
5326 names[txq2vq(i)] = vi->sq[i].name;
5345 vi->sq[i].vq = vqs[txq2vq(i)];
5374 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
5375 if (!vi->sq)
5386 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
5395 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
5398 u64_stats_init(&vi->sq[i].stats.syncp);
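
Lines 5374-5398 show the per-queue TX setup: the sq array is allocated, each queue gets a TX NAPI instance, its scatterlist is initialised and its stats syncp seeded. A sketch of that loop; the poll function name (virtnet_poll_tx) and the napi_tx/napi_weight module parameters are assumptions, the rest comes from the listed lines:

    vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
    if (!vi->sq)
            return -ENOMEM;

    for (i = 0; i < vi->max_queue_pairs; i++) {
            /* A TX NAPI weight of 0 disables TX NAPI (see lines 4511/4571). */
            netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
                                     virtnet_poll_tx,
                                     napi_tx ? napi_weight : 0);
            sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
            u64_stats_init(&vi->sq[i].stats.syncp);
    }
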
5405 kfree(vi->sq);
5802 if (vi->sq[0].napi.weight)
5811 if (vi->sq[i].napi.weight)
5812 vi->sq[i].intr_coal.max_packets = 1;