Lines matching defs:nvchan in drivers/net/hyperv/netvsc.c (Linux kernel)

323 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
324 int node = cpu_to_node(nvchan->channel->target_cpu);
328 nvchan->mrc.slots = vzalloc_node(size, node);
329 if (!nvchan->mrc.slots)
330 nvchan->mrc.slots = vzalloc(size);
332 return nvchan->mrc.slots ? 0 : -ENOMEM;
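The matches at 323-332 are the body of netvsc_alloc_recv_comp_ring(): the per-channel receive-completion slot array is placed on the NUMA node of the CPU that services the channel, with a graceful fallback to any node. A condensed sketch of the pattern; the size computation from recv_completion_cnt mirrors the driver, but treat the exact sizing as an assumption:

    static int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device,
                                           u32 q_idx)
    {
            struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
            int node = cpu_to_node(nvchan->channel->target_cpu);
            size_t size = net_device->recv_completion_cnt *
                          sizeof(struct recv_comp_data);

            /* Prefer memory local to the channel's target CPU ... */
            nvchan->mrc.slots = vzalloc_node(size, node);
            if (!nvchan->mrc.slots)
                    /* ... but any node beats failing the allocation. */
                    nvchan->mrc.slots = vzalloc(size);

            return nvchan->mrc.slots ? 0 : -ENOMEM;
    }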
438 struct netvsc_channel *nvchan = &net_device->chan_table[i];
440 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
441 if (nvchan->recv_buf == NULL) {
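Lines 438-441 come from the receive-buffer setup path: each channel gets its own recv_section_size staging buffer so that sections can be copied out of the shared host receive buffer before the RNDIS layer parses them. A minimal sketch, assuming the usual per-channel loop and a cleanup label in the caller:

    for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
            struct netvsc_channel *nvchan = &net_device->chan_table[i];

            nvchan->recv_buf = kzalloc(net_device->recv_section_size,
                                       GFP_KERNEL);
            if (nvchan->recv_buf == NULL) {
                    ret = -ENOMEM;
                    goto cleanup;   /* undo earlier allocations */
            }
    }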
1062 struct netvsc_channel * const nvchan =
1064 struct vmbus_channel *out_channel = nvchan->channel;
1119 atomic_inc_return(&nvchan->queue_sends);
1136 atomic_read(&nvchan->queue_sends) < 1 &&
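The cluster at 1062-1136 is the transmit accounting: netvsc_send_pkt() resolves the channel from packet->q_idx and increments queue_sends for every packet accepted by the host, and the completion path reads the counter back to decide when a stopped subqueue may be woken. Both halves, condensed; the wake condition is paraphrased (the real check also considers ring-buffer headroom) and should be treated as an assumption:

    /* Transmit side (netvsc_send_pkt), after a successful vmbus send: */
    if (ret == 0)
            atomic_inc_return(&nvchan->queue_sends);

    /* Completion side: wake the subqueue once in-flight sends drain. */
    struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

    if (netif_tx_queue_stopped(txq) &&
        atomic_read(&nvchan->queue_sends) < 1 &&
        !net_device->tx_disable)
            netif_tx_wake_queue(txq);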
1192 struct netvsc_channel *nvchan;
1204 nvchan = &net_device->chan_table[packet->q_idx];
1216 msdp = &nvchan->msd;
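Lines 1192-1216 belong to netvsc_send(), which batches transmissions: packets bound for the same queue can be accumulated in the per-channel multi-send state (nvchan->msd) and flushed as one host transaction. A sketch of the lookup only; the append-or-flush decision itself is elided:

    struct netvsc_channel *nvchan;
    struct multi_send_data *msdp;

    nvchan = &net_device->chan_table[packet->q_idx];
    msdp = &nvchan->msd;    /* pending, partially filled send section */

    /* If msdp holds a pending packet and the new one fits in the same
     * send section, append it; otherwise flush msdp to the host first.
     */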
1309 struct netvsc_channel *nvchan)
1311 struct multi_recv_comp *mrc = &nvchan->mrc;
1326 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1366 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1367 struct multi_recv_comp *mrc = &nvchan->mrc;
1374 send_recv_completions(ndev, nvdev, nvchan);
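Lines 1309-1374 cover receive-completion batching: completions are staged in the per-channel mrc ring and drained by send_recv_completions(), one VM_PKT_COMP packet per slot. A condensed sketch; the completion-message layout and the busy-statistics bookkeeping are simplified assumptions:

    static int send_recv_completions(struct net_device *ndev,
                                     struct netvsc_device *nvdev,
                                     struct netvsc_channel *nvchan)
    {
            struct multi_recv_comp *mrc = &nvchan->mrc;
            struct recv_comp_msg msg = { /* NVSP completion header, elided */ };
            int ret;

            while (mrc->first != mrc->next) {
                    const struct recv_comp_data *rcd = mrc->slots + mrc->first;

                    msg.status = rcd->status;
                    ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
                                           rcd->tid, VM_PKT_COMP, 0);
                    if (unlikely(ret))
                            return ret;     /* ring full; retry on next pass */

                    if (++mrc->first == nvdev->recv_completion_cnt)
                            mrc->first = 0; /* wrap the ring index */
            }
            return 0;
    }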
1394 struct netvsc_channel *nvchan,
1398 struct vmbus_channel *channel = nvchan->channel;
1460 nvchan->rsc.cnt = 0;
1469 /* We're going to copy (sections of) the packet into nvchan->recv_buf;
1470 * make sure that nvchan->recv_buf is large enough to hold the packet.
1473 nvchan->rsc.cnt = 0;
1484 nvchan->rsc.is_last = (i == count - 1);
1490 nvchan, data, buflen);
1494 nvchan->rsc.cnt = 0;
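The block at 1394-1494 is netvsc_receive() with its receive-side coalescing (RSC) state: rsc.cnt counts the segments gathered so far and is reset to zero whenever a section is oversized or the RNDIS layer rejects it, so a partial packet is never delivered, while rsc.is_last tags the final section of the descriptor. A condensed per-section loop; the offset/length bookkeeping from the host's xfer-page ranges is elided:

    for (i = 0; i < count; i++) {
            void *data = recv_buf + offset;     /* offset, buflen: per range */

            /* Reject sections the per-channel staging buffer can't hold. */
            if (unlikely(buflen > net_device->recv_section_size)) {
                    nvchan->rsc.cnt = 0;        /* drop the coalesced set */
                    status = NVSP_STAT_FAIL;
                    continue;
            }

            nvchan->rsc.is_last = (i == count - 1);

            ret = rndis_filter_receive(ndev, net_device, nvchan, data, buflen);
            if (unlikely(ret != NVSP_STAT_SUCCESS)) {
                    nvchan->rsc.cnt = 0;        /* drop incomplete packet */
                    status = NVSP_STAT_FAIL;
            }
    }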
1603 struct netvsc_channel *nvchan,
1609 struct vmbus_channel *channel = nvchan->channel;
1620 return netvsc_receive(ndev, net_device, nvchan, desc);
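Lines 1603-1620 are netvsc_process_raw_pkt(), the per-descriptor demultiplexer: completions go to the send-completion handler, transfer-page data packets go to netvsc_receive(). Condensed; the inband case and error details are elided:

    switch (desc->type) {
    case VM_PKT_COMP:
            netvsc_send_completion(ndev, net_device, channel, desc, budget);
            break;
    case VM_PKT_DATA_USING_XFER_PAGES:
            return netvsc_receive(ndev, net_device, nvchan, desc);
    default:
            netdev_err(ndev, "unhandled packet type %d\n", desc->type);
            break;
    }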
1648 struct netvsc_channel *nvchan
1650 struct netvsc_device *net_device = nvchan->net_device;
1651 struct vmbus_channel *channel = nvchan->channel;
1658 if (!nvchan->desc)
1659 nvchan->desc = hv_pkt_iter_first(channel);
1661 nvchan->xdp_flush = false;
1663 while (nvchan->desc && work_done < budget) {
1664 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1665 ndev, nvchan->desc, budget);
1666 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1669 if (nvchan->xdp_flush)
1673 ret = send_recv_completions(ndev, net_device, nvchan);
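Lines 1648-1673 form the NAPI poll loop, netvsc_poll(): it resumes from any descriptor left over by the previous poll, walks the VMBus ring with hv_pkt_iter_first()/hv_pkt_iter_next() until the budget is spent, flushes any pending XDP redirects, then pushes out batched receive completions. A condensed sketch; the device/ndev lookups are elided and the interrupt re-arm chain at the end is simplified:

    int netvsc_poll(struct napi_struct *napi, int budget)
    {
            struct netvsc_channel *nvchan =
                    container_of(napi, struct netvsc_channel, napi);
            struct netvsc_device *net_device = nvchan->net_device;
            struct vmbus_channel *channel = nvchan->channel;
            int work_done = 0;

            /* Resume from a descriptor left unfinished by the last poll. */
            if (!nvchan->desc)
                    nvchan->desc = hv_pkt_iter_first(channel);

            nvchan->xdp_flush = false;

            while (nvchan->desc && work_done < budget) {
                    work_done += netvsc_process_raw_pkt(device, nvchan,
                                                        net_device, ndev,
                                                        nvchan->desc, budget);
                    nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
            }

            if (nvchan->xdp_flush)
                    xdp_do_flush();

            /* If the completion flush succeeded and the budget wasn't
             * exhausted, complete NAPI, re-enable host interrupts, and
             * reschedule if more data arrived in the meantime.
             */
            if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
                work_done < budget &&
                napi_complete_done(napi, work_done) &&
                hv_end_read(&channel->inbound) &&
                napi_schedule_prep(napi)) {
                    hv_begin_read(&channel->inbound);
                    __napi_schedule(napi);
            }

            /* May overshoot: a descriptor can carry multiple packets. */
            return min(work_done, budget);
    }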
1698 struct netvsc_channel *nvchan = context;
1699 struct vmbus_channel *channel = nvchan->channel;
1705 if (napi_schedule_prep(&nvchan->napi)) {
1709 __napi_schedule_irqoff(&nvchan->napi);
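Lines 1698-1709 are netvsc_channel_cb(), the VMBus interrupt callback: it does no protocol work itself, only masks further host interrupts and hands the channel to NAPI. A sketch that closely follows the driver:

    static void netvsc_channel_cb(void *context)
    {
            struct netvsc_channel *nvchan = context;
            struct vmbus_channel *channel = nvchan->channel;
            struct hv_ring_buffer_info *rbi = &channel->inbound;

            /* Warm the cache line of the next descriptor to be read. */
            prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

            if (napi_schedule_prep(&nvchan->napi)) {
                    /* Mask host interrupts; netvsc_poll() re-enables them. */
                    hv_begin_read(rbi);

                    __napi_schedule_irqoff(&nvchan->napi);
            }
    }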
1745 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1747 nvchan->channel = device->channel;
1748 nvchan->net_device = net_device;
1749 u64_stats_init(&nvchan->tx_stats.syncp);
1750 u64_stats_init(&nvchan->rx_stats.syncp);
1752 ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1759 ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
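The final cluster, 1745-1759, is the channel-table initialization in the device-add path: every entry starts out pointing at the primary VMBus channel (subchannels are attached later), the per-channel stats seqcounts are initialized, and an XDP RX queue is registered per channel. A sketch, assuming the usual VRSS_CHANNEL_MAX loop and an error label in the caller:

    for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
            struct netvsc_channel *nvchan = &net_device->chan_table[i];

            /* All queues use the primary channel until subchannels open. */
            nvchan->channel = device->channel;
            nvchan->net_device = net_device;
            u64_stats_init(&nvchan->tx_stats.syncp);
            u64_stats_init(&nvchan->rx_stats.syncp);

            ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
            if (ret)
                    goto cleanup;

            ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
                                             MEM_TYPE_PAGE_SHARED, NULL);
            if (ret)
                    goto cleanup;
    }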