Lines Matching defs:net_device

35 int netvsc_switch_datapath(struct net_device *ndev, bool vf)
130 struct netvsc_device *net_device;
132 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
133 if (!net_device)
136 init_waitqueue_head(&net_device->wait_drain);
137 net_device->destroy = false;
138 net_device->tx_disable = true;
140 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
141 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
143 init_completion(&net_device->channel_init_wait);
144 init_waitqueue_head(&net_device->subchan_open);
145 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
147 return net_device;
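
Lines 130-147 above are the body of the netvsc_device allocator (alloc_net_device(), called at line 1725 further down); the matches appear to come from the Hyper-V netvsc driver. Reassembled from these lines, the function looks roughly as follows; the static signature and the bare return on the allocation-failure branch are assumptions, the rest is taken from the matches:

static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;	/* assumed failure path for the branch at line 133 */

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;	/* queuing stays disabled until setup completes */

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}
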
179 struct netvsc_device *net_device,
180 struct net_device *ndev)
191 if (net_device->recv_section_cnt) {
193 revoke_packet = &net_device->revoke_packet;
224 net_device->recv_section_cnt = 0;
229 struct netvsc_device *net_device,
230 struct net_device *ndev)
241 if (net_device->send_section_cnt) {
243 revoke_packet = &net_device->revoke_packet;
275 net_device->send_section_cnt = 0;
280 struct netvsc_device *net_device,
281 struct net_device *ndev)
285 if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
287 &net_device->recv_buf_gpadl_handle);
301 struct netvsc_device *net_device,
302 struct net_device *ndev)
306 if (net_device->send_buf_gpadl_handle.gpadl_handle) {
308 &net_device->send_buf_gpadl_handle);
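
Lines 280-308 are the two GPADL teardown helpers; both follow the same shape, shown here for the receive side. This is a sketch: the vmbus_teardown_gpadl() call and the error message are not in the matches above and are reconstructed from the usual VMBus pattern.

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	/* Only tear down a GPADL that was actually established (line 285) */
	if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   &net_device->recv_buf_gpadl_handle);
		if (ret != 0) {
			/* Better to leak than keep using a mapping the host
			 * may still reference.
			 */
			netdev_err(ndev,
				   "unable to teardown receive buffer gpadl\n");
			return;
		}
	}
}
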
321 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
323 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
327 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
336 struct netvsc_device *net_device,
340 struct net_device *ndev = hv_get_drvdata(device);
350 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
354 net_device->recv_buf = vzalloc(buf_size);
355 if (!net_device->recv_buf) {
363 net_device->recv_buf_size = buf_size;
370 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
372 &net_device->recv_buf_gpadl_handle);
380 init_packet = &net_device->channel_init_pkt;
384 gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
402 wait_for_completion(&net_device->channel_init_wait);
425 net_device->recv_section_size = resp->sections[0].sub_alloc_size;
426 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
429 if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
430 (u64)net_device->recv_section_cnt > (u64)buf_size) {
432 net_device->recv_section_size);
438 struct netvsc_channel *nvchan = &net_device->chan_table[i];
440 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
451 net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
452 ret = netvsc_alloc_recv_comp_ring(net_device, 0);
460 net_device->send_buf = vzalloc(buf_size);
461 if (!net_device->send_buf) {
467 net_device->send_buf_size = buf_size;
473 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
475 &net_device->send_buf_gpadl_handle);
483 init_packet = &net_device->channel_init_pkt;
487 net_device->send_buf_gpadl_handle.gpadl_handle;
504 wait_for_completion(&net_device->channel_init_wait);
518 net_device->send_section_size = init_packet->msg.
520 if (net_device->send_section_size < NETVSC_MTU_MIN) {
522 net_device->send_section_size);
528 net_device->send_section_cnt = buf_size / net_device->send_section_size;
531 net_device->send_section_size, net_device->send_section_cnt);
534 net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
536 if (!net_device->send_section_map) {
544 netvsc_revoke_recv_buf(device, net_device, ndev);
545 netvsc_revoke_send_buf(device, net_device, ndev);
546 netvsc_teardown_recv_gpadl(device, net_device, ndev);
547 netvsc_teardown_send_gpadl(device, net_device, ndev);
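
Lines 336-547 are the buffer initialization path: allocate the receive buffer, register it with the host through a GPADL, tell NetVSP about it, and size the sections from the host's reply, then repeat the same dance for the send buffer. Below is a condensed excerpt-style sketch of the receive half; error handling is elided and the NVSP message and field names not shown in the matches (NVSP_MSG1_TYPE_SEND_RECV_BUF, send_recv_buf_complete, NETVSC_RECEIVE_BUFFER_ID) are best-effort recollections of the hyperv header definitions.

	/* Guest-side allocation, then host registration via GPADL */
	net_device->recv_buf = vzalloc(buf_size);
	net_device->recv_buf_size = buf_size;
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);

	/* Tell NetVSP about the buffer and wait for its completion message */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	wait_for_completion(&net_device->channel_init_wait);

	/* The host dictates the section geometry (lines 425-426) and the
	 * guest sanity-checks it against the buffer it allocated (429-430).
	 */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
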
555 struct netvsc_device *net_device,
559 struct net_device *ndev = hv_get_drvdata(device);
578 wait_for_completion(&net_device->channel_init_wait);
617 struct netvsc_device *net_device,
620 struct net_device *ndev = hv_get_drvdata(device);
629 init_packet = &net_device->channel_init_pkt;
633 if (negotiate_nvsp_ver(device, net_device, init_packet,
635 net_device->nvsp_version = ver_list[i];
644 if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
646 net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
651 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
656 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
680 ret = netvsc_init_buf(device, net_device, device_info);
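
Lines 617-680 are the NVSP connection setup. The negotiation at lines 633-635 walks a version table from newest to oldest and keeps the first version the host accepts; a sketch is below, with the ver_list table and the loop bookkeeping assumed rather than taken from the matches.

	init_packet = &net_device->channel_init_pkt;

	/* Try the newest NVSP protocol first, fall back until one sticks */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	/* Isolated guests require NVSP >= 6.1 (line 644) */
	if (hv_is_isolation_supported() &&
	    net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
		ret = -EPROTO;
		goto cleanup;
	}
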
691 struct net_device *ndev = hv_get_drvdata(device);
693 struct netvsc_device *net_device
701 netvsc_revoke_recv_buf(device, net_device, ndev);
703 netvsc_teardown_recv_gpadl(device, net_device, ndev);
705 netvsc_revoke_send_buf(device, net_device, ndev);
707 netvsc_teardown_send_gpadl(device, net_device, ndev);
712 for (i = 0; i < net_device->num_chn; i++) {
716 napi_disable(&net_device->chan_table[i].napi);
718 netif_napi_del(&net_device->chan_table[i].napi);
722 * At this point, no one should be accessing net_device
735 netvsc_teardown_recv_gpadl(device, net_device, ndev);
736 netvsc_teardown_send_gpadl(device, net_device, ndev);
740 free_netvsc_device_rcu(net_device);
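
In the removal path (lines 691-740) the buffers are revoked before the channel is closed, and the GPADL teardown appears twice (703/707 and again at 735-736): on older hosts the teardown has to happen while the channel is still open, on newer hosts only after it is closed. A sketch of that ordering, with the vmbus_proto_version cutoff recalled from the driver rather than shown above:

	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	/* ... disable and delete NAPI (lines 716/718), then close ... */
	vmbus_close(device->channel);

	/* Newer hosts: GPADL teardown only after the channel is closed */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	free_netvsc_device_rcu(net_device);
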
746 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
749 sync_change_bit(index, net_device->send_section_map);
752 static void netvsc_send_tx_complete(struct net_device *ndev,
753 struct netvsc_device *net_device,
780 netvsc_free_send_slot(net_device, send_index);
783 tx_stats = &net_device->chan_table[q_idx].tx_stats;
795 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
797 if (unlikely(net_device->destroy)) {
799 wake_up(&net_device->wait_drain);
803 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
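
Lines 746-803 cover TX completion: the send-buffer slot is returned, per-queue counters are updated, and either a pending teardown is unblocked or the stopped queue is woken depending on the outstanding-sends count. The wake-up logic around lines 797-803 looks roughly like this; the ring-space check is recalled from the driver, not shown in the matches.

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		/* Device teardown waits for all in-flight sends to drain */
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		   (hv_get_avail_to_write_percent(&channel->outbound) >
		    RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
		netif_tx_wake_queue(txq);
	}
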
812 static void netvsc_send_completion(struct net_device *ndev,
813 struct netvsc_device *net_device,
836 complete(&net_device->channel_init_wait);
907 netvsc_send_tx_complete(ndev, net_device, incoming_channel,
919 memcpy(&net_device->channel_init_pkt, nvsp_packet,
921 complete(&net_device->channel_init_wait);
924 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
926 unsigned long *map_addr = net_device->send_section_map;
929 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
937 static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
945 char *start = net_device->send_buf;
946 char *dest = start + (section_index * net_device->send_section_size)
955 remain = packet->total_data_buflen & (net_device->pkt_align - 1);
957 padding = net_device->pkt_align - remain;
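
Lines 924-957: a free send-buffer section is picked by scanning the section bitmap, and batched packets are padded so each RNDIS message stays aligned within its section. Assuming RNDIS_PKT_ALIGN_DEFAULT is 8 as in the hyperv headers, pkt_align is a power of two and the mask at line 955 is a cheap modulo; a worked sketch, with the rndis_msg bookkeeping assumed:

	/* e.g. a 1514-byte frame with 8-byte alignment:
	 *   remain  = 1514 & (8 - 1) = 2
	 *   padding = 8 - 2          = 6   -> the frame occupies 1520 bytes
	 * The padding is accounted in both the RNDIS message length and
	 * total_data_buflen so guest and host agree on the slot layout.
	 */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}
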
1055 struct netvsc_device *net_device,
1063 &net_device->chan_table[packet->q_idx];
1065 struct net_device *ndev = hv_get_drvdata(device);
1137 !net_device->tx_disable) {
1180 int netvsc_send(struct net_device *ndev,
1188 struct netvsc_device *net_device
1201 if (unlikely(!net_device || net_device->destroy))
1204 nvchan = &net_device->chan_table[packet->q_idx];
1213 return netvsc_send_pkt(device, packet, net_device, pb, skb);
1220 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
1221 if (try_batch && msd_len + pktlen + net_device->pkt_align <
1222 net_device->send_section_size) {
1226 net_device->send_section_size) {
1230 } else if (pktlen + net_device->pkt_align <
1231 net_device->send_section_size) {
1232 section_index = netvsc_get_next_send_section(net_device);
1249 netvsc_copy_to_send_buf(net_device,
1287 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1291 netvsc_free_send_slot(net_device,
1298 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1301 netvsc_free_send_slot(net_device, section_index);
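
The send path (lines 1180-1301) decides between three placements for each packet: append to the current multi-send batch if it still fits, copy only the RNDIS header and let the data ride as page buffers (partial copy), or start a new send-buffer section. A condensed sketch of that decision; the rmsg_size branch and the send_buf_index/cp_partial bookkeeping are recalled from the driver rather than shown above.

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		/* Whole packet fits behind the pending data: reuse section */
		section_index = msdp->pkt->send_buf_index;
	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		/* Only the RNDIS header fits: copy it, data goes as page bufs */
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;
	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		/* Start a fresh section from the bitmap (line 1232) */
		section_index = netvsc_get_next_send_section(net_device);
	}
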
1307 static int send_recv_completions(struct net_device *ndev,
1362 static void enq_receive_complete(struct net_device *ndev,
1392 static int netvsc_receive(struct net_device *ndev,
1393 struct netvsc_device *net_device,
1404 char *recv_buf = net_device->recv_buf;
1458 if (unlikely(offset > net_device->recv_buf_size ||
1459 buflen > net_device->recv_buf_size - offset)) {
1472 if (unlikely(buflen > net_device->recv_section_size)) {
1477 buflen, net_device->recv_section_size);
1489 ret = rndis_filter_receive(ndev, net_device,
1499 enq_receive_complete(ndev, net_device, q_idx,
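
The receive path (lines 1392-1499) walks the transfer-page ranges the host placed in the receive buffer. Before anything is handed to rndis_filter_receive(), each range is validated against the buffer and section sizes (lines 1458-1477) so a misbehaving host cannot make the guest read out of bounds. A sketch of the checks, with the failure bookkeeping simplified:

	/* Range must lie entirely inside the receive buffer ... */
	if (unlikely(offset > net_device->recv_buf_size ||
		     buflen > net_device->recv_buf_size - offset)) {
		status = NVSP_STAT_FAIL;
		continue;
	}

	/* ... and inside a single receive section */
	if (unlikely(buflen > net_device->recv_section_size)) {
		status = NVSP_STAT_FAIL;
		continue;
	}

	/* only now is recv_buf + offset handed to rndis_filter_receive() */
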
1505 static void netvsc_send_table(struct net_device *ndev,
1551 static void netvsc_send_vf(struct net_device *ndev,
1575 static void netvsc_receive_inband(struct net_device *ndev,
1604 struct netvsc_device *net_device,
1605 struct net_device *ndev,
1616 netvsc_send_completion(ndev, net_device, channel, desc, budget);
1620 return netvsc_receive(ndev, net_device, nvchan, desc);
1623 netvsc_receive_inband(ndev, net_device, desc);
1650 struct netvsc_device *net_device = nvchan->net_device;
1653 struct net_device *ndev = hv_get_drvdata(device);
1664 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1673 ret = send_recv_completions(ndev, net_device, nvchan);
1721 struct netvsc_device *net_device;
1722 struct net_device *ndev = hv_get_drvdata(device);
1725 net_device = alloc_net_device();
1726 if (!net_device)
1745 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1748 nvchan->net_device = net_device;
1769 netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1779 netvsc_channel_cb, net_device->chan_table);
1789 napi_enable(&net_device->chan_table[0].napi);
1792 ret = netvsc_connect_vsp(device, net_device, device_info);
1802 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1804 return net_device;
1808 napi_disable(&net_device->chan_table[0].napi);
1814 netif_napi_del(&net_device->chan_table[0].napi);
1817 free_netvsc_device(&net_device->rcu);
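
Finally, the add path (lines 1721-1817) ties it together: allocate the netvsc_device, wire each channel-table entry to its channel, add NAPI for channel 0, open the primary channel with netvsc_channel_cb(), enable NAPI, run the NVSP handshake, and only then publish the device. The tail, including the reverse-order error unwinding, looks roughly like this; the label names are assumed.

	/* Publish only after the handshake succeeded (line 1802) */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	napi_disable(&net_device->chan_table[0].napi);
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
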