Lines matching refs:amt in drivers/net/amt.c (Linux kernel AMT gateway/relay driver)

21 #include <net/amt.h>
22 #include <uapi/linux/amt.h>
123 u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
125 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
200 u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
202 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
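
The matches at 123-125 and 200-202 point to two bucket-selection helpers that feed jhash() output through reciprocal_scale(). A minimal sketch of that pattern, reconstructed from the fragments above (the helper name and the union amt_addr key type are assumptions here):

#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/amt.h>

/* Pick a hash bucket for a source address: jhash() mixes the key with the
 * per-device seed, and reciprocal_scale() maps the 32-bit result onto
 * hash_buckets without a modulo.
 */
static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
{
	u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);

	return reciprocal_scale(hash, tunnel->amt->hash_buckets);
}
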
231 netdev_dbg(snode->gnode->amt->dev,
237 netdev_dbg(snode->gnode->amt->dev,
253 static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
260 dev_put(amt->dev);
265 netdev_dbg(amt->dev, "Leave group %pI4\n",
269 netdev_dbg(amt->dev, "Leave group %pI6\n",
272 for (i = 0; i < amt->hash_buckets; i++)
293 struct amt_dev *amt = gnode->amt;
302 amt_del_group(amt, gnode);
318 struct amt_dev *amt = tunnel->amt;
323 msecs_to_jiffies(amt_gmi(amt)));
351 netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
357 netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
414 struct amt_dev *amt = gnode->amt;
420 buckets = amt->hash_buckets;
443 amt_del_group(amt, gnode);
449 dev_put(amt->dev);
463 static struct amt_group_node *amt_add_group(struct amt_dev *amt,
473 if (tunnel->nr_groups >= amt->max_groups)
477 (sizeof(struct hlist_head) * amt->hash_buckets),
482 gnode->amt = amt;
490 for (i = 0; i < amt->hash_buckets; i++)
498 netdev_dbg(amt->dev, "Join group %pI4\n",
502 netdev_dbg(amt->dev, "Join group %pI6\n",
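
The amt_add_group() matches (463-502) show the group node being allocated together with one hlist_head per hash bucket, after checking the per-tunnel group cap. A sketch of that allocation pattern under stated assumptions (helper name, the trailing "sources[]" array field, and GFP_ATOMIC are not visible in the matches):

#include <linux/slab.h>
#include <net/amt.h>

/* Sketch: allocate a group node whose trailing array holds one source-hash
 * head per bucket, then initialise every bucket before the node is linked.
 */
static struct amt_group_node *amt_alloc_group_node(struct amt_dev *amt)
{
	struct amt_group_node *gnode;
	int i;

	gnode = kzalloc(sizeof(*gnode) +
			(sizeof(struct hlist_head) * amt->hash_buckets),
			GFP_ATOMIC);
	if (unlikely(!gnode))
		return NULL;

	gnode->amt = amt;
	for (i = 0; i < amt->hash_buckets; i++)
		INIT_HLIST_HEAD(&gnode->sources[i]);

	return gnode;
}
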
509 static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
512 int hlen = LL_RESERVED_SPACE(amt->dev);
513 int tlen = amt->dev->needed_tailroom;
524 skb = netdev_alloc_skb_ip_align(amt->dev, len);
553 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
562 ihv3->qqic = amt->qi;
566 ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
580 static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
583 if (validate && amt->status >= status)
585 netdev_dbg(amt->dev, "Update GW status %s -> %s",
586 status_str[amt->status], status_str[status]);
587 WRITE_ONCE(amt->status, status);
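
Lines 580-587 outline the gateway status helper: an optional "never go backwards" check, a debug trace, then a WRITE_ONCE() so lockless readers observe the new state. Reconstructed sketch (status_str[] is assumed to be a parallel array of state names):

static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
				 bool validate)
{
	/* When validating, refuse to step the state machine backwards. */
	if (validate && amt->status >= status)
		return;
	netdev_dbg(amt->dev, "Update GW status %s -> %s",
		   status_str[amt->status], status_str[status]);
	WRITE_ONCE(amt->status, status);
}
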
596 netdev_dbg(tunnel->amt->dev,
611 static void amt_send_discovery(struct amt_dev *amt)
625 sock = rcu_dereference(amt->sock);
629 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
632 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
633 amt->discovery_ip, amt->local_ip,
634 amt->gw_port, amt->relay_port,
636 amt->stream_dev->ifindex);
638 amt->dev->stats.tx_errors++;
642 hlen = LL_RESERVED_SPACE(amt->dev);
643 tlen = amt->dev->needed_tailroom;
645 skb = netdev_alloc_skb_ip_align(amt->dev, len);
648 amt->dev->stats.tx_errors++;
662 amtd->nonce = amt->nonce;
666 udph->source = amt->gw_port;
667 udph->dest = amt->relay_port;
672 udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
683 iph->daddr = amt->discovery_ip;
684 iph->saddr = amt->local_ip;
689 ip_select_ident(amt->net, skb, NULL);
691 err = ip_local_out(amt->net, sock->sk, skb);
693 amt->dev->stats.tx_errors++;
695 amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
700 static void amt_send_request(struct amt_dev *amt, bool v6)
714 sock = rcu_dereference(amt->sock);
718 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
721 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
722 amt->remote_ip, amt->local_ip,
723 amt->gw_port, amt->relay_port,
725 amt->stream_dev->ifindex);
727 amt->dev->stats.tx_errors++;
731 hlen = LL_RESERVED_SPACE(amt->dev);
732 tlen = amt->dev->needed_tailroom;
734 skb = netdev_alloc_skb_ip_align(amt->dev, len);
737 amt->dev->stats.tx_errors++;
753 amtrh->nonce = amt->nonce;
757 udph->source = amt->gw_port;
758 udph->dest = amt->relay_port;
763 udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
774 iph->daddr = amt->remote_ip;
775 iph->saddr = amt->local_ip;
780 ip_select_ident(amt->net, skb, NULL);
782 err = ip_local_out(amt->net, sock->sk, skb);
784 amt->dev->stats.tx_errors++;
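
amt_send_discovery() (611-695) and amt_send_request() (700-784) share one transmit skeleton: resolve a route with ip_route_output_ports(), size the skb from LL_RESERVED_SPACE() plus needed_tailroom, build the UDP/IP headers by hand, and submit via ip_local_out(), bumping tx_errors on any failure. A condensed sketch of that skeleton; the helper name is hypothetical, the header/payload construction is elided, and the tos argument (0) is an assumption:

#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/amt.h>

static void amt_send_ctrl_sketch(struct amt_dev *amt, __be32 daddr)
{
	int hlen = LL_RESERVED_SPACE(amt->dev);
	int tlen = amt->dev->needed_tailroom;
	struct socket *sock;
	struct sk_buff *skb;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock || !netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	/* Route toward the relay over the underlying stream device. */
	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   daddr, amt->local_ip,
				   amt->gw_port, amt->relay_port,
				   IPPROTO_UDP, 0,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	/* Size the skb for link-layer headroom, device tailroom and headers. */
	len = hlen + tlen + sizeof(struct iphdr) + sizeof(struct udphdr);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		amt->dev->stats.tx_errors++;
		ip_rt_put(rt);
		goto out;
	}

	skb_reserve(skb, hlen);
	skb_dst_set(skb, &rt->dst);
	/* ... push the AMT message, UDP and IP headers here ... */

	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;
out:
	rcu_read_unlock();
}
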
790 static void amt_send_igmp_gq(struct amt_dev *amt,
795 skb = amt_build_igmp_gq(amt);
804 static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
808 int hlen = LL_RESERVED_SPACE(amt->dev);
809 int tlen = amt->dev->needed_tailroom;
818 skb = netdev_alloc_skb_ip_align(amt->dev, len);
840 if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
842 amt->dev->stats.tx_errors++;
848 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
861 mld2q->mld2q_qrv = amt->qrv;
863 mld2q->mld2q_qqic = amt->qi;
876 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
880 skb = amt_build_mld_gq(amt);
888 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
893 static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
898 spin_lock_bh(&amt->lock);
899 if (amt->nr_events >= AMT_MAX_EVENTS) {
900 spin_unlock_bh(&amt->lock);
904 index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
905 amt->events[index].event = event;
906 amt->events[index].skb = skb;
907 amt->nr_events++;
908 amt->event_idx %= AMT_MAX_EVENTS;
909 queue_work(amt_wq, &amt->event_wq);
910 spin_unlock_bh(&amt->lock);
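
amt_queue_event() (893-910) is the producer side of a small fixed-size event ring: under amt->lock it refuses new work once AMT_MAX_EVENTS entries are pending, otherwise it stores the event at (event_idx + nr_events) % AMT_MAX_EVENTS and kicks the event workqueue. Reconstructed sketch (the return values are inferred from the callers, which treat a true result as "queue full"):

static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
			    struct sk_buff *skb)
{
	int index;

	spin_lock_bh(&amt->lock);
	if (amt->nr_events >= AMT_MAX_EVENTS) {
		spin_unlock_bh(&amt->lock);
		return true;	/* ring full, caller retries or drops */
	}

	index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
	amt->events[index].event = event;
	amt->events[index].skb = skb;
	amt->nr_events++;
	amt->event_idx %= AMT_MAX_EVENTS;
	queue_work(amt_wq, &amt->event_wq);
	spin_unlock_bh(&amt->lock);

	return false;
}
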
917 struct amt_dev *amt = container_of(to_delayed_work(work),
921 spin_lock_bh(&amt->lock);
922 get_random_bytes(&amt->key, sizeof(siphash_key_t));
923 spin_unlock_bh(&amt->lock);
924 mod_delayed_work(amt_wq, &amt->secret_wq,
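
amt_secret_work() (917-924) periodically re-keys the relay: it regenerates the siphash key under amt->lock and re-arms itself on the shared workqueue. Sketch of that worker; the rekey period constant (AMT_SECRET_TIMEOUT here) is not visible in the matches and is an assumption:

static void amt_secret_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(to_delayed_work(work),
					   struct amt_dev, secret_wq);

	/* Refresh the per-device siphash key used to validate tunnels. */
	spin_lock_bh(&amt->lock);
	get_random_bytes(&amt->key, sizeof(siphash_key_t));
	spin_unlock_bh(&amt->lock);
	mod_delayed_work(amt_wq, &amt->secret_wq,
			 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
}
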
928 static void amt_event_send_discovery(struct amt_dev *amt)
930 if (amt->status > AMT_STATUS_SENT_DISCOVERY)
932 get_random_bytes(&amt->nonce, sizeof(__be32));
934 amt_send_discovery(amt);
936 mod_delayed_work(amt_wq, &amt->discovery_wq,
942 struct amt_dev *amt = container_of(to_delayed_work(work),
946 if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
947 mod_delayed_work(amt_wq, &amt->discovery_wq,
951 static void amt_event_send_request(struct amt_dev *amt)
955 if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
958 if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
959 netdev_dbg(amt->dev, "Gateway is not ready");
960 amt->qi = AMT_INIT_REQ_TIMEOUT;
961 WRITE_ONCE(amt->ready4, false);
962 WRITE_ONCE(amt->ready6, false);
963 amt->remote_ip = 0;
964 amt_update_gw_status(amt, AMT_STATUS_INIT, false);
965 amt->req_cnt = 0;
966 amt->nonce = 0;
970 if (!amt->req_cnt) {
971 WRITE_ONCE(amt->ready4, false);
972 WRITE_ONCE(amt->ready6, false);
973 get_random_bytes(&amt->nonce, sizeof(__be32));
976 amt_send_request(amt, false);
977 amt_send_request(amt, true);
978 amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
979 amt->req_cnt++;
981 exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
982 mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
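
The request path (951-982) retries with exponential backoff: after AMT_MAX_REQ_COUNT failed attempts the gateway state is reset, otherwise the next request is scheduled roughly 2^req_cnt seconds out, capped at AMT_MAX_REQ_TIMEOUT. The backoff computation from lines 981-982, annotated:

	/* Back off exponentially between requests: the delay doubles with each
	 * retry, is clamped to AMT_MAX_REQ_TIMEOUT seconds, and is converted
	 * to jiffies for the delayed work.
	 */
	exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
	mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
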
987 struct amt_dev *amt = container_of(to_delayed_work(work),
991 if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
992 mod_delayed_work(amt_wq, &amt->req_wq,
996 static bool amt_send_membership_update(struct amt_dev *amt,
1007 sock = rcu_dereference_bh(amt->sock);
1011 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
1018 fl4.flowi4_oif = amt->stream_dev->ifindex;
1019 fl4.daddr = amt->remote_ip;
1020 fl4.saddr = amt->local_ip;
1023 rt = ip_route_output_key(amt->net, &fl4);
1025 netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
1033 amtmu->nonce = amt->nonce;
1034 amtmu->response_mac = amt->mac;
1046 amt->gw_port,
1047 amt->relay_port,
1050 amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
1054 static void amt_send_multicast_data(struct amt_dev *amt,
1066 sock = rcu_dereference_bh(amt->sock);
1077 fl4.flowi4_oif = amt->stream_dev->ifindex;
1079 fl4.saddr = amt->local_ip;
1081 rt = ip_route_output_key(amt->net, &fl4);
1083 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1103 amt->relay_port,
1109 static bool amt_send_membership_query(struct amt_dev *amt,
1120 sock = rcu_dereference_bh(amt->sock);
1124 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
1131 fl4.flowi4_oif = amt->stream_dev->ifindex;
1133 fl4.saddr = amt->local_ip;
1136 rt = ip_route_output_key(amt->net, &fl4);
1138 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1161 amt->relay_port,
1171 struct amt_dev *amt = netdev_priv(dev);
1245 if (amt->mode == AMT_MODE_GATEWAY) {
1249 if ((!v6 && !READ_ONCE(amt->ready4)) ||
1250 (v6 && !READ_ONCE(amt->ready6)))
1252 if (amt_send_membership_update(amt, skb, v6))
1255 } else if (amt->mode == AMT_MODE_RELAY) {
1264 if (amt_send_membership_query(amt, skb, tunnel, v6))
1271 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
1288 amt_send_multicast_data(amt, skb, tunnel, v6);
1321 struct amt_dev *amt = tunnel->amt;
1328 for (i = 0; i < amt->hash_buckets; i++)
1330 amt_del_group(amt, gnode);
1340 struct amt_dev *amt = tunnel->amt;
1342 spin_lock_bh(&amt->lock);
1345 amt->nr_tunnels--;
1348 spin_unlock_bh(&amt->lock);
1352 static void amt_cleanup_srcs(struct amt_dev *amt,
1361 for (i = 0; i < amt->hash_buckets; i++) {
1369 for (i = 0; i < amt->hash_buckets; i++) {
1373 netdev_dbg(snode->gnode->amt->dev,
1379 netdev_dbg(snode->gnode->amt->dev,
1388 static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
1414 if (tunnel->nr_sources >= amt->max_sources)
1434 netdev_dbg(snode->gnode->amt->dev,
1440 netdev_dbg(snode->gnode->amt->dev,
1476 struct amt_dev *amt = tunnel->amt;
1519 for (i = 0; i < amt->hash_buckets; i++) {
1542 for (i = 0; i < amt->hash_buckets; i++) {
1587 netdev_dbg(amt->dev, "Invalid type\n");
1592 static void amt_mcast_is_in_handler(struct amt_dev *amt,
1645 static void amt_mcast_is_ex_handler(struct amt_dev *amt,
1674 msecs_to_jiffies(amt_gmi(amt))))
1675 dev_hold(amt->dev);
1703 msecs_to_jiffies(amt_gmi(amt))))
1704 dev_hold(amt->dev);
1709 static void amt_mcast_to_in_handler(struct amt_dev *amt,
1770 static void amt_mcast_to_ex_handler(struct amt_dev *amt,
1800 msecs_to_jiffies(amt_gmi(amt))))
1801 dev_hold(amt->dev);
1830 msecs_to_jiffies(amt_gmi(amt))))
1831 dev_hold(amt->dev);
1836 static void amt_mcast_allow_handler(struct amt_dev *amt,
1881 static void amt_mcast_block_handler(struct amt_dev *amt,
1937 static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1952 gnode = amt_add_group(amt, tunnel, &group, &host, false);
1956 msecs_to_jiffies(amt_gmi(amt))))
1957 dev_hold(amt->dev);
1974 static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
1989 amt_del_group(amt, gnode);
1992 static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2023 gnode = amt_add_group(amt, tunnel, &group, &host,
2029 amt_add_srcs(amt, tunnel, gnode, grec, false);
2032 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2036 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2040 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2044 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2048 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2052 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2058 amt_cleanup_srcs(amt, tunnel, gnode);
2063 static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2070 amt_igmpv3_report_handler(amt, skb, tunnel);
2073 amt_igmpv2_report_handler(amt, skb, tunnel);
2076 amt_igmpv2_leave_handler(amt, skb, tunnel);
2098 static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2111 gnode = amt_add_group(amt, tunnel, &group, &host, true);
2115 msecs_to_jiffies(amt_gmi(amt))))
2116 dev_hold(amt->dev);
2135 static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
2149 amt_del_group(amt, gnode);
2154 static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2185 gnode = amt_add_group(amt, tunnel, &group, &host,
2191 amt_add_srcs(amt, tunnel, gnode, grec, true);
2194 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2198 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2202 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2206 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2210 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2214 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2220 amt_cleanup_srcs(amt, tunnel, gnode);
2225 static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2232 amt_mldv1_report_handler(amt, skb, tunnel);
2235 amt_mldv2_report_handler(amt, skb, tunnel);
2238 amt_mldv1_leave_handler(amt, skb, tunnel);
2246 static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
2266 if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
2267 amt->nonce != amta->nonce)
2270 amt->remote_ip = amta->ip4;
2271 netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
2272 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2274 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
2278 static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
2285 if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
2336 err = gro_cells_receive(&amt->gro_cells, skb);
2338 dev_sw_netstats_rx_add(amt->dev, len);
2340 amt->dev->stats.rx_dropped++;
2345 static bool amt_membership_query_handler(struct amt_dev *amt,
2362 if (amtmq->nonce != amt->nonce)
2379 if (READ_ONCE(amt->ready4))
2392 WRITE_ONCE(amt->ready4, true);
2393 amt->mac = amtmq->response_mac;
2394 amt->req_cnt = 0;
2395 amt->qi = ihv3->qqic;
2404 if (READ_ONCE(amt->ready6))
2418 WRITE_ONCE(amt->ready6, true);
2419 amt->mac = amtmq->response_mac;
2420 amt->req_cnt = 0;
2421 amt->qi = mld2q->mld2q_qqic;
2436 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
2437 dev_sw_netstats_rx_add(amt->dev, len);
2439 amt->dev->stats.rx_dropped++;
2446 static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
2469 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
2474 msecs_to_jiffies(amt_gmi(amt))
2478 netdev_dbg(amt->dev, "Invalid MAC\n");
2493 netdev_dbg(amt->dev, "Invalid IGMP\n");
2498 amt_igmp_report_handler(amt, skb, tunnel);
2512 netdev_dbg(amt->dev, "Invalid MLD\n");
2517 amt_mld_report_handler(amt, skb, tunnel);
2528 netdev_dbg(amt->dev, "Unsupported Protocol\n");
2539 dev_sw_netstats_rx_add(amt->dev, len);
2541 amt->dev->stats.rx_dropped++;
2547 static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
2562 sock = rcu_dereference(amt->sock);
2566 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
2569 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
2570 daddr, amt->local_ip,
2571 dport, amt->relay_port,
2573 amt->stream_dev->ifindex);
2575 amt->dev->stats.tx_errors++;
2579 hlen = LL_RESERVED_SPACE(amt->dev);
2580 tlen = amt->dev->needed_tailroom;
2582 skb = netdev_alloc_skb_ip_align(amt->dev, len);
2585 amt->dev->stats.tx_errors++;
2600 amta->ip4 = amt->local_ip;
2604 udph->source = amt->relay_port;
2610 udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
2622 iph->saddr = amt->local_ip;
2627 ip_select_ident(amt->net, skb, NULL);
2629 err = ip_local_out(amt->net, sock->sk, skb);
2631 amt->dev->stats.tx_errors++;
2637 static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
2653 amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
2658 static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
2678 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
2682 spin_lock_bh(&amt->lock);
2683 if (amt->nr_tunnels >= amt->max_tunnels) {
2684 spin_unlock_bh(&amt->lock);
2690 (sizeof(struct hlist_head) * amt->hash_buckets),
2693 spin_unlock_bh(&amt->lock);
2701 tunnel->amt = amt;
2703 for (i = 0; i < amt->hash_buckets; i++)
2708 list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
2709 tunnel->key = amt->key;
2711 amt->nr_tunnels++;
2713 msecs_to_jiffies(amt_gmi(amt)));
2714 spin_unlock_bh(&amt->lock);
2724 if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
2728 amt_send_igmp_gq(amt, tunnel);
2730 amt_send_mld_gq(amt, tunnel);
2735 static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
2743 if (amt->mode == AMT_MODE_GATEWAY) {
2746 err = amt_advertisement_handler(amt, skb);
2749 err = amt_membership_query_handler(amt, skb);
2754 netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2760 amt->dev->stats.rx_dropped++;
2769 struct amt_dev *amt;
2775 amt = rcu_dereference_sk_user_data(sk);
2776 if (!amt) {
2782 skb->dev = amt->dev;
2790 if (amt->mode == AMT_MODE_GATEWAY) {
2793 if (iph->saddr != amt->discovery_ip) {
2794 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2798 if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
2799 netdev_dbg(amt->dev, "AMT Event queue full\n");
2805 if (iph->saddr != amt->remote_ip) {
2806 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2810 err = amt_multicast_data_handler(amt, skb);
2816 if (iph->saddr != amt->remote_ip) {
2817 netdev_dbg(amt->dev, "Invalid Relay IP\n");
2821 if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
2822 netdev_dbg(amt->dev, "AMT Event queue full\n");
2829 netdev_dbg(amt->dev, "Invalid type of Gateway\n");
2835 err = amt_discovery_handler(amt, skb);
2838 err = amt_request_handler(amt, skb);
2841 err = amt_update_handler(amt, skb);
2848 netdev_dbg(amt->dev, "Invalid type of relay\n");
2854 amt->dev->stats.rx_dropped++;
2866 struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
2872 spin_lock_bh(&amt->lock);
2873 if (amt->nr_events == 0) {
2874 spin_unlock_bh(&amt->lock);
2877 event = amt->events[amt->event_idx].event;
2878 skb = amt->events[amt->event_idx].skb;
2879 amt->events[amt->event_idx].event = AMT_EVENT_NONE;
2880 amt->events[amt->event_idx].skb = NULL;
2881 amt->nr_events--;
2882 amt->event_idx++;
2883 amt->event_idx %= AMT_MAX_EVENTS;
2884 spin_unlock_bh(&amt->lock);
2888 amt_gw_rcv(amt, skb);
2891 amt_event_send_discovery(amt);
2894 amt_event_send_request(amt);
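
amt_event_work() (2866-2894) is the consumer of the same ring: it pops one entry at a time under amt->lock, clears the slot, advances event_idx modulo AMT_MAX_EVENTS, and only then dispatches on the event type outside the lock. Reconstructed sketch (the loop bound and the default-case skb handling are assumptions):

static void amt_event_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
	struct sk_buff *skb;
	enum amt_event event;
	int i;

	for (i = 0; i < AMT_MAX_EVENTS; i++) {
		spin_lock_bh(&amt->lock);
		if (amt->nr_events == 0) {
			spin_unlock_bh(&amt->lock);
			return;
		}
		/* Pop the oldest entry and clear its slot. */
		event = amt->events[amt->event_idx].event;
		skb = amt->events[amt->event_idx].skb;
		amt->events[amt->event_idx].event = AMT_EVENT_NONE;
		amt->events[amt->event_idx].skb = NULL;
		amt->nr_events--;
		amt->event_idx++;
		amt->event_idx %= AMT_MAX_EVENTS;
		spin_unlock_bh(&amt->lock);

		switch (event) {
		case AMT_EVENT_RECEIVE:
			amt_gw_rcv(amt, skb);
			break;
		case AMT_EVENT_SEND_DISCOVERY:
			amt_event_send_discovery(amt);
			break;
		case AMT_EVENT_SEND_REQUEST:
			amt_event_send_request(amt);
			break;
		default:
			if (skb)
				kfree_skb(skb);
			break;
		}
	}
}
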
2905 struct amt_dev *amt;
2909 amt = rcu_dereference_sk_user_data(sk);
2910 if (!amt)
2913 if (amt->mode != AMT_MODE_GATEWAY)
2920 netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n",
2927 if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
2928 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2938 amt->dev->stats.rx_dropped++;
2961 static int amt_socket_create(struct amt_dev *amt)
2966 sock = amt_create_sock(amt->net, amt->relay_port);
2972 tunnel_cfg.sk_user_data = amt;
2977 setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
2979 rcu_assign_pointer(amt->sock, sock);
2985 struct amt_dev *amt = netdev_priv(dev);
2988 amt->ready4 = false;
2989 amt->ready6 = false;
2990 amt->event_idx = 0;
2991 amt->nr_events = 0;
2993 err = amt_socket_create(amt);
2997 amt->req_cnt = 0;
2998 amt->remote_ip = 0;
2999 amt->nonce = 0;
3000 get_random_bytes(&amt->key, sizeof(siphash_key_t));
3002 amt->status = AMT_STATUS_INIT;
3003 if (amt->mode == AMT_MODE_GATEWAY) {
3004 mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
3005 mod_delayed_work(amt_wq, &amt->req_wq, 0);
3006 } else if (amt->mode == AMT_MODE_RELAY) {
3007 mod_delayed_work(amt_wq, &amt->secret_wq,
3015 struct amt_dev *amt = netdev_priv(dev);
3021 cancel_delayed_work_sync(&amt->req_wq);
3022 cancel_delayed_work_sync(&amt->discovery_wq);
3023 cancel_delayed_work_sync(&amt->secret_wq);
3026 sock = rtnl_dereference(amt->sock);
3027 RCU_INIT_POINTER(amt->sock, NULL);
3032 cancel_work_sync(&amt->event_wq);
3034 skb = amt->events[i].skb;
3036 amt->events[i].event = AMT_EVENT_NONE;
3037 amt->events[i].skb = NULL;
3040 amt->ready4 = false;
3041 amt->ready6 = false;
3042 amt->req_cnt = 0;
3043 amt->remote_ip = 0;
3045 list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
3047 amt->nr_tunnels--;
3057 .name = "amt",
3062 struct amt_dev *amt = netdev_priv(dev);
3065 amt->dev = dev;
3067 err = gro_cells_init(&amt->gro_cells, dev);
3076 struct amt_dev *amt = netdev_priv(dev);
3078 gro_cells_destroy(&amt->gro_cells);
3168 struct amt_dev *amt = netdev_priv(dev);
3171 amt->net = net;
3172 amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
3176 amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
3178 amt->max_tunnels = AMT_MAX_TUNNELS;
3180 spin_lock_init(&amt->lock);
3181 amt->max_groups = AMT_MAX_GROUP;
3182 amt->max_sources = AMT_MAX_SOURCE;
3183 amt->hash_buckets = AMT_HSIZE;
3184 amt->nr_tunnels = 0;
3185 get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
3186 amt->stream_dev = dev_get_by_index(net,
3188 if (!amt->stream_dev) {
3194 if (amt->stream_dev->type != ARPHRD_ETHER) {
3200 amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
3201 if (ipv4_is_loopback(amt->local_ip) ||
3202 ipv4_is_zeronet(amt->local_ip) ||
3203 ipv4_is_multicast(amt->local_ip)) {
3210 amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
3212 amt->relay_port = htons(IANA_AMT_UDP_PORT);
3215 amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
3217 amt->gw_port = htons(IANA_AMT_UDP_PORT);
3219 if (!amt->relay_port) {
3224 if (amt->mode == AMT_MODE_RELAY) {
3225 amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
3226 amt->qri = 10;
3227 dev->needed_headroom = amt->stream_dev->needed_headroom +
3229 dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
3238 if (!amt->gw_port) {
3243 amt->remote_ip = 0;
3244 amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
3245 if (ipv4_is_loopback(amt->discovery_ip) ||
3246 ipv4_is_zeronet(amt->discovery_ip) ||
3247 ipv4_is_multicast(amt->discovery_ip)) {
3253 dev->needed_headroom = amt->stream_dev->needed_headroom +
3255 dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
3259 amt->qi = AMT_INIT_QUERY_INTERVAL;
3267 err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
3273 INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
3274 INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
3275 INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
3276 INIT_WORK(&amt->event_wq, amt_event_work);
3277 INIT_LIST_HEAD(&amt->tunnel_list);
3280 dev_put(amt->stream_dev);
3286 struct amt_dev *amt = netdev_priv(dev);
3289 netdev_upper_dev_unlink(amt->stream_dev, dev);
3290 dev_put(amt->stream_dev);
3307 struct amt_dev *amt = netdev_priv(dev);
3309 if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
3311 if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
3313 if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
3315 if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
3317 if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
3319 if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
3321 if (amt->remote_ip)
3322 if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
3324 if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
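
amt_fill_info() (3307-3324) dumps the device configuration back over netlink; each nla_put_*() failure bails out to a nla_put_failure label, and IFLA_AMT_REMOTE_IP is only emitted once a relay address has actually been learned. Sketch assembled from the matches above (the -EMSGSIZE tail is the standard rtnl_link_ops pattern and is assumed here):

static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
		goto nla_put_failure;
	if (amt->remote_ip)
		if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
			goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
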
3334 .kind = "amt",
3349 struct amt_dev *amt;
3353 amt = netdev_priv(upper_dev);
3354 if (amt->stream_dev == dev)
3367 struct amt_dev *amt;
3374 amt = netdev_priv(upper_dev);
3378 amt_dellink(amt->dev, &list);
3382 if (amt->mode == AMT_MODE_RELAY)
3387 dev_set_mtu(amt->dev, new_mtu);
3410 amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
3448 MODULE_ALIAS_RTNL_LINK("amt");