Lines matching refs:skb in net/hsr/hsr_forward.c

35 static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
43 WARN_ON_ONCE(!skb_mac_header_was_set(skb));
44 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
59 if (!pskb_may_pull(skb, total_length))
62 hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
69 if (!pskb_may_pull(skb, total_length))
73 &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
87 if (!pskb_may_pull(skb, total_length))
89 skb_pull(skb, total_length);
90 hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
91 skb_push(skb, total_length);
103 if (!pskb_may_pull(skb, total_length))
107 skb_pull(skb, total_length);
108 hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
109 skb_push(skb, total_length);
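
The is_supervision_frame() lines above show the kernel's bounds-checked TLV walk: pskb_may_pull() guarantees the next type/length header is in linear data, skb_pull()/skb_push() temporarily shift skb->data so each hsr_sup_tlv can be read in place, and the offset is restored afterwards. A minimal user-space sketch of the same walk over a flat byte buffer; struct sup_tlv mirrors the kernel's two-byte hsr_sup_tlv header, and the buffer contents are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two-byte type/length header, as in the kernel's struct hsr_sup_tlv. */
struct sup_tlv {
	uint8_t type;
	uint8_t length;
};

/* Walk type/length/value entries the way is_supervision_frame() does:
 * never read a header or its payload without first checking that the
 * buffer still holds that many bytes (the pskb_may_pull() equivalent). */
static void walk_sup_tlvs(const uint8_t *buf, size_t len)
{
	size_t off = 0;
	struct sup_tlv tlv;

	while (off + sizeof(tlv) <= len) {
		memcpy(&tlv, buf + off, sizeof(tlv));
		if (tlv.type == 0)		/* type 0 ends the TLV list */
			break;
		if (off + sizeof(tlv) + tlv.length > len)
			break;			/* truncated frame: stop early */
		printf("TLV type %d, length %d\n", tlv.type, tlv.length);
		off += sizeof(tlv) + tlv.length;
	}
}

int main(void)
{
	/* Hypothetical supervision payload: one TLV with a 6-byte
	 * (MAC-address-sized) value, then an end-of-list marker. */
	const uint8_t buf[] = { 23, 6, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0, 0 };

	walk_sup_tlvs(buf, sizeof(buf));
	return 0;
}
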
123 struct sk_buff *skb;
128 skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
130 if (!skb)
133 skb_reset_mac_header(skb);
135 if (skb->ip_summed == CHECKSUM_PARTIAL)
136 skb->csum_start -= HSR_HLEN;
142 dst = skb_mac_header(skb);
145 skb->protocol = eth_hdr(skb)->h_proto;
146 return skb;
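
The group above is create_stripped_skb_hsr(): the frame is cloned with __pskb_copy(), the 6-byte HSR tag is dropped by pulling HSR_HLEN and copying the Ethernet addresses forward, and csum_start is moved back so CHECKSUM_PARTIAL offsets still point at the same payload byte. A flat-buffer sketch of the same strip for the non-VLAN case; strip_hsr_tag() is an illustrative name, not a kernel helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HSR_HLEN	6	/* size of the HSR tag, as in the kernel */

/* Strip the HSR tag from a flat, non-VLAN frame:
 *   [dst][src][0x892F][path/LSDU][seq][encap proto][payload ...]
 * becomes
 *   [dst][src][encap proto][payload ...]
 * Returns the new length, or 0 if the frame is too short.
 * (Illustrative helper, not a kernel API.) */
static size_t strip_hsr_tag(uint8_t *frame, size_t len)
{
	if (len < 14 + HSR_HLEN)
		return 0;

	/* Drop the six bytes between the source MAC and the encapsulated
	 * EtherType; the result is the same frame create_stripped_skb_hsr()
	 * builds by pulling HSR_HLEN and re-copying the address bytes. */
	memmove(frame + 12, frame + 12 + HSR_HLEN, len - (12 + HSR_HLEN));
	return len - HSR_HLEN;
}

int main(void)
{
	/* Hypothetical HSR-tagged frame carrying EtherType 0x0800 (IPv4). */
	uint8_t frame[64] = {
		/* dst */ 1, 2, 3, 4, 5, 6, /* src */ 7, 8, 9, 10, 11, 12,
		0x89, 0x2f,	/* ETH_P_HSR */
		0x10, 0x40,	/* path id + LSDU size (example values) */
		0x00, 0x2a,	/* sequence number 42 */
		0x08, 0x00,	/* encapsulated EtherType: IPv4 */
	};
	size_t len = strip_hsr_tag(frame, sizeof(frame));

	printf("stripped length %zu, EtherType now %02x%02x\n",
	       len, frame[12], frame[13]);
	return 0;
}

In the kernel the original skb is pulled before the copy and pushed back afterwards, so the tagged skb stays available for forwarding to the other ring port.
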
172 /* trim the skb by len - HSR_HLEN to exclude RCT */
206 static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
214 if (!skb)
215 return skb;
220 if (skb_put_padto(skb, min_size))
223 trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
224 lsdu_size = skb->len - 14;
231 skb->protocol = eth_hdr(skb)->h_proto;
233 return skb;
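
prp_fill_rct() pads the frame to a minimum size with skb_put_padto(), appends the 6-byte PRP Redundancy Control Trailer with skb_put(), and derives the LSDU size as skb->len - 14, i.e. everything after the Ethernet header with the trailer included (minus another 4 bytes for VLAN). A sketch of packing that trailer; the field widths (16-bit sequence number, 4-bit LAN id, 12-bit LSDU size, 16-bit 0x88FB suffix) follow the standard PRP RCT, and prp_pack_rct() is an illustrative name:

#include <stdint.h>
#include <stdio.h>

#define PRP_SUFFIX	0x88fb	/* marks the end of a PRP RCT */

/* Pack a 6-byte PRP Redundancy Control Trailer:
 *   [ sequence nr (16) | LAN id (4) + LSDU size (12) | 0x88FB (16) ]
 * All fields are big-endian on the wire. Illustrative helper only. */
static void prp_pack_rct(uint8_t rct[6], uint16_t seq,
			 uint8_t lan_id, uint16_t lsdu_size)
{
	uint16_t lan_and_size = (uint16_t)(((lan_id & 0xf) << 12) |
					   (lsdu_size & 0x0fff));

	rct[0] = seq >> 8;
	rct[1] = seq & 0xff;
	rct[2] = lan_and_size >> 8;
	rct[3] = lan_and_size & 0xff;
	rct[4] = PRP_SUFFIX >> 8;
	rct[5] = PRP_SUFFIX & 0xff;
}

int main(void)
{
	uint8_t rct[6];

	/* 60-byte frame sent on LAN A (LAN id 0xA): the LSDU size is the
	 * frame length minus the 14-byte Ethernet header, RCT included,
	 * mirroring "lsdu_size = skb->len - 14" above. */
	prp_pack_rct(rct, 42, 0xa, 60 - 14);

	for (int i = 0; i < 6; i++)
		printf("%02x ", rct[i]);
	printf("\n");
	return 0;
}

Because the trailer is appended after padding, the RCT always occupies the last six bytes of the frame, which is also where skb_get_PRP_rct() looks for the 0x88FB suffix on receive; a node that does not speak PRP simply sees six extra payload bytes.
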
249 static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
257 if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
260 lsdu_size = skb->len - 14;
264 hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
272 skb->protocol = hsr_ethhdr->ethhdr.h_proto;
274 return skb;
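
hsr_fill_tag() is the HSR-side counterpart: pad to ETH_ZLEN + HSR_HLEN, compute the LSDU size from skb->len - 14, and write the three 16-bit tag words through the hsr_ethhdr overlay while skb->protocol is updated. A sketch of that 6-byte tag; the 4-bit path id / 12-bit LSDU size split, the big-endian order and the encapsulated-EtherType field follow the standard HSR tag, and hsr_pack_tag() is an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* Pack the 6-byte HSR tag that follows EtherType 0x892F:
 *   [ path id (4) + LSDU size (12) | sequence nr (16) | encap EtherType (16) ]
 * Fields are big-endian on the wire. Illustrative helper only. */
static void hsr_pack_tag(uint8_t tag[6], uint8_t path_id, uint16_t lsdu_size,
			 uint16_t seq, uint16_t encap_proto)
{
	uint16_t path_and_size = (uint16_t)(((path_id & 0xf) << 12) |
					    (lsdu_size & 0x0fff));

	tag[0] = path_and_size >> 8;
	tag[1] = path_and_size & 0xff;
	tag[2] = seq >> 8;
	tag[3] = seq & 0xff;
	tag[4] = encap_proto >> 8;
	tag[5] = encap_proto & 0xff;
}

int main(void)
{
	uint8_t tag[6];

	/* 66-byte tagged frame (the ETH_ZLEN + HSR_HLEN minimum): the LSDU
	 * size mirrors "lsdu_size = skb->len - 14" in hsr_fill_tag(). */
	hsr_pack_tag(tag, 0, 66 - 14, 42, 0x0800);

	for (int i = 0; i < 6; i++)
		printf("%02x ", tag[i]);
	printf("\n");
	return 0;
}
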
284 struct sk_buff *skb;
298 /* Create the new skb with enough headroom to fit the HSR tag */
299 skb = __pskb_copy(frame->skb_std,
301 if (!skb)
303 skb_reset_mac_header(skb);
305 if (skb->ip_summed == CHECKSUM_PARTIAL)
306 skb->csum_start += HSR_HLEN;
312 src = skb_mac_header(skb);
313 dst = skb_push(skb, HSR_HLEN);
315 skb_reset_mac_header(skb);
317 /* skb_put_padto free skb on error and hsr_fill_tag returns NULL in
320 return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
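
hsr_create_tagged_frame() makes room for that tag: the frame is copied with HSR_HLEN of extra headroom, csum_start is bumped by HSR_HLEN, skb_push() opens six bytes, and the Ethernet addresses are moved up so the tag slot sits right behind them before hsr_fill_tag() fills it in. A flat-buffer sketch of the insertion for the non-VLAN case; here the spare room sits at the tail rather than in skb headroom, and insert_hsr_tag() plus the example frame are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HSR_HLEN	6

/* Insert an HSR tag into a flat, non-VLAN frame:
 *   [dst][src][EtherType][payload]  ->  [dst][src][0x892F][tag][EtherType][payload]
 * `frame` must have at least HSR_HLEN spare bytes beyond `len`; the kernel
 * gets this room from the extra headroom passed to __pskb_copy() and shifts
 * the addresses down with skb_push() instead. Returns the new length.
 * Illustrative helper only. */
static size_t insert_hsr_tag(uint8_t *frame, size_t len)
{
	/* Open a 6-byte gap behind the 12 address bytes; the original
	 * EtherType slides into the tag's encap_proto position. */
	memmove(frame + 12 + HSR_HLEN, frame + 12, len - 12);
	frame[12] = 0x89;		/* ETH_P_HSR */
	frame[13] = 0x2f;
	/* Path id + LSDU size and the sequence number would be packed here
	 * exactly as in the hsr_pack_tag() sketch above. */
	memset(frame + 14, 0, 4);
	return len + HSR_HLEN;
}

int main(void)
{
	uint8_t frame[128] = {
		/* dst */ 1, 2, 3, 4, 5, 6, /* src */ 7, 8, 9, 10, 11, 12,
		0x08, 0x00,		/* IPv4 */
	};
	size_t len = insert_hsr_tag(frame, 60);

	printf("tagged length %zu, EtherType %02x%02x, encap %02x%02x\n",
	       len, frame[12], frame[13], frame[18], frame[19]);
	return 0;
}

Note that the two csum_start adjustments in the listing go in opposite directions (+= HSR_HLEN at line 306, -= HSR_HLEN at line 136): the copy's headroom changes by one tag length, and the adjustment keeps a CHECKSUM_PARTIAL start pointing at the same payload byte.
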
326 struct sk_buff *skb;
334 WARN_ONCE(!trailer, "errored PRP skb");
342 skb = skb_copy_expand(frame->skb_std, 0,
345 return prp_fill_rct(skb, frame, port);
348 static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
354 was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
355 hsr_addr_subst_source(node_src, skb);
356 skb_pull(skb, ETH_HLEN);
357 recv_len = skb->len;
358 res = netif_rx(skb);
369 static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
373 hsr_addr_subst_dest(frame->node_src, skb, port);
378 ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
386 ether_addr_copy(eth_hdr(skb)->h_source,
389 return dev_queue_xmit(skb);
402 struct sk_buff *skb;
417 skb = frame->skb_hsr;
418 if (skb && prp_drop_frame(frame, port) &&
419 is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
421 eth_hdr(skb)->h_dest)) {
431 skb = frame->skb_hsr;
432 if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
434 eth_hdr(skb)->h_dest)) {
445 skb = frame->skb_std;
446 if (skb && is_unicast_ether_addr(eth_hdr(skb)->h_dest) &&
448 eth_hdr(skb)->h_dest)) {
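
Each of the three unicast checks above looks up the destination MAC (eth_hdr(skb)->h_dest) in one of the HSR node tables before letting the frame cross between the ring ports and the interlink port; this appears to be the RedBox (HSR-SAN) forwarding restriction added in recent kernels. A self-contained sketch of that kind of gate, with a linear-scan MAC table standing in for the kernel's node lists; every name below is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Trivial stand-in for a per-instance HSR node table. */
struct mac_table {
	const uint8_t (*entries)[ETH_ALEN];
	size_t count;
};

static bool is_unicast(const uint8_t *mac)
{
	return !(mac[0] & 0x01);	/* group bit clear => unicast */
}

static bool node_in_table(const struct mac_table *tbl, const uint8_t *mac)
{
	for (size_t i = 0; i < tbl->count; i++)
		if (!memcmp(tbl->entries[i], mac, ETH_ALEN))
			return true;
	return false;
}

/* Drop a unicast frame on this egress path when its destination is already
 * known to live behind the other side, mirroring the shape of the checks
 * in the listing: is_unicast_ether_addr() plus a node-table lookup. */
static bool drop_unicast_to_known_node(const struct mac_table *other_side,
				       const uint8_t *dest)
{
	return is_unicast(dest) && node_in_table(other_side, dest);
}

int main(void)
{
	static const uint8_t known_nodes[][ETH_ALEN] = {
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	const struct mac_table table = { known_nodes, 1 };
	const uint8_t dest[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("drop: %d\n", drop_unicast_to_known_node(&table, dest));
	return 0;
}
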
471 struct sk_buff *skb;
516 skb = hsr->proto_ops->create_tagged_frame(frame, port);
518 skb = hsr->proto_ops->get_untagged_frame(frame, port);
520 if (!skb) {
525 skb->dev = port->dev;
527 hsr_deliver_master(skb, port->dev, frame->node_src);
529 if (!hsr_xmit(skb, port, frame))
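
hsr_forward_do() then builds the right representation per outgoing port through the protocol ops: non-master ports get create_tagged_frame(), the master port gets get_untagged_frame(), and the result is either handed up via hsr_deliver_master() or sent with hsr_xmit(). A toy sketch of that ops-table dispatch; the struct, the port enum and the strings are all illustrative:

#include <stdio.h>

/* Port roles, loosely modelled on the kernel's: one master (the hsrN
 * device the stack sees) plus the ring slave ports. */
enum port_type { PT_MASTER, PT_SLAVE_A, PT_SLAVE_B };

struct frame { int seq; };

/* Stand-in for hsr->proto_ops: each protocol variant supplies its own way
 * of producing a tagged copy and an untagged copy of the frame. */
struct proto_ops {
	const char *(*create_tagged)(const struct frame *f);
	const char *(*get_untagged)(const struct frame *f);
};

static const char *make_tagged(const struct frame *f)   { (void)f; return "tagged copy -> xmit"; }
static const char *make_untagged(const struct frame *f) { (void)f; return "untagged copy -> local stack"; }

static void forward_do(const struct proto_ops *ops, const struct frame *f)
{
	static const enum port_type ports[] = { PT_MASTER, PT_SLAVE_A, PT_SLAVE_B };

	for (size_t i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
		/* Master port: deliver an untagged frame up the local stack.
		 * Slave ports: transmit a tagged copy onto the ring. */
		const char *what = ports[i] == PT_MASTER ?
				   ops->get_untagged(f) : ops->create_tagged(f);

		printf("port %zu: %s (seq %d)\n", i, what, f->seq);
	}
}

int main(void)
{
	const struct proto_ops ops = { make_tagged, make_untagged };
	const struct frame f = { 42 };

	forward_do(&ops, &f);
	return 0;
}
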
537 static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
540 if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
542 skb->pkt_type = PACKET_HOST;
547 if (skb->pkt_type == PACKET_HOST ||
548 skb->pkt_type == PACKET_MULTICAST ||
549 skb->pkt_type == PACKET_BROADCAST) {
556 static void handle_std_frame(struct sk_buff *skb,
564 frame->skb_std = skb;
578 int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
587 /* Check if skb contains hsr_ethhdr */
588 if (skb->mac_len < sizeof(struct hsr_ethhdr))
594 frame->skb_hsr = skb;
595 frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
600 handle_std_frame(skb, frame);
605 int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
609 struct prp_rct *rct = skb_get_PRP_rct(skb);
612 prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
615 frame->skb_prp = skb;
619 handle_std_frame(skb, frame);
625 struct sk_buff *skb, struct hsr_port *port)
634 /* Check if skb contains ethhdr */
635 if (skb->mac_len < sizeof(struct ethhdr))
639 frame->is_supervision = is_supervision_frame(port->hsr, skb);
645 frame->node_src = hsr_get_node(port, n_db, skb,
650 ethhdr = (struct ethhdr *)skb_mac_header(skb);
661 netdev_warn_once(skb->dev, "VLAN not yet supported");
667 ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
671 check_local_dest(port->hsr, skb, frame);
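
fill_frame_info() validates the header, flags supervision frames, resolves the source node, and leaves the tagged-or-not decision to the protocol hook (hsr_fill_frame_info() or prp_fill_frame_info()): an ETH_P_HSR (0x892F) EtherType means the skb already carries an HSR tag, a 0x88FB suffix in the frame's last two bytes marks a PRP RCT, and everything else is stored as skb_std via handle_std_frame(). A condensed user-space classifier along those lines; it folds the HSR and PRP checks into one function, skips the LSDU-size validation done by prp_check_lsdu_size(), and all names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_HSR	0x892f
#define ETH_P_8021Q	0x8100
#define ETH_P_PRP	0x88fb	/* also used as the PRP RCT suffix */

enum frame_kind {
	FRAME_STD,
	FRAME_HSR_TAGGED,
	FRAME_PRP_RCT,
	FRAME_VLAN_UNSUPPORTED,
};

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Classify a flat Ethernet frame roughly the way the fill_frame_info hooks
 * do: 0x892F means an HSR tag is present, a 0x88FB suffix at the tail marks
 * a PRP Redundancy Control Trailer, anything else is a standard frame.
 * VLAN frames are flagged separately, matching the "VLAN not yet supported"
 * warning above. A real receiver also checks the RCT's LSDU size against
 * the frame length (prp_check_lsdu_size()). */
static enum frame_kind classify(const uint8_t *frame, size_t len)
{
	uint16_t proto;

	if (len < 14)
		return FRAME_STD;
	proto = get_be16(frame + 12);

	if (proto == ETH_P_8021Q)
		return FRAME_VLAN_UNSUPPORTED;
	if (proto == ETH_P_HSR)
		return FRAME_HSR_TAGGED;
	if (len >= 20 && get_be16(frame + len - 2) == ETH_P_PRP)
		return FRAME_PRP_RCT;
	return FRAME_STD;
}

int main(void)
{
	uint8_t hsr_frame[60] = { [12] = 0x89, [13] = 0x2f };

	printf("kind = %d\n", classify(hsr_frame, sizeof(hsr_frame)));
	return 0;
}
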
677 void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
682 if (fill_frame_info(&frame, skb, port) < 0)
693 port->dev->stats.tx_bytes += skb->len;
704 kfree_skb(skb);