Lines matching refs:ro (references to the struct raw_sock *ro throughout Linux net/can/raw.c)

131 struct raw_sock *ro = raw_sk(sk);
137 if (!ro->recv_own_msgs && oskb->sk == sk)
141 if (!ro->fd_frames && can_is_canfd_skb(oskb))
148 if (!ro->xl_frames)
152 if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) {
154 if ((cxl->prio & ro->rx_vcid_mask_shifted) !=
155 (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted))
165 if (this_cpu_ptr(ro->uniq)->skb == oskb &&
166 this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
167 if (!ro->join_filters)
170 this_cpu_inc(ro->uniq->join_rx_count);
172 if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
175 this_cpu_ptr(ro->uniq)->skb = oskb;
176 this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
177 this_cpu_ptr(ro->uniq)->join_rx_count = 1;
179 if (ro->join_filters && ro->count > 1)
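The block above is the receive-side gatekeeping in raw_rcv(): own messages are dropped unless recv_own_msgs is set (line 137), CAN FD and CAN XL frames need their opt-ins (lines 141-148), a received VCID must match the configured mask (lines 152-155), and the per-CPU uniqframe state counts how often the same skb hits raw_rcv() so that join_filters only delivers a frame once all ro->count filters have matched it (lines 165-179). A minimal userspace sketch of the opt-ins driving these checks, using the SOL_CAN_RAW options from linux/can/raw.h:

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* Enable the two flags tested at lines 137 and 141 on socket s. */
static int enable_own_and_fd(int s)
{
	int on = 1;

	/* sets ro->recv_own_msgs, so own skbs pass the check at line 137 */
	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, &on, sizeof(on)) < 0)
		return -1;

	/* sets ro->fd_frames, so CAN FD skbs pass the check at line 141 */
	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &on, sizeof(on));
}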
274 struct raw_sock *ro = raw_sk(sk);
276 raw_disable_filters(net, dev, sk, ro->filter, ro->count);
277 raw_disable_errfilter(net, dev, sk, ro->err_mask);
283 struct raw_sock *ro = raw_sk(sk);
286 err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
288 err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
290 raw_disable_filters(net, dev, sk, ro->filter,
291 ro->count);
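Lines 286-291 show the enable/rollback pairing in raw_enable_allfilters(): if the error filter cannot be registered after the CAN-ID filters were, the CAN-ID filters are torn down again so the socket never ends up half-registered. Reassembled from the listed fragments (the unlisted control flow at lines 287 and 289 is inferred):

	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(net, dev, sk, ro->filter,
					    ro->count);
	}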
297 static void raw_notify(struct raw_sock *ro, unsigned long msg,
300 struct sock *sk = &ro->sk;
305 if (ro->dev != dev)
312 if (ro->bound) {
314 netdev_put(dev, &ro->dev_tracker);
317 if (ro->count > 1)
318 kfree(ro->filter);
320 ro->ifindex = 0;
321 ro->bound = 0;
322 ro->dev = NULL;
323 ro->count = 0;
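raw_notify() (lines 297-323) reacts to netdevice events: it bails out unless the event targets the bound device (line 305) and, on unregister, unbinds the socket. A hedged reconstruction of that teardown, assuming the unlisted line between 312 and 314 calls raw_disable_allfilters() as in mainline raw.c:

	if (ro->bound) {
		raw_disable_allfilters(dev_net(dev), dev, sk);
		netdev_put(dev, &ro->dev_tracker);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->dev = NULL;
	ro->count = 0;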
364 struct raw_sock *ro = raw_sk(sk);
366 ro->bound = 0;
367 ro->ifindex = 0;
368 ro->dev = NULL;
371 ro->dfilter.can_id = 0;
372 ro->dfilter.can_mask = MASK_ALL;
373 ro->filter = &ro->dfilter;
374 ro->count = 1;
377 ro->loopback = 1;
378 ro->recv_own_msgs = 0;
379 ro->fd_frames = 0;
380 ro->xl_frames = 0;
381 ro->join_filters = 0;
384 ro->uniq = alloc_percpu(struct uniqframe);
385 if (unlikely(!ro->uniq))
390 list_add_tail(&ro->notifier, &raw_notifier_list);
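raw_init() (lines 364-390) establishes the defaults every new CAN_RAW socket starts with: unbound, a single match-all filter (MASK_ALL is defined as 0 in raw.c, so every frame matches), loopback enabled, all opt-in features off. The per-CPU uniqframe state used by the join_filters logic is allocated here (line 384) and the socket joins the notifier list (line 390). In userspace terms:

#include <sys/socket.h>
#include <linux/can.h>

static int open_raw_socket(void)
{
	/* the new socket receives every frame on the device it is later
	 * bound to (match-all dfilter, lines 371-374), with loopback on
	 * (line 377) and every opt-in feature off (lines 378-381) */
	return socket(PF_CAN, SOCK_RAW, CAN_RAW);
}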
399 struct raw_sock *ro;
404 ro = raw_sk(sk);
407 while (raw_busy_notifier == ro) {
412 list_del(&ro->notifier);
419 if (ro->bound) {
420 if (ro->dev) {
421 raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
422 netdev_put(ro->dev, &ro->dev_tracker);
428 if (ro->count > 1)
429 kfree(ro->filter);
431 ro->ifindex = 0;
432 ro->bound = 0;
433 ro->dev = NULL;
434 ro->count = 0;
435 free_percpu(ro->uniq);
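raw_release() (lines 399-435) first synchronizes with a possibly running notifier (line 407) before removing itself from raw_notifier_list (line 412), then mirrors the raw_notify() teardown and finally frees the per-CPU state (line 435). The wait loop around line 407 presumably follows the mainline pattern (hedged reconstruction; raw_notifier_lock is assumed from mainline raw.c):

	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		/* a notifier call is using this socket; back off and retry */
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);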
452 struct raw_sock *ro = raw_sk(sk);
466 if (ro->bound && addr->can_ifindex == ro->ifindex)
498 if (ro->bound) {
500 if (ro->dev) {
501 raw_disable_allfilters(dev_net(ro->dev),
502 ro->dev, sk);
503 /* drop reference to old ro->dev */
504 netdev_put(ro->dev, &ro->dev_tracker);
509 ro->ifindex = ifindex;
510 ro->bound = 1;
511 /* bind() ok -> hold a reference for new ro->dev */
512 ro->dev = dev;
513 if (ro->dev)
514 netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL);
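raw_bind() (lines 452-514) treats a rebind to the already-bound ifindex as a no-op (line 466); otherwise it drops the filters and the tracked device reference of the old binding (lines 498-504) before recording the new one and taking a fresh reference (lines 509-514). The userspace side, as a minimal sketch:

#include <string.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>

/* Bind a CAN_RAW socket to "can0"; this ends up in raw_bind(), which
 * holds the device via netdev_hold() at line 514. */
static int bind_can0(int s)
{
	struct sockaddr_can addr;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");

	return bind(s, (struct sockaddr *)&addr, sizeof(addr));
}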
538 struct raw_sock *ro = raw_sk(sk);
545 addr->can_ifindex = ro->ifindex;
554 struct raw_sock *ro = raw_sk(sk);
589 dev = ro->dev;
590 if (ro->bound && dev) {
599 if (ro->bound) {
614 raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
615 ro->count);
619 if (ro->count > 1)
620 kfree(ro->filter);
625 ro->dfilter = sfilter;
626 filter = &ro->dfilter;
628 ro->filter = filter;
629 ro->count = count;
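In the CAN_RAW_FILTER branch (lines 589-629) the new filter set is registered on the bound device first and only then swapped in. A single filter is stored inline in ro->dfilter (lines 625-626) while larger sets live in a heap allocation, which is why the old set is only kfree()d when count > 1 (lines 619-620). Installing two filters from userspace, as a sketch:

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int install_filters(int s)
{
	/* two filters -> count > 1, so the kernel heap-allocates the copy
	 * that lines 619-620 later kfree() on replacement */
	struct can_filter rfilter[2] = {
		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
		{ .can_id = 0x200, .can_mask = 0x700 },
	};

	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
			  rfilter, sizeof(rfilter));
}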
649 dev = ro->dev;
650 if (ro->bound && dev) {
658 if (ro->bound) {
668 ro->err_mask);
672 ro->err_mask = err_mask;
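The CAN_RAW_ERR_FILTER branch (lines 649-672) follows the same register-then-commit pattern for ro->err_mask. Error frames are opt-in per error class, e.g.:

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/error.h>
#include <linux/can/raw.h>

static int want_bus_errors(int s)
{
	/* stored into ro->err_mask at line 672 */
	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;

	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
			  &err_mask, sizeof(err_mask));
}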
681 if (optlen != sizeof(ro->loopback))
684 if (copy_from_sockptr(&ro->loopback, optval, optlen))
690 if (optlen != sizeof(ro->recv_own_msgs))
693 if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
706 if (ro->xl_frames && !fd_frames)
709 ro->fd_frames = fd_frames;
713 if (optlen != sizeof(ro->xl_frames))
716 if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
720 if (ro->xl_frames)
721 ro->fd_frames = ro->xl_frames;
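Lines 706-721 encode the dependency between the two frame formats: enabling CAN_RAW_XL_FRAMES also sets ro->fd_frames (lines 720-721), and clearing CAN_RAW_FD_FRAMES while XL is still enabled is rejected (line 706). So a CAN XL socket only needs one option:

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int enable_canxl(int s)
{
	int on = 1;

	/* also sets ro->fd_frames, per lines 720-721 */
	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_FRAMES, &on, sizeof(on));
}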
725 if (optlen != sizeof(ro->raw_vcid_opts))
728 if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen))
732 ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET;
733 ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET;
734 ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET;
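The CAN_RAW_XL_VCID_OPTS branch (lines 725-734) pre-shifts the configured VCIDs by CANXL_VCID_OFFSET so the hot paths (lines 152-155 on receive, 874-877 on transmit) can compare and OR them against cxl->prio directly. Configuring it from userspace, as a hedged sketch using struct can_raw_vcid_options from linux/can/raw.h:

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int setup_vcid(int s)
{
	/* tag transmitted CAN XL frames with VCID 0x45 and accept only
	 * received frames carrying that exact VCID */
	struct can_raw_vcid_options vcid = {
		.flags        = CAN_RAW_XL_VCID_TX_SET | CAN_RAW_XL_VCID_RX_FILTER,
		.tx_vcid      = 0x45,
		.rx_vcid      = 0x45,
		.rx_vcid_mask = 0xff,
	};

	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
			  &vcid, sizeof(vcid));
}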
738 if (optlen != sizeof(ro->join_filters))
741 if (copy_from_sockptr(&ro->join_filters, optval, optlen))
756 struct raw_sock *ro = raw_sk(sk);
772 if (ro->count > 0) {
773 int fsize = ro->count * sizeof(struct can_filter);
784 if (copy_to_user(optval, ro->filter, len))
799 val = &ro->err_mask;
805 val = &ro->loopback;
811 val = &ro->recv_own_msgs;
817 val = &ro->fd_frames;
823 val = &ro->xl_frames;
830 if (len < sizeof(ro->raw_vcid_opts)) {
833 if (put_user(sizeof(ro->raw_vcid_opts), optlen))
836 if (len > sizeof(ro->raw_vcid_opts))
837 len = sizeof(ro->raw_vcid_opts);
838 if (copy_to_user(optval, &ro->raw_vcid_opts, len))
848 val = &ro->join_filters;
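raw_getsockopt() (lines 756-848) mostly hands back the stored values; CAN_RAW_FILTER copies out the current filter array (lines 772-784) and the VCID options are truncated to the caller's buffer length (lines 830-838). Reading the filters back, as a sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static void dump_filter_count(int s)
{
	struct can_filter rbuf[16];
	socklen_t rlen = sizeof(rbuf);

	/* served by the copy_to_user() at line 784 */
	if (getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, rbuf, &rlen) == 0)
		printf("%zu filters installed\n",
		       rlen / sizeof(struct can_filter));
}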
862 static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
870 if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
874 if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
876 cxl->prio |= ro->tx_vcid_shifted;
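raw_put_canxl_vcid() (lines 862-877) is the transmit-side counterpart of the RX filter above: unless pass-through (CAN_RAW_XL_VCID_TX_PASS) is enabled, VCID bits already present in the frame are cleared, and with CAN_RAW_XL_VCID_TX_SET the pre-shifted tx_vcid is ORed in. A hedged reconstruction, assuming the unlisted lines mask cxl->prio with CANXL_PRIO_MASK as in mainline raw.c:

static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
{
	struct canxl_frame *cxl = (struct canxl_frame *)skb->data;

	/* clear the VCID bits unless pass-through is enabled */
	if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
		cxl->prio &= CANXL_PRIO_MASK;

	/* stamp the configured VCID for transmission */
	if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
		cxl->prio &= CANXL_PRIO_MASK;
		cxl->prio |= ro->tx_vcid_shifted;
	}
}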
880 static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
887 if (ro->fd_frames && can_is_canfd_skb(skb) &&
892 if (ro->xl_frames && can_is_canxl_skb(skb) &&
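raw_check_txframe() (lines 880-892) validates an outgoing skb against both the socket flags and the device MTU, returning the MTU class the frame needs (or 0 to reject it). A hedged reconstruction along the lines of mainline raw.c:

static unsigned int raw_check_txframe(struct raw_sock *ro,
				      struct sk_buff *skb, int mtu)
{
	/* Classical CAN frames always fit */
	if (can_is_can_skb(skb))
		return CAN_MTU;

	/* CAN FD needs the opt-in and a CAN FD (or CAN XL) capable device */
	if (ro->fd_frames && can_is_canfd_skb(skb) &&
	    (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
		return CANFD_MTU;

	/* CAN XL needs the opt-in and a CAN XL device */
	if (ro->xl_frames && can_is_canxl_skb(skb) &&
	    can_is_canxl_dev_mtu(mtu))
		return CANXL_MTU;

	return 0;
}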
902 struct raw_sock *ro = raw_sk(sk);
925 ifindex = ro->ifindex;
949 txmtu = raw_check_txframe(ro, skb, dev->mtu);
955 raw_put_canxl_vcid(ro, skb);
971 err = can_send(skb, ro->loopback);
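In raw_sendmsg() (lines 902-971) the frame is checked against the device (line 949), gets its VCID stamped (line 955), and is handed to can_send() with the socket's loopback setting (line 971), which is what makes the echo skb reappear in raw_rcv() at line 137 when recv_own_msgs is set. The matching userspace transmit, as a sketch:

#include <unistd.h>
#include <linux/can.h>

static int send_frame(int s)
{
	struct can_frame frame = {
		.can_id = 0x123,
		.len    = 2,
		.data   = { 0xde, 0xad },
	};

	/* a write() of exactly one frame ends up in raw_sendmsg() */
	if (write(s, &frame, sizeof(frame)) != (ssize_t)sizeof(frame))
		return -1;
	return 0;
}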