/*
 * af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

static __initdata const char banner[] = KERN_INFO
	"can: controller area network core (" CAN_VERSION_STRING ")\n";

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
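/* the timer can be disabled at load time, e.g. "modprobe can stats_timer=0" */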

/* receive filters subscribed for 'all' CAN devices */
struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_SPINLOCK(proto_tab_lock);

struct timer_list can_stattimer;   /* timer for statistics update */
struct s_stats    can_stats;       /* packet statistics */
struct s_pstats   can_pstats;      /* receive list statistics */

/*
 * af_can socket functions
 */

static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {

	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);

	default:
		return -ENOIOCTLCMD;
	}
}

static void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

#ifdef CONFIG_MODULES
	/* try to load the protocol module if the kernel is modular */
	if (!proto_tab[protocol]) {
		err = request_module("can-proto-%d", protocol);

		/*
		 * In case of error we only print a message but don't
		 * return the error code immediately.  Below we will
		 * return -EPROTONOSUPPORT
		 */
		if (err && printk_ratelimit())
			printk(KERN_ERR "can: request_module "
			       "(can-proto-%d) failed.\n", protocol);
	}
#endif

	spin_lock(&proto_tab_lock);
	cp = proto_tab[protocol];
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;
	spin_unlock(&proto_tab_lock);

	/* check for available protocol and correct usage */

	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTONOSUPPORT;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
	}

 errout:
	module_put(cp->prot->owner);
	return err;
}

/*
 * af_can tx path
 */

/**
 * can_send - transmit a CAN frame (optionally with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct can_frame *cf = (struct can_frame *)skb->data;
	int err;

	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (skb->dev->type != ARPHRD_CAN) {
		kfree_skb(skb);
		return -EPERM;
	}

	if (!(skb->dev->flags & IFF_UP)) {
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->protocol = htons(ETH_P_CAN);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/*
		 * The reference to the originating sock may be required
		 * by the receiving socket to check whether the frame is
		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
		 * Therefore we have to ensure that skb->sk remains the
		 * reference to the originating sock by restoring skb->sk
		 * after each skb_clone() or skb_orphan() usage.
		 */

		if (!(skb->dev->flags & IFF_ECHO)) {
			/*
			 * If the interface is not capable of doing the
			 * loopback itself, we do it here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			newskb->sk = skb->sk;
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx_ni(newskb);

	/* update statistics */
	can_stats.tx_frames++;
	can_stats.tx_frames_delta++;

	return 0;
}
EXPORT_SYMBOL(can_send);
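
/*
 * Example (editor's sketch, not part of this module): how a protocol module
 * might transmit a single frame with local loopback enabled.  The interface
 * name "can0" and the identifier/data values are illustrative only.  Note
 * that can_send() consumes the skb in both the success and the error case,
 * so the caller must not free it afterwards.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "can0");
 *	struct sk_buff *skb;
 *	struct can_frame *cf;
 *	int err;
 *
 *	if (!dev)
 *		return -ENODEV;
 *
 *	skb = alloc_skb(sizeof(struct can_frame), GFP_KERNEL);
 *	if (!skb) {
 *		dev_put(dev);
 *		return -ENOMEM;
 *	}
 *
 *	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
 *	memset(cf, 0, sizeof(*cf));
 *	cf->can_id  = 0x123;
 *	cf->can_dlc = 2;
 *	cf->data[0] = 0xde;
 *	cf->data[1] = 0xad;
 *
 *	skb->dev = dev;
 *	err = can_send(skb, 1);		(1 = with local loopback)
 *	dev_put(dev);
 */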

/*
 * af_can rx path
 */

static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
	if (!dev)
		return &can_rx_alldev_list;
	else
		return (struct dev_rcv_lists *)dev->ml_priv;
}

/**
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  relevant bits for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
 *  there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
					struct dev_rcv_lists *d)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in filter entry */
		*mask &= CAN_ERR_MASK;
		return &d->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &d->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &d->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {

		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
				/* RFC: a future use-case for hash-tables? */
				return &d->rx[RX_EFF];
			}
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &d->rx[RX_FIL];
}
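
/*
 * Illustration (editor's sketch): how a few typical can_id/mask pairs are
 * mapped by find_rcv_list().  The values are examples only.
 *
 *	can_id = 0x000, mask = 0x000                          -> rx[RX_ALL]
 *	can_id = 0x123, mask = CAN_SFF_MASK                   -> rx[RX_FIL]
 *	can_id = 0x123, mask = CAN_SFF_MASK | CAN_EFF_FLAG
 *	                       | CAN_RTR_FLAG                 -> rx_sff[0x123]
 *	can_id = 0x123 | CAN_INV_FILTER, mask = CAN_SFF_MASK  -> rx[RX_INV]
 *	can_id = 0x000, mask = CAN_ERR_FLAG                   -> rx[RX_ERR]
 */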

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
 *  the callback function is running. The callback function must *not* free
 *  the given sk_buff while processing its task. When the given sk_buff is
 *  needed after the end of the callback function it must be cloned inside
 *  the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
		    void (*func)(struct sk_buff *, void *), void *data,
		    char *ident)
{
	struct receiver *r;
	struct hlist_head *rl;
	struct dev_rcv_lists *d;
	int err = 0;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && dev->type != ARPHRD_CAN)
		return -ENODEV;

	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (d) {
		rl = find_rcv_list(&can_id, &mask, d);

		r->can_id  = can_id;
		r->mask    = mask;
		r->matches = 0;
		r->func    = func;
		r->data    = data;
		r->ident   = ident;

		hlist_add_head_rcu(&r->list, rl);
		d->entries++;

		can_pstats.rcv_entries++;
		if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
			can_pstats.rcv_entries_max = can_pstats.rcv_entries;
	} else {
		kmem_cache_free(rcv_cache, r);
		err = -ENODEV;
	}

	spin_unlock(&can_rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);
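
/*
 * Example (editor's sketch, not part of this module): subscribing to a
 * single standard-frame identifier on all CAN interfaces.  The callback
 * name my_rx_handler is illustrative only.  Passing CAN_EFF_FLAG and
 * CAN_RTR_FLAG in the mask together with a plain SFF can_id places the
 * entry on the cheap per-id rx_sff list (see find_rcv_list() above).
 * Remember: the skb is only valid while the callback runs and must not be
 * freed there; clone it with skb_clone() if it is needed later.
 *
 *	static void my_rx_handler(struct sk_buff *skb, void *data)
 *	{
 *		struct can_frame *cf = (struct can_frame *)skb->data;
 *
 *		pr_debug("received can_id 0x%X\n", cf->can_id);
 *	}
 *
 *	err = can_rx_register(NULL, 0x123, CAN_SFF_MASK | CAN_EFF_FLAG |
 *			      CAN_RTR_FLAG, my_rx_handler, NULL, "my_module");
 */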

/*
 * can_rx_delete_receiver - rcu callback for single receiver entry removal
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *r = container_of(rp, struct receiver, rcu);

	kmem_cache_free(rcv_cache, r);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
		       void (*func)(struct sk_buff *, void *), void *data)
{
	struct receiver *r = NULL;
	struct hlist_head *rl;
	struct hlist_node *next;
	struct dev_rcv_lists *d;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (!d) {
		printk(KERN_ERR "BUG: receive list not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		goto out;
	}

	rl = find_rcv_list(&can_id, &mask, d);

	/*
	 * Search the receiver list for the item to delete.  This should
	 * exist, since no receiver may be unregistered that hasn't
	 * been registered before.
	 */

	hlist_for_each_entry_rcu(r, next, rl, list) {
		if (r->can_id == can_id && r->mask == mask &&
		    r->func == func && r->data == data)
			break;
	}

	/*
	 * Check for bugs in CAN protocol implementations:
	 * If no matching list item was found, the list cursor variable next
	 * will be NULL, while r will point to the last item of the list.
	 */

	if (!next) {
		printk(KERN_ERR "BUG: receive list entry not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		r = NULL;
		goto out;
	}

	hlist_del_rcu(&r->list);
	d->entries--;

	if (can_pstats.rcv_entries > 0)
		can_pstats.rcv_entries--;

	/* remove device structure requested by NETDEV_UNREGISTER */
	if (d->remove_on_zero_entries && !d->entries) {
		kfree(d);
		dev->ml_priv = NULL;
	}

 out:
	spin_unlock(&can_rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (r)
		call_rcu(&r->rcu, can_rx_delete_receiver);
}
EXPORT_SYMBOL(can_rx_unregister);
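
/*
 * Example (editor's sketch): dropping the subscription made in the
 * can_rx_register() example above.  dev, can_id, mask, func and data must
 * match the original registration exactly, otherwise the entry will not
 * be found:
 *
 *	can_rx_unregister(NULL, 0x123, CAN_SFF_MASK | CAN_EFF_FLAG |
 *			  CAN_RTR_FLAG, my_rx_handler, NULL);
 */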

static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
	r->func(skb, r->data);
	r->matches++;
}

static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
	struct receiver *r;
	struct hlist_node *n;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (d->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error frame entries only */
		hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
			if (can_id & r->mask) {
				deliver(skb, r);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
		deliver(skb, r);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
		if ((can_id & r->mask) == r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
		if ((can_id & r->mask) != r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check filterlists for single non-RTR can_ids */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
			if (r->can_id == can_id) {
				deliver(skb, r);
				matches++;
			}
		}
	} else {
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
			deliver(skb, r);
			matches++;
		}
	}

	return matches;
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct dev_rcv_lists *d;
	struct can_frame *cf = (struct can_frame *)skb->data;
	int matches;

	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
		      skb->len != sizeof(struct can_frame) ||
		      cf->can_dlc > 8,
		      "PF_CAN: dropped non-conforming skbuff: "
		      "dev type %d, len %d, can_dlc %d\n",
		      dev->type, skb->len, cf->can_dlc))
		goto drop;

	/* update statistics */
	can_stats.rx_frames++;
	can_stats.rx_frames_delta++;

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(&can_rx_alldev_list, skb);

	/* find receive list for this device */
	d = find_dev_rcv_lists(dev);
	if (d)
		matches += can_rcv_filter(d, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		can_stats.matches++;
		can_stats.matches_delta++;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * af_can protocol functions
 */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		printk(KERN_ERR "can: protocol number %d out of range\n",
		       proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	spin_lock(&proto_tab_lock);
	if (proto_tab[proto]) {
		printk(KERN_ERR "can: protocol %d already registered\n",
		       proto);
		err = -EBUSY;
	} else {
		proto_tab[proto] = cp;

		/* use generic ioctl function if not defined by module */
		if (!cp->ops->ioctl)
			cp->ops->ioctl = can_ioctl;
	}
	spin_unlock(&proto_tab_lock);

	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(struct can_proto *cp)
{
	int proto = cp->protocol;

	spin_lock(&proto_tab_lock);
	if (!proto_tab[proto]) {
		printk(KERN_ERR "BUG: can: protocol %d is not registered\n",
		       proto);
	}
	proto_tab[proto] = NULL;
	spin_unlock(&proto_tab_lock);

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
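
/*
 * Example (editor's sketch, not part of this module): a minimal protocol
 * module registering itself under a hypothetical protocol number
 * MY_CAN_PROTO.  my_proto_ops and my_proto stand for the module's own
 * proto_ops and proto definitions; only the can_proto fields used by
 * af_can.c are shown.
 *
 *	static struct can_proto my_can_proto = {
 *		.type     = SOCK_DGRAM,
 *		.protocol = MY_CAN_PROTO,
 *		.ops      = &my_proto_ops,
 *		.prot     = &my_proto,
 *	};
 *
 *	static __init int my_module_init(void)
 *	{
 *		return can_proto_register(&my_can_proto);
 *	}
 *
 *	static __exit void my_module_exit(void)
 *	{
 *		can_proto_unregister(&my_can_proto);
 *	}
 */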

/*
 * af_can notifier to create/remove CAN netdevice specific structs
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dev_rcv_lists *d;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_REGISTER:

		/* create new dev_rcv_lists for this device */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d) {
			printk(KERN_ERR
			       "can: allocation of receive list failed\n");
			return NOTIFY_DONE;
		}
		BUG_ON(dev->ml_priv);
		dev->ml_priv = d;

		break;

	case NETDEV_UNREGISTER:
		spin_lock(&can_rcvlists_lock);

		d = dev->ml_priv;
		if (d) {
			if (d->entries)
				d->remove_on_zero_entries = 1;
			else {
				kfree(d);
				dev->ml_priv = NULL;
			}
		} else
			printk(KERN_ERR "can: notifier: receive list not "
			       "found for dev %s\n", dev->name);

		spin_unlock(&can_rcvlists_lock);

		break;
	}

	return NOTIFY_DONE;
}

/*
 * af_can module init/exit functions
 */

static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.dev  = NULL,
	.func = can_rcv,
};

static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner  = THIS_MODULE,
};

/* notifier block for netdevice event */
static struct notifier_block can_netdev_notifier __read_mostly = {
	.notifier_call = can_notifier,
};

static __init int can_init(void)
{
	printk(banner);

	memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	if (stats_timer) {
		/* the statistics are updated every second (timer triggered) */
		setup_timer(&can_stattimer, can_stat_update, 0);
		mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
	} else
		can_stattimer.function = NULL;

	can_init_proc();

	/* protocol register */
	sock_register(&can_family_ops);
	register_netdevice_notifier(&can_netdev_notifier);
	dev_add_pack(&can_packet);

	return 0;
}

static __exit void can_exit(void)
{
	struct net_device *dev;

	if (stats_timer)
		del_timer(&can_stattimer);

	can_remove_proc();

	/* protocol unregister */
	dev_remove_pack(&can_packet);
	unregister_netdevice_notifier(&can_netdev_notifier);
	sock_unregister(PF_CAN);

	/* remove created dev_rcv_lists from still registered CAN devices */
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (dev->type == ARPHRD_CAN && dev->ml_priv) {

			struct dev_rcv_lists *d = dev->ml_priv;

			BUG_ON(d->entries);
			kfree(d);
			dev->ml_priv = NULL;
		}
	}
	rcu_read_unlock();

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);