// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

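/*
 * Per-open state for /dev/ipmiN: the IPMI user handle, a queue of received
 * messages protected by recv_msg_lock, the wait queue and fasync list used
 * to notify readers, and the default retry parameters applied to sends that
 * do not specify their own.
 */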
struct ipmi_file_private
{
	struct ipmi_user     *user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex         recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};

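/*
 * Receive handler registered with the IPMI message handler: queue the
 * incoming message for this open file, wake any poll() waiters, and send
 * SIGIO to fasync listeners.
 */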
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	was_empty = list_empty(&priv->recv_msgs);
	list_add_tail(&msg->link, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}
}

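/* Report the device readable as soon as at least one message is queued. */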
static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	__poll_t                 mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&priv->recv_msgs))
		mask |= (EPOLLIN | EPOLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;

	return fasync_helper(fd, file, on, &priv->fasync_queue);
}

static const struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};

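/*
 * Open /dev/ipmiN: allocate the per-file state and create an IPMI user on
 * interface N (the minor number), starting out with the message handler's
 * default retry settings.
 */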
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &priv->user);
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&priv->recv_msg_lock);
	INIT_LIST_HEAD(&priv->recv_msgs);
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	return rv;
}

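/*
 * Close: destroy the IPMI user (so no new messages arrive), free any
 * messages still queued, then free the per-file state.
 */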
static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;
	struct ipmi_recv_msg *msg, *next;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}

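/*
 * Common send path for the plain and "settime" send ioctls: validate the
 * user-supplied address, copy the message data into a kernel buffer, and
 * hand it to ipmi_request_settime() with the given retry parameters.
 */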
static int handle_send_req(struct ipmi_user *user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int              rv;
	struct ipmi_addr addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/*
	 * From here on we cannot return directly; error exits must jump to
	 * "out" so that msg.data is freed.
	 */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len)) {
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}

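/*
 * Common receive path for the plain, truncating, and compat receive ioctls:
 * pull the oldest message off the queue, copy its address and data to the
 * user buffers described by *rsp, and write the updated ipmi_recv structure
 * back through the caller-supplied copyout helper.  On failure the message
 * is put back at the head of the queue.
 */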
static int handle_recv(struct ipmi_file_private *priv,
			bool trunc, struct ipmi_recv *rsp,
			int (*copyout)(struct ipmi_recv *, void __user *),
			void __user *to)
{
	int              addr_len;
	struct list_head *entry;
	struct ipmi_recv_msg  *msg;
	unsigned long    flags;
	int rv = 0, rv2 = 0;

	/*
	 * We claim a mutex because we don't want two users getting
	 * something from the queue at a time.  Since we have to release
	 * the spinlock before we can copy the data to the user, it's
	 * possible another user will grab something from the queue, too.
	 * Then the messages might get out of order if something fails and
	 * the message gets put back onto the queue.  This mutex prevents
	 * that problem.
	 */
	mutex_lock(&priv->recv_mutex);

	/* Grab the message off the list. */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	if (list_empty(&priv->recv_msgs)) {
		spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
		rv = -EAGAIN;
		goto recv_err;
	}
	entry = priv->recv_msgs.next;
	msg = list_entry(entry, struct ipmi_recv_msg, link);
	list_del(entry);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	addr_len = ipmi_addr_length(msg->addr.addr_type);
	if (rsp->addr_len < addr_len) {
		rv = -EINVAL;
		goto recv_putback_on_err;
	}

	if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
		rv = -EFAULT;
		goto recv_putback_on_err;
	}
	rsp->addr_len = addr_len;

	rsp->recv_type = msg->recv_type;
	rsp->msgid = msg->msgid;
	rsp->msg.netfn = msg->msg.netfn;
	rsp->msg.cmd = msg->msg.cmd;

	if (msg->msg.data_len > 0) {
		if (rsp->msg.data_len < msg->msg.data_len) {
			if (trunc) {
				rv2 = -EMSGSIZE;
				msg->msg.data_len = rsp->msg.data_len;
			} else {
				rv = -EMSGSIZE;
				goto recv_putback_on_err;
			}
		}

		if (copy_to_user(rsp->msg.data,
				 msg->msg.data,
				 msg->msg.data_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp->msg.data_len = msg->msg.data_len;
	} else {
		rsp->msg.data_len = 0;
	}

	rv = copyout(rsp, to);
	if (rv)
		goto recv_putback_on_err;

	mutex_unlock(&priv->recv_mutex);
	ipmi_free_recv_msg(msg);
	return rv2;

recv_putback_on_err:
	/*
	 * If we got an error, put the message back onto the head of the
	 * queue.
	 */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	list_add(entry, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
recv_err:
	mutex_unlock(&priv->recv_mutex);
	return rv;
}

static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}

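/*
 * Main ioctl handler for /dev/ipmiN.  Messages are sent and received
 * through ioctls rather than read()/write().
 *
 * Illustrative userspace sketch (not part of this driver; structures and
 * constants come from <linux/ipmi.h>) showing a Get Device ID request
 * (netfn 0x06, cmd 0x01) sent to the local BMC over the system interface:
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *	};
 *	struct ipmi_req req = {
 *		.addr     = (unsigned char *)&si,
 *		.addr_len = sizeof(si),
 *		.msgid    = 1,
 *		.msg      = { .netfn = 0x06, .cmd = 0x01,
 *			      .data = NULL, .data_len = 0 },
 *	};
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *
 * The response is later collected with IPMICTL_RECEIVE_MSG(_TRUNC),
 * typically after poll() reports the fd readable.
 */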
static long ipmi_ioctl(struct file   *file,
		       unsigned int  cmd,
		       unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd) {
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv      rsp;

		if (copy_from_user(&rsp, arg, sizeof(rsp)))
			rv = -EFAULT;
		else
			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
					 &rsp, copyout_recv, arg);
		break;
	}

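	/*
	 * Register or unregister to receive a given netfn/cmd, either on
	 * all channels or on the channels selected by the _CHANS variants.
	 */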
	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec   val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		mutex_unlock(&priv->recv_mutex);
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		mutex_lock(&priv->recv_mutex);
		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}

	default:
		rv = -ENOTTY;
		break;
	}

	return rv;
}

#ifdef CONFIG_COMPAT
/*
 * Support for 32-bit compatible ioctls on 64-bit kernels, so that 32-bit
 * applications can drive this interface on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};

/*
 * Helper functions for converting the 32-bit structures to their native
 * forms.
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg *p32)
{
	p64->netfn = p32->netfn;
	p64->cmd = p32->cmd;
	p64->data_len = p32->data_len;
	p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req *p32)
{
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
		struct compat_ipmi_req_settime *p32)
{
	get_compat_ipmi_req(&p64->req, &p32->req);
	p64->retries = p32->retries;
	p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv *p32)
{
	memset(p64, 0, sizeof(struct ipmi_recv));
	p64->recv_type = p32->recv_type;
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

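/*
 * 32-bit counterpart of copyout_recv(): convert the updated ipmi_recv back
 * to the compat layout before copying it out to userspace.
 */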
static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
	struct compat_ipmi_recv v32;

	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
	v32.recv_type = p64->recv_type;
	v32.addr = ptr_to_compat(p64->addr);
	v32.addr_len = p64->addr_len;
	v32.msgid = p64->msgid;
	v32.msg.netfn = p64->msg.netfn;
	v32.msg.cmd = p64->msg.cmd;
	v32.msg.data_len = p64->msg.data_len;
	v32.msg.data = ptr_to_compat(p64->msg.data);

	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	struct ipmi_file_private *priv = filep->private_data;

	switch (cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req	rp;
		struct compat_ipmi_req r32;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
			return -EFAULT;

		get_compat_ipmi_req(&rp, &r32);

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		return handle_send_req(priv->user, &rp,
				       retries, retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime	sp;
		struct compat_ipmi_req_settime sp32;

		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
			return -EFAULT;

		get_compat_ipmi_req_settime(&sp, &sp32);

		return handle_send_req(priv->user, &sp.req,
				sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv   recv64;
		struct compat_ipmi_recv recv32;

		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
			return -EFAULT;

		get_compat_ipmi_recv(&recv64, &recv32);

		return handle_recv(priv,
				 cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
				 &recv64, copyout_recv32, compat_ptr(arg));
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}
#endif

static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static const struct class ipmi_class = {
	.name = "ipmi",
};

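/*
 * Called by the IPMI core when a new interface is registered: create the
 * ipmiN class device (exposed to userspace as /dev/ipmiN) and remember it
 * on reg_list.
 */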
static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(&ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}

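/*
 * Called by the IPMI core when an interface goes away: drop it from
 * reg_list and destroy its device node.
 */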
static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(&ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

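/*
 * Module init: register the "ipmi" class and the character device (using a
 * dynamically assigned major unless ipmi_major overrides it), then watch
 * for IPMI interfaces coming and going.
 */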
static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	pr_info("ipmi device interface\n");

	rv = class_register(&ipmi_class);
	if (rv)
		return rv;

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_unregister(&ipmi_class);
		pr_err("ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0)
		ipmi_major = rv;

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_unregister(&ipmi_class);
		pr_warn("ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(&ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_unregister(&ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");