1/*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 *         Corey Minyard <minyard@mvista.com>
8 *         source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 *  This program is free software; you can redistribute it and/or modify it
13 *  under the terms of the GNU General Public License as published by the
14 *  Free Software Foundation; either version 2 of the License, or (at your
15 *  option) any later version.
16 *
17 *
18 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 *  You should have received a copy of the GNU General Public License along
30 *  with this program; if not, write to the Free Software Foundation, Inc.,
31 *  675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/module.h>
35#include <linux/errno.h>
36#include <asm/system.h>
37#include <linux/poll.h>
38#include <linux/spinlock.h>
39#include <linux/mutex.h>
40#include <linux/slab.h>
41#include <linux/ipmi.h>
42#include <linux/ipmi_smi.h>
43#include <linux/notifier.h>
44#include <linux/init.h>
45#include <linux/proc_fs.h>
46#include <linux/rcupdate.h>
47
48#define PFX "IPMI message handler: "
49
50#define IPMI_DRIVER_VERSION "39.1"
51
52static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53static int ipmi_init_msghandler(void);
54
55static int initialized;
56
57#ifdef CONFIG_PROC_FS
58static struct proc_dir_entry *proc_ipmi_root;
59#endif /* CONFIG_PROC_FS */
60
61/* Remain in auto-maintenance mode for this amount of time (in ms). */
62#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
63
64#define MAX_EVENTS_IN_QUEUE	25
65
66/* Don't let a message sit in a queue forever, always time it with at lest
67   the max message timer.  This is in milliseconds. */
68#define MAX_MSG_TIMEOUT		60000
69
70
71/*
72 * The main "user" data structure.
73 */
74struct ipmi_user
75{
76	struct list_head link;
77
78	/* Set to "0" when the user is destroyed. */
79	int valid;
80
81	struct kref refcount;
82
83	/* The upper layer that handles receive messages. */
84	struct ipmi_user_hndl *handler;
85	void             *handler_data;
86
87	/* The interface this user is bound to. */
88	ipmi_smi_t intf;
89
90	/* Does this interface receive IPMI events? */
91	int gets_events;
92};
93
94struct cmd_rcvr
95{
96	struct list_head link;
97
98	ipmi_user_t   user;
99	unsigned char netfn;
100	unsigned char cmd;
101	unsigned int  chans;
102
103	/*
104	 * This is used to form a linked lised during mass deletion.
105	 * Since this is in an RCU list, we cannot use the link above
106	 * or change any data until the RCU period completes.  So we
107	 * use this next variable during mass deletion so we can have
108	 * a list and don't have to wait and restart the search on
109	 * every individual deletion of a command. */
110	struct cmd_rcvr *next;
111};
112
113struct seq_table
114{
115	unsigned int         inuse : 1;
116	unsigned int         broadcast : 1;
117
118	unsigned long        timeout;
119	unsigned long        orig_timeout;
120	unsigned int         retries_left;
121
122	/* To verify on an incoming send message response that this is
123           the message that the response is for, we keep a sequence id
124           and increment it every time we send a message. */
125	long                 seqid;
126
127	/* This is held so we can properly respond to the message on a
128           timeout, and it is used to hold the temporary data for
129           retransmission, too. */
130	struct ipmi_recv_msg *recv_msg;
131};
132
133/* Store the information in a msgid (long) to allow us to find a
134   sequence table entry from the msgid. */
135#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
136
137#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
138	do {								\
139		seq = ((msgid >> 26) & 0x3f);				\
140		seqid = (msgid & 0x3fffff);				\
141        } while (0)
142
143#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
144
145struct ipmi_channel
146{
147	unsigned char medium;
148	unsigned char protocol;
149
150	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
151	   but may be changed by the user. */
152	unsigned char address;
153
154	/* My LUN.  This should generally stay the SMS LUN, but just in
155	   case... */
156	unsigned char lun;
157};
158
159#ifdef CONFIG_PROC_FS
160struct ipmi_proc_entry
161{
162	char                   *name;
163	struct ipmi_proc_entry *next;
164};
165#endif
166
167struct bmc_device
168{
169	struct platform_device *dev;
170	struct ipmi_device_id  id;
171	unsigned char          guid[16];
172	int                    guid_set;
173
174	struct kref	       refcount;
175
176	/* bmc device attributes */
177	struct device_attribute device_id_attr;
178	struct device_attribute provides_dev_sdrs_attr;
179	struct device_attribute revision_attr;
180	struct device_attribute firmware_rev_attr;
181	struct device_attribute version_attr;
182	struct device_attribute add_dev_support_attr;
183	struct device_attribute manufacturer_id_attr;
184	struct device_attribute product_id_attr;
185	struct device_attribute guid_attr;
186	struct device_attribute aux_firmware_rev_attr;
187};
188
189#define IPMI_IPMB_NUM_SEQ	64
190#define IPMI_MAX_CHANNELS       16
191struct ipmi_smi
192{
193	/* What interface number are we? */
194	int intf_num;
195
196	struct kref refcount;
197
198	/* Used for a list of interfaces. */
199	struct list_head link;
200
201	/* The list of upper layers that are using me.  seq_lock
202	 * protects this. */
203	struct list_head users;
204
205	/* Information to supply to users. */
206	unsigned char ipmi_version_major;
207	unsigned char ipmi_version_minor;
208
209	/* Used for wake ups at startup. */
210	wait_queue_head_t waitq;
211
212	struct bmc_device *bmc;
213	char *my_dev_name;
214	char *sysfs_name;
215
216	/* This is the lower-layer's sender routine.  Note that you
217	 * must either be holding the ipmi_interfaces_mutex or be in
218	 * an umpreemptible region to use this.  You must fetch the
219	 * value into a local variable and make sure it is not NULL. */
220	struct ipmi_smi_handlers *handlers;
221	void                     *send_info;
222
223#ifdef CONFIG_PROC_FS
224	/* A list of proc entries for this interface.  This does not
225	   need a lock, only one thread creates it and only one thread
226	   destroys it. */
227	spinlock_t             proc_entry_lock;
228	struct ipmi_proc_entry *proc_entries;
229#endif
230
231	/* Driver-model device for the system interface. */
232	struct device          *si_dev;
233
234	/* A table of sequence numbers for this interface.  We use the
235           sequence numbers for IPMB messages that go out of the
236           interface to match them up with their responses.  A routine
237           is called periodically to time the items in this list. */
238	spinlock_t       seq_lock;
239	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
240	int curr_seq;
241
242	/* Messages that were delayed for some reason (out of memory,
243           for instance), will go in here to be processed later in a
244           periodic timer interrupt. */
245	spinlock_t       waiting_msgs_lock;
246	struct list_head waiting_msgs;
247
248	/* The list of command receivers that are registered for commands
249	   on this interface. */
250	struct mutex     cmd_rcvrs_mutex;
251	struct list_head cmd_rcvrs;
252
253	/* Events that were queues because no one was there to receive
254           them. */
255	spinlock_t       events_lock; /* For dealing with event stuff. */
256	struct list_head waiting_events;
257	unsigned int     waiting_events_count; /* How many events in queue? */
258	int              delivering_events;
259
260	/* The event receiver for my BMC, only really used at panic
261	   shutdown as a place to store this. */
262	unsigned char event_receiver;
263	unsigned char event_receiver_lun;
264	unsigned char local_sel_device;
265	unsigned char local_event_generator;
266
267	/* For handling of maintenance mode. */
268	int maintenance_mode;
269	int maintenance_mode_enable;
270	int auto_maintenance_timeout;
271	spinlock_t maintenance_mode_lock; /* Used in a timer... */
272
273	/* A cheap hack, if this is non-null and a message to an
274	   interface comes in with a NULL user, call this routine with
275	   it.  Note that the message will still be freed by the
276	   caller.  This only works on the system interface. */
277	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
278
279	/* When we are scanning the channels for an SMI, this will
280	   tell which channel we are scanning. */
281	int curr_channel;
282
283	/* Channel information */
284	struct ipmi_channel channels[IPMI_MAX_CHANNELS];
285
286	/* Proc FS stuff. */
287	struct proc_dir_entry *proc_dir;
288	char                  proc_dir_name[10];
289
290	spinlock_t   counter_lock; /* For making counters atomic. */
291
292	/* Commands we got that were invalid. */
293	unsigned int sent_invalid_commands;
294
295	/* Commands we sent to the MC. */
296	unsigned int sent_local_commands;
297	/* Responses from the MC that were delivered to a user. */
298	unsigned int handled_local_responses;
299	/* Responses from the MC that were not delivered to a user. */
300	unsigned int unhandled_local_responses;
301
302	/* Commands we sent out to the IPMB bus. */
303	unsigned int sent_ipmb_commands;
304	/* Commands sent on the IPMB that had errors on the SEND CMD */
305	unsigned int sent_ipmb_command_errs;
306	/* Each retransmit increments this count. */
307	unsigned int retransmitted_ipmb_commands;
308	/* When a message times out (runs out of retransmits) this is
309           incremented. */
310	unsigned int timed_out_ipmb_commands;
311
312	/* This is like above, but for broadcasts.  Broadcasts are
313           *not* included in the above count (they are expected to
314           time out). */
315	unsigned int timed_out_ipmb_broadcasts;
316
317	/* Responses I have sent to the IPMB bus. */
318	unsigned int sent_ipmb_responses;
319
320	/* The response was delivered to the user. */
321	unsigned int handled_ipmb_responses;
322	/* The response had invalid data in it. */
323	unsigned int invalid_ipmb_responses;
324	/* The response didn't have anyone waiting for it. */
325	unsigned int unhandled_ipmb_responses;
326
327	/* Commands we sent out to the IPMB bus. */
328	unsigned int sent_lan_commands;
329	/* Commands sent on the IPMB that had errors on the SEND CMD */
330	unsigned int sent_lan_command_errs;
331	/* Each retransmit increments this count. */
332	unsigned int retransmitted_lan_commands;
333	/* When a message times out (runs out of retransmits) this is
334           incremented. */
335	unsigned int timed_out_lan_commands;
336
337	/* Responses I have sent to the IPMB bus. */
338	unsigned int sent_lan_responses;
339
340	/* The response was delivered to the user. */
341	unsigned int handled_lan_responses;
342	/* The response had invalid data in it. */
343	unsigned int invalid_lan_responses;
344	/* The response didn't have anyone waiting for it. */
345	unsigned int unhandled_lan_responses;
346
347	/* The command was delivered to the user. */
348	unsigned int handled_commands;
349	/* The command had invalid data in it. */
350	unsigned int invalid_commands;
351	/* The command didn't have anyone waiting for it. */
352	unsigned int unhandled_commands;
353
354	/* Invalid data in an event. */
355	unsigned int invalid_events;
356	/* Events that were received with the proper format. */
357	unsigned int events;
358};
359#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
360
361/**
362 * The driver model view of the IPMI messaging driver.
363 */
364static struct device_driver ipmidriver = {
365	.name = "ipmi",
366	.bus = &platform_bus_type
367};
368static DEFINE_MUTEX(ipmidriver_mutex);
369
370static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
371static DEFINE_MUTEX(ipmi_interfaces_mutex);
372
373/* List of watchers that want to know when smi's are added and
374   deleted. */
375static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
376static DEFINE_MUTEX(smi_watchers_mutex);
377
378
379static void free_recv_msg_list(struct list_head *q)
380{
381	struct ipmi_recv_msg *msg, *msg2;
382
383	list_for_each_entry_safe(msg, msg2, q, link) {
384		list_del(&msg->link);
385		ipmi_free_recv_msg(msg);
386	}
387}
388
389static void free_smi_msg_list(struct list_head *q)
390{
391	struct ipmi_smi_msg *msg, *msg2;
392
393	list_for_each_entry_safe(msg, msg2, q, link) {
394		list_del(&msg->link);
395		ipmi_free_smi_msg(msg);
396	}
397}
398
399static void clean_up_interface_data(ipmi_smi_t intf)
400{
401	int              i;
402	struct cmd_rcvr  *rcvr, *rcvr2;
403	struct list_head list;
404
405	free_smi_msg_list(&intf->waiting_msgs);
406	free_recv_msg_list(&intf->waiting_events);
407
408	/*
409	 * Wholesale remove all the entries from the list in the
410	 * interface and wait for RCU to know that none are in use.
411	 */
412	mutex_lock(&intf->cmd_rcvrs_mutex);
413	INIT_LIST_HEAD(&list);
414	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
415	mutex_unlock(&intf->cmd_rcvrs_mutex);
416
417	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
418		kfree(rcvr);
419
420	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
421		if ((intf->seq_table[i].inuse)
422		    && (intf->seq_table[i].recv_msg))
423		{
424			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
425		}
426	}
427}
428
429static void intf_free(struct kref *ref)
430{
431	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
432
433	clean_up_interface_data(intf);
434	kfree(intf);
435}
436
437struct watcher_entry {
438	int              intf_num;
439	ipmi_smi_t       intf;
440	struct list_head link;
441};
442
443int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
444{
445	ipmi_smi_t intf;
446	struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
447	struct watcher_entry *e, *e2;
448
449	mutex_lock(&smi_watchers_mutex);
450
451	mutex_lock(&ipmi_interfaces_mutex);
452
453	/* Build a list of things to deliver. */
454	list_for_each_entry(intf, &ipmi_interfaces, link) {
455		if (intf->intf_num == -1)
456			continue;
457		e = kmalloc(sizeof(*e), GFP_KERNEL);
458		if (!e)
459			goto out_err;
460		kref_get(&intf->refcount);
461		e->intf = intf;
462		e->intf_num = intf->intf_num;
463		list_add_tail(&e->link, &to_deliver);
464	}
465
466	/* We will succeed, so add it to the list. */
467	list_add(&watcher->link, &smi_watchers);
468
469	mutex_unlock(&ipmi_interfaces_mutex);
470
471	list_for_each_entry_safe(e, e2, &to_deliver, link) {
472		list_del(&e->link);
473		watcher->new_smi(e->intf_num, e->intf->si_dev);
474		kref_put(&e->intf->refcount, intf_free);
475		kfree(e);
476	}
477
478	mutex_unlock(&smi_watchers_mutex);
479
480	return 0;
481
482 out_err:
483	mutex_unlock(&ipmi_interfaces_mutex);
484	mutex_unlock(&smi_watchers_mutex);
485	list_for_each_entry_safe(e, e2, &to_deliver, link) {
486		list_del(&e->link);
487		kref_put(&e->intf->refcount, intf_free);
488		kfree(e);
489	}
490	return -ENOMEM;
491}
492
493int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
494{
495	mutex_lock(&smi_watchers_mutex);
496	list_del(&(watcher->link));
497	mutex_unlock(&smi_watchers_mutex);
498	return 0;
499}
500
501/*
502 * Must be called with smi_watchers_mutex held.
503 */
504static void
505call_smi_watchers(int i, struct device *dev)
506{
507	struct ipmi_smi_watcher *w;
508
509	list_for_each_entry(w, &smi_watchers, link) {
510		if (try_module_get(w->owner)) {
511			w->new_smi(i, dev);
512			module_put(w->owner);
513		}
514	}
515}
516
517static int
518ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
519{
520	if (addr1->addr_type != addr2->addr_type)
521		return 0;
522
523	if (addr1->channel != addr2->channel)
524		return 0;
525
526	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
527		struct ipmi_system_interface_addr *smi_addr1
528		    = (struct ipmi_system_interface_addr *) addr1;
529		struct ipmi_system_interface_addr *smi_addr2
530		    = (struct ipmi_system_interface_addr *) addr2;
531		return (smi_addr1->lun == smi_addr2->lun);
532	}
533
534	if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
535	    || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
536	{
537		struct ipmi_ipmb_addr *ipmb_addr1
538		    = (struct ipmi_ipmb_addr *) addr1;
539		struct ipmi_ipmb_addr *ipmb_addr2
540		    = (struct ipmi_ipmb_addr *) addr2;
541
542		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
543			&& (ipmb_addr1->lun == ipmb_addr2->lun));
544	}
545
546	if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
547		struct ipmi_lan_addr *lan_addr1
548			= (struct ipmi_lan_addr *) addr1;
549		struct ipmi_lan_addr *lan_addr2
550		    = (struct ipmi_lan_addr *) addr2;
551
552		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
553			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
554			&& (lan_addr1->session_handle
555			    == lan_addr2->session_handle)
556			&& (lan_addr1->lun == lan_addr2->lun));
557	}
558
559	return 1;
560}
561
562int ipmi_validate_addr(struct ipmi_addr *addr, int len)
563{
564	if (len < sizeof(struct ipmi_system_interface_addr)) {
565		return -EINVAL;
566	}
567
568	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
569		if (addr->channel != IPMI_BMC_CHANNEL)
570			return -EINVAL;
571		return 0;
572	}
573
574	if ((addr->channel == IPMI_BMC_CHANNEL)
575	    || (addr->channel >= IPMI_MAX_CHANNELS)
576	    || (addr->channel < 0))
577		return -EINVAL;
578
579	if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
580	    || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
581	{
582		if (len < sizeof(struct ipmi_ipmb_addr)) {
583			return -EINVAL;
584		}
585		return 0;
586	}
587
588	if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
589		if (len < sizeof(struct ipmi_lan_addr)) {
590			return -EINVAL;
591		}
592		return 0;
593	}
594
595	return -EINVAL;
596}
597
598unsigned int ipmi_addr_length(int addr_type)
599{
600	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
601		return sizeof(struct ipmi_system_interface_addr);
602
603	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
604	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
605	{
606		return sizeof(struct ipmi_ipmb_addr);
607	}
608
609	if (addr_type == IPMI_LAN_ADDR_TYPE)
610		return sizeof(struct ipmi_lan_addr);
611
612	return 0;
613}
614
615static void deliver_response(struct ipmi_recv_msg *msg)
616{
617	if (!msg->user) {
618		ipmi_smi_t    intf = msg->user_msg_data;
619		unsigned long flags;
620
621		/* Special handling for NULL users. */
622		if (intf->null_user_handler) {
623			intf->null_user_handler(intf, msg);
624			spin_lock_irqsave(&intf->counter_lock, flags);
625			intf->handled_local_responses++;
626			spin_unlock_irqrestore(&intf->counter_lock, flags);
627		} else {
628			/* No handler, so give up. */
629			spin_lock_irqsave(&intf->counter_lock, flags);
630			intf->unhandled_local_responses++;
631			spin_unlock_irqrestore(&intf->counter_lock, flags);
632		}
633		ipmi_free_recv_msg(msg);
634	} else {
635		ipmi_user_t user = msg->user;
636		user->handler->ipmi_recv_hndl(msg, user->handler_data);
637	}
638}
639
640static void
641deliver_err_response(struct ipmi_recv_msg *msg, int err)
642{
643	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
644	msg->msg_data[0] = err;
645	msg->msg.netfn |= 1; /* Convert to a response. */
646	msg->msg.data_len = 1;
647	msg->msg.data = msg->msg_data;
648	deliver_response(msg);
649}
650
651/* Find the next sequence number not being used and add the given
652   message with the given timeout to the sequence table.  This must be
653   called with the interface's seq_lock held. */
654static int intf_next_seq(ipmi_smi_t           intf,
655			 struct ipmi_recv_msg *recv_msg,
656			 unsigned long        timeout,
657			 int                  retries,
658			 int                  broadcast,
659			 unsigned char        *seq,
660			 long                 *seqid)
661{
662	int          rv = 0;
663	unsigned int i;
664
665	for (i = intf->curr_seq;
666	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
667	     i = (i+1)%IPMI_IPMB_NUM_SEQ)
668	{
669		if (!intf->seq_table[i].inuse)
670			break;
671	}
672
673	if (!intf->seq_table[i].inuse) {
674		intf->seq_table[i].recv_msg = recv_msg;
675
676		/* Start with the maximum timeout, when the send response
677		   comes in we will start the real timer. */
678		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
679		intf->seq_table[i].orig_timeout = timeout;
680		intf->seq_table[i].retries_left = retries;
681		intf->seq_table[i].broadcast = broadcast;
682		intf->seq_table[i].inuse = 1;
683		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
684		*seq = i;
685		*seqid = intf->seq_table[i].seqid;
686		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
687	} else {
688		rv = -EAGAIN;
689	}
690
691	return rv;
692}
693
694/* Return the receive message for the given sequence number and
695   release the sequence number so it can be reused.  Some other data
696   is passed in to be sure the message matches up correctly (to help
697   guard against message coming in after their timeout and the
698   sequence number being reused). */
699static int intf_find_seq(ipmi_smi_t           intf,
700			 unsigned char        seq,
701			 short                channel,
702			 unsigned char        cmd,
703			 unsigned char        netfn,
704			 struct ipmi_addr     *addr,
705			 struct ipmi_recv_msg **recv_msg)
706{
707	int           rv = -ENODEV;
708	unsigned long flags;
709
710	if (seq >= IPMI_IPMB_NUM_SEQ)
711		return -EINVAL;
712
713	spin_lock_irqsave(&(intf->seq_lock), flags);
714	if (intf->seq_table[seq].inuse) {
715		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
716
717		if ((msg->addr.channel == channel)
718		    && (msg->msg.cmd == cmd)
719		    && (msg->msg.netfn == netfn)
720		    && (ipmi_addr_equal(addr, &(msg->addr))))
721		{
722			*recv_msg = msg;
723			intf->seq_table[seq].inuse = 0;
724			rv = 0;
725		}
726	}
727	spin_unlock_irqrestore(&(intf->seq_lock), flags);
728
729	return rv;
730}
731
732
733/* Start the timer for a specific sequence table entry. */
734static int intf_start_seq_timer(ipmi_smi_t intf,
735				long       msgid)
736{
737	int           rv = -ENODEV;
738	unsigned long flags;
739	unsigned char seq;
740	unsigned long seqid;
741
742
743	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
744
745	spin_lock_irqsave(&(intf->seq_lock), flags);
746	/* We do this verification because the user can be deleted
747           while a message is outstanding. */
748	if ((intf->seq_table[seq].inuse)
749	    && (intf->seq_table[seq].seqid == seqid))
750	{
751		struct seq_table *ent = &(intf->seq_table[seq]);
752		ent->timeout = ent->orig_timeout;
753		rv = 0;
754	}
755	spin_unlock_irqrestore(&(intf->seq_lock), flags);
756
757	return rv;
758}
759
760/* Got an error for the send message for a specific sequence number. */
761static int intf_err_seq(ipmi_smi_t   intf,
762			long         msgid,
763			unsigned int err)
764{
765	int                  rv = -ENODEV;
766	unsigned long        flags;
767	unsigned char        seq;
768	unsigned long        seqid;
769	struct ipmi_recv_msg *msg = NULL;
770
771
772	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
773
774	spin_lock_irqsave(&(intf->seq_lock), flags);
775	/* We do this verification because the user can be deleted
776           while a message is outstanding. */
777	if ((intf->seq_table[seq].inuse)
778	    && (intf->seq_table[seq].seqid == seqid))
779	{
780		struct seq_table *ent = &(intf->seq_table[seq]);
781
782		ent->inuse = 0;
783		msg = ent->recv_msg;
784		rv = 0;
785	}
786	spin_unlock_irqrestore(&(intf->seq_lock), flags);
787
788	if (msg)
789		deliver_err_response(msg, err);
790
791	return rv;
792}
793
794
795int ipmi_create_user(unsigned int          if_num,
796		     struct ipmi_user_hndl *handler,
797		     void                  *handler_data,
798		     ipmi_user_t           *user)
799{
800	unsigned long flags;
801	ipmi_user_t   new_user;
802	int           rv = 0;
803	ipmi_smi_t    intf;
804
805	/* There is no module usecount here, because it's not
806           required.  Since this can only be used by and called from
807           other modules, they will implicitly use this module, and
808           thus this can't be removed unless the other modules are
809           removed. */
810
811	if (handler == NULL)
812		return -EINVAL;
813
814	/* Make sure the driver is actually initialized, this handles
815	   problems with initialization order. */
816	if (!initialized) {
817		rv = ipmi_init_msghandler();
818		if (rv)
819			return rv;
820
821		/* The init code doesn't return an error if it was turned
822		   off, but it won't initialize.  Check that. */
823		if (!initialized)
824			return -ENODEV;
825	}
826
827	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
828	if (!new_user)
829		return -ENOMEM;
830
831	mutex_lock(&ipmi_interfaces_mutex);
832	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
833		if (intf->intf_num == if_num)
834			goto found;
835	}
836	/* Not found, return an error */
837	rv = -EINVAL;
838	goto out_kfree;
839
840 found:
841	/* Note that each existing user holds a refcount to the interface. */
842	kref_get(&intf->refcount);
843
844	kref_init(&new_user->refcount);
845	new_user->handler = handler;
846	new_user->handler_data = handler_data;
847	new_user->intf = intf;
848	new_user->gets_events = 0;
849
850	if (!try_module_get(intf->handlers->owner)) {
851		rv = -ENODEV;
852		goto out_kref;
853	}
854
855	if (intf->handlers->inc_usecount) {
856		rv = intf->handlers->inc_usecount(intf->send_info);
857		if (rv) {
858			module_put(intf->handlers->owner);
859			goto out_kref;
860		}
861	}
862
863	/* Hold the lock so intf->handlers is guaranteed to be good
864	 * until now */
865	mutex_unlock(&ipmi_interfaces_mutex);
866
867	new_user->valid = 1;
868	spin_lock_irqsave(&intf->seq_lock, flags);
869	list_add_rcu(&new_user->link, &intf->users);
870	spin_unlock_irqrestore(&intf->seq_lock, flags);
871	*user = new_user;
872	return 0;
873
874out_kref:
875	kref_put(&intf->refcount, intf_free);
876out_kfree:
877	mutex_unlock(&ipmi_interfaces_mutex);
878	kfree(new_user);
879	return rv;
880}
881
882static void free_user(struct kref *ref)
883{
884	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
885	kfree(user);
886}
887
888int ipmi_destroy_user(ipmi_user_t user)
889{
890	ipmi_smi_t       intf = user->intf;
891	int              i;
892	unsigned long    flags;
893	struct cmd_rcvr  *rcvr;
894	struct cmd_rcvr  *rcvrs = NULL;
895
896	user->valid = 0;
897
898	/* Remove the user from the interface's sequence table. */
899	spin_lock_irqsave(&intf->seq_lock, flags);
900	list_del_rcu(&user->link);
901
902	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
903		if (intf->seq_table[i].inuse
904		    && (intf->seq_table[i].recv_msg->user == user))
905		{
906			intf->seq_table[i].inuse = 0;
907			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
908		}
909	}
910	spin_unlock_irqrestore(&intf->seq_lock, flags);
911
912	/*
913	 * Remove the user from the command receiver's table.  First
914	 * we build a list of everything (not using the standard link,
915	 * since other things may be using it till we do
916	 * synchronize_rcu()) then free everything in that list.
917	 */
918	mutex_lock(&intf->cmd_rcvrs_mutex);
919	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
920		if (rcvr->user == user) {
921			list_del_rcu(&rcvr->link);
922			rcvr->next = rcvrs;
923			rcvrs = rcvr;
924		}
925	}
926	mutex_unlock(&intf->cmd_rcvrs_mutex);
927	synchronize_rcu();
928	while (rcvrs) {
929		rcvr = rcvrs;
930		rcvrs = rcvr->next;
931		kfree(rcvr);
932	}
933
934	mutex_lock(&ipmi_interfaces_mutex);
935	if (intf->handlers) {
936		module_put(intf->handlers->owner);
937		if (intf->handlers->dec_usecount)
938			intf->handlers->dec_usecount(intf->send_info);
939	}
940	mutex_unlock(&ipmi_interfaces_mutex);
941
942	kref_put(&intf->refcount, intf_free);
943
944	kref_put(&user->refcount, free_user);
945
946	return 0;
947}
948
949void ipmi_get_version(ipmi_user_t   user,
950		      unsigned char *major,
951		      unsigned char *minor)
952{
953	*major = user->intf->ipmi_version_major;
954	*minor = user->intf->ipmi_version_minor;
955}
956
957int ipmi_set_my_address(ipmi_user_t   user,
958			unsigned int  channel,
959			unsigned char address)
960{
961	if (channel >= IPMI_MAX_CHANNELS)
962		return -EINVAL;
963	user->intf->channels[channel].address = address;
964	return 0;
965}
966
967int ipmi_get_my_address(ipmi_user_t   user,
968			unsigned int  channel,
969			unsigned char *address)
970{
971	if (channel >= IPMI_MAX_CHANNELS)
972		return -EINVAL;
973	*address = user->intf->channels[channel].address;
974	return 0;
975}
976
977int ipmi_set_my_LUN(ipmi_user_t   user,
978		    unsigned int  channel,
979		    unsigned char LUN)
980{
981	if (channel >= IPMI_MAX_CHANNELS)
982		return -EINVAL;
983	user->intf->channels[channel].lun = LUN & 0x3;
984	return 0;
985}
986
987int ipmi_get_my_LUN(ipmi_user_t   user,
988		    unsigned int  channel,
989		    unsigned char *address)
990{
991	if (channel >= IPMI_MAX_CHANNELS)
992		return -EINVAL;
993	*address = user->intf->channels[channel].lun;
994	return 0;
995}
996
997int ipmi_get_maintenance_mode(ipmi_user_t user)
998{
999	int           mode;
1000	unsigned long flags;
1001
1002	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1003	mode = user->intf->maintenance_mode;
1004	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1005
1006	return mode;
1007}
1008EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1009
1010static void maintenance_mode_update(ipmi_smi_t intf)
1011{
1012	if (intf->handlers->set_maintenance_mode)
1013		intf->handlers->set_maintenance_mode(
1014			intf->send_info, intf->maintenance_mode_enable);
1015}
1016
1017int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1018{
1019	int           rv = 0;
1020	unsigned long flags;
1021	ipmi_smi_t    intf = user->intf;
1022
1023	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1024	if (intf->maintenance_mode != mode) {
1025		switch (mode) {
1026		case IPMI_MAINTENANCE_MODE_AUTO:
1027			intf->maintenance_mode = mode;
1028			intf->maintenance_mode_enable
1029				= (intf->auto_maintenance_timeout > 0);
1030			break;
1031
1032		case IPMI_MAINTENANCE_MODE_OFF:
1033			intf->maintenance_mode = mode;
1034			intf->maintenance_mode_enable = 0;
1035			break;
1036
1037		case IPMI_MAINTENANCE_MODE_ON:
1038			intf->maintenance_mode = mode;
1039			intf->maintenance_mode_enable = 1;
1040			break;
1041
1042		default:
1043			rv = -EINVAL;
1044			goto out_unlock;
1045		}
1046
1047		maintenance_mode_update(intf);
1048	}
1049 out_unlock:
1050	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1051
1052	return rv;
1053}
1054EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1055
1056int ipmi_set_gets_events(ipmi_user_t user, int val)
1057{
1058	unsigned long        flags;
1059	ipmi_smi_t           intf = user->intf;
1060	struct ipmi_recv_msg *msg, *msg2;
1061	struct list_head     msgs;
1062
1063	INIT_LIST_HEAD(&msgs);
1064
1065	spin_lock_irqsave(&intf->events_lock, flags);
1066	user->gets_events = val;
1067
1068	if (intf->delivering_events)
1069		/*
1070		 * Another thread is delivering events for this, so
1071		 * let it handle any new events.
1072		 */
1073		goto out;
1074
1075	/* Deliver any queued events. */
1076	while (user->gets_events && !list_empty(&intf->waiting_events)) {
1077		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1078			list_move_tail(&msg->link, &msgs);
1079		intf->waiting_events_count = 0;
1080
1081		intf->delivering_events = 1;
1082		spin_unlock_irqrestore(&intf->events_lock, flags);
1083
1084		list_for_each_entry_safe(msg, msg2, &msgs, link) {
1085			msg->user = user;
1086			kref_get(&user->refcount);
1087			deliver_response(msg);
1088		}
1089
1090		spin_lock_irqsave(&intf->events_lock, flags);
1091		intf->delivering_events = 0;
1092	}
1093
1094 out:
1095	spin_unlock_irqrestore(&intf->events_lock, flags);
1096
1097	return 0;
1098}
1099
1100static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
1101				      unsigned char netfn,
1102				      unsigned char cmd,
1103				      unsigned char chan)
1104{
1105	struct cmd_rcvr *rcvr;
1106
1107	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1108		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1109					&& (rcvr->chans & (1 << chan)))
1110			return rcvr;
1111	}
1112	return NULL;
1113}
1114
1115static int is_cmd_rcvr_exclusive(ipmi_smi_t    intf,
1116				 unsigned char netfn,
1117				 unsigned char cmd,
1118				 unsigned int  chans)
1119{
1120	struct cmd_rcvr *rcvr;
1121
1122	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1123		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1124					&& (rcvr->chans & chans))
1125			return 0;
1126	}
1127	return 1;
1128}
1129
1130int ipmi_register_for_cmd(ipmi_user_t   user,
1131			  unsigned char netfn,
1132			  unsigned char cmd,
1133			  unsigned int  chans)
1134{
1135	ipmi_smi_t      intf = user->intf;
1136	struct cmd_rcvr *rcvr;
1137	int             rv = 0;
1138
1139
1140	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1141	if (!rcvr)
1142		return -ENOMEM;
1143	rcvr->cmd = cmd;
1144	rcvr->netfn = netfn;
1145	rcvr->chans = chans;
1146	rcvr->user = user;
1147
1148	mutex_lock(&intf->cmd_rcvrs_mutex);
1149	/* Make sure the command/netfn is not already registered. */
1150	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1151		rv = -EBUSY;
1152		goto out_unlock;
1153	}
1154
1155	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1156
1157 out_unlock:
1158	mutex_unlock(&intf->cmd_rcvrs_mutex);
1159	if (rv)
1160		kfree(rcvr);
1161
1162	return rv;
1163}
1164
1165int ipmi_unregister_for_cmd(ipmi_user_t   user,
1166			    unsigned char netfn,
1167			    unsigned char cmd,
1168			    unsigned int  chans)
1169{
1170	ipmi_smi_t      intf = user->intf;
1171	struct cmd_rcvr *rcvr;
1172	struct cmd_rcvr *rcvrs = NULL;
1173	int i, rv = -ENOENT;
1174
1175	mutex_lock(&intf->cmd_rcvrs_mutex);
1176	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1177		if (((1 << i) & chans) == 0)
1178			continue;
1179		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1180		if (rcvr == NULL)
1181			continue;
1182		if (rcvr->user == user) {
1183			rv = 0;
1184			rcvr->chans &= ~chans;
1185			if (rcvr->chans == 0) {
1186				list_del_rcu(&rcvr->link);
1187				rcvr->next = rcvrs;
1188				rcvrs = rcvr;
1189			}
1190		}
1191	}
1192	mutex_unlock(&intf->cmd_rcvrs_mutex);
1193	synchronize_rcu();
1194	while (rcvrs) {
1195		rcvr = rcvrs;
1196		rcvrs = rcvr->next;
1197		kfree(rcvr);
1198	}
1199	return rv;
1200}
1201
1202void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1203{
1204	ipmi_smi_t intf = user->intf;
1205	if (intf->handlers)
1206		intf->handlers->set_run_to_completion(intf->send_info, val);
1207}
1208
1209static unsigned char
1210ipmb_checksum(unsigned char *data, int size)
1211{
1212	unsigned char csum = 0;
1213
1214	for (; size > 0; size--, data++)
1215		csum += *data;
1216
1217	return -csum;
1218}
1219
1220static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1221				   struct kernel_ipmi_msg *msg,
1222				   struct ipmi_ipmb_addr *ipmb_addr,
1223				   long                  msgid,
1224				   unsigned char         ipmb_seq,
1225				   int                   broadcast,
1226				   unsigned char         source_address,
1227				   unsigned char         source_lun)
1228{
1229	int i = broadcast;
1230
1231	/* Format the IPMB header data. */
1232	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1233	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1234	smi_msg->data[2] = ipmb_addr->channel;
1235	if (broadcast)
1236		smi_msg->data[3] = 0;
1237	smi_msg->data[i+3] = ipmb_addr->slave_addr;
1238	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1239	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1240	smi_msg->data[i+6] = source_address;
1241	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1242	smi_msg->data[i+8] = msg->cmd;
1243
1244	/* Now tack on the data to the message. */
1245	if (msg->data_len > 0)
1246		memcpy(&(smi_msg->data[i+9]), msg->data,
1247		       msg->data_len);
1248	smi_msg->data_size = msg->data_len + 9;
1249
1250	/* Now calculate the checksum and tack it on. */
1251	smi_msg->data[i+smi_msg->data_size]
1252		= ipmb_checksum(&(smi_msg->data[i+6]),
1253				smi_msg->data_size-6);
1254
1255	/* Add on the checksum size and the offset from the
1256	   broadcast. */
1257	smi_msg->data_size += 1 + i;
1258
1259	smi_msg->msgid = msgid;
1260}
1261
1262static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1263				  struct kernel_ipmi_msg *msg,
1264				  struct ipmi_lan_addr  *lan_addr,
1265				  long                  msgid,
1266				  unsigned char         ipmb_seq,
1267				  unsigned char         source_lun)
1268{
1269	/* Format the IPMB header data. */
1270	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1271	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1272	smi_msg->data[2] = lan_addr->channel;
1273	smi_msg->data[3] = lan_addr->session_handle;
1274	smi_msg->data[4] = lan_addr->remote_SWID;
1275	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1276	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1277	smi_msg->data[7] = lan_addr->local_SWID;
1278	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1279	smi_msg->data[9] = msg->cmd;
1280
1281	/* Now tack on the data to the message. */
1282	if (msg->data_len > 0)
1283		memcpy(&(smi_msg->data[10]), msg->data,
1284		       msg->data_len);
1285	smi_msg->data_size = msg->data_len + 10;
1286
1287	/* Now calculate the checksum and tack it on. */
1288	smi_msg->data[smi_msg->data_size]
1289		= ipmb_checksum(&(smi_msg->data[7]),
1290				smi_msg->data_size-7);
1291
1292	/* Add on the checksum size and the offset from the
1293	   broadcast. */
1294	smi_msg->data_size += 1;
1295
1296	smi_msg->msgid = msgid;
1297}
1298
1299/* Separate from ipmi_request so that the user does not have to be
1300   supplied in certain circumstances (mainly at panic time).  If
1301   messages are supplied, they will be freed, even if an error
1302   occurs. */
1303static int i_ipmi_request(ipmi_user_t          user,
1304			  ipmi_smi_t           intf,
1305			  struct ipmi_addr     *addr,
1306			  long                 msgid,
1307			  struct kernel_ipmi_msg *msg,
1308			  void                 *user_msg_data,
1309			  void                 *supplied_smi,
1310			  struct ipmi_recv_msg *supplied_recv,
1311			  int                  priority,
1312			  unsigned char        source_address,
1313			  unsigned char        source_lun,
1314			  int                  retries,
1315			  unsigned int         retry_time_ms)
1316{
1317	int                      rv = 0;
1318	struct ipmi_smi_msg      *smi_msg;
1319	struct ipmi_recv_msg     *recv_msg;
1320	unsigned long            flags;
1321	struct ipmi_smi_handlers *handlers;
1322
1323
1324	if (supplied_recv) {
1325		recv_msg = supplied_recv;
1326	} else {
1327		recv_msg = ipmi_alloc_recv_msg();
1328		if (recv_msg == NULL) {
1329			return -ENOMEM;
1330		}
1331	}
1332	recv_msg->user_msg_data = user_msg_data;
1333
1334	if (supplied_smi) {
1335		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1336	} else {
1337		smi_msg = ipmi_alloc_smi_msg();
1338		if (smi_msg == NULL) {
1339			ipmi_free_recv_msg(recv_msg);
1340			return -ENOMEM;
1341		}
1342	}
1343
1344	rcu_read_lock();
1345	handlers = intf->handlers;
1346	if (!handlers) {
1347		rv = -ENODEV;
1348		goto out_err;
1349	}
1350
1351	recv_msg->user = user;
1352	if (user)
1353		kref_get(&user->refcount);
1354	recv_msg->msgid = msgid;
1355	/* Store the message to send in the receive message so timeout
1356	   responses can get the proper response data. */
1357	recv_msg->msg = *msg;
1358
1359	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1360		struct ipmi_system_interface_addr *smi_addr;
1361
1362		if (msg->netfn & 1) {
1363			/* Responses are not allowed to the SMI. */
1364			rv = -EINVAL;
1365			goto out_err;
1366		}
1367
1368		smi_addr = (struct ipmi_system_interface_addr *) addr;
1369		if (smi_addr->lun > 3) {
1370			spin_lock_irqsave(&intf->counter_lock, flags);
1371			intf->sent_invalid_commands++;
1372			spin_unlock_irqrestore(&intf->counter_lock, flags);
1373			rv = -EINVAL;
1374			goto out_err;
1375		}
1376
1377		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1378
1379		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1380		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
1381			|| (msg->cmd == IPMI_GET_MSG_CMD)
1382			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1383		{
1384			/* We don't let the user do these, since we manage
1385			   the sequence numbers. */
1386			spin_lock_irqsave(&intf->counter_lock, flags);
1387			intf->sent_invalid_commands++;
1388			spin_unlock_irqrestore(&intf->counter_lock, flags);
1389			rv = -EINVAL;
1390			goto out_err;
1391		}
1392
1393		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1394		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
1395			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
1396		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1397		{
1398			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1399			intf->auto_maintenance_timeout
1400				= IPMI_MAINTENANCE_MODE_TIMEOUT;
1401			if (!intf->maintenance_mode
1402			    && !intf->maintenance_mode_enable)
1403			{
1404				intf->maintenance_mode_enable = 1;
1405				maintenance_mode_update(intf);
1406			}
1407			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1408					       flags);
1409		}
1410
1411		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1412			spin_lock_irqsave(&intf->counter_lock, flags);
1413			intf->sent_invalid_commands++;
1414			spin_unlock_irqrestore(&intf->counter_lock, flags);
1415			rv = -EMSGSIZE;
1416			goto out_err;
1417		}
1418
1419		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1420		smi_msg->data[1] = msg->cmd;
1421		smi_msg->msgid = msgid;
1422		smi_msg->user_data = recv_msg;
1423		if (msg->data_len > 0)
1424			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1425		smi_msg->data_size = msg->data_len + 2;
1426		spin_lock_irqsave(&intf->counter_lock, flags);
1427		intf->sent_local_commands++;
1428		spin_unlock_irqrestore(&intf->counter_lock, flags);
1429	} else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1430		   || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1431	{
1432		struct ipmi_ipmb_addr *ipmb_addr;
1433		unsigned char         ipmb_seq;
1434		long                  seqid;
1435		int                   broadcast = 0;
1436
1437		if (addr->channel >= IPMI_MAX_CHANNELS) {
1438		        spin_lock_irqsave(&intf->counter_lock, flags);
1439			intf->sent_invalid_commands++;
1440			spin_unlock_irqrestore(&intf->counter_lock, flags);
1441			rv = -EINVAL;
1442			goto out_err;
1443		}
1444
1445		if (intf->channels[addr->channel].medium
1446		    != IPMI_CHANNEL_MEDIUM_IPMB)
1447		{
1448			spin_lock_irqsave(&intf->counter_lock, flags);
1449			intf->sent_invalid_commands++;
1450			spin_unlock_irqrestore(&intf->counter_lock, flags);
1451			rv = -EINVAL;
1452			goto out_err;
1453		}
1454
1455		if (retries < 0) {
1456		    if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1457			retries = 0; /* Don't retry broadcasts. */
1458		    else
1459			retries = 4;
1460		}
1461		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1462		    /* Broadcasts add a zero at the beginning of the
1463		       message, but otherwise is the same as an IPMB
1464		       address. */
1465		    addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1466		    broadcast = 1;
1467		}
1468
1469
1470		/* Default to 1 second retries. */
1471		if (retry_time_ms == 0)
1472		    retry_time_ms = 1000;
1473
1474		/* 9 for the header and 1 for the checksum, plus
1475                   possibly one for the broadcast. */
1476		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1477			spin_lock_irqsave(&intf->counter_lock, flags);
1478			intf->sent_invalid_commands++;
1479			spin_unlock_irqrestore(&intf->counter_lock, flags);
1480			rv = -EMSGSIZE;
1481			goto out_err;
1482		}
1483
1484		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1485		if (ipmb_addr->lun > 3) {
1486			spin_lock_irqsave(&intf->counter_lock, flags);
1487			intf->sent_invalid_commands++;
1488			spin_unlock_irqrestore(&intf->counter_lock, flags);
1489			rv = -EINVAL;
1490			goto out_err;
1491		}
1492
1493		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1494
1495		if (recv_msg->msg.netfn & 0x1) {
1496			/* It's a response, so use the user's sequence
1497                           from msgid. */
1498			spin_lock_irqsave(&intf->counter_lock, flags);
1499			intf->sent_ipmb_responses++;
1500			spin_unlock_irqrestore(&intf->counter_lock, flags);
1501			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1502					msgid, broadcast,
1503					source_address, source_lun);
1504
1505			/* Save the receive message so we can use it
1506			   to deliver the response. */
1507			smi_msg->user_data = recv_msg;
1508		} else {
1509			/* It's a command, so get a sequence for it. */
1510
1511			spin_lock_irqsave(&(intf->seq_lock), flags);
1512
1513			spin_lock(&intf->counter_lock);
1514			intf->sent_ipmb_commands++;
1515			spin_unlock(&intf->counter_lock);
1516
1517			/* Create a sequence number with a 1 second
1518                           timeout and 4 retries. */
1519			rv = intf_next_seq(intf,
1520					   recv_msg,
1521					   retry_time_ms,
1522					   retries,
1523					   broadcast,
1524					   &ipmb_seq,
1525					   &seqid);
1526			if (rv) {
1527				/* We have used up all the sequence numbers,
1528				   probably, so abort. */
1529				spin_unlock_irqrestore(&(intf->seq_lock),
1530						       flags);
1531				goto out_err;
1532			}
1533
1534			/* Store the sequence number in the message,
1535                           so that when the send message response
1536                           comes back we can start the timer. */
1537			format_ipmb_msg(smi_msg, msg, ipmb_addr,
1538					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1539					ipmb_seq, broadcast,
1540					source_address, source_lun);
1541
1542			/* Copy the message into the recv message data, so we
1543			   can retransmit it later if necessary. */
1544			memcpy(recv_msg->msg_data, smi_msg->data,
1545			       smi_msg->data_size);
1546			recv_msg->msg.data = recv_msg->msg_data;
1547			recv_msg->msg.data_len = smi_msg->data_size;
1548
1549			/* We don't unlock until here, because we need
1550                           to copy the completed message into the
1551                           recv_msg before we release the lock.
1552                           Otherwise, race conditions may bite us.  I
1553                           know that's pretty paranoid, but I prefer
1554                           to be correct. */
1555			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1556		}
1557	} else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1558		struct ipmi_lan_addr  *lan_addr;
1559		unsigned char         ipmb_seq;
1560		long                  seqid;
1561
1562		if (addr->channel >= IPMI_MAX_CHANNELS) {
1563			spin_lock_irqsave(&intf->counter_lock, flags);
1564			intf->sent_invalid_commands++;
1565			spin_unlock_irqrestore(&intf->counter_lock, flags);
1566			rv = -EINVAL;
1567			goto out_err;
1568		}
1569
1570		if ((intf->channels[addr->channel].medium
1571		    != IPMI_CHANNEL_MEDIUM_8023LAN)
1572		    && (intf->channels[addr->channel].medium
1573			!= IPMI_CHANNEL_MEDIUM_ASYNC))
1574		{
1575			spin_lock_irqsave(&intf->counter_lock, flags);
1576			intf->sent_invalid_commands++;
1577			spin_unlock_irqrestore(&intf->counter_lock, flags);
1578			rv = -EINVAL;
1579			goto out_err;
1580		}
1581
1582		retries = 4;
1583
1584		/* Default to 1 second retries. */
1585		if (retry_time_ms == 0)
1586		    retry_time_ms = 1000;
1587
1588		/* 11 for the header and 1 for the checksum. */
1589		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1590			spin_lock_irqsave(&intf->counter_lock, flags);
1591			intf->sent_invalid_commands++;
1592			spin_unlock_irqrestore(&intf->counter_lock, flags);
1593			rv = -EMSGSIZE;
1594			goto out_err;
1595		}
1596
1597		lan_addr = (struct ipmi_lan_addr *) addr;
1598		if (lan_addr->lun > 3) {
1599			spin_lock_irqsave(&intf->counter_lock, flags);
1600			intf->sent_invalid_commands++;
1601			spin_unlock_irqrestore(&intf->counter_lock, flags);
1602			rv = -EINVAL;
1603			goto out_err;
1604		}
1605
1606		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1607
1608		if (recv_msg->msg.netfn & 0x1) {
1609			/* It's a response, so use the user's sequence
1610                           from msgid. */
1611			spin_lock_irqsave(&intf->counter_lock, flags);
1612			intf->sent_lan_responses++;
1613			spin_unlock_irqrestore(&intf->counter_lock, flags);
1614			format_lan_msg(smi_msg, msg, lan_addr, msgid,
1615				       msgid, source_lun);
1616
1617			/* Save the receive message so we can use it
1618			   to deliver the response. */
1619			smi_msg->user_data = recv_msg;
1620		} else {
1621			/* It's a command, so get a sequence for it. */
1622
1623			spin_lock_irqsave(&(intf->seq_lock), flags);
1624
1625			spin_lock(&intf->counter_lock);
1626			intf->sent_lan_commands++;
1627			spin_unlock(&intf->counter_lock);
1628
1629			/* Create a sequence number with a 1 second
1630                           timeout and 4 retries. */
1631			rv = intf_next_seq(intf,
1632					   recv_msg,
1633					   retry_time_ms,
1634					   retries,
1635					   0,
1636					   &ipmb_seq,
1637					   &seqid);
1638			if (rv) {
1639				/* We have used up all the sequence numbers,
1640				   probably, so abort. */
1641				spin_unlock_irqrestore(&(intf->seq_lock),
1642						       flags);
1643				goto out_err;
1644			}
1645
1646			/* Store the sequence number in the message,
1647                           so that when the send message response
1648                           comes back we can start the timer. */
1649			format_lan_msg(smi_msg, msg, lan_addr,
1650				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1651				       ipmb_seq, source_lun);
1652
1653			/* Copy the message into the recv message data, so we
1654			   can retransmit it later if necessary. */
1655			memcpy(recv_msg->msg_data, smi_msg->data,
1656			       smi_msg->data_size);
1657			recv_msg->msg.data = recv_msg->msg_data;
1658			recv_msg->msg.data_len = smi_msg->data_size;
1659
1660			/* We don't unlock until here, because we need
1661                           to copy the completed message into the
1662                           recv_msg before we release the lock.
1663                           Otherwise, race conditions may bite us.  I
1664                           know that's pretty paranoid, but I prefer
1665                           to be correct. */
1666			spin_unlock_irqrestore(&(intf->seq_lock), flags);
1667		}
1668	} else {
1669	    /* Unknown address type. */
1670		spin_lock_irqsave(&intf->counter_lock, flags);
1671		intf->sent_invalid_commands++;
1672		spin_unlock_irqrestore(&intf->counter_lock, flags);
1673		rv = -EINVAL;
1674		goto out_err;
1675	}
1676
1677#ifdef DEBUG_MSGING
1678	{
1679		int m;
1680		for (m = 0; m < smi_msg->data_size; m++)
1681			printk(" %2.2x", smi_msg->data[m]);
1682		printk("\n");
1683	}
1684#endif
1685
1686	handlers->sender(intf->send_info, smi_msg, priority);
1687	rcu_read_unlock();
1688
1689	return 0;
1690
1691 out_err:
1692	rcu_read_unlock();
1693	ipmi_free_smi_msg(smi_msg);
1694	ipmi_free_recv_msg(recv_msg);
1695	return rv;
1696}
1697
1698static int check_addr(ipmi_smi_t       intf,
1699		      struct ipmi_addr *addr,
1700		      unsigned char    *saddr,
1701		      unsigned char    *lun)
1702{
1703	if (addr->channel >= IPMI_MAX_CHANNELS)
1704		return -EINVAL;
1705	*lun = intf->channels[addr->channel].lun;
1706	*saddr = intf->channels[addr->channel].address;
1707	return 0;
1708}
1709
1710int ipmi_request_settime(ipmi_user_t      user,
1711			 struct ipmi_addr *addr,
1712			 long             msgid,
1713			 struct kernel_ipmi_msg  *msg,
1714			 void             *user_msg_data,
1715			 int              priority,
1716			 int              retries,
1717			 unsigned int     retry_time_ms)
1718{
1719	unsigned char saddr, lun;
1720	int           rv;
1721
1722	if (!user)
1723		return -EINVAL;
1724	rv = check_addr(user->intf, addr, &saddr, &lun);
1725	if (rv)
1726		return rv;
1727	return i_ipmi_request(user,
1728			      user->intf,
1729			      addr,
1730			      msgid,
1731			      msg,
1732			      user_msg_data,
1733			      NULL, NULL,
1734			      priority,
1735			      saddr,
1736			      lun,
1737			      retries,
1738			      retry_time_ms);
1739}
1740
1741int ipmi_request_supply_msgs(ipmi_user_t          user,
1742			     struct ipmi_addr     *addr,
1743			     long                 msgid,
1744			     struct kernel_ipmi_msg *msg,
1745			     void                 *user_msg_data,
1746			     void                 *supplied_smi,
1747			     struct ipmi_recv_msg *supplied_recv,
1748			     int                  priority)
1749{
1750	unsigned char saddr, lun;
1751	int           rv;
1752
1753	if (!user)
1754		return -EINVAL;
1755	rv = check_addr(user->intf, addr, &saddr, &lun);
1756	if (rv)
1757		return rv;
1758	return i_ipmi_request(user,
1759			      user->intf,
1760			      addr,
1761			      msgid,
1762			      msg,
1763			      user_msg_data,
1764			      supplied_smi,
1765			      supplied_recv,
1766			      priority,
1767			      saddr,
1768			      lun,
1769			      -1, 0);
1770}
1771
1772#ifdef CONFIG_PROC_FS
1773static int ipmb_file_read_proc(char *page, char **start, off_t off,
1774			       int count, int *eof, void *data)
1775{
1776	char       *out = (char *) page;
1777	ipmi_smi_t intf = data;
1778	int        i;
1779	int        rv = 0;
1780
1781	for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1782		rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1783	out[rv-1] = '\n'; /* Replace the final space with a newline */
1784	out[rv] = '\0';
1785	rv++;
1786	return rv;
1787}
1788
1789static int version_file_read_proc(char *page, char **start, off_t off,
1790				  int count, int *eof, void *data)
1791{
1792	char       *out = (char *) page;
1793	ipmi_smi_t intf = data;
1794
1795	return sprintf(out, "%d.%d\n",
1796		       ipmi_version_major(&intf->bmc->id),
1797		       ipmi_version_minor(&intf->bmc->id));
1798}
1799
1800static int stat_file_read_proc(char *page, char **start, off_t off,
1801			       int count, int *eof, void *data)
1802{
1803	char       *out = (char *) page;
1804	ipmi_smi_t intf = data;
1805
1806	out += sprintf(out, "sent_invalid_commands:       %d\n",
1807		       intf->sent_invalid_commands);
1808	out += sprintf(out, "sent_local_commands:         %d\n",
1809		       intf->sent_local_commands);
1810	out += sprintf(out, "handled_local_responses:     %d\n",
1811		       intf->handled_local_responses);
1812	out += sprintf(out, "unhandled_local_responses:   %d\n",
1813		       intf->unhandled_local_responses);
1814	out += sprintf(out, "sent_ipmb_commands:          %d\n",
1815		       intf->sent_ipmb_commands);
1816	out += sprintf(out, "sent_ipmb_command_errs:      %d\n",
1817		       intf->sent_ipmb_command_errs);
1818	out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1819		       intf->retransmitted_ipmb_commands);
1820	out += sprintf(out, "timed_out_ipmb_commands:     %d\n",
1821		       intf->timed_out_ipmb_commands);
1822	out += sprintf(out, "timed_out_ipmb_broadcasts:   %d\n",
1823		       intf->timed_out_ipmb_broadcasts);
1824	out += sprintf(out, "sent_ipmb_responses:         %d\n",
1825		       intf->sent_ipmb_responses);
1826	out += sprintf(out, "handled_ipmb_responses:      %d\n",
1827		       intf->handled_ipmb_responses);
1828	out += sprintf(out, "invalid_ipmb_responses:      %d\n",
1829		       intf->invalid_ipmb_responses);
1830	out += sprintf(out, "unhandled_ipmb_responses:    %d\n",
1831		       intf->unhandled_ipmb_responses);
1832	out += sprintf(out, "sent_lan_commands:           %d\n",
1833		       intf->sent_lan_commands);
1834	out += sprintf(out, "sent_lan_command_errs:       %d\n",
1835		       intf->sent_lan_command_errs);
1836	out += sprintf(out, "retransmitted_lan_commands:  %d\n",
1837		       intf->retransmitted_lan_commands);
1838	out += sprintf(out, "timed_out_lan_commands:      %d\n",
1839		       intf->timed_out_lan_commands);
1840	out += sprintf(out, "sent_lan_responses:          %d\n",
1841		       intf->sent_lan_responses);
1842	out += sprintf(out, "handled_lan_responses:       %d\n",
1843		       intf->handled_lan_responses);
1844	out += sprintf(out, "invalid_lan_responses:       %d\n",
1845		       intf->invalid_lan_responses);
1846	out += sprintf(out, "unhandled_lan_responses:     %d\n",
1847		       intf->unhandled_lan_responses);
1848	out += sprintf(out, "handled_commands:            %d\n",
1849		       intf->handled_commands);
1850	out += sprintf(out, "invalid_commands:            %d\n",
1851		       intf->invalid_commands);
1852	out += sprintf(out, "unhandled_commands:          %d\n",
1853		       intf->unhandled_commands);
1854	out += sprintf(out, "invalid_events:              %d\n",
1855		       intf->invalid_events);
1856	out += sprintf(out, "events:                      %d\n",
1857		       intf->events);
1858
1859	return (out - ((char *) page));
1860}
1861#endif /* CONFIG_PROC_FS */
1862
1863int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1864			    read_proc_t *read_proc, write_proc_t *write_proc,
1865			    void *data, struct module *owner)
1866{
1867	int                    rv = 0;
1868#ifdef CONFIG_PROC_FS
1869	struct proc_dir_entry  *file;
1870	struct ipmi_proc_entry *entry;
1871
1872	/* Create a list element. */
1873	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1874	if (!entry)
1875		return -ENOMEM;
1876	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1877	if (!entry->name) {
1878		kfree(entry);
1879		return -ENOMEM;
1880	}
1881	strcpy(entry->name, name);
1882
1883	file = create_proc_entry(name, 0, smi->proc_dir);
1884	if (!file) {
1885		kfree(entry->name);
1886		kfree(entry);
1887		rv = -ENOMEM;
1888	} else {
1889		file->data = data;
1890		file->read_proc = read_proc;
1891		file->write_proc = write_proc;
1892		file->owner = owner;
1893
1894		spin_lock(&smi->proc_entry_lock);
1895		/* Stick it on the list. */
1896		entry->next = smi->proc_entries;
1897		smi->proc_entries = entry;
1898		spin_unlock(&smi->proc_entry_lock);
1899	}
1900#endif /* CONFIG_PROC_FS */
1901
1902	return rv;
1903}
1904
1905static int add_proc_entries(ipmi_smi_t smi, int num)
1906{
1907	int rv = 0;
1908
1909#ifdef CONFIG_PROC_FS
1910	sprintf(smi->proc_dir_name, "%d", num);
1911	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1912	if (!smi->proc_dir)
1913		rv = -ENOMEM;
1914	else {
1915		smi->proc_dir->owner = THIS_MODULE;
1916	}
1917
1918	if (rv == 0)
1919		rv = ipmi_smi_add_proc_entry(smi, "stats",
1920					     stat_file_read_proc, NULL,
1921					     smi, THIS_MODULE);
1922
1923	if (rv == 0)
1924		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1925					     ipmb_file_read_proc, NULL,
1926					     smi, THIS_MODULE);
1927
1928	if (rv == 0)
1929		rv = ipmi_smi_add_proc_entry(smi, "version",
1930					     version_file_read_proc, NULL,
1931					     smi, THIS_MODULE);
1932#endif /* CONFIG_PROC_FS */
1933
1934	return rv;
1935}
1936
1937static void remove_proc_entries(ipmi_smi_t smi)
1938{
1939#ifdef CONFIG_PROC_FS
1940	struct ipmi_proc_entry *entry;
1941
1942	spin_lock(&smi->proc_entry_lock);
1943	while (smi->proc_entries) {
1944		entry = smi->proc_entries;
1945		smi->proc_entries = entry->next;
1946
1947		remove_proc_entry(entry->name, smi->proc_dir);
1948		kfree(entry->name);
1949		kfree(entry);
1950	}
1951	spin_unlock(&smi->proc_entry_lock);
1952	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1953#endif /* CONFIG_PROC_FS */
1954}
1955
1956static int __find_bmc_guid(struct device *dev, void *data)
1957{
1958	unsigned char *id = data;
1959	struct bmc_device *bmc = dev_get_drvdata(dev);
1960	return memcmp(bmc->guid, id, 16) == 0;
1961}
1962
1963static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1964					     unsigned char *guid)
1965{
1966	struct device *dev;
1967
1968	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1969	if (dev)
1970		return dev_get_drvdata(dev);
1971	else
1972		return NULL;
1973}
1974
1975struct prod_dev_id {
1976	unsigned int  product_id;
1977	unsigned char device_id;
1978};
1979
1980static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1981{
1982	struct prod_dev_id *id = data;
1983	struct bmc_device *bmc = dev_get_drvdata(dev);
1984
1985	return (bmc->id.product_id == id->product_id
1986		&& bmc->id.device_id == id->device_id);
1987}
1988
1989static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1990	struct device_driver *drv,
1991	unsigned int product_id, unsigned char device_id)
1992{
1993	struct prod_dev_id id = {
1994		.product_id = product_id,
1995		.device_id = device_id,
1996	};
1997	struct device *dev;
1998
1999	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2000	if (dev)
2001		return dev_get_drvdata(dev);
2002	else
2003		return NULL;
2004}
2005
2006static ssize_t device_id_show(struct device *dev,
2007			      struct device_attribute *attr,
2008			      char *buf)
2009{
2010	struct bmc_device *bmc = dev_get_drvdata(dev);
2011
2012	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2013}
2014
2015static ssize_t provides_dev_sdrs_show(struct device *dev,
2016				      struct device_attribute *attr,
2017				      char *buf)
2018{
2019	struct bmc_device *bmc = dev_get_drvdata(dev);
2020
2021	return snprintf(buf, 10, "%u\n",
2022			(bmc->id.device_revision & 0x80) >> 7);
2023}
2024
2025static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2026			     char *buf)
2027{
2028	struct bmc_device *bmc = dev_get_drvdata(dev);
2029
2030	return snprintf(buf, 20, "%u\n",
2031			bmc->id.device_revision & 0x0F);
2032}
2033
2034static ssize_t firmware_rev_show(struct device *dev,
2035				 struct device_attribute *attr,
2036				 char *buf)
2037{
2038	struct bmc_device *bmc = dev_get_drvdata(dev);
2039
2040	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2041			bmc->id.firmware_revision_2);
2042}
2043
2044static ssize_t ipmi_version_show(struct device *dev,
2045				 struct device_attribute *attr,
2046				 char *buf)
2047{
2048	struct bmc_device *bmc = dev_get_drvdata(dev);
2049
2050	return snprintf(buf, 20, "%u.%u\n",
2051			ipmi_version_major(&bmc->id),
2052			ipmi_version_minor(&bmc->id));
2053}
2054
2055static ssize_t add_dev_support_show(struct device *dev,
2056				    struct device_attribute *attr,
2057				    char *buf)
2058{
2059	struct bmc_device *bmc = dev_get_drvdata(dev);
2060
2061	return snprintf(buf, 10, "0x%02x\n",
2062			bmc->id.additional_device_support);
2063}
2064
2065static ssize_t manufacturer_id_show(struct device *dev,
2066				    struct device_attribute *attr,
2067				    char *buf)
2068{
2069	struct bmc_device *bmc = dev_get_drvdata(dev);
2070
2071	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2072}
2073
2074static ssize_t product_id_show(struct device *dev,
2075			       struct device_attribute *attr,
2076			       char *buf)
2077{
2078	struct bmc_device *bmc = dev_get_drvdata(dev);
2079
2080	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2081}
2082
2083static ssize_t aux_firmware_rev_show(struct device *dev,
2084				     struct device_attribute *attr,
2085				     char *buf)
2086{
2087	struct bmc_device *bmc = dev_get_drvdata(dev);
2088
2089	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2090			bmc->id.aux_firmware_revision[3],
2091			bmc->id.aux_firmware_revision[2],
2092			bmc->id.aux_firmware_revision[1],
2093			bmc->id.aux_firmware_revision[0]);
2094}
2095
2096static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2097			 char *buf)
2098{
2099	struct bmc_device *bmc = dev_get_drvdata(dev);
2100
2101	return snprintf(buf, 100, "%Lx%Lx\n",
2102			(long long) bmc->guid[0],
2103			(long long) bmc->guid[8]);
2104}
2105
2106static void remove_files(struct bmc_device *bmc)
2107{
2108	if (!bmc->dev)
2109		return;
2110
2111	device_remove_file(&bmc->dev->dev,
2112			   &bmc->device_id_attr);
2113	device_remove_file(&bmc->dev->dev,
2114			   &bmc->provides_dev_sdrs_attr);
2115	device_remove_file(&bmc->dev->dev,
2116			   &bmc->revision_attr);
2117	device_remove_file(&bmc->dev->dev,
2118			   &bmc->firmware_rev_attr);
2119	device_remove_file(&bmc->dev->dev,
2120			   &bmc->version_attr);
2121	device_remove_file(&bmc->dev->dev,
2122			   &bmc->add_dev_support_attr);
2123	device_remove_file(&bmc->dev->dev,
2124			   &bmc->manufacturer_id_attr);
2125	device_remove_file(&bmc->dev->dev,
2126			   &bmc->product_id_attr);
2127
2128	if (bmc->id.aux_firmware_revision_set)
2129		device_remove_file(&bmc->dev->dev,
2130				   &bmc->aux_firmware_rev_attr);
2131	if (bmc->guid_set)
2132		device_remove_file(&bmc->dev->dev,
2133				   &bmc->guid_attr);
2134}
2135
2136static void
2137cleanup_bmc_device(struct kref *ref)
2138{
2139	struct bmc_device *bmc;
2140
2141	bmc = container_of(ref, struct bmc_device, refcount);
2142
2143	remove_files(bmc);
2144	platform_device_unregister(bmc->dev);
2145	kfree(bmc);
2146}
2147
2148static void ipmi_bmc_unregister(ipmi_smi_t intf)
2149{
2150	struct bmc_device *bmc = intf->bmc;
2151
2152	if (intf->sysfs_name) {
2153		sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2154		kfree(intf->sysfs_name);
2155		intf->sysfs_name = NULL;
2156	}
2157	if (intf->my_dev_name) {
2158		sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2159		kfree(intf->my_dev_name);
2160		intf->my_dev_name = NULL;
2161	}
2162
2163	mutex_lock(&ipmidriver_mutex);
2164	kref_put(&bmc->refcount, cleanup_bmc_device);
2165	intf->bmc = NULL;
2166	mutex_unlock(&ipmidriver_mutex);
2167}
2168
2169static int create_files(struct bmc_device *bmc)
2170{
2171	int err;
2172
2173	bmc->device_id_attr.attr.name = "device_id";
2174	bmc->device_id_attr.attr.owner = THIS_MODULE;
2175	bmc->device_id_attr.attr.mode = S_IRUGO;
2176	bmc->device_id_attr.show = device_id_show;
2177
2178	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2179	bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2180	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2181	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2182
2183	bmc->revision_attr.attr.name = "revision";
2184	bmc->revision_attr.attr.owner = THIS_MODULE;
2185	bmc->revision_attr.attr.mode = S_IRUGO;
2186	bmc->revision_attr.show = revision_show;
2187
2188	bmc->firmware_rev_attr.attr.name = "firmware_revision";
2189	bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2190	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2191	bmc->firmware_rev_attr.show = firmware_rev_show;
2192
2193	bmc->version_attr.attr.name = "ipmi_version";
2194	bmc->version_attr.attr.owner = THIS_MODULE;
2195	bmc->version_attr.attr.mode = S_IRUGO;
2196	bmc->version_attr.show = ipmi_version_show;
2197
2198	bmc->add_dev_support_attr.attr.name = "additional_device_support";
2199	bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2200	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2201	bmc->add_dev_support_attr.show = add_dev_support_show;
2202
2203	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2204	bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2205	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2206	bmc->manufacturer_id_attr.show = manufacturer_id_show;
2207
2208	bmc->product_id_attr.attr.name = "product_id";
2209	bmc->product_id_attr.attr.owner = THIS_MODULE;
2210	bmc->product_id_attr.attr.mode = S_IRUGO;
2211	bmc->product_id_attr.show = product_id_show;
2212
2213	bmc->guid_attr.attr.name = "guid";
2214	bmc->guid_attr.attr.owner = THIS_MODULE;
2215	bmc->guid_attr.attr.mode = S_IRUGO;
2216	bmc->guid_attr.show = guid_show;
2217
2218	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2219	bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2220	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2221	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2222
2223	err = device_create_file(&bmc->dev->dev,
2224			   &bmc->device_id_attr);
2225	if (err) goto out;
2226	err = device_create_file(&bmc->dev->dev,
2227			   &bmc->provides_dev_sdrs_attr);
2228	if (err) goto out_devid;
2229	err = device_create_file(&bmc->dev->dev,
2230			   &bmc->revision_attr);
2231	if (err) goto out_sdrs;
2232	err = device_create_file(&bmc->dev->dev,
2233			   &bmc->firmware_rev_attr);
2234	if (err) goto out_rev;
2235	err = device_create_file(&bmc->dev->dev,
2236			   &bmc->version_attr);
2237	if (err) goto out_firm;
2238	err = device_create_file(&bmc->dev->dev,
2239			   &bmc->add_dev_support_attr);
2240	if (err) goto out_version;
2241	err = device_create_file(&bmc->dev->dev,
2242			   &bmc->manufacturer_id_attr);
2243	if (err) goto out_add_dev;
2244	err = device_create_file(&bmc->dev->dev,
2245			   &bmc->product_id_attr);
2246	if (err) goto out_manu;
2247	if (bmc->id.aux_firmware_revision_set) {
2248		err = device_create_file(&bmc->dev->dev,
2249				   &bmc->aux_firmware_rev_attr);
2250		if (err) goto out_prod_id;
2251	}
2252	if (bmc->guid_set) {
2253		err = device_create_file(&bmc->dev->dev,
2254				   &bmc->guid_attr);
2255		if (err) goto out_aux_firm;
2256	}
2257
2258	return 0;
2259
2260out_aux_firm:
2261	if (bmc->id.aux_firmware_revision_set)
2262		device_remove_file(&bmc->dev->dev,
2263				   &bmc->aux_firmware_rev_attr);
2264out_prod_id:
2265	device_remove_file(&bmc->dev->dev,
2266			   &bmc->product_id_attr);
2267out_manu:
2268	device_remove_file(&bmc->dev->dev,
2269			   &bmc->manufacturer_id_attr);
2270out_add_dev:
2271	device_remove_file(&bmc->dev->dev,
2272			   &bmc->add_dev_support_attr);
2273out_version:
2274	device_remove_file(&bmc->dev->dev,
2275			   &bmc->version_attr);
2276out_firm:
2277	device_remove_file(&bmc->dev->dev,
2278			   &bmc->firmware_rev_attr);
2279out_rev:
2280	device_remove_file(&bmc->dev->dev,
2281			   &bmc->revision_attr);
2282out_sdrs:
2283	device_remove_file(&bmc->dev->dev,
2284			   &bmc->provides_dev_sdrs_attr);
2285out_devid:
2286	device_remove_file(&bmc->dev->dev,
2287			   &bmc->device_id_attr);
2288out:
2289	return err;
2290}
2291
2292static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2293			     const char *sysfs_name)
2294{
2295	int               rv;
2296	struct bmc_device *bmc = intf->bmc;
2297	struct bmc_device *old_bmc;
2298	int               size;
2299	char              dummy[1];
2300
2301	mutex_lock(&ipmidriver_mutex);
2302
2303	/*
2304	 * Try to find if there is an bmc_device struct
2305	 * representing the interfaced BMC already
2306	 */
2307	if (bmc->guid_set)
2308		old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2309	else
2310		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2311						    bmc->id.product_id,
2312						    bmc->id.device_id);
2313
2314	/*
2315	 * If there is already an bmc_device, free the new one,
2316	 * otherwise register the new BMC device
2317	 */
2318	if (old_bmc) {
2319		kfree(bmc);
2320		intf->bmc = old_bmc;
2321		bmc = old_bmc;
2322
2323		kref_get(&bmc->refcount);
2324		mutex_unlock(&ipmidriver_mutex);
2325
2326		printk(KERN_INFO
2327		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2328		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2329		       bmc->id.manufacturer_id,
2330		       bmc->id.product_id,
2331		       bmc->id.device_id);
2332	} else {
2333		char name[14];
2334		unsigned char orig_dev_id = bmc->id.device_id;
2335		int warn_printed = 0;
2336
2337		snprintf(name, sizeof(name),
2338			 "ipmi_bmc.%4.4x", bmc->id.product_id);
2339
2340		while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2341						 bmc->id.product_id,
2342						 bmc->id.device_id)) {
2343			if (!warn_printed) {
2344				printk(KERN_WARNING PFX
2345				       "This machine has two different BMCs"
2346				       " with the same product id and device"
2347				       " id.  This is an error in the"
2348				       " firmware, but incrementing the"
2349				       " device id to work around the problem."
2350				       " Prod ID = 0x%x, Dev ID = 0x%x\n",
2351				       bmc->id.product_id, bmc->id.device_id);
2352				warn_printed = 1;
2353			}
2354			bmc->id.device_id++; /* Wraps at 255 */
2355			if (bmc->id.device_id == orig_dev_id) {
2356				printk(KERN_ERR PFX
2357				       "Out of device ids!\n");
2358				break;
2359			}
2360		}
2361
2362		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2363		if (!bmc->dev) {
2364			mutex_unlock(&ipmidriver_mutex);
2365			printk(KERN_ERR
2366			       "ipmi_msghandler:"
2367			       " Unable to allocate platform device\n");
2368			return -ENOMEM;
2369		}
2370		bmc->dev->dev.driver = &ipmidriver;
2371		dev_set_drvdata(&bmc->dev->dev, bmc);
2372		kref_init(&bmc->refcount);
2373
2374		rv = platform_device_add(bmc->dev);
2375		mutex_unlock(&ipmidriver_mutex);
2376		if (rv) {
2377			platform_device_put(bmc->dev);
2378			bmc->dev = NULL;
2379			printk(KERN_ERR
2380			       "ipmi_msghandler:"
2381			       " Unable to register bmc device: %d\n",
2382			       rv);
2383			/* Don't go to out_err, you can only do that if
2384			   the device is registered already. */
2385			return rv;
2386		}
2387
2388		rv = create_files(bmc);
2389		if (rv) {
2390			mutex_lock(&ipmidriver_mutex);
2391			platform_device_unregister(bmc->dev);
2392			mutex_unlock(&ipmidriver_mutex);
2393
2394			return rv;
2395		}
2396
2397		printk(KERN_INFO
2398		       "ipmi: Found new BMC (man_id: 0x%6.6x, "
2399		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2400		       bmc->id.manufacturer_id,
2401		       bmc->id.product_id,
2402		       bmc->id.device_id);
2403	}
2404
2405	/*
2406	 * create symlink from system interface device to bmc device
2407	 * and back.
2408	 */
2409	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2410	if (!intf->sysfs_name) {
2411		rv = -ENOMEM;
2412		printk(KERN_ERR
2413		       "ipmi_msghandler: allocate link to BMC: %d\n",
2414		       rv);
2415		goto out_err;
2416	}
2417
2418	rv = sysfs_create_link(&intf->si_dev->kobj,
2419			       &bmc->dev->dev.kobj, intf->sysfs_name);
2420	if (rv) {
2421		kfree(intf->sysfs_name);
2422		intf->sysfs_name = NULL;
2423		printk(KERN_ERR
2424		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2425		       rv);
2426		goto out_err;
2427	}
2428
2429	size = snprintf(dummy, 0, "ipmi%d", ifnum);
2430	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2431	if (!intf->my_dev_name) {
2432		kfree(intf->sysfs_name);
2433		intf->sysfs_name = NULL;
2434		rv = -ENOMEM;
2435		printk(KERN_ERR
2436		       "ipmi_msghandler: allocate link from BMC: %d\n",
2437		       rv);
2438		goto out_err;
2439	}
2440	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2441
2442	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2443			       intf->my_dev_name);
2444	if (rv) {
2445		kfree(intf->sysfs_name);
2446		intf->sysfs_name = NULL;
2447		kfree(intf->my_dev_name);
2448		intf->my_dev_name = NULL;
2449		printk(KERN_ERR
2450		       "ipmi_msghandler:"
2451		       " Unable to create symlink to bmc: %d\n",
2452		       rv);
2453		goto out_err;
2454	}
2455
2456	return 0;
2457
2458out_err:
2459	ipmi_bmc_unregister(intf);
2460	return rv;
2461}
2462
2463static int
2464send_guid_cmd(ipmi_smi_t intf, int chan)
2465{
2466	struct kernel_ipmi_msg            msg;
2467	struct ipmi_system_interface_addr si;
2468
2469	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2470	si.channel = IPMI_BMC_CHANNEL;
2471	si.lun = 0;
2472
2473	msg.netfn = IPMI_NETFN_APP_REQUEST;
2474	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2475	msg.data = NULL;
2476	msg.data_len = 0;
2477	return i_ipmi_request(NULL,
2478			      intf,
2479			      (struct ipmi_addr *) &si,
2480			      0,
2481			      &msg,
2482			      intf,
2483			      NULL,
2484			      NULL,
2485			      0,
2486			      intf->channels[0].address,
2487			      intf->channels[0].lun,
2488			      -1, 0);
2489}
2490
2491static void
2492guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2493{
2494	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2495	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2496	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2497		/* Not for me */
2498		return;
2499
2500	if (msg->msg.data[0] != 0) {
2501		/* Error from getting the GUID, the BMC doesn't have one. */
2502		intf->bmc->guid_set = 0;
2503		goto out;
2504	}
2505
2506	if (msg->msg.data_len < 17) {
2507		intf->bmc->guid_set = 0;
2508		printk(KERN_WARNING PFX
2509		       "guid_handler: The GUID response from the BMC was too"
2510		       " short, it was %d but should have been 17.  Assuming"
2511		       " GUID is not available.\n",
2512		       msg->msg.data_len);
2513		goto out;
2514	}
2515
2516	memcpy(intf->bmc->guid, msg->msg.data, 16);
2517	intf->bmc->guid_set = 1;
2518 out:
2519	wake_up(&intf->waitq);
2520}
2521
2522static void
2523get_guid(ipmi_smi_t intf)
2524{
2525	int rv;
2526
2527	intf->bmc->guid_set = 0x2;
2528	intf->null_user_handler = guid_handler;
2529	rv = send_guid_cmd(intf, 0);
2530	if (rv)
2531		/* Send failed, no GUID available. */
2532		intf->bmc->guid_set = 0;
2533	wait_event(intf->waitq, intf->bmc->guid_set != 2);
2534	intf->null_user_handler = NULL;
2535}
2536
2537static int
2538send_channel_info_cmd(ipmi_smi_t intf, int chan)
2539{
2540	struct kernel_ipmi_msg            msg;
2541	unsigned char                     data[1];
2542	struct ipmi_system_interface_addr si;
2543
2544	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2545	si.channel = IPMI_BMC_CHANNEL;
2546	si.lun = 0;
2547
2548	msg.netfn = IPMI_NETFN_APP_REQUEST;
2549	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2550	msg.data = data;
2551	msg.data_len = 1;
2552	data[0] = chan;
2553	return i_ipmi_request(NULL,
2554			      intf,
2555			      (struct ipmi_addr *) &si,
2556			      0,
2557			      &msg,
2558			      intf,
2559			      NULL,
2560			      NULL,
2561			      0,
2562			      intf->channels[0].address,
2563			      intf->channels[0].lun,
2564			      -1, 0);
2565}
2566
2567static void
2568channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2569{
2570	int rv = 0;
2571	int chan;
2572
2573	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2574	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2575	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2576	{
2577		/* It's the one we want */
2578		if (msg->msg.data[0] != 0) {
2579			/* Got an error from the channel, just go on. */
2580
2581			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2582				/* If the MC does not support this
2583				   command, that is legal.  We just
2584				   assume it has one IPMB at channel
2585				   zero. */
2586				intf->channels[0].medium
2587					= IPMI_CHANNEL_MEDIUM_IPMB;
2588				intf->channels[0].protocol
2589					= IPMI_CHANNEL_PROTOCOL_IPMB;
2590				rv = -ENOSYS;
2591
2592				intf->curr_channel = IPMI_MAX_CHANNELS;
2593				wake_up(&intf->waitq);
2594				goto out;
2595			}
2596			goto next_channel;
2597		}
2598		if (msg->msg.data_len < 4) {
2599			/* Message not big enough, just go on. */
2600			goto next_channel;
2601		}
2602		chan = intf->curr_channel;
2603		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2604		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2605
2606	next_channel:
2607		intf->curr_channel++;
2608		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2609			wake_up(&intf->waitq);
2610		else
2611			rv = send_channel_info_cmd(intf, intf->curr_channel);
2612
2613		if (rv) {
2614			/* Got an error somehow, just give up. */
2615			intf->curr_channel = IPMI_MAX_CHANNELS;
2616			wake_up(&intf->waitq);
2617
2618			printk(KERN_WARNING PFX
2619			       "Error sending channel information: %d\n",
2620			       rv);
2621		}
2622	}
2623 out:
2624	return;
2625}
2626
2627int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2628		      void		       *send_info,
2629		      struct ipmi_device_id    *device_id,
2630		      struct device            *si_dev,
2631		      const char               *sysfs_name,
2632		      unsigned char            slave_addr)
2633{
2634	int              i, j;
2635	int              rv;
2636	ipmi_smi_t       intf;
2637	ipmi_smi_t       tintf;
2638	struct list_head *link;
2639
2640	/* Make sure the driver is actually initialized, this handles
2641	   problems with initialization order. */
2642	if (!initialized) {
2643		rv = ipmi_init_msghandler();
2644		if (rv)
2645			return rv;
2646		/* The init code doesn't return an error if it was turned
2647		   off, but it won't initialize.  Check that. */
2648		if (!initialized)
2649			return -ENODEV;
2650	}
2651
2652	intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2653	if (!intf)
2654		return -ENOMEM;
2655	memset(intf, 0, sizeof(*intf));
2656
2657	intf->ipmi_version_major = ipmi_version_major(device_id);
2658	intf->ipmi_version_minor = ipmi_version_minor(device_id);
2659
2660	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2661	if (!intf->bmc) {
2662		kfree(intf);
2663		return -ENOMEM;
2664	}
2665	intf->intf_num = -1; /* Mark it invalid for now. */
2666	kref_init(&intf->refcount);
2667	intf->bmc->id = *device_id;
2668	intf->si_dev = si_dev;
2669	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2670		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2671		intf->channels[j].lun = 2;
2672	}
2673	if (slave_addr != 0)
2674		intf->channels[0].address = slave_addr;
2675	INIT_LIST_HEAD(&intf->users);
2676	intf->handlers = handlers;
2677	intf->send_info = send_info;
2678	spin_lock_init(&intf->seq_lock);
2679	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2680		intf->seq_table[j].inuse = 0;
2681		intf->seq_table[j].seqid = 0;
2682	}
2683	intf->curr_seq = 0;
2684#ifdef CONFIG_PROC_FS
2685	spin_lock_init(&intf->proc_entry_lock);
2686#endif
2687	spin_lock_init(&intf->waiting_msgs_lock);
2688	INIT_LIST_HEAD(&intf->waiting_msgs);
2689	spin_lock_init(&intf->events_lock);
2690	INIT_LIST_HEAD(&intf->waiting_events);
2691	intf->waiting_events_count = 0;
2692	mutex_init(&intf->cmd_rcvrs_mutex);
2693	spin_lock_init(&intf->maintenance_mode_lock);
2694	INIT_LIST_HEAD(&intf->cmd_rcvrs);
2695	init_waitqueue_head(&intf->waitq);
2696
2697	spin_lock_init(&intf->counter_lock);
2698	intf->proc_dir = NULL;
2699
2700	mutex_lock(&smi_watchers_mutex);
2701	mutex_lock(&ipmi_interfaces_mutex);
2702	/* Look for a hole in the numbers. */
2703	i = 0;
2704	link = &ipmi_interfaces;
2705	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2706		if (tintf->intf_num != i) {
2707			link = &tintf->link;
2708			break;
2709		}
2710		i++;
2711	}
2712	/* Add the new interface in numeric order. */
2713	if (i == 0)
2714		list_add_rcu(&intf->link, &ipmi_interfaces);
2715	else
2716		list_add_tail_rcu(&intf->link, link);
2717
2718	rv = handlers->start_processing(send_info, intf);
2719	if (rv)
2720		goto out;
2721
2722	get_guid(intf);
2723
2724	if ((intf->ipmi_version_major > 1)
2725	    || ((intf->ipmi_version_major == 1)
2726		&& (intf->ipmi_version_minor >= 5)))
2727	{
2728		/* Start scanning the channels to see what is
2729		   available. */
2730		intf->null_user_handler = channel_handler;
2731		intf->curr_channel = 0;
2732		rv = send_channel_info_cmd(intf, 0);
2733		if (rv)
2734			goto out;
2735
2736		/* Wait for the channel info to be read. */
2737		wait_event(intf->waitq,
2738			   intf->curr_channel >= IPMI_MAX_CHANNELS);
2739		intf->null_user_handler = NULL;
2740	} else {
2741		/* Assume a single IPMB channel at zero. */
2742		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2743		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2744	}
2745
2746	if (rv == 0)
2747		rv = add_proc_entries(intf, i);
2748
2749	rv = ipmi_bmc_register(intf, i, sysfs_name);
2750
2751 out:
2752	if (rv) {
2753		if (intf->proc_dir)
2754			remove_proc_entries(intf);
2755		intf->handlers = NULL;
2756		list_del_rcu(&intf->link);
2757		mutex_unlock(&ipmi_interfaces_mutex);
2758		mutex_unlock(&smi_watchers_mutex);
2759		synchronize_rcu();
2760		kref_put(&intf->refcount, intf_free);
2761	} else {
2762		/*
2763		 * Keep memory order straight for RCU readers.  Make
2764		 * sure everything else is committed to memory before
2765		 * setting intf_num to mark the interface valid.
2766		 */
2767		smp_wmb();
2768		intf->intf_num = i;
2769		mutex_unlock(&ipmi_interfaces_mutex);
2770		/* After this point the interface is legal to use. */
2771		call_smi_watchers(i, intf->si_dev);
2772		mutex_unlock(&smi_watchers_mutex);
2773	}
2774
2775	return rv;
2776}
2777
2778static void cleanup_smi_msgs(ipmi_smi_t intf)
2779{
2780	int              i;
2781	struct seq_table *ent;
2782
2783	/* No need for locks, the interface is down. */
2784	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2785		ent = &(intf->seq_table[i]);
2786		if (!ent->inuse)
2787			continue;
2788		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
2789	}
2790}
2791
2792int ipmi_unregister_smi(ipmi_smi_t intf)
2793{
2794	struct ipmi_smi_watcher *w;
2795	int    intf_num = intf->intf_num;
2796
2797	ipmi_bmc_unregister(intf);
2798
2799	mutex_lock(&smi_watchers_mutex);
2800	mutex_lock(&ipmi_interfaces_mutex);
2801	intf->intf_num = -1;
2802	intf->handlers = NULL;
2803	list_del_rcu(&intf->link);
2804	mutex_unlock(&ipmi_interfaces_mutex);
2805	synchronize_rcu();
2806
2807	cleanup_smi_msgs(intf);
2808
2809	remove_proc_entries(intf);
2810
2811	/* Call all the watcher interfaces to tell them that
2812	   an interface is gone. */
2813	list_for_each_entry(w, &smi_watchers, link)
2814		w->smi_gone(intf_num);
2815	mutex_unlock(&smi_watchers_mutex);
2816
2817	kref_put(&intf->refcount, intf_free);
2818	return 0;
2819}
2820
2821static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
2822				   struct ipmi_smi_msg *msg)
2823{
2824	struct ipmi_ipmb_addr ipmb_addr;
2825	struct ipmi_recv_msg  *recv_msg;
2826	unsigned long         flags;
2827
2828
2829	/* This is 11, not 10, because the response must contain a
2830	 * completion code. */
2831	if (msg->rsp_size < 11) {
2832		/* Message not big enough, just ignore it. */
2833		spin_lock_irqsave(&intf->counter_lock, flags);
2834		intf->invalid_ipmb_responses++;
2835		spin_unlock_irqrestore(&intf->counter_lock, flags);
2836		return 0;
2837	}
2838
2839	if (msg->rsp[2] != 0) {
2840		/* An error getting the response, just ignore it. */
2841		return 0;
2842	}
2843
2844	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2845	ipmb_addr.slave_addr = msg->rsp[6];
2846	ipmb_addr.channel = msg->rsp[3] & 0x0f;
2847	ipmb_addr.lun = msg->rsp[7] & 3;
2848
2849	/* It's a response from a remote entity.  Look up the sequence
2850	   number and handle the response. */
2851	if (intf_find_seq(intf,
2852			  msg->rsp[7] >> 2,
2853			  msg->rsp[3] & 0x0f,
2854			  msg->rsp[8],
2855			  (msg->rsp[4] >> 2) & (~1),
2856			  (struct ipmi_addr *) &(ipmb_addr),
2857			  &recv_msg))
2858	{
2859		/* We were unable to find the sequence number,
2860		   so just nuke the message. */
2861		spin_lock_irqsave(&intf->counter_lock, flags);
2862		intf->unhandled_ipmb_responses++;
2863		spin_unlock_irqrestore(&intf->counter_lock, flags);
2864		return 0;
2865	}
2866
2867	memcpy(recv_msg->msg_data,
2868	       &(msg->rsp[9]),
2869	       msg->rsp_size - 9);
2870	/* THe other fields matched, so no need to set them, except
2871           for netfn, which needs to be the response that was
2872           returned, not the request value. */
2873	recv_msg->msg.netfn = msg->rsp[4] >> 2;
2874	recv_msg->msg.data = recv_msg->msg_data;
2875	recv_msg->msg.data_len = msg->rsp_size - 10;
2876	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2877	spin_lock_irqsave(&intf->counter_lock, flags);
2878	intf->handled_ipmb_responses++;
2879	spin_unlock_irqrestore(&intf->counter_lock, flags);
2880	deliver_response(recv_msg);
2881
2882	return 0;
2883}
2884
2885static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
2886				   struct ipmi_smi_msg *msg)
2887{
2888	struct cmd_rcvr          *rcvr;
2889	int                      rv = 0;
2890	unsigned char            netfn;
2891	unsigned char            cmd;
2892	unsigned char            chan;
2893	ipmi_user_t              user = NULL;
2894	struct ipmi_ipmb_addr    *ipmb_addr;
2895	struct ipmi_recv_msg     *recv_msg;
2896	unsigned long            flags;
2897	struct ipmi_smi_handlers *handlers;
2898
2899	if (msg->rsp_size < 10) {
2900		/* Message not big enough, just ignore it. */
2901		spin_lock_irqsave(&intf->counter_lock, flags);
2902		intf->invalid_commands++;
2903		spin_unlock_irqrestore(&intf->counter_lock, flags);
2904		return 0;
2905	}
2906
2907	if (msg->rsp[2] != 0) {
2908		/* An error getting the response, just ignore it. */
2909		return 0;
2910	}
2911
2912	netfn = msg->rsp[4] >> 2;
2913	cmd = msg->rsp[8];
2914	chan = msg->rsp[3] & 0xf;
2915
2916	rcu_read_lock();
2917	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2918	if (rcvr) {
2919		user = rcvr->user;
2920		kref_get(&user->refcount);
2921	} else
2922		user = NULL;
2923	rcu_read_unlock();
2924
2925	if (user == NULL) {
2926		/* We didn't find a user, deliver an error response. */
2927		spin_lock_irqsave(&intf->counter_lock, flags);
2928		intf->unhandled_commands++;
2929		spin_unlock_irqrestore(&intf->counter_lock, flags);
2930
2931		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2932		msg->data[1] = IPMI_SEND_MSG_CMD;
2933		msg->data[2] = msg->rsp[3];
2934		msg->data[3] = msg->rsp[6];
2935                msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2936		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2937		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2938                /* rqseq/lun */
2939                msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2940		msg->data[8] = msg->rsp[8]; /* cmd */
2941		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2942		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2943		msg->data_size = 11;
2944
2945#ifdef DEBUG_MSGING
2946	{
2947		int m;
2948		printk("Invalid command:");
2949		for (m = 0; m < msg->data_size; m++)
2950			printk(" %2.2x", msg->data[m]);
2951		printk("\n");
2952	}
2953#endif
2954		rcu_read_lock();
2955		handlers = intf->handlers;
2956		if (handlers) {
2957			handlers->sender(intf->send_info, msg, 0);
2958			/* We used the message, so return the value
2959			   that causes it to not be freed or
2960			   queued. */
2961			rv = -1;
2962		}
2963		rcu_read_unlock();
2964	} else {
2965		/* Deliver the message to the user. */
2966		spin_lock_irqsave(&intf->counter_lock, flags);
2967		intf->handled_commands++;
2968		spin_unlock_irqrestore(&intf->counter_lock, flags);
2969
2970		recv_msg = ipmi_alloc_recv_msg();
2971		if (!recv_msg) {
2972			/* We couldn't allocate memory for the
2973                           message, so requeue it for handling
2974                           later. */
2975			rv = 1;
2976			kref_put(&user->refcount, free_user);
2977		} else {
2978			/* Extract the source address from the data. */
2979			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2980			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2981			ipmb_addr->slave_addr = msg->rsp[6];
2982			ipmb_addr->lun = msg->rsp[7] & 3;
2983			ipmb_addr->channel = msg->rsp[3] & 0xf;
2984
2985			/* Extract the rest of the message information
2986			   from the IPMB header.*/
2987			recv_msg->user = user;
2988			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2989			recv_msg->msgid = msg->rsp[7] >> 2;
2990			recv_msg->msg.netfn = msg->rsp[4] >> 2;
2991			recv_msg->msg.cmd = msg->rsp[8];
2992			recv_msg->msg.data = recv_msg->msg_data;
2993
2994			/* We chop off 10, not 9 bytes because the checksum
2995			   at the end also needs to be removed. */
2996			recv_msg->msg.data_len = msg->rsp_size - 10;
2997			memcpy(recv_msg->msg_data,
2998			       &(msg->rsp[9]),
2999			       msg->rsp_size - 10);
3000			deliver_response(recv_msg);
3001		}
3002	}
3003
3004	return rv;
3005}
3006
3007static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
3008				  struct ipmi_smi_msg *msg)
3009{
3010	struct ipmi_lan_addr  lan_addr;
3011	struct ipmi_recv_msg  *recv_msg;
3012	unsigned long         flags;
3013
3014
3015	/* This is 13, not 12, because the response must contain a
3016	 * completion code. */
3017	if (msg->rsp_size < 13) {
3018		/* Message not big enough, just ignore it. */
3019		spin_lock_irqsave(&intf->counter_lock, flags);
3020		intf->invalid_lan_responses++;
3021		spin_unlock_irqrestore(&intf->counter_lock, flags);
3022		return 0;
3023	}
3024
3025	if (msg->rsp[2] != 0) {
3026		/* An error getting the response, just ignore it. */
3027		return 0;
3028	}
3029
3030	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3031	lan_addr.session_handle = msg->rsp[4];
3032	lan_addr.remote_SWID = msg->rsp[8];
3033	lan_addr.local_SWID = msg->rsp[5];
3034	lan_addr.channel = msg->rsp[3] & 0x0f;
3035	lan_addr.privilege = msg->rsp[3] >> 4;
3036	lan_addr.lun = msg->rsp[9] & 3;
3037
3038	/* It's a response from a remote entity.  Look up the sequence
3039	   number and handle the response. */
3040	if (intf_find_seq(intf,
3041			  msg->rsp[9] >> 2,
3042			  msg->rsp[3] & 0x0f,
3043			  msg->rsp[10],
3044			  (msg->rsp[6] >> 2) & (~1),
3045			  (struct ipmi_addr *) &(lan_addr),
3046			  &recv_msg))
3047	{
3048		/* We were unable to find the sequence number,
3049		   so just nuke the message. */
3050		spin_lock_irqsave(&intf->counter_lock, flags);
3051		intf->unhandled_lan_responses++;
3052		spin_unlock_irqrestore(&intf->counter_lock, flags);
3053		return 0;
3054	}
3055
3056	memcpy(recv_msg->msg_data,
3057	       &(msg->rsp[11]),
3058	       msg->rsp_size - 11);
3059	/* The other fields matched, so no need to set them, except
3060           for netfn, which needs to be the response that was
3061           returned, not the request value. */
3062	recv_msg->msg.netfn = msg->rsp[6] >> 2;
3063	recv_msg->msg.data = recv_msg->msg_data;
3064	recv_msg->msg.data_len = msg->rsp_size - 12;
3065	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3066	spin_lock_irqsave(&intf->counter_lock, flags);
3067	intf->handled_lan_responses++;
3068	spin_unlock_irqrestore(&intf->counter_lock, flags);
3069	deliver_response(recv_msg);
3070
3071	return 0;
3072}
3073
3074static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
3075				  struct ipmi_smi_msg *msg)
3076{
3077	struct cmd_rcvr          *rcvr;
3078	int                      rv = 0;
3079	unsigned char            netfn;
3080	unsigned char            cmd;
3081	unsigned char            chan;
3082	ipmi_user_t              user = NULL;
3083	struct ipmi_lan_addr     *lan_addr;
3084	struct ipmi_recv_msg     *recv_msg;
3085	unsigned long            flags;
3086
3087	if (msg->rsp_size < 12) {
3088		/* Message not big enough, just ignore it. */
3089		spin_lock_irqsave(&intf->counter_lock, flags);
3090		intf->invalid_commands++;
3091		spin_unlock_irqrestore(&intf->counter_lock, flags);
3092		return 0;
3093	}
3094
3095	if (msg->rsp[2] != 0) {
3096		/* An error getting the response, just ignore it. */
3097		return 0;
3098	}
3099
3100	netfn = msg->rsp[6] >> 2;
3101	cmd = msg->rsp[10];
3102	chan = msg->rsp[3] & 0xf;
3103
3104	rcu_read_lock();
3105	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3106	if (rcvr) {
3107		user = rcvr->user;
3108		kref_get(&user->refcount);
3109	} else
3110		user = NULL;
3111	rcu_read_unlock();
3112
3113	if (user == NULL) {
3114		/* We didn't find a user, just give up. */
3115		spin_lock_irqsave(&intf->counter_lock, flags);
3116		intf->unhandled_commands++;
3117		spin_unlock_irqrestore(&intf->counter_lock, flags);
3118
3119		rv = 0; /* Don't do anything with these messages, just
3120			   allow them to be freed. */
3121	} else {
3122		/* Deliver the message to the user. */
3123		spin_lock_irqsave(&intf->counter_lock, flags);
3124		intf->handled_commands++;
3125		spin_unlock_irqrestore(&intf->counter_lock, flags);
3126
3127		recv_msg = ipmi_alloc_recv_msg();
3128		if (!recv_msg) {
3129			/* We couldn't allocate memory for the
3130                           message, so requeue it for handling
3131                           later. */
3132			rv = 1;
3133			kref_put(&user->refcount, free_user);
3134		} else {
3135			/* Extract the source address from the data. */
3136			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3137			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3138			lan_addr->session_handle = msg->rsp[4];
3139			lan_addr->remote_SWID = msg->rsp[8];
3140			lan_addr->local_SWID = msg->rsp[5];
3141			lan_addr->lun = msg->rsp[9] & 3;
3142			lan_addr->channel = msg->rsp[3] & 0xf;
3143			lan_addr->privilege = msg->rsp[3] >> 4;
3144
3145			/* Extract the rest of the message information
3146			   from the IPMB header.*/
3147			recv_msg->user = user;
3148			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3149			recv_msg->msgid = msg->rsp[9] >> 2;
3150			recv_msg->msg.netfn = msg->rsp[6] >> 2;
3151			recv_msg->msg.cmd = msg->rsp[10];
3152			recv_msg->msg.data = recv_msg->msg_data;
3153
3154			/* We chop off 12, not 11 bytes because the checksum
3155			   at the end also needs to be removed. */
3156			recv_msg->msg.data_len = msg->rsp_size - 12;
3157			memcpy(recv_msg->msg_data,
3158			       &(msg->rsp[11]),
3159			       msg->rsp_size - 12);
3160			deliver_response(recv_msg);
3161		}
3162	}
3163
3164	return rv;
3165}
3166
3167static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3168				     struct ipmi_smi_msg  *msg)
3169{
3170	struct ipmi_system_interface_addr *smi_addr;
3171
3172	recv_msg->msgid = 0;
3173	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3174	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3175	smi_addr->channel = IPMI_BMC_CHANNEL;
3176	smi_addr->lun = msg->rsp[0] & 3;
3177	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3178	recv_msg->msg.netfn = msg->rsp[0] >> 2;
3179	recv_msg->msg.cmd = msg->rsp[1];
3180	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3181	recv_msg->msg.data = recv_msg->msg_data;
3182	recv_msg->msg.data_len = msg->rsp_size - 3;
3183}
3184
3185static int handle_read_event_rsp(ipmi_smi_t          intf,
3186				 struct ipmi_smi_msg *msg)
3187{
3188	struct ipmi_recv_msg *recv_msg, *recv_msg2;
3189	struct list_head     msgs;
3190	ipmi_user_t          user;
3191	int                  rv = 0;
3192	int                  deliver_count = 0;
3193	unsigned long        flags;
3194
3195	if (msg->rsp_size < 19) {
3196		/* Message is too small to be an IPMB event. */
3197		spin_lock_irqsave(&intf->counter_lock, flags);
3198		intf->invalid_events++;
3199		spin_unlock_irqrestore(&intf->counter_lock, flags);
3200		return 0;
3201	}
3202
3203	if (msg->rsp[2] != 0) {
3204		/* An error getting the event, just ignore it. */
3205		return 0;
3206	}
3207
3208	INIT_LIST_HEAD(&msgs);
3209
3210	spin_lock_irqsave(&intf->events_lock, flags);
3211
3212	spin_lock(&intf->counter_lock);
3213	intf->events++;
3214	spin_unlock(&intf->counter_lock);
3215
3216	/* Allocate and fill in one message for every user that is getting
3217	   events. */
3218	rcu_read_lock();
3219	list_for_each_entry_rcu(user, &intf->users, link) {
3220		if (!user->gets_events)
3221			continue;
3222
3223		recv_msg = ipmi_alloc_recv_msg();
3224		if (!recv_msg) {
3225			rcu_read_unlock();
3226			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3227						 link) {
3228				list_del(&recv_msg->link);
3229				ipmi_free_recv_msg(recv_msg);
3230			}
3231			/* We couldn't allocate memory for the
3232                           message, so requeue it for handling
3233                           later. */
3234			rv = 1;
3235			goto out;
3236		}
3237
3238		deliver_count++;
3239
3240		copy_event_into_recv_msg(recv_msg, msg);
3241		recv_msg->user = user;
3242		kref_get(&user->refcount);
3243		list_add_tail(&(recv_msg->link), &msgs);
3244	}
3245	rcu_read_unlock();
3246
3247	if (deliver_count) {
3248		/* Now deliver all the messages. */
3249		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3250			list_del(&recv_msg->link);
3251			deliver_response(recv_msg);
3252		}
3253	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3254		/* No one to receive the message, put it in queue if there's
3255		   not already too many things in the queue. */
3256		recv_msg = ipmi_alloc_recv_msg();
3257		if (!recv_msg) {
3258			/* We couldn't allocate memory for the
3259                           message, so requeue it for handling
3260                           later. */
3261			rv = 1;
3262			goto out;
3263		}
3264
3265		copy_event_into_recv_msg(recv_msg, msg);
3266		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3267		intf->waiting_events_count++;
3268	} else {
3269		/* There's too many things in the queue, discard this
3270		   message. */
3271		printk(KERN_WARNING PFX "Event queue full, discarding an"
3272		       " incoming event\n");
3273	}
3274
3275 out:
3276	spin_unlock_irqrestore(&(intf->events_lock), flags);
3277
3278	return rv;
3279}
3280
3281static int handle_bmc_rsp(ipmi_smi_t          intf,
3282			  struct ipmi_smi_msg *msg)
3283{
3284	struct ipmi_recv_msg *recv_msg;
3285	unsigned long        flags;
3286	struct ipmi_user     *user;
3287
3288	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3289	if (recv_msg == NULL)
3290	{
3291		printk(KERN_WARNING"IPMI message received with no owner. This\n"
3292			"could be because of a malformed message, or\n"
3293			"because of a hardware error.  Contact your\n"
3294			"hardware vender for assistance\n");
3295		return 0;
3296	}
3297
3298	user = recv_msg->user;
3299	/* Make sure the user still exists. */
3300	if (user && !user->valid) {
3301		/* The user for the message went away, so give up. */
3302		spin_lock_irqsave(&intf->counter_lock, flags);
3303		intf->unhandled_local_responses++;
3304		spin_unlock_irqrestore(&intf->counter_lock, flags);
3305		ipmi_free_recv_msg(recv_msg);
3306	} else {
3307		struct ipmi_system_interface_addr *smi_addr;
3308
3309		spin_lock_irqsave(&intf->counter_lock, flags);
3310		intf->handled_local_responses++;
3311		spin_unlock_irqrestore(&intf->counter_lock, flags);
3312		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3313		recv_msg->msgid = msg->msgid;
3314		smi_addr = ((struct ipmi_system_interface_addr *)
3315			    &(recv_msg->addr));
3316		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3317		smi_addr->channel = IPMI_BMC_CHANNEL;
3318		smi_addr->lun = msg->rsp[0] & 3;
3319		recv_msg->msg.netfn = msg->rsp[0] >> 2;
3320		recv_msg->msg.cmd = msg->rsp[1];
3321		memcpy(recv_msg->msg_data,
3322		       &(msg->rsp[2]),
3323		       msg->rsp_size - 2);
3324		recv_msg->msg.data = recv_msg->msg_data;
3325		recv_msg->msg.data_len = msg->rsp_size - 2;
3326		deliver_response(recv_msg);
3327	}
3328
3329	return 0;
3330}
3331
3332/* Handle a new message.  Return 1 if the message should be requeued,
3333   0 if the message should be freed, or -1 if the message should not
3334   be freed or requeued. */
3335static int handle_new_recv_msg(ipmi_smi_t          intf,
3336			       struct ipmi_smi_msg *msg)
3337{
3338	int requeue;
3339	int chan;
3340
3341#ifdef DEBUG_MSGING
3342	int m;
3343	printk("Recv:");
3344	for (m = 0; m < msg->rsp_size; m++)
3345		printk(" %2.2x", msg->rsp[m]);
3346	printk("\n");
3347#endif
3348	if (msg->rsp_size < 2) {
3349		/* Message is too small to be correct. */
3350		printk(KERN_WARNING PFX "BMC returned to small a message"
3351		       " for netfn %x cmd %x, got %d bytes\n",
3352		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3353
3354		/* Generate an error response for the message. */
3355		msg->rsp[0] = msg->data[0] | (1 << 2);
3356		msg->rsp[1] = msg->data[1];
3357		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3358		msg->rsp_size = 3;
3359	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3360		   || (msg->rsp[1] != msg->data[1]))		  /* Command */
3361	{
3362		/* The response is not even marginally correct. */
3363		printk(KERN_WARNING PFX "BMC returned incorrect response,"
3364		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3365		       (msg->data[0] >> 2) | 1, msg->data[1],
3366		       msg->rsp[0] >> 2, msg->rsp[1]);
3367
3368		/* Generate an error response for the message. */
3369		msg->rsp[0] = msg->data[0] | (1 << 2);
3370		msg->rsp[1] = msg->data[1];
3371		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3372		msg->rsp_size = 3;
3373	}
3374
3375	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3376	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3377	    && (msg->user_data != NULL))
3378	{
3379		/* It's a response to a response we sent.  For this we
3380		   deliver a send message response to the user. */
3381		struct ipmi_recv_msg     *recv_msg = msg->user_data;
3382
3383		requeue = 0;
3384		if (msg->rsp_size < 2)
3385			/* Message is too small to be correct. */
3386			goto out;
3387
3388		chan = msg->data[2] & 0x0f;
3389		if (chan >= IPMI_MAX_CHANNELS)
3390			/* Invalid channel number */
3391			goto out;
3392
3393		if (!recv_msg)
3394			goto out;
3395
3396		/* Make sure the user still exists. */
3397		if (!recv_msg->user || !recv_msg->user->valid)
3398			goto out;
3399
3400		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3401		recv_msg->msg.data = recv_msg->msg_data;
3402		recv_msg->msg.data_len = 1;
3403		recv_msg->msg_data[0] = msg->rsp[2];
3404		deliver_response(recv_msg);
3405	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3406		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3407	{
3408		/* It's from the receive queue. */
3409		chan = msg->rsp[3] & 0xf;
3410		if (chan >= IPMI_MAX_CHANNELS) {
3411			/* Invalid channel number */
3412			requeue = 0;
3413			goto out;
3414		}
3415
3416		switch (intf->channels[chan].medium) {
3417		case IPMI_CHANNEL_MEDIUM_IPMB:
3418			if (msg->rsp[4] & 0x04) {
3419				/* It's a response, so find the
3420				   requesting message and send it up. */
3421				requeue = handle_ipmb_get_msg_rsp(intf, msg);
3422			} else {
3423				/* It's a command to the SMS from some other
3424				   entity.  Handle that. */
3425				requeue = handle_ipmb_get_msg_cmd(intf, msg);
3426			}
3427			break;
3428
3429		case IPMI_CHANNEL_MEDIUM_8023LAN:
3430		case IPMI_CHANNEL_MEDIUM_ASYNC:
3431			if (msg->rsp[6] & 0x04) {
3432				/* It's a response, so find the
3433				   requesting message and send it up. */
3434				requeue = handle_lan_get_msg_rsp(intf, msg);
3435			} else {
3436				/* It's a command to the SMS from some other
3437				   entity.  Handle that. */
3438				requeue = handle_lan_get_msg_cmd(intf, msg);
3439			}
3440			break;
3441
3442		default:
3443			/* We don't handle the channel type, so just
3444			 * free the message. */
3445			requeue = 0;
3446		}
3447
3448	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3449		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3450	{
3451		/* It's an asyncronous event. */
3452		requeue = handle_read_event_rsp(intf, msg);
3453	} else {
3454		/* It's a response from the local BMC. */
3455		requeue = handle_bmc_rsp(intf, msg);
3456	}
3457
3458 out:
3459	return requeue;
3460}
3461
3462/* Handle a new message from the lower layer. */
3463void ipmi_smi_msg_received(ipmi_smi_t          intf,
3464			   struct ipmi_smi_msg *msg)
3465{
3466	unsigned long flags;
3467	int           rv;
3468
3469
3470	if ((msg->data_size >= 2)
3471	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3472	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
3473	    && (msg->user_data == NULL))
3474	{
3475		/* This is the local response to a command send, start
3476                   the timer for these.  The user_data will not be
3477                   NULL if this is a response send, and we will let
3478                   response sends just go through. */
3479
3480		/* Check for errors, if we get certain errors (ones
3481                   that mean basically we can try again later), we
3482                   ignore them and start the timer.  Otherwise we
3483                   report the error immediately. */
3484		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3485		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3486		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3487		    && (msg->rsp[2] != IPMI_BUS_ERR)
3488		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3489		{
3490			int chan = msg->rsp[3] & 0xf;
3491
3492			/* Got an error sending the message, handle it. */
3493			spin_lock_irqsave(&intf->counter_lock, flags);
3494			if (chan >= IPMI_MAX_CHANNELS)
3495				; /* This shouldn't happen */
3496			else if ((intf->channels[chan].medium
3497				  == IPMI_CHANNEL_MEDIUM_8023LAN)
3498				 || (intf->channels[chan].medium
3499				     == IPMI_CHANNEL_MEDIUM_ASYNC))
3500				intf->sent_lan_command_errs++;
3501			else
3502				intf->sent_ipmb_command_errs++;
3503			spin_unlock_irqrestore(&intf->counter_lock, flags);
3504			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3505		} else {
3506			/* The message was sent, start the timer. */
3507			intf_start_seq_timer(intf, msg->msgid);
3508		}
3509
3510		ipmi_free_smi_msg(msg);
3511		goto out;
3512	}
3513
3514	/* To preserve message order, if the list is not empty, we
3515           tack this message onto the end of the list. */
3516	spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3517	if (!list_empty(&intf->waiting_msgs)) {
3518		list_add_tail(&msg->link, &intf->waiting_msgs);
3519		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3520		goto out;
3521	}
3522	spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3523
3524	rv = handle_new_recv_msg(intf, msg);
3525	if (rv > 0) {
3526		/* Could not handle the message now, just add it to a
3527                   list to handle later. */
3528		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3529		list_add_tail(&msg->link, &intf->waiting_msgs);
3530		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3531	} else if (rv == 0) {
3532		ipmi_free_smi_msg(msg);
3533	}
3534
3535 out:
3536	return;
3537}
3538
3539void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3540{
3541	ipmi_user_t user;
3542
3543	rcu_read_lock();
3544	list_for_each_entry_rcu(user, &intf->users, link) {
3545		if (!user->handler->ipmi_watchdog_pretimeout)
3546			continue;
3547
3548		user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3549	}
3550	rcu_read_unlock();
3551}
3552
3553
3554static struct ipmi_smi_msg *
3555smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3556		  unsigned char seq, long seqid)
3557{
3558	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3559	if (!smi_msg)
3560		/* If we can't allocate the message, then just return, we
3561		   get 4 retries, so this should be ok. */
3562		return NULL;
3563
3564	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3565	smi_msg->data_size = recv_msg->msg.data_len;
3566	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3567
3568#ifdef DEBUG_MSGING
3569	{
3570		int m;
3571		printk("Resend: ");
3572		for (m = 0; m < smi_msg->data_size; m++)
3573			printk(" %2.2x", smi_msg->data[m]);
3574		printk("\n");
3575	}
3576#endif
3577	return smi_msg;
3578}
3579
3580static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3581			      struct list_head *timeouts, long timeout_period,
3582			      int slot, unsigned long *flags)
3583{
3584	struct ipmi_recv_msg     *msg;
3585	struct ipmi_smi_handlers *handlers;
3586
3587	if (intf->intf_num == -1)
3588		return;
3589
3590	if (!ent->inuse)
3591		return;
3592
3593	ent->timeout -= timeout_period;
3594	if (ent->timeout > 0)
3595		return;
3596
3597	if (ent->retries_left == 0) {
3598		/* The message has used all its retries. */
3599		ent->inuse = 0;
3600		msg = ent->recv_msg;
3601		list_add_tail(&msg->link, timeouts);
3602		spin_lock(&intf->counter_lock);
3603		if (ent->broadcast)
3604			intf->timed_out_ipmb_broadcasts++;
3605		else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3606			intf->timed_out_lan_commands++;
3607		else
3608			intf->timed_out_ipmb_commands++;
3609		spin_unlock(&intf->counter_lock);
3610	} else {
3611		struct ipmi_smi_msg *smi_msg;
3612		/* More retries, send again. */
3613
3614		/* Start with the max timer, set to normal
3615		   timer after the message is sent. */
3616		ent->timeout = MAX_MSG_TIMEOUT;
3617		ent->retries_left--;
3618		spin_lock(&intf->counter_lock);
3619		if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3620			intf->retransmitted_lan_commands++;
3621		else
3622			intf->retransmitted_ipmb_commands++;
3623		spin_unlock(&intf->counter_lock);
3624
3625		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
3626					    ent->seqid);
3627		if (!smi_msg)
3628			return;
3629
3630		spin_unlock_irqrestore(&intf->seq_lock, *flags);
3631
3632		/* Send the new message.  We send with a zero
3633		 * priority.  It timed out, I doubt time is
3634		 * that critical now, and high priority
3635		 * messages are really only for messages to the
3636		 * local MC, which don't get resent. */
3637		handlers = intf->handlers;
3638		if (handlers)
3639			intf->handlers->sender(intf->send_info,
3640					       smi_msg, 0);
3641		else
3642			ipmi_free_smi_msg(smi_msg);
3643
3644		spin_lock_irqsave(&intf->seq_lock, *flags);
3645	}
3646}
3647
3648static void ipmi_timeout_handler(long timeout_period)
3649{
3650	ipmi_smi_t           intf;
3651	struct list_head     timeouts;
3652	struct ipmi_recv_msg *msg, *msg2;
3653	struct ipmi_smi_msg  *smi_msg, *smi_msg2;
3654	unsigned long        flags;
3655	int                  i;
3656
3657	rcu_read_lock();
3658	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3659		/* See if any waiting messages need to be processed. */
3660		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3661		list_for_each_entry_safe(smi_msg, smi_msg2,
3662					 &intf->waiting_msgs, link) {
3663			if (!handle_new_recv_msg(intf, smi_msg)) {
3664				list_del(&smi_msg->link);
3665				ipmi_free_smi_msg(smi_msg);
3666			} else {
3667				/* To preserve message order, quit if we
3668				   can't handle a message. */
3669				break;
3670			}
3671		}
3672		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3673
3674		/* Go through the seq table and find any messages that
3675		   have timed out, putting them in the timeouts
3676		   list. */
3677		INIT_LIST_HEAD(&timeouts);
3678		spin_lock_irqsave(&intf->seq_lock, flags);
3679		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3680			check_msg_timeout(intf, &(intf->seq_table[i]),
3681					  &timeouts, timeout_period, i,
3682					  &flags);
3683		spin_unlock_irqrestore(&intf->seq_lock, flags);
3684
3685		list_for_each_entry_safe(msg, msg2, &timeouts, link)
3686			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3687
3688		/*
3689		 * Maintenance mode handling.  Check the timeout
3690		 * optimistically before we claim the lock.  It may
3691		 * mean a timeout gets missed occasionally, but that
3692		 * only means the timeout gets extended by one period
3693		 * in that case.  No big deal, and it avoids the lock
3694		 * most of the time.
3695		 */
3696		if (intf->auto_maintenance_timeout > 0) {
3697			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
3698			if (intf->auto_maintenance_timeout > 0) {
3699				intf->auto_maintenance_timeout
3700					-= timeout_period;
3701				if (!intf->maintenance_mode
3702				    && (intf->auto_maintenance_timeout <= 0))
3703				{
3704					intf->maintenance_mode_enable = 0;
3705					maintenance_mode_update(intf);
3706				}
3707			}
3708			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
3709					       flags);
3710		}
3711	}
3712	rcu_read_unlock();
3713}
3714
3715static void ipmi_request_event(void)
3716{
3717	ipmi_smi_t               intf;
3718	struct ipmi_smi_handlers *handlers;
3719
3720	rcu_read_lock();
3721	/* Called from the timer, no need to check if handlers is
3722	 * valid. */
3723	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3724		/* No event requests when in maintenance mode. */
3725		if (intf->maintenance_mode_enable)
3726			continue;
3727
3728		handlers = intf->handlers;
3729		if (handlers)
3730			handlers->request_events(intf->send_info);
3731	}
3732	rcu_read_unlock();
3733}
3734
3735static struct timer_list ipmi_timer;
3736
3737/* Call every ~100 ms. */
3738#define IPMI_TIMEOUT_TIME	100
3739
3740/* How many jiffies does it take to get to the timeout time. */
3741#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
3742
3743/* Request events from the queue every second (this is the number of
3744   IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
3745   future, IPMI will add a way to know immediately if an event is in
3746   the queue and this silliness can go away. */
3747#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
3748
3749static atomic_t stop_operation;
3750static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3751
3752static void ipmi_timeout(unsigned long data)
3753{
3754	if (atomic_read(&stop_operation))
3755		return;
3756
3757	ticks_to_req_ev--;
3758	if (ticks_to_req_ev == 0) {
3759		ipmi_request_event();
3760		ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3761	}
3762
3763	ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3764
3765	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3766}
3767
3768
3769static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3770static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3771
3772static void free_smi_msg(struct ipmi_smi_msg *msg)
3773{
3774	atomic_dec(&smi_msg_inuse_count);
3775	kfree(msg);
3776}
3777
3778struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3779{
3780	struct ipmi_smi_msg *rv;
3781	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3782	if (rv) {
3783		rv->done = free_smi_msg;
3784		rv->user_data = NULL;
3785		atomic_inc(&smi_msg_inuse_count);
3786	}
3787	return rv;
3788}
3789
3790static void free_recv_msg(struct ipmi_recv_msg *msg)
3791{
3792	atomic_dec(&recv_msg_inuse_count);
3793	kfree(msg);
3794}
3795
3796struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3797{
3798	struct ipmi_recv_msg *rv;
3799
3800	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3801	if (rv) {
3802		rv->user = NULL;
3803		rv->done = free_recv_msg;
3804		atomic_inc(&recv_msg_inuse_count);
3805	}
3806	return rv;
3807}
3808
3809void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3810{
3811	if (msg->user)
3812		kref_put(&msg->user->refcount, free_user);
3813	msg->done(msg);
3814}
3815
3816#ifdef CONFIG_IPMI_PANIC_EVENT
3817
3818static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3819{
3820}
3821
3822static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3823{
3824}
3825
3826#ifdef CONFIG_IPMI_PANIC_STRING
3827static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3828{
3829	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3830	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3831	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3832	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3833	{
3834		/* A get event receiver command, save it. */
3835		intf->event_receiver = msg->msg.data[1];
3836		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
3837	}
3838}
3839
3840static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3841{
3842	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3843	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3844	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3845	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3846	{
3847		/* A get device id command, save if we are an event
3848		   receiver or generator. */
3849		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3850		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
3851	}
3852}
3853#endif
3854
3855static void send_panic_events(char *str)
3856{
3857	struct kernel_ipmi_msg            msg;
3858	ipmi_smi_t                        intf;
3859	unsigned char                     data[16];
3860	struct ipmi_system_interface_addr *si;
3861	struct ipmi_addr                  addr;
3862	struct ipmi_smi_msg               smi_msg;
3863	struct ipmi_recv_msg              recv_msg;
3864
3865	si = (struct ipmi_system_interface_addr *) &addr;
3866	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3867	si->channel = IPMI_BMC_CHANNEL;
3868	si->lun = 0;
3869
3870	/* Fill in an event telling that we have failed. */
3871	msg.netfn = 0x04; /* Sensor or Event. */
3872	msg.cmd = 2; /* Platform event command. */
3873	msg.data = data;
3874	msg.data_len = 8;
3875	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3876	data[1] = 0x03; /* This is for IPMI 1.0. */
3877	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3878	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3879	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3880
3881	/* Put a few breadcrumbs in.  Hopefully later we can add more things
3882	   to make the panic events more useful. */
3883	if (str) {
3884		data[3] = str[0];
3885		data[6] = str[1];
3886		data[7] = str[2];
3887	}
3888
3889	smi_msg.done = dummy_smi_done_handler;
3890	recv_msg.done = dummy_recv_done_handler;
3891
3892	/* For every registered interface, send the event. */
3893	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3894		if (!intf->handlers)
3895			/* Interface is not ready. */
3896			continue;
3897
3898		/* Send the event announcing the panic. */
3899		intf->handlers->set_run_to_completion(intf->send_info, 1);
3900		i_ipmi_request(NULL,
3901			       intf,
3902			       &addr,
3903			       0,
3904			       &msg,
3905			       intf,
3906			       &smi_msg,
3907			       &recv_msg,
3908			       0,
3909			       intf->channels[0].address,
3910			       intf->channels[0].lun,
3911			       0, 1); /* Don't retry, and don't wait. */
3912	}
3913
3914#ifdef CONFIG_IPMI_PANIC_STRING
3915	/* On every interface, dump a bunch of OEM event holding the
3916	   string. */
3917	if (!str)
3918		return;
3919
3920	/* For every registered interface, send the event. */
3921	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3922		char                  *p = str;
3923		struct ipmi_ipmb_addr *ipmb;
3924		int                   j;
3925
3926		if (intf->intf_num == -1)
3927			/* Interface was not ready yet. */
3928			continue;
3929
3930		/*
3931		 * intf_num is used as an marker to tell if the
3932		 * interface is valid.  Thus we need a read barrier to
3933		 * make sure data fetched before checking intf_num
3934		 * won't be used.
3935		 */
3936		smp_rmb();
3937
3938		/* First job here is to figure out where to send the
3939		   OEM events.  There's no way in IPMI to send OEM
3940		   events using an event send command, so we have to
3941		   find the SEL to put them in and stick them in
3942		   there. */
3943
3944		/* Get capabilities from the get device id. */
3945		intf->local_sel_device = 0;
3946		intf->local_event_generator = 0;
3947		intf->event_receiver = 0;
3948
3949		/* Request the device info from the local MC. */
3950		msg.netfn = IPMI_NETFN_APP_REQUEST;
3951		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
3952		msg.data = NULL;
3953		msg.data_len = 0;
3954		intf->null_user_handler = device_id_fetcher;
3955		i_ipmi_request(NULL,
3956			       intf,
3957			       &addr,
3958			       0,
3959			       &msg,
3960			       intf,
3961			       &smi_msg,
3962			       &recv_msg,
3963			       0,
3964			       intf->channels[0].address,
3965			       intf->channels[0].lun,
3966			       0, 1); /* Don't retry, and don't wait. */
3967
3968		if (intf->local_event_generator) {
3969			/* Request the event receiver from the local MC. */
3970			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3971			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3972			msg.data = NULL;
3973			msg.data_len = 0;
3974			intf->null_user_handler = event_receiver_fetcher;
3975			i_ipmi_request(NULL,
3976				       intf,
3977				       &addr,
3978				       0,
3979				       &msg,
3980				       intf,
3981				       &smi_msg,
3982				       &recv_msg,
3983				       0,
3984				       intf->channels[0].address,
3985				       intf->channels[0].lun,
3986				       0, 1); /* no retry, and no wait. */
3987		}
3988		intf->null_user_handler = NULL;
3989
3990		/* Validate the event receiver.  The low bit must not
3991		   be 1 (it must be a valid IPMB address), it cannot
3992		   be zero, and it must not be my address. */
3993                if (((intf->event_receiver & 1) == 0)
3994		    && (intf->event_receiver != 0)
3995		    && (intf->event_receiver != intf->channels[0].address))
3996		{
3997			/* The event receiver is valid, send an IPMB
3998			   message. */
3999			ipmb = (struct ipmi_ipmb_addr *) &addr;
4000			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4001			ipmb->channel = 0;
4002			ipmb->lun = intf->event_receiver_lun;
4003			ipmb->slave_addr = intf->event_receiver;
4004		} else if (intf->local_sel_device) {
4005			/* The event receiver was not valid (or was
4006			   me), but I am an SEL device, just dump it
4007			   in my SEL. */
4008			si = (struct ipmi_system_interface_addr *) &addr;
4009			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4010			si->channel = IPMI_BMC_CHANNEL;
4011			si->lun = 0;
4012		} else
4013			continue; /* No where to send the event. */
4014
4015
4016		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4017		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4018		msg.data = data;
4019		msg.data_len = 16;
4020
4021		j = 0;
4022		while (*p) {
4023			int size = strlen(p);
4024
4025			if (size > 11)
4026				size = 11;
4027			data[0] = 0;
4028			data[1] = 0;
4029			data[2] = 0xf0; /* OEM event without timestamp. */
4030			data[3] = intf->channels[0].address;
4031			data[4] = j++; /* sequence # */
4032			/* Always give 11 bytes, so strncpy will fill
4033			   it with zeroes for me. */
4034			strncpy(data+5, p, 11);
4035			p += size;
4036
4037			i_ipmi_request(NULL,
4038				       intf,
4039				       &addr,
4040				       0,
4041				       &msg,
4042				       intf,
4043				       &smi_msg,
4044				       &recv_msg,
4045				       0,
4046				       intf->channels[0].address,
4047				       intf->channels[0].lun,
4048				       0, 1); /* no retry, and no wait. */
4049		}
4050	}
4051#endif /* CONFIG_IPMI_PANIC_STRING */
4052}
4053#endif /* CONFIG_IPMI_PANIC_EVENT */
4054
4055static int has_panicked;
4056
4057static int panic_event(struct notifier_block *this,
4058		       unsigned long         event,
4059                       void                  *ptr)
4060{
4061	ipmi_smi_t intf;
4062
4063	if (has_panicked)
4064		return NOTIFY_DONE;
4065	has_panicked = 1;
4066
4067	/* For every registered interface, set it to run to completion. */
4068	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4069		if (!intf->handlers)
4070			/* Interface is not ready. */
4071			continue;
4072
4073		intf->handlers->set_run_to_completion(intf->send_info, 1);
4074	}
4075
4076#ifdef CONFIG_IPMI_PANIC_EVENT
4077	send_panic_events(ptr);
4078#endif
4079
4080	return NOTIFY_DONE;
4081}
4082
4083static struct notifier_block panic_block = {
4084	.notifier_call	= panic_event,
4085	.next		= NULL,
4086	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
4087};
4088
4089static int ipmi_init_msghandler(void)
4090{
4091	int rv;
4092
4093	if (initialized)
4094		return 0;
4095
4096	rv = driver_register(&ipmidriver);
4097	if (rv) {
4098		printk(KERN_ERR PFX "Could not register IPMI driver\n");
4099		return rv;
4100	}
4101
4102	printk(KERN_INFO "ipmi message handler version "
4103	       IPMI_DRIVER_VERSION "\n");
4104
4105#ifdef CONFIG_PROC_FS
4106	proc_ipmi_root = proc_mkdir("ipmi", NULL);
4107	if (!proc_ipmi_root) {
4108	    printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4109	    return -ENOMEM;
4110	}
4111
4112	proc_ipmi_root->owner = THIS_MODULE;
4113#endif /* CONFIG_PROC_FS */
4114
4115	setup_timer(&ipmi_timer, ipmi_timeout, 0);
4116	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4117
4118	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
4119
4120	initialized = 1;
4121
4122	return 0;
4123}
4124
4125static __init int ipmi_init_msghandler_mod(void)
4126{
4127	ipmi_init_msghandler();
4128	return 0;
4129}
4130
4131static __exit void cleanup_ipmi(void)
4132{
4133	int count;
4134
4135	if (!initialized)
4136		return;
4137
4138	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4139
4140	/* This can't be called if any interfaces exist, so no worry about
4141	   shutting down the interfaces. */
4142
4143	/* Tell the timer to stop, then wait for it to stop.  This avoids
4144	   problems with race conditions removing the timer here. */
4145	atomic_inc(&stop_operation);
4146	del_timer_sync(&ipmi_timer);
4147
4148#ifdef CONFIG_PROC_FS
4149	remove_proc_entry(proc_ipmi_root->name, &proc_root);
4150#endif /* CONFIG_PROC_FS */
4151
4152	driver_unregister(&ipmidriver);
4153
4154	initialized = 0;
4155
4156	/* Check for buffer leaks. */
4157	count = atomic_read(&smi_msg_inuse_count);
4158	if (count != 0)
4159		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4160		       count);
4161	count = atomic_read(&recv_msg_inuse_count);
4162	if (count != 0)
4163		printk(KERN_WARNING PFX "recv message count %d at exit\n",
4164		       count);
4165}
4166module_exit(cleanup_ipmi);
4167
4168module_init(ipmi_init_msghandler_mod);
4169MODULE_LICENSE("GPL");
4170MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4171MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4172MODULE_VERSION(IPMI_DRIVER_VERSION);
4173
4174EXPORT_SYMBOL(ipmi_create_user);
4175EXPORT_SYMBOL(ipmi_destroy_user);
4176EXPORT_SYMBOL(ipmi_get_version);
4177EXPORT_SYMBOL(ipmi_request_settime);
4178EXPORT_SYMBOL(ipmi_request_supply_msgs);
4179EXPORT_SYMBOL(ipmi_register_smi);
4180EXPORT_SYMBOL(ipmi_unregister_smi);
4181EXPORT_SYMBOL(ipmi_register_for_cmd);
4182EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4183EXPORT_SYMBOL(ipmi_smi_msg_received);
4184EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4185EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4186EXPORT_SYMBOL(ipmi_addr_length);
4187EXPORT_SYMBOL(ipmi_validate_addr);
4188EXPORT_SYMBOL(ipmi_set_gets_events);
4189EXPORT_SYMBOL(ipmi_smi_watcher_register);
4190EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4191EXPORT_SYMBOL(ipmi_set_my_address);
4192EXPORT_SYMBOL(ipmi_get_my_address);
4193EXPORT_SYMBOL(ipmi_set_my_LUN);
4194EXPORT_SYMBOL(ipmi_get_my_LUN);
4195EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4196EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
4197EXPORT_SYMBOL(ipmi_free_recv_msg);
4198